From 51af2e2203abf41f42f41decc05e6a49e6b6d40d Mon Sep 17 00:00:00 2001 From: Shivansh Arora Date: Tue, 9 Jul 2024 13:18:25 +0530 Subject: [PATCH 01/12] Add UTs for RemoteIndexMetadataManager (#14660) Signed-off-by: Shivansh Arora Co-authored-by: Arpit-Bandejiya --- .../remote/RemoteIndexMetadataManager.java | 21 -- .../RemoteIndexMetadataManagerTests.java | 190 ++++++++++++++++++ 2 files changed, 190 insertions(+), 21 deletions(-) create mode 100644 server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java index a84161b202a22..c595f19279354 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteIndexMetadataManager.java @@ -26,10 +26,7 @@ import org.opensearch.threadpool.ThreadPool; import java.io.IOException; -import java.util.HashMap; import java.util.Locale; -import java.util.Map; -import java.util.Objects; /** * A Manager which provides APIs to write and read Index Metadata to remote store @@ -136,24 +133,6 @@ IndexMetadata getIndexMetadata(ClusterMetadataManifest.UploadedIndexMetadata upl } } - /** - * Fetch latest index metadata from remote cluster state - * - * @param clusterMetadataManifest manifest file of cluster - * @param clusterUUID uuid of cluster state to refer to in remote - * @return {@code Map} latest IndexUUID to IndexMetadata map - */ - Map getIndexMetadataMap(String clusterUUID, ClusterMetadataManifest clusterMetadataManifest) { - assert Objects.equals(clusterUUID, clusterMetadataManifest.getClusterUUID()) - : "Corrupt ClusterMetadataManifest found. Cluster UUID mismatch."; - Map remoteIndexMetadata = new HashMap<>(); - for (ClusterMetadataManifest.UploadedIndexMetadata uploadedIndexMetadata : clusterMetadataManifest.getIndices()) { - IndexMetadata indexMetadata = getIndexMetadata(uploadedIndexMetadata, clusterUUID); - remoteIndexMetadata.put(uploadedIndexMetadata.getIndexUUID(), indexMetadata); - } - return remoteIndexMetadata; - } - public TimeValue getIndexMetadataUploadTimeout() { return this.indexMetadataUploadTimeout; } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java new file mode 100644 index 0000000000000..817fc7b55d09a --- /dev/null +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteIndexMetadataManagerTests.java @@ -0,0 +1,190 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.gateway.remote; + +import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.cluster.metadata.AliasMetadata; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.Nullable; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.TestCapturingListener; +import org.opensearch.core.action.ActionListener; +import org.opensearch.core.compress.Compressor; +import org.opensearch.core.compress.NoneCompressor; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.index.remote.RemoteStoreUtils; +import org.opensearch.index.translog.transfer.BlobStoreTransferService; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +import static org.opensearch.gateway.remote.RemoteClusterStateService.FORMAT_PARAMS; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; +import static org.opensearch.gateway.remote.RemoteClusterStateUtils.PATH_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX_METADATA_FORMAT; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyIterable; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class RemoteIndexMetadataManagerTests extends OpenSearchTestCase { + + private RemoteIndexMetadataManager remoteIndexMetadataManager; + private BlobStoreRepository blobStoreRepository; + private BlobStoreTransferService blobStoreTransferService; + private Compressor compressor; + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Before + public void setup() { + ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + blobStoreRepository = mock(BlobStoreRepository.class); + BlobPath blobPath = new BlobPath().add("random-path"); + when((blobStoreRepository.basePath())).thenReturn(blobPath); + blobStoreTransferService = mock(BlobStoreTransferService.class); + compressor = new NoneCompressor(); + when(blobStoreRepository.getCompressor()).thenReturn(compressor); + remoteIndexMetadataManager = new RemoteIndexMetadataManager( + clusterSettings, + "test-cluster", + blobStoreRepository, + blobStoreTransferService, + threadPool + ); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + threadPool.shutdown(); + } + + public void testGetAsyncIndexMetadataWriteAction_Success() throws Exception { + IndexMetadata indexMetadata = getIndexMetadata(randomAlphaOfLength(10), randomBoolean(), 
randomAlphaOfLength(10)); + BlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + BlobStore blobStore = mock(BlobStore.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + String expectedFilePrefix = String.join(DELIMITER, "metadata", RemoteStoreUtils.invertLong(indexMetadata.getVersion())); + + doAnswer((invocationOnMock -> { + invocationOnMock.getArgument(4, ActionListener.class).onResponse(null); + return null; + })).when(blobStoreTransferService).uploadBlob(any(), any(), any(), eq(WritePriority.URGENT), any(ActionListener.class)); + + remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction( + indexMetadata, + "cluster-uuid", + new LatchedActionListener<>(listener, latch) + ).run(); + latch.await(); + + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + ClusterMetadataManifest.UploadedMetadata uploadedMetadata = listener.getResult(); + assertEquals(INDEX + "--" + indexMetadata.getIndex().getName(), uploadedMetadata.getComponent()); + String uploadedFileName = uploadedMetadata.getUploadedFilename(); + String[] pathTokens = uploadedFileName.split(PATH_DELIMITER); + assertEquals(7, pathTokens.length); + assertEquals(INDEX, pathTokens[4]); + assertEquals(indexMetadata.getIndex().getUUID(), pathTokens[5]); + assertTrue(pathTokens[6].startsWith(expectedFilePrefix)); + } + + public void testGetAsyncIndexMetadataWriteAction_IOFailure() throws Exception { + IndexMetadata indexMetadata = getIndexMetadata(randomAlphaOfLength(10), randomBoolean(), randomAlphaOfLength(10)); + BlobContainer blobContainer = mock(AsyncMultiStreamBlobContainer.class); + BlobStore blobStore = mock(BlobStore.class); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + doAnswer((invocationOnMock -> { + invocationOnMock.getArgument(4, ActionListener.class).onFailure(new IOException("failure")); + return null; + })).when(blobStoreTransferService).uploadBlob(any(), any(), any(), eq(WritePriority.URGENT), any(ActionListener.class)); + + remoteIndexMetadataManager.getAsyncIndexMetadataWriteAction( + indexMetadata, + "cluster-uuid", + new LatchedActionListener<>(listener, latch) + ).run(); + latch.await(); + assertNull(listener.getResult()); + assertNotNull(listener.getFailure()); + assertTrue(listener.getFailure() instanceof RemoteStateTransferException); + } + + public void testGetAsyncIndexMetadataReadAction_Success() throws Exception { + IndexMetadata indexMetadata = getIndexMetadata(randomAlphaOfLength(10), randomBoolean(), randomAlphaOfLength(10)); + String fileName = randomAlphaOfLength(10); + fileName = fileName + DELIMITER + '2'; + when(blobStoreTransferService.downloadBlob(anyIterable(), anyString())).thenReturn( + INDEX_METADATA_FORMAT.serialize(indexMetadata, fileName, compressor, FORMAT_PARAMS).streamInput() + ); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteIndexMetadataManager.getAsyncIndexMetadataReadAction("cluster-uuid", fileName, new LatchedActionListener<>(listener, latch)) + .run(); + latch.await(); + assertNull(listener.getFailure()); + assertNotNull(listener.getResult()); + assertEquals(indexMetadata, listener.getResult().getObj()); + } + + public void testGetAsyncIndexMetadataReadAction_IOFailure() throws Exception { 
+ String fileName = randomAlphaOfLength(10); + fileName = fileName + DELIMITER + '2'; + Exception exception = new IOException("testing failure"); + doThrow(exception).when(blobStoreTransferService).downloadBlob(anyIterable(), anyString()); + TestCapturingListener listener = new TestCapturingListener<>(); + CountDownLatch latch = new CountDownLatch(1); + + remoteIndexMetadataManager.getAsyncIndexMetadataReadAction("cluster-uuid", fileName, new LatchedActionListener<>(listener, latch)) + .run(); + latch.await(); + assertNull(listener.getResult()); + assertNotNull(listener.getFailure()); + assertEquals(exception, listener.getFailure()); + } + + private IndexMetadata getIndexMetadata(String name, @Nullable Boolean writeIndex, String... aliases) { + IndexMetadata.Builder builder = IndexMetadata.builder(name) + .settings( + Settings.builder() + .put("index.version.created", Version.CURRENT.id) + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1) + ); + for (String alias : aliases) { + builder.putAlias(AliasMetadata.builder(alias).writeIndex(writeIndex).build()); + } + return builder.build(); + } +} From 2e639131b3bb88316f53bfeb9262cbcd81606d50 Mon Sep 17 00:00:00 2001 From: gaobinlong Date: Tue, 9 Jul 2024 19:29:44 +0800 Subject: [PATCH 02/12] Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes (#10959) * Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes Signed-off-by: Gao Binlong * Add more test Signed-off-by: Gao Binlong * modify change log Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Change the indexAnalyzer used by prefix field Signed-off-by: Gao Binlong * Skip old version for yaml test Signed-off-by: Gao Binlong * Optimize some code Signed-off-by: Gao Binlong * Fix test failure Signed-off-by: Gao Binlong * Modify yaml test description Signed-off-by: Gao Binlong * Remove the name parameter for setAnalyzer() Signed-off-by: Gao Binlong --------- Signed-off-by: Gao Binlong --- CHANGELOG.md | 1 + .../test/search/190_index_prefix_search.yml | 52 ++++++++++++++++++- .../index/mapper/TextFieldMapper.java | 28 +++++++--- .../index/mapper/TextFieldMapperTests.java | 51 ++++++++++++++++++ .../index/mapper/TextFieldTypeTests.java | 1 + 5 files changed, 124 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d0990db31d20..2cffa99fb66a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix bug in SBP cancellation logic ([#13259](https://github.com/opensearch-project/OpenSearch/pull/13474)) - Fix handling of Short and Byte data types in ScriptProcessor ingest pipeline ([#14379](https://github.com/opensearch-project/OpenSearch/issues/14379)) - Switch to iterative version of WKT format parser ([#14086](https://github.com/opensearch-project/OpenSearch/pull/14086)) +- Fix match_phrase_prefix_query not working on text field with multiple values and index_prefixes ([#10959](https://github.com/opensearch-project/OpenSearch/pull/10959)) - Fix the computed max shards of cluster to avoid int overflow ([#14155](https://github.com/opensearch-project/OpenSearch/pull/14155)) - Fixed rest-high-level client searchTemplate & mtermVectors endpoints to have a leading slash ([#14465](https://github.com/opensearch-project/OpenSearch/pull/14465)) - Write shard level metadata blob when snapshotting searchable snapshot indexes 
([#13190](https://github.com/opensearch-project/OpenSearch/pull/13190)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml index 25d3dd160e031..8b031c132f979 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml @@ -10,7 +10,12 @@ setup: index_prefixes: min_chars: 2 max_chars: 5 - + text_with_pos_inc_gap: + type: text + position_increment_gap: 201 + index_prefixes: + min_chars: 2 + max_chars: 5 - do: index: index: test @@ -23,6 +28,18 @@ setup: id: 2 body: { text: sentence with UPPERCASE WORDS } + - do: + index: + index: test + id: 3 + body: { text: ["foo", "b-12"] } + + - do: + index: + index: test + id: 4 + body: { text_with_pos_inc_gap: ["foo", "b-12"] } + - do: indices.refresh: index: [test] @@ -116,3 +133,36 @@ setup: ] - match: {hits.total: 1} + +# related issue: https://github.com/opensearch-project/OpenSearch/issues/9203 +--- +"search index prefixes with multiple values": + - skip: + version: " - 2.99.99" + reason: "the bug was fixed in 3.0.0" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + text: "b-12" + + - match: {hits.total: 1} + +--- +"search index prefixes with multiple values and custom position_increment_gap": + - skip: + version: " - 2.99.99" + reason: "the bug was fixed in 3.0.0" + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + text_with_pos_inc_gap: "b-12" + + - match: {hits.total: 1} diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index d0e041e68a81d..ba053a3aeee1d 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -448,7 +448,6 @@ protected PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType pft.setStoreTermVectorOffsets(true); } PrefixFieldType prefixFieldType = new PrefixFieldType(tft, fullName + "._index_prefix", indexPrefixes.get()); - prefixFieldType.setAnalyzer(analyzers.getIndexAnalyzer()); tft.setPrefixFieldType(prefixFieldType); return new PrefixFieldMapper(pft, prefixFieldType); } @@ -522,12 +521,14 @@ private static class PrefixWrappedAnalyzer extends AnalyzerWrapper { private final int minChars; private final int maxChars; private final Analyzer delegate; + private final int positionIncrementGap; - PrefixWrappedAnalyzer(Analyzer delegate, int minChars, int maxChars) { + PrefixWrappedAnalyzer(Analyzer delegate, int minChars, int maxChars, int positionIncrementGap) { super(delegate.getReuseStrategy()); this.delegate = delegate; this.minChars = minChars; this.maxChars = maxChars; + this.positionIncrementGap = positionIncrementGap; } @Override @@ -535,6 +536,11 @@ protected Analyzer getWrappedAnalyzer(String fieldName) { return delegate; } + @Override + public int getPositionIncrementGap(String fieldName) { + return positionIncrementGap; + } + @Override protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { TokenFilter filter = new EdgeNGramTokenFilter(components.getTokenStream(), minChars, maxChars, false); @@ -588,17 +594,18 @@ static final class PrefixFieldType extends StringFieldType { final int 
minChars; final int maxChars; - final TextFieldType parentField; + final TextFieldType parent; PrefixFieldType(TextFieldType parentField, String name, PrefixConfig config) { this(parentField, name, config.minChars, config.maxChars); } - PrefixFieldType(TextFieldType parentField, String name, int minChars, int maxChars) { - super(name, true, false, false, parentField.getTextSearchInfo(), Collections.emptyMap()); + PrefixFieldType(TextFieldType parent, String name, int minChars, int maxChars) { + super(name, true, false, false, parent.getTextSearchInfo(), Collections.emptyMap()); this.minChars = minChars; this.maxChars = maxChars; - this.parentField = parentField; + this.parent = parent; + setAnalyzer(parent.indexAnalyzer()); } @Override @@ -609,8 +616,13 @@ public ValueFetcher valueFetcher(QueryShardContext context, SearchLookup searchL } void setAnalyzer(NamedAnalyzer delegate) { + String analyzerName = delegate.name(); setIndexAnalyzer( - new NamedAnalyzer(delegate.name(), AnalyzerScope.INDEX, new PrefixWrappedAnalyzer(delegate.analyzer(), minChars, maxChars)) + new NamedAnalyzer( + analyzerName, + AnalyzerScope.INDEX, + new PrefixWrappedAnalyzer(delegate.analyzer(), minChars, maxChars, delegate.getPositionIncrementGap(analyzerName)) + ) ); } @@ -639,7 +651,7 @@ public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, bool Automaton automaton = Operations.concatenate(automata); AutomatonQuery query = AutomatonQueries.createAutomatonQuery(new Term(name(), value + "*"), automaton, method); return new BooleanQuery.Builder().add(query, BooleanClause.Occur.SHOULD) - .add(new TermQuery(new Term(parentField.name(), value)), BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(parent.name(), value)), BooleanClause.Occur.SHOULD) .build(); } diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java index a22bfa5e845b1..0253caea9759d 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldMapperTests.java @@ -380,6 +380,57 @@ public void testIndexOptions() throws IOException { } } + public void testPositionIncrementGapOnIndexPrefixField() throws IOException { + // test default position_increment_gap + MapperService mapperService = createMapperService( + fieldMapping(b -> b.field("type", "text").field("analyzer", "default").startObject("index_prefixes").endObject()) + ); + ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b 12" }))); + + withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> { + TermsEnum terms = getOnlyLeafReader(reader).terms("field").iterator(); + assertTrue(terms.seekExact(new BytesRef("12"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 2, postings.nextPosition()); + }); + + withLuceneIndex(mapperService, iw -> iw.addDocument(doc.rootDoc()), reader -> { + TermsEnum terms = getOnlyLeafReader(reader).terms("field._index_prefix").iterator(); + assertTrue(terms.seekExact(new BytesRef("12"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(TextFieldMapper.Defaults.POSITION_INCREMENT_GAP + 2, postings.nextPosition()); + }); + + // test custom position_increment_gap + final 
int positionIncrementGap = randomIntBetween(1, 1000); + MapperService mapperService2 = createMapperService( + fieldMapping( + b -> b.field("type", "text") + .field("position_increment_gap", positionIncrementGap) + .field("analyzer", "default") + .startObject("index_prefixes") + .endObject() + ) + ); + ParsedDocument doc2 = mapperService2.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b 12" }))); + withLuceneIndex(mapperService2, iw -> iw.addDocument(doc2.rootDoc()), reader -> { + TermsEnum terms = getOnlyLeafReader(reader).terms("field").iterator(); + assertTrue(terms.seekExact(new BytesRef("12"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(positionIncrementGap + 2, postings.nextPosition()); + }); + withLuceneIndex(mapperService2, iw -> iw.addDocument(doc2.rootDoc()), reader -> { + TermsEnum terms = getOnlyLeafReader(reader).terms("field._index_prefix").iterator(); + assertTrue(terms.seekExact(new BytesRef("12"))); + PostingsEnum postings = terms.postings(null, PostingsEnum.POSITIONS); + assertEquals(0, postings.nextDoc()); + assertEquals(positionIncrementGap + 2, postings.nextPosition()); + }); + } + public void testDefaultPositionIncrementGap() throws IOException { MapperService mapperService = createMapperService(fieldMapping(this::minimalMapping)); ParsedDocument doc = mapperService.documentMapper().parse(source(b -> b.array("field", new String[] { "a", "b" }))); diff --git a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java index 9c177bbec61fd..e672f94819541 100644 --- a/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java +++ b/server/src/test/java/org/opensearch/index/mapper/TextFieldTypeTests.java @@ -167,6 +167,7 @@ public void testFuzzyQuery() { public void testIndexPrefixes() { TextFieldType ft = createFieldType(true); + ft.setIndexAnalyzer(Lucene.STANDARD_ANALYZER); ft.setPrefixFieldType(new TextFieldMapper.PrefixFieldType(ft, "field._index_prefix", 2, 10)); Query q = ft.prefixQuery("goin", CONSTANT_SCORE_REWRITE, false, randomMockShardContext()); From 6d0484a75bbb5897f29660ede02ced7c796ba8cf Mon Sep 17 00:00:00 2001 From: rishavz_sagar Date: Tue, 9 Jul 2024 20:20:38 +0530 Subject: [PATCH 03/12] Offline calculation of total shard per node and caching it for weight calculation inside LocalShardBalancer (#14675) Signed-off-by: RS146BIJAY --- .../allocation/allocator/LocalShardsBalancer.java | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java index 6978c988fd648..00eb79add9f1d 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/allocator/LocalShardsBalancer.java @@ -68,6 +68,7 @@ public class LocalShardsBalancer extends ShardsBalancer { private final float avgPrimaryShardsPerNode; private final BalancedShardsAllocator.NodeSorter sorter; private final Set inEligibleTargetNode; + private int totalShardCount = 0; public LocalShardsBalancer( Logger logger, @@ -125,8 +126,7 @@ public float avgPrimaryShardsPerNode() { */ @Override public float avgShardsPerNode() { - float totalShards = 
nodes.values().stream().map(BalancedShardsAllocator.ModelNode::numShards).reduce(0, Integer::sum); - return totalShards / nodes.size(); + return totalShardCount / nodes.size(); } /** @@ -598,6 +598,7 @@ void moveShards() { final BalancedShardsAllocator.ModelNode sourceNode = nodes.get(shardRouting.currentNodeId()); final BalancedShardsAllocator.ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); sourceNode.removeShard(shardRouting); + --totalShardCount; Tuple relocatingShards = routingNodes.relocateShard( shardRouting, targetNode.getNodeId(), @@ -605,6 +606,7 @@ void moveShards() { allocation.changes() ); targetNode.addShard(relocatingShards.v2()); + ++totalShardCount; if (logger.isTraceEnabled()) { logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); } @@ -724,6 +726,7 @@ private Map buildModelFromAssigned() /* we skip relocating shards here since we expect an initializing shard with the same id coming in */ if (RoutingPool.LOCAL_ONLY.equals(RoutingPool.getShardPool(shard, allocation)) && shard.state() != RELOCATING) { node.addShard(shard); + ++totalShardCount; if (logger.isTraceEnabled()) { logger.trace("Assigned shard [{}] to node [{}]", shard, node.getNodeId()); } @@ -815,6 +818,7 @@ void allocateUnassigned() { ); shard = routingNodes.initializeShard(shard, minNode.getNodeId(), null, shardSize, allocation.changes()); minNode.addShard(shard); + ++totalShardCount; if (!shard.primary()) { // copy over the same replica shards to the secondary array so they will get allocated // in a subsequent iteration, allowing replicas of other shards to be allocated first @@ -844,6 +848,7 @@ void allocateUnassigned() { allocation.routingTable() ); minNode.addShard(shard.initialize(minNode.getNodeId(), null, shardSize)); + ++totalShardCount; } else { if (logger.isTraceEnabled()) { logger.trace("No Node found to assign shard [{}]", shard); @@ -1011,18 +1016,21 @@ private boolean tryRelocateShard(BalancedShardsAllocator.ModelNode minNode, Bala } final Decision decision = new Decision.Multi().add(allocationDecision).add(rebalanceDecision); maxNode.removeShard(shard); + --totalShardCount; long shardSize = allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); if (decision.type() == Decision.Type.YES) { /* only allocate on the cluster if we are not throttled */ logger.debug("Relocate [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); minNode.addShard(routingNodes.relocateShard(shard, minNode.getNodeId(), shardSize, allocation.changes()).v1()); + ++totalShardCount; return true; } else { /* allocate on the model even if throttled */ logger.debug("Simulate relocation of [{}] from [{}] to [{}]", shard, maxNode.getNodeId(), minNode.getNodeId()); assert decision.type() == Decision.Type.THROTTLE; minNode.addShard(shard.relocate(minNode.getNodeId(), shardSize)); + ++totalShardCount; return false; } } From acc46316550ee203851d5c622d3b4724646d3f3e Mon Sep 17 00:00:00 2001 From: Chenyang Ji Date: Tue, 9 Jul 2024 11:14:55 -0700 Subject: [PATCH 04/12] [bug fix] validate lower bound for top n size (#14587) Signed-off-by: Chenyang Ji --- .../plugin/insights/core/service/TopQueriesService.java | 8 +++----- .../insights/core/service/TopQueriesServiceTests.java | 4 ++++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java 
b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java index c21b89be4dcca..bbe8b8fc40dac 100644 --- a/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java +++ b/plugins/query-insights/src/main/java/org/opensearch/plugin/insights/core/service/TopQueriesService.java @@ -138,17 +138,15 @@ public int getTopNSize() { * @param size the wanted top N size */ public void validateTopNSize(final int size) { - if (size > QueryInsightsSettings.MAX_N_SIZE) { + if (size < 1 || size > QueryInsightsSettings.MAX_N_SIZE) { throw new IllegalArgumentException( "Top N size setting for [" + metricType + "]" - + " should be smaller than max top N size [" + + " should be between 1 and " + QueryInsightsSettings.MAX_N_SIZE - + "was (" + + ", was (" + size - + " > " - + QueryInsightsSettings.MAX_N_SIZE + ")" ); } diff --git a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java index 3efd4c86833cc..8478fe1621698 100644 --- a/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java +++ b/plugins/query-insights/src/test/java/org/opensearch/plugin/insights/core/service/TopQueriesServiceTests.java @@ -78,6 +78,10 @@ public void testValidateTopNSize() { assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(QueryInsightsSettings.MAX_N_SIZE + 1); }); } + public void testValidateNegativeTopNSize() { + assertThrows(IllegalArgumentException.class, () -> { topQueriesService.validateTopNSize(-1); }); + } + public void testGetTopQueriesWhenNotEnabled() { topQueriesService.setEnabled(false); assertThrows(IllegalArgumentException.class, () -> { topQueriesService.getTopQueriesRecords(false); }); From bf56227ad1d679573beffd94f419f7b73cd07188 Mon Sep 17 00:00:00 2001 From: Craig Perkins Date: Tue, 9 Jul 2024 14:39:24 -0400 Subject: [PATCH 05/12] Create SystemIndexRegistry with helper method matchesSystemIndex (#14415) * Create new extension point in SystemIndexPlugin for a single plugin to get registered system indices Signed-off-by: Craig Perkins * Add to CHANGELOG Signed-off-by: Craig Perkins * WIP on system indices from IndexNameExpressionResolver Signed-off-by: Craig Perkins * Add test in IndexNameExpressionResolverTests Signed-off-by: Craig Perkins * Remove changes in SystemIndexPlugin Signed-off-by: Craig Perkins * Add method in IndexNameExpressionResolver to get matching system indices Signed-off-by: Craig Perkins * Show how resolver can be chained to get system indices Signed-off-by: Craig Perkins * Fix forbiddenApis check Signed-off-by: Craig Perkins * Update CHANGELOG Signed-off-by: Craig Perkins * Make SystemIndices internal Signed-off-by: Craig Perkins * Remove unneeded changes Signed-off-by: Craig Perkins * Fix CI failures Signed-off-by: Craig Perkins * Fix precommit errors Signed-off-by: Craig Perkins * Use Regex instead of WildcardMatcher Signed-off-by: Craig Perkins * Address code review feedback Signed-off-by: Craig Perkins * Allow caller to pass index expressions Signed-off-by: Craig Perkins * Create SystemIndexRegistry Signed-off-by: Craig Perkins * Update CHANGELOG Signed-off-by: Craig Perkins * Remove singleton limitation Signed-off-by: Craig Perkins * Add javadoc Signed-off-by: Craig Perkins * Add @ExperimentalApi annotation Signed-off-by: Craig Perkins --------- 
Signed-off-by: Craig Perkins --- CHANGELOG.md | 1 + .../indices/SystemIndexDescriptor.java | 4 +- .../indices/SystemIndexRegistry.java | 130 ++++++++++++++++++ .../org/opensearch/indices/SystemIndices.java | 88 +----------- .../main/java/org/opensearch/node/Node.java | 17 ++- .../indices/SystemIndicesTests.java | 99 ++++++++++++- 6 files changed, 242 insertions(+), 97 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cffa99fb66a5..fe8d5d524097e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Add batching supported processor base type AbstractBatchingProcessor ([#14554](https://github.com/opensearch-project/OpenSearch/pull/14554)) - Fix race condition while parsing derived fields from search definition ([14445](https://github.com/opensearch-project/OpenSearch/pull/14445)) - Add allowlist setting for ingest-common and search-pipeline-common processors ([#14439](https://github.com/opensearch-project/OpenSearch/issues/14439)) +- Create SystemIndexRegistry with helper method matchesSystemIndex ([#14415](https://github.com/opensearch-project/OpenSearch/pull/14415)) ### Dependencies - Bump `org.gradle.test-retry` from 1.5.8 to 1.5.9 ([#13442](https://github.com/opensearch-project/OpenSearch/pull/13442)) diff --git a/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java b/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java index f3592a3561d3a..f3212b1e2fae1 100644 --- a/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java +++ b/server/src/main/java/org/opensearch/indices/SystemIndexDescriptor.java @@ -33,6 +33,7 @@ package org.opensearch.indices; import org.apache.lucene.util.automaton.CharacterRunAutomaton; +import org.opensearch.common.annotation.PublicApi; import org.opensearch.common.regex.Regex; import java.util.Objects; @@ -40,8 +41,9 @@ /** * Describes a system index. Provides the information required to create and maintain the system index. * - * @opensearch.internal + * @opensearch.api */ +@PublicApi(since = "2.16.0") public class SystemIndexDescriptor { private final String indexPattern; private final String description; diff --git a/server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java b/server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java new file mode 100644 index 0000000000000..d9608e220d924 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/SystemIndexRegistry.java @@ -0,0 +1,130 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices; + +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.regex.Regex; +import org.opensearch.tasks.TaskResultsService; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.Collections.unmodifiableMap; +import static org.opensearch.tasks.TaskResultsService.TASK_INDEX; + +/** + * This class holds the {@link SystemIndexDescriptor} objects that represent system indices the + * node knows about. This class also contains static methods that identify if index expressions match + * registered system index patterns + * + * @opensearch.api + */ +@ExperimentalApi +public class SystemIndexRegistry { + private static final SystemIndexDescriptor TASK_INDEX_DESCRIPTOR = new SystemIndexDescriptor(TASK_INDEX + "*", "Task Result Index"); + private static final Map> SERVER_SYSTEM_INDEX_DESCRIPTORS = singletonMap( + TaskResultsService.class.getName(), + singletonList(TASK_INDEX_DESCRIPTOR) + ); + + private volatile static String[] SYSTEM_INDEX_PATTERNS = new String[0]; + volatile static Collection SYSTEM_INDEX_DESCRIPTORS = Collections.emptyList(); + + static void register(Map> pluginAndModulesDescriptors) { + final Map> descriptorsMap = buildSystemIndexDescriptorMap(pluginAndModulesDescriptors); + checkForOverlappingPatterns(descriptorsMap); + List descriptors = pluginAndModulesDescriptors.values() + .stream() + .flatMap(Collection::stream) + .collect(Collectors.toList()); + descriptors.add(TASK_INDEX_DESCRIPTOR); + + SYSTEM_INDEX_DESCRIPTORS = descriptors.stream().collect(Collectors.toUnmodifiableList()); + SYSTEM_INDEX_PATTERNS = descriptors.stream().map(SystemIndexDescriptor::getIndexPattern).toArray(String[]::new); + } + + public static List matchesSystemIndexPattern(String... indexExpressions) { + return Arrays.stream(indexExpressions) + .filter(pattern -> Regex.simpleMatch(SYSTEM_INDEX_PATTERNS, pattern)) + .collect(Collectors.toList()); + } + + /** + * Given a collection of {@link SystemIndexDescriptor}s and their sources, checks to see if the index patterns of the listed + * descriptors overlap with any of the other patterns. If any do, throws an exception. + * + * @param sourceToDescriptors A map of source (plugin) names to the SystemIndexDescriptors they provide. + * @throws IllegalStateException Thrown if any of the index patterns overlaps with another. + */ + static void checkForOverlappingPatterns(Map> sourceToDescriptors) { + List> sourceDescriptorPair = sourceToDescriptors.entrySet() + .stream() + .flatMap(entry -> entry.getValue().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor))) + .sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message + .collect(Collectors.toList()); + + // This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the + // automaton, but the absolute number of system index descriptors should be quite small (~10s at most), and the number of states + // per pattern should be low as well. 
If these assumptions change, this might need to be reworked. + sourceDescriptorPair.forEach(descriptorToCheck -> { + List> descriptorsMatchingThisPattern = sourceDescriptorPair.stream() + + .filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked + .filter(d -> overlaps(descriptorToCheck.v2(), d.v2())) + .collect(Collectors.toList()); + if (descriptorsMatchingThisPattern.isEmpty() == false) { + throw new IllegalStateException( + "a system index descriptor [" + + descriptorToCheck.v2() + + "] from [" + + descriptorToCheck.v1() + + "] overlaps with other system index descriptors: [" + + descriptorsMatchingThisPattern.stream() + .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") + .collect(Collectors.joining(", ")) + ); + } + }); + } + + private static boolean overlaps(SystemIndexDescriptor a1, SystemIndexDescriptor a2) { + Automaton a1Automaton = Regex.simpleMatchToAutomaton(a1.getIndexPattern()); + Automaton a2Automaton = Regex.simpleMatchToAutomaton(a2.getIndexPattern()); + return Operations.isEmpty(Operations.intersection(a1Automaton, a2Automaton)) == false; + } + + private static Map> buildSystemIndexDescriptorMap( + Map> pluginAndModulesMap + ) { + final Map> map = new HashMap<>( + pluginAndModulesMap.size() + SERVER_SYSTEM_INDEX_DESCRIPTORS.size() + ); + map.putAll(pluginAndModulesMap); + // put the server items last since we expect less of them + SERVER_SYSTEM_INDEX_DESCRIPTORS.forEach((source, descriptors) -> { + if (map.putIfAbsent(source, descriptors) != null) { + throw new IllegalArgumentException( + "plugin or module attempted to define the same source [" + source + "] as a built-in system index" + ); + } + }); + return unmodifiableMap(map); + } +} diff --git a/server/src/main/java/org/opensearch/indices/SystemIndices.java b/server/src/main/java/org/opensearch/indices/SystemIndices.java index a85e938c61b7a..bbf58fe91512f 100644 --- a/server/src/main/java/org/opensearch/indices/SystemIndices.java +++ b/server/src/main/java/org/opensearch/indices/SystemIndices.java @@ -40,25 +40,15 @@ import org.apache.lucene.util.automaton.MinimizationOperations; import org.apache.lucene.util.automaton.Operations; import org.opensearch.common.Nullable; -import org.opensearch.common.collect.Tuple; import org.opensearch.common.regex.Regex; import org.opensearch.core.index.Index; -import org.opensearch.tasks.TaskResultsService; import java.util.Collection; -import java.util.Comparator; -import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.stream.Collectors; -import static java.util.Collections.singletonList; -import static java.util.Collections.singletonMap; -import static java.util.Collections.unmodifiableList; -import static java.util.Collections.unmodifiableMap; -import static org.opensearch.tasks.TaskResultsService.TASK_INDEX; - /** * This class holds the {@link SystemIndexDescriptor} objects that represent system indices the * node knows about. 
Methods for determining if an index should be a system index are also provided @@ -69,21 +59,11 @@ public class SystemIndices { private static final Logger logger = LogManager.getLogger(SystemIndices.class); - private static final Map> SERVER_SYSTEM_INDEX_DESCRIPTORS = singletonMap( - TaskResultsService.class.getName(), - singletonList(new SystemIndexDescriptor(TASK_INDEX + "*", "Task Result Index")) - ); - private final CharacterRunAutomaton runAutomaton; - private final Collection systemIndexDescriptors; public SystemIndices(Map> pluginAndModulesDescriptors) { - final Map> descriptorsMap = buildSystemIndexDescriptorMap(pluginAndModulesDescriptors); - checkForOverlappingPatterns(descriptorsMap); - this.systemIndexDescriptors = unmodifiableList( - descriptorsMap.values().stream().flatMap(Collection::stream).collect(Collectors.toList()) - ); - this.runAutomaton = buildCharacterRunAutomaton(systemIndexDescriptors); + SystemIndexRegistry.register(pluginAndModulesDescriptors); + this.runAutomaton = buildCharacterRunAutomaton(SystemIndexRegistry.SYSTEM_INDEX_DESCRIPTORS); } /** @@ -111,7 +91,7 @@ public boolean isSystemIndex(String indexName) { * @throws IllegalStateException if multiple descriptors match the name */ public @Nullable SystemIndexDescriptor findMatchingDescriptor(String name) { - final List matchingDescriptors = systemIndexDescriptors.stream() + final List matchingDescriptors = SystemIndexRegistry.SYSTEM_INDEX_DESCRIPTORS.stream() .filter(descriptor -> descriptor.matchesIndexPattern(name)) .collect(Collectors.toList()); @@ -168,66 +148,4 @@ private static CharacterRunAutomaton buildCharacterRunAutomaton(Collection> sourceToDescriptors) { - List> sourceDescriptorPair = sourceToDescriptors.entrySet() - .stream() - .flatMap(entry -> entry.getValue().stream().map(descriptor -> new Tuple<>(entry.getKey(), descriptor))) - .sorted(Comparator.comparing(d -> d.v1() + ":" + d.v2().getIndexPattern())) // Consistent ordering -> consistent error message - .collect(Collectors.toList()); - - // This is O(n^2) with the number of system index descriptors, and each check is quadratic with the number of states in the - // automaton, but the absolute number of system index descriptors should be quite small (~10s at most), and the number of states - // per pattern should be low as well. If these assumptions change, this might need to be reworked. 
- sourceDescriptorPair.forEach(descriptorToCheck -> { - List> descriptorsMatchingThisPattern = sourceDescriptorPair.stream() - - .filter(d -> descriptorToCheck.v2() != d.v2()) // Exclude the pattern currently being checked - .filter(d -> overlaps(descriptorToCheck.v2(), d.v2())) - .collect(Collectors.toList()); - if (descriptorsMatchingThisPattern.isEmpty() == false) { - throw new IllegalStateException( - "a system index descriptor [" - + descriptorToCheck.v2() - + "] from [" - + descriptorToCheck.v1() - + "] overlaps with other system index descriptors: [" - + descriptorsMatchingThisPattern.stream() - .map(descriptor -> descriptor.v2() + " from [" + descriptor.v1() + "]") - .collect(Collectors.joining(", ")) - ); - } - }); - } - - private static boolean overlaps(SystemIndexDescriptor a1, SystemIndexDescriptor a2) { - Automaton a1Automaton = Regex.simpleMatchToAutomaton(a1.getIndexPattern()); - Automaton a2Automaton = Regex.simpleMatchToAutomaton(a2.getIndexPattern()); - return Operations.isEmpty(Operations.intersection(a1Automaton, a2Automaton)) == false; - } - - private static Map> buildSystemIndexDescriptorMap( - Map> pluginAndModulesMap - ) { - final Map> map = new HashMap<>( - pluginAndModulesMap.size() + SERVER_SYSTEM_INDEX_DESCRIPTORS.size() - ); - map.putAll(pluginAndModulesMap); - // put the server items last since we expect less of them - SERVER_SYSTEM_INDEX_DESCRIPTORS.forEach((source, descriptors) -> { - if (map.putIfAbsent(source, descriptors) != null) { - throw new IllegalArgumentException( - "plugin or module attempted to define the same source [" + source + "] as a built-in system index" - ); - } - }); - return unmodifiableMap(map); - } } diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 85ef547e27787..96a716af7f1a1 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -695,6 +695,14 @@ protected Node( repositoriesServiceReference::get, rerouteServiceReference::get ); + final Map> systemIndexDescriptorMap = Collections.unmodifiableMap( + pluginsService.filterPlugins(SystemIndexPlugin.class) + .stream() + .collect( + Collectors.toMap(plugin -> plugin.getClass().getSimpleName(), plugin -> plugin.getSystemIndexDescriptors(settings)) + ) + ); + final SystemIndices systemIndices = new SystemIndices(systemIndexDescriptorMap); final ClusterModule clusterModule = new ClusterModule( settings, clusterService, @@ -819,15 +827,6 @@ protected Node( .flatMap(m -> m.entrySet().stream()) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - final Map> systemIndexDescriptorMap = Collections.unmodifiableMap( - pluginsService.filterPlugins(SystemIndexPlugin.class) - .stream() - .collect( - Collectors.toMap(plugin -> plugin.getClass().getSimpleName(), plugin -> plugin.getSystemIndexDescriptors(settings)) - ) - ); - final SystemIndices systemIndices = new SystemIndices(systemIndexDescriptorMap); - final RerouteService rerouteService = new BatchedRerouteService(clusterService, clusterModule.getAllocationService()::reroute); rerouteServiceReference.set(rerouteService); clusterService.setRerouteService(rerouteService); diff --git a/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java b/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java index 2b40c01c61242..8ac457c32d53a 100644 --- a/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java +++ 
b/server/src/test/java/org/opensearch/indices/SystemIndicesTests.java @@ -32,12 +32,17 @@ package org.opensearch.indices; +import org.opensearch.common.settings.Settings; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.SystemIndexPlugin; import org.opensearch.tasks.TaskResultsService; import org.opensearch.test.OpenSearchTestCase; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static java.util.Collections.emptyMap; @@ -67,7 +72,7 @@ public void testBasicOverlappingPatterns() { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> SystemIndices.checkForOverlappingPatterns(descriptors) + () -> SystemIndexRegistry.checkForOverlappingPatterns(descriptors) ); assertThat( exception.getMessage(), @@ -104,7 +109,7 @@ public void testComplexOverlappingPatterns() { IllegalStateException exception = expectThrows( IllegalStateException.class, - () -> SystemIndices.checkForOverlappingPatterns(descriptors) + () -> SystemIndexRegistry.checkForOverlappingPatterns(descriptors) ); assertThat( exception.getMessage(), @@ -133,4 +138,94 @@ public void testPluginCannotOverrideBuiltInSystemIndex() { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SystemIndices(pluginMap)); assertThat(e.getMessage(), containsString("plugin or module attempted to define the same source")); } + + public void testSystemIndexMatching() { + SystemIndexPlugin plugin1 = new SystemIndexPlugin1(); + SystemIndexPlugin plugin2 = new SystemIndexPlugin2(); + SystemIndexPlugin plugin3 = new SystemIndexPatternPlugin(); + SystemIndices pluginSystemIndices = new SystemIndices( + Map.of( + SystemIndexPlugin1.class.getCanonicalName(), + plugin1.getSystemIndexDescriptors(Settings.EMPTY), + SystemIndexPlugin2.class.getCanonicalName(), + plugin2.getSystemIndexDescriptors(Settings.EMPTY), + SystemIndexPatternPlugin.class.getCanonicalName(), + plugin3.getSystemIndexDescriptors(Settings.EMPTY) + ) + ); + + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index1", ".system-index2"), + equalTo(List.of(SystemIndexPlugin1.SYSTEM_INDEX_1, SystemIndexPlugin2.SYSTEM_INDEX_2)) + ); + assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".system-index1"), equalTo(List.of(SystemIndexPlugin1.SYSTEM_INDEX_1))); + assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".system-index2"), equalTo(List.of(SystemIndexPlugin2.SYSTEM_INDEX_2))); + assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".system-index-pattern1"), equalTo(List.of(".system-index-pattern1"))); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index-pattern-sub*"), + equalTo(List.of(".system-index-pattern-sub*")) + ); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index-pattern1", ".system-index-pattern2"), + equalTo(List.of(".system-index-pattern1", ".system-index-pattern2")) + ); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index1", ".system-index-pattern1"), + equalTo(List.of(".system-index1", ".system-index-pattern1")) + ); + assertThat( + SystemIndexRegistry.matchesSystemIndexPattern(".system-index1", ".system-index-pattern1", ".not-system"), + equalTo(List.of(".system-index1", ".system-index-pattern1")) + ); + assertThat(SystemIndexRegistry.matchesSystemIndexPattern(".not-system"), equalTo(Collections.emptyList())); + } + + public void testRegisteredSystemIndexExpansion() { + 
SystemIndexPlugin plugin1 = new SystemIndexPlugin1(); + SystemIndexPlugin plugin2 = new SystemIndexPlugin2(); + SystemIndices pluginSystemIndices = new SystemIndices( + Map.of( + SystemIndexPlugin1.class.getCanonicalName(), + plugin1.getSystemIndexDescriptors(Settings.EMPTY), + SystemIndexPlugin2.class.getCanonicalName(), + plugin2.getSystemIndexDescriptors(Settings.EMPTY) + ) + ); + List systemIndices = SystemIndexRegistry.matchesSystemIndexPattern( + SystemIndexPlugin1.SYSTEM_INDEX_1, + SystemIndexPlugin2.SYSTEM_INDEX_2 + ); + assertEquals(2, systemIndices.size()); + assertTrue(systemIndices.containsAll(List.of(SystemIndexPlugin1.SYSTEM_INDEX_1, SystemIndexPlugin2.SYSTEM_INDEX_2))); + } + + static final class SystemIndexPlugin1 extends Plugin implements SystemIndexPlugin { + public static final String SYSTEM_INDEX_1 = ".system-index1"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_1, "System index 1"); + return Collections.singletonList(systemIndexDescriptor); + } + } + + static final class SystemIndexPlugin2 extends Plugin implements SystemIndexPlugin { + public static final String SYSTEM_INDEX_2 = ".system-index2"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_2, "System index 2"); + return Collections.singletonList(systemIndexDescriptor); + } + } + + static final class SystemIndexPatternPlugin extends Plugin implements SystemIndexPlugin { + public static final String SYSTEM_INDEX_PATTERN = ".system-index-pattern*"; + + @Override + public Collection getSystemIndexDescriptors(Settings settings) { + final SystemIndexDescriptor systemIndexDescriptor = new SystemIndexDescriptor(SYSTEM_INDEX_PATTERN, "System index pattern"); + return Collections.singletonList(systemIndexDescriptor); + } + } } From 13f04d97e3438b0389d1e851dfd08678ab644361 Mon Sep 17 00:00:00 2001 From: Sandesh Kumar Date: Tue, 9 Jul 2024 11:49:03 -0700 Subject: [PATCH 06/12] Refactor Grok validate pattern to iterative approach (#14206) * grok validate patterns recusrion to iterative Signed-off-by: Sandesh Kumar * Add max depth in resolving a pattern to avoid OOM Signed-off-by: Sandesh Kumar * change path from deque to arraylist Signed-off-by: Sandesh Kumar * rename queue to stack Signed-off-by: Sandesh Kumar * Change max depth to 500 Signed-off-by: Sandesh Kumar * typo originPatternName fix Signed-off-by: Sandesh Kumar * spotless Signed-off-by: Sandesh Kumar --------- Signed-off-by: Sandesh Kumar --- CHANGELOG.md | 1 + .../main/java/org/opensearch/grok/Grok.java | 122 +++++++++++++----- .../java/org/opensearch/grok/GrokTests.java | 10 ++ 3 files changed, 99 insertions(+), 34 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe8d5d524097e..0f683154c52f9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix file cache initialization ([#14004](https://github.com/opensearch-project/OpenSearch/pull/14004)) - Handle NPE in GetResult if "found" field is missing ([#14552](https://github.com/opensearch-project/OpenSearch/pull/14552)) - Refactoring FilterPath.parse by using an iterative approach ([#14200](https://github.com/opensearch-project/OpenSearch/pull/14200)) +- Refactoring Grok.validatePatternBank by using an iterative approach 
([#14206](https://github.com/opensearch-project/OpenSearch/pull/14206)) ### Security diff --git a/libs/grok/src/main/java/org/opensearch/grok/Grok.java b/libs/grok/src/main/java/org/opensearch/grok/Grok.java index 7aa3347ba4f4b..aa5b1a936b99d 100644 --- a/libs/grok/src/main/java/org/opensearch/grok/Grok.java +++ b/libs/grok/src/main/java/org/opensearch/grok/Grok.java @@ -37,14 +37,18 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; +import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Stack; +import java.util.Set; import java.util.function.Consumer; import org.jcodings.specific.UTF8Encoding; @@ -86,6 +90,7 @@ public final class Grok { UTF8Encoding.INSTANCE, Syntax.DEFAULT ); + private static final int MAX_PATTERN_DEPTH_SIZE = 500; private static final int MAX_TO_REGEX_ITERATIONS = 100_000; // sanity limit @@ -128,7 +133,7 @@ private Grok( expressionBytes.length, Option.DEFAULT, UTF8Encoding.INSTANCE, - message -> logCallBack.accept(message) + logCallBack::accept ); List captureConfig = new ArrayList<>(); @@ -144,7 +149,7 @@ private Grok( */ private void validatePatternBank() { for (String patternName : patternBank.keySet()) { - validatePatternBank(patternName, new Stack<>()); + validatePatternBank(patternName); } } @@ -156,33 +161,84 @@ private void validatePatternBank() { * a reference to another named pattern. This method will navigate to all these named patterns and * check for a circular reference. */ - private void validatePatternBank(String patternName, Stack path) { - String pattern = patternBank.get(patternName); - boolean isSelfReference = pattern.contains("%{" + patternName + "}") || pattern.contains("%{" + patternName + ":"); - if (isSelfReference) { - throwExceptionForCircularReference(patternName, pattern); - } else if (path.contains(patternName)) { - // current pattern name is already in the path, fetch its predecessor - String prevPatternName = path.pop(); - String prevPattern = patternBank.get(prevPatternName); - throwExceptionForCircularReference(prevPatternName, prevPattern, patternName, path); - } - path.push(patternName); - for (int i = pattern.indexOf("%{"); i != -1; i = pattern.indexOf("%{", i + 1)) { - int begin = i + 2; - int syntaxEndIndex = pattern.indexOf('}', begin); - if (syntaxEndIndex == -1) { - throw new IllegalArgumentException("Malformed pattern [" + patternName + "][" + pattern + "]"); + private void validatePatternBank(String initialPatternName) { + Deque stack = new ArrayDeque<>(); + Set visitedPatterns = new HashSet<>(); + Map> pathMap = new HashMap<>(); + + List initialPath = new ArrayList<>(); + initialPath.add(initialPatternName); + pathMap.put(initialPatternName, initialPath); + stack.push(new Frame(initialPatternName, initialPath, 0)); + + while (!stack.isEmpty()) { + Frame frame = stack.peek(); + String patternName = frame.patternName; + List path = frame.path; + int startIndex = frame.startIndex; + String pattern = patternBank.get(patternName); + + if (visitedPatterns.contains(patternName)) { + stack.pop(); + continue; + } + + visitedPatterns.add(patternName); + boolean foundDependency = false; + + for (int i = startIndex; i < pattern.length(); i++) { + if (pattern.startsWith("%{", i)) { + int begin = i + 2; + int syntaxEndIndex = 
pattern.indexOf('}', begin); + if (syntaxEndIndex == -1) { + throw new IllegalArgumentException("Malformed pattern [" + patternName + "][" + pattern + "]"); + } + + int semanticNameIndex = pattern.indexOf(':', begin); + int end = semanticNameIndex == -1 ? syntaxEndIndex : Math.min(syntaxEndIndex, semanticNameIndex); + + String dependsOnPattern = pattern.substring(begin, end); + + if (dependsOnPattern.equals(patternName)) { + throwExceptionForCircularReference(patternName, pattern); + } + + if (pathMap.containsKey(dependsOnPattern)) { + throwExceptionForCircularReference(patternName, pattern, dependsOnPattern, path.subList(0, path.size() - 1)); + } + + List<String> newPath = new ArrayList<>(path); + newPath.add(dependsOnPattern); + pathMap.put(dependsOnPattern, newPath); + + stack.push(new Frame(dependsOnPattern, newPath, 0)); + frame.startIndex = i + 1; + foundDependency = true; + break; + } } - int semanticNameIndex = pattern.indexOf(':', begin); - int end = syntaxEndIndex; - if (semanticNameIndex != -1) { - end = Math.min(syntaxEndIndex, semanticNameIndex); + + if (!foundDependency) { + pathMap.remove(patternName); + stack.pop(); + } + + if (stack.size() > MAX_PATTERN_DEPTH_SIZE) { + throw new IllegalArgumentException("Pattern references exceeded maximum depth of " + MAX_PATTERN_DEPTH_SIZE); } - String dependsOnPattern = pattern.substring(begin, end); - validatePatternBank(dependsOnPattern, path); } - path.pop(); + } + + private static class Frame { + String patternName; + List<String> path; + int startIndex; + + Frame(String patternName, List<String> path, int startIndex) { + this.patternName = patternName; + this.path = path; + this.startIndex = startIndex; + } } private static void throwExceptionForCircularReference(String patternName, String pattern) { @@ -192,13 +248,13 @@ private static void throwExceptionForCircularReference(String patternName, Strin private static void throwExceptionForCircularReference( String patternName, String pattern, - String originPatterName, - Stack<String> path + String originPatternName, + List<String> path ) { StringBuilder message = new StringBuilder("circular reference in pattern ["); message.append(patternName).append("][").append(pattern).append("]"); - if (originPatterName != null) { - message.append(" back to pattern [").append(originPatterName).append("]"); + if (originPatternName != null) { + message.append(" back to pattern [").append(originPatternName).append("]"); } if (path != null && path.size() > 1) { message.append(" via patterns [").append(String.join("=>", path)).append("]"); @@ -217,9 +273,7 @@ private String groupMatch(String name, Region region, String pattern) { int begin = region.getBeg(number); int end = region.getEnd(number); return new String(pattern.getBytes(StandardCharsets.UTF_8), begin, end - begin, StandardCharsets.UTF_8); - } catch (StringIndexOutOfBoundsException e) { - return null; - } catch (ValueException e) { + } catch (StringIndexOutOfBoundsException | ValueException e) { return null; } } diff --git a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java index a37689e051c67..8476d541aa46e 100644 --- a/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java +++ b/libs/grok/src/test/java/org/opensearch/grok/GrokTests.java @@ -377,6 +377,16 @@ public void testCircularReference() { "circular reference in pattern [NAME5][!!!%{NAME1}!!!] back to pattern [NAME1] " + "via patterns [NAME1=>NAME2=>NAME3=>NAME4]", e.getMessage() ); + + e = expectThrows(IllegalArgumentException.class, () -> { + Map<String, String> bank = new TreeMap<>(); + for (int i = 1; i <= 501; i++) { + bank.put("NAME" + i, "!!!%{NAME" + (i + 1) + "}!!!"); + } + String pattern = "%{NAME1}"; + new Grok(bank, pattern, false, logger::warn); + }); + assertEquals("Pattern references exceeded maximum depth of 500", e.getMessage()); } public void testMalformedPattern() {
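
The refactor above follows the standard recipe for converting depth-first recursion into an explicit-stack loop: the per-call state (pattern name, path so far, scan position) moves into a Frame object pushed on a Deque, a frame is peeked and resumed rather than restarted, and a cap on the stack size turns runaway nesting into a deterministic IllegalArgumentException instead of a StackOverflowError or out-of-memory failure. A minimal, self-contained sketch of that pattern (illustrative only, with invented names; this is not the Grok implementation itself):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class DepthBoundedWalker {
    private static final int MAX_DEPTH = 500;

    /** One suspended "call": a name plus an iterator over its not-yet-explored references. */
    private record Frame(String name, Iterator<String> remaining) {}

    static void walk(String start, Map<String, List<String>> references) {
        Deque<Frame> stack = new ArrayDeque<>(); // explicit stack replaces the JVM call stack
        Set<String> visited = new HashSet<>();
        visited.add(start);
        stack.push(new Frame(start, references.getOrDefault(start, List.of()).iterator()));
        while (!stack.isEmpty()) {
            if (stack.size() > MAX_DEPTH) {
                // frames are popped only when exhausted, so stack size equals the current chain depth
                throw new IllegalArgumentException("references exceeded maximum depth of " + MAX_DEPTH);
            }
            Frame frame = stack.peek(); // resume the frame where it left off; don't restart it
            if (!frame.remaining().hasNext()) {
                stack.pop(); // the equivalent of returning from the recursive call
                continue;
            }
            String next = frame.remaining().next();
            if (visited.add(next)) { // descend only into names not seen before
                stack.push(new Frame(next, references.getOrDefault(next, List.of()).iterator()));
            }
        }
    }
}

Checking stack.size() works as a depth bound in this style because a frame stays on the stack until every reference under it has been explored, so the stack always holds exactly the current reference chain, which is what the MAX_PATTERN_DEPTH_SIZE guard in the patch relies on.
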
From b8dc46d93711882a8fae8b953f11934d940f12a5 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 9 Jul 2024 16:15:40 -0400 Subject: [PATCH 07/12] Bump opentelemetry from 1.39.0 to 1.40.0 (#14674) Signed-off-by: Andriy Redko --- CHANGELOG.md | 5 +++-- buildSrc/version.properties | 4 ++-- .../licenses/opentelemetry-api-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-api-1.40.0.jar.sha1 | 1 + .../opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 | 1 - .../opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 | 1 + .../licenses/opentelemetry-context-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-context-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 | 1 + .../opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 | 1 - .../opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 | 1 + .../opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 | 1 - .../opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 | 1 - .../licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 | 1 + .../licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 | 1 - .../licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 | 1 + 30 files changed, 19 insertions(+), 18 deletions(-) delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 create
mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 delete mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 create mode 100644 plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f683154c52f9..1807fb6d5e00c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,8 +26,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398)) - Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399)) - Bump `com.gradle.develocity` from 3.17.4 to 3.17.5 ([#14397](https://github.com/opensearch-project/OpenSearch/pull/14397)) -- Bump `opentelemetry` from 1.36.0 to 1.39.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457)) -- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14506)) +- Bump `opentelemetry` from 1.36.0 to 1.40.0 ([#14457](https://github.com/opensearch-project/OpenSearch/pull/14457), [#14674](https://github.com/opensearch-project/OpenSearch/pull/14674)) +- Bump `opentelemetry-semconv` from 1.25.0-alpha to 1.26.0-alpha ([#14674](https://github.com/opensearch-project/OpenSearch/pull/14674)) +- Bump `azure-identity` from 1.11.4 to 1.13.0, Bump `msal4j` from 1.14.3 to 1.15.1, Bump `msal4j-persistence-extension` from 1.2.0 to 1.3.0 ([#14506](https://github.com/opensearch-project/OpenSearch/pull/14673)) - Bump `com.azure:azure-storage-common` from 12.21.2 to 12.25.1 ([#14517](https://github.com/opensearch-project/OpenSearch/pull/14517)) - Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610)) - Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672)) diff --git a/buildSrc/version.properties 
b/buildSrc/version.properties index a99bd4801b7f3..a04fb68f47f55 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -74,5 +74,5 @@ jzlib = 1.1.3 resteasy = 6.2.4.Final # opentelemetry dependencies -opentelemetry = 1.39.0 -opentelemetrysemconv = 1.25.0-alpha +opentelemetry = 1.40.0 +opentelemetrysemconv = 1.26.0-alpha diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 deleted file mode 100644 index 415fe8f3d8aaa..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -396b89a66526bd5694ad3bef4604b876177e0b44 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..04ec81edf969c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-1.40.0.jar.sha1 @@ -0,0 +1 @@ +6db562f2b74ffaa7253d740e9aa7a3c4f2e392ec \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 deleted file mode 100644 index 9c3c9f43d153c..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.39.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a1fd96155e1b58726300bbf8457630713035e51 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..bcd7c886b5f6c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-api-incubator-1.40.0-alpha.jar.sha1 @@ -0,0 +1 @@ +43115633361430a3c6aaa39fd78363014ac79270 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 deleted file mode 100644 index 115d4ccb1f34b..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-context-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f0601fb1c06f661afeffbc73a1dbe29797b2f13b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..9716ec518c886 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-context-1.40.0.jar.sha1 @@ -0,0 +1 @@ +bf1db0f288b9baaabdb439ab6179b673b751511e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 deleted file mode 100644 index a10b92995becd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -570d71e39e36fe2caad142557bde0c11fcdb3b92 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..c0e79b05aa675 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-common-1.40.0.jar.sha1 @@ -0,0 +1 @@ +b883b179c242a1761df2d408fe01ec41b17327a3 \ No newline at end of file diff --git 
a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 deleted file mode 100644 index f43393104296a..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f5b528f8d6f8531836eabba698979516964b24ed \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..1df0ad183c475 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-logging-1.40.0.jar.sha1 @@ -0,0 +1 @@ +a8c1f9b05ac9fb1259517cf53950ccecaf84ebe1 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 deleted file mode 100644 index 5adba2ba0f342..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -04fc0e4983253ea58430c3d24b6b3c5c95f84dc9 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..ebeb639a8459c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-1.40.0.jar.sha1 @@ -0,0 +1 @@ +8d8b92bcdb0ace48fb5764cc1ad7a0de197d5b8c \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 deleted file mode 100644 index ea9c293f25025..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a2b8571e36b11c3153d31ec87ec69cc168af8036 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..b630c808d4763 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-otlp-common-1.40.0.jar.sha1 @@ -0,0 +1 @@ +80fa10130cc7e7626e2581aa7c5871eab7381889 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 deleted file mode 100644 index dcf23f16ac89f..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a8947a2e28924ad9374e319150a23837926ca4b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..eda90dc825e6f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-exporter-sender-okhttp-1.40.0.jar.sha1 @@ -0,0 +1 @@ +006dcdbf8eb911ad4d11c54fa824f5a97f582850 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 deleted file mode 100644 index f603af04d8012..0000000000000 --- 
a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba9afdf3ef1ea51e42999fd68c959e3ceb219399 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..cdd7dc6551b33 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-1.40.0.jar.sha1 @@ -0,0 +1 @@ +59f260c5412b79a5a40c7d433600248727cd195a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 deleted file mode 100644 index f9419f6ccfbee..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb8168627bf0059445f61081eaa47c4ab787fc2e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..668291498bbae --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-common-1.40.0.jar.sha1 @@ -0,0 +1 @@ +7042214012232a5d6a251aca4aa5932014a4946b \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 deleted file mode 100644 index 63269f239eacd..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6b45155399bc9fa563945f3e3a77416d7165948 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..74f0786e21954 --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-logs-1.40.0.jar.sha1 @@ -0,0 +1 @@ +1c6b884d65f79d40429263ac0ab7ed1422237837 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 deleted file mode 100644 index f18c8259c1adc..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -522d46926cc06a4c18829da7e4c4340bdf5673c3 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..23ef1bf6e6b2c --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-sdk-metrics-1.40.0.jar.sha1 @@ -0,0 +1 @@ +a1c9b33a8660ace82aecb7f1c7ea50093dc87f0a \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 deleted file mode 100644 index 03b81424f46d5..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.39.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0b72722a5bbea5f46319bf08b2caed5b8f987a92 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 new file mode 100644 index 0000000000000..aea753f0df18b --- /dev/null +++ 
b/plugins/telemetry-otel/licenses/opentelemetry-sdk-trace-1.40.0.jar.sha1 @@ -0,0 +1 @@ +5145f077bf2821ad243617baf8c1810d29af8566 \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 deleted file mode 100644 index 7cf8e7e8ede28..0000000000000 --- a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.25.0-alpha.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -76b3d4ca0a8f20b27c1590ceece54f0c7fb5857e \ No newline at end of file diff --git a/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 new file mode 100644 index 0000000000000..7124dcb31da3f --- /dev/null +++ b/plugins/telemetry-otel/licenses/opentelemetry-semconv-1.26.0-alpha.jar.sha1 @@ -0,0 +1 @@ +955de1d2de4d3d2bb6ba2498f19c9a06da2f3956 \ No newline at end of file From a04cf24923e7379d94314ae4f03e2bddfddf1765 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 9 Jul 2024 17:07:54 -0400 Subject: [PATCH 08/12] Bump jackson from 2.17.1 to 2.17.2 (#14687) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 4 ++-- client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 | 1 - client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 | 1 + .../upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 | 1 - .../upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 | 1 + .../upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 | 1 - .../upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 | 1 + libs/core/licenses/jackson-core-2.17.1.jar.sha1 | 1 - libs/core/licenses/jackson-core-2.17.2.jar.sha1 | 1 + libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 | 1 - libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 | 1 + .../licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 | 1 - .../licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 | 1 + .../licenses/jackson-dataformat-smile-2.17.1.jar.sha1 | 1 - .../licenses/jackson-dataformat-smile-2.17.2.jar.sha1 | 1 + .../licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 | 1 - .../licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 | 1 + .../ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 | 1 - .../ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 | 1 + .../ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 | 1 - .../ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 | 1 + .../crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 | 1 - .../crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 | 1 + plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 | 1 - plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 | 1 + .../licenses/jackson-annotations-2.17.1.jar.sha1 | 1 - .../licenses/jackson-annotations-2.17.2.jar.sha1 | 1 + .../discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 | 1 - .../discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 | 1 + .../licenses/jackson-annotations-2.17.1.jar.sha1 | 1 - .../licenses/jackson-annotations-2.17.2.jar.sha1 | 1 + .../licenses/jackson-databind-2.17.1.jar.sha1 | 1 - .../licenses/jackson-databind-2.17.2.jar.sha1 | 1 + .../licenses/jackson-dataformat-xml-2.17.1.jar.sha1 | 1 - .../licenses/jackson-dataformat-xml-2.17.2.jar.sha1 | 1 + .../licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 | 1 - .../licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 | 1 + .../licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 | 1 - 
.../licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 | 1 + .../licenses/jackson-annotations-2.17.1.jar.sha1 | 1 - .../licenses/jackson-annotations-2.17.2.jar.sha1 | 1 + .../repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 | 1 - .../repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 | 1 + server/licenses/jackson-core-2.17.1.jar.sha1 | 1 - server/licenses/jackson-core-2.17.2.jar.sha1 | 1 + server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 | 1 - server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 | 1 + server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 | 1 - server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 | 1 + server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 | 1 - server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 | 1 + 52 files changed, 28 insertions(+), 27 deletions(-) delete mode 100644 client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 create mode 100644 client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 libs/core/licenses/jackson-core-2.17.1.jar.sha1 create mode 100644 libs/core/licenses/jackson-core-2.17.2.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 delete mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 create mode 100644 libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 
plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 create mode 100644 plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 create mode 100644 plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-core-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-core-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 delete mode 100644 server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 create mode 100644 server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 1807fb6d5e00c..fe26423cde573 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.microsoft.azure:msal4j` from 1.15.1 to 1.16.0 ([#14610](https://github.com/opensearch-project/OpenSearch/pull/14610)) - Bump `com.github.spullara.mustache.java:compiler` from 0.9.13 to 0.9.14 ([#14672](https://github.com/opensearch-project/OpenSearch/pull/14672)) - Bump `net.minidev:accessors-smart` from 2.5.0 to 2.5.1 ([#14673](https://github.com/opensearch-project/OpenSearch/pull/14673)) +- Bump `jackson` from 2.17.1 to 2.17.2 ([#14687](https://github.com/opensearch-project/OpenSearch/pull/14687)) ### Changed - [Tiered Caching] Move query recomputation logic outside write lock ([#14187](https://github.com/opensearch-project/OpenSearch/pull/14187)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index a04fb68f47f55..d62f8c51e616b 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -7,8 +7,8 @@ bundled_jdk = 21.0.3+9 # optional dependencies spatial4j = 0.7 jts = 1.15.0 -jackson = 2.17.1 -jackson_databind = 2.17.1 +jackson = 2.17.2 +jackson_databind = 2.17.2 snakeyaml = 2.1 icu4j = 70.1 supercsv = 2.4.0 diff --git a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 b/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 
b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/distribution/tools/upgrade-cli/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.17.1.jar.sha1 b/libs/core/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/libs/core/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/libs/core/licenses/jackson-core-2.17.2.jar.sha1 b/libs/core/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/libs/core/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/libs/x-content/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/libs/x-content/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 deleted file mode 100644 index ff42ed1f92cfe..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..069e088413ef1 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 @@ -0,0 +1 @@ +57fa7c1b5104bbc4599278d13933a937ee058e68 \ No newline at 
end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 deleted file mode 100644 index 47d19067cf2a6..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..28d8c8382aed3 --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 @@ -0,0 +1 @@ +20e956b9b6f67138edd39fab7a506ded19638bcb \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 deleted file mode 100644 index 7946e994c7104..0000000000000 --- a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f3e25b7eb253c --- /dev/null +++ b/libs/x-content/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +78d2c73dbec62044d7cf3b544b2e0d24a1a093b0 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/modules/ingest-geoip/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 b/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/modules/ingest-geoip/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- 
/dev/null +++ b/plugins/crypto-kms/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/crypto-kms/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/crypto-kms/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/discovery-ec2/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git 
a/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 deleted file mode 100644 index 3915ab2616beb..0000000000000 --- a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e6a168dba62aa63743b9e2b83f4e0f0dfdc143d3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f9c31c168926d --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-dataformat-xml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +ad58f5bd089e743ac6e5999b2d1e3cf8515cea9a \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 deleted file mode 100644 index db26ebbf738f7..0000000000000 --- a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0969b0c3cb8c75d759e9a6c585c44c9b9f3a4f75 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..a61bf643d69e6 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-datatype-jsr310-2.17.2.jar.sha1 @@ -0,0 +1 @@ +267b85e9ba2892a37be6d80aa9ca1438a0d8c210 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 deleted file mode 100644 index bb8ecfe34d295..0000000000000 --- a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f77e7bf0e64dfcf53bfdcf2764ad7ab92b78a4da \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..d9d7975146c22 --- /dev/null +++ b/plugins/repository-azure/licenses/jackson-module-jaxb-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +c2978b818ef2f2b2738b387c143624eab611d917 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 deleted file mode 100644 index 4ceead1b7ae4f..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fca7ef6192c9ad05d07bc50da991bf937a84af3a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..411e1d62459fd --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-annotations-2.17.2.jar.sha1 @@ -0,0 +1 @@ +147b7b9412ffff24339f8aba080b292448e08698 \ No newline at end of 
file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 deleted file mode 100644 index 7cf1ac1b60301..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0524dcbcccdde7d45a679dfc333e4763feb09079 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f2b4dbdc5decb --- /dev/null +++ b/plugins/repository-s3/licenses/jackson-databind-2.17.2.jar.sha1 @@ -0,0 +1 @@ +e6deb029e5901e027c129341fac39e515066b68c \ No newline at end of file diff --git a/server/licenses/jackson-core-2.17.1.jar.sha1 b/server/licenses/jackson-core-2.17.1.jar.sha1 deleted file mode 100644 index 82dab5981e652..0000000000000 --- a/server/licenses/jackson-core-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5e52a11644cd59a28ef79f02bddc2cc3bab45edb \ No newline at end of file diff --git a/server/licenses/jackson-core-2.17.2.jar.sha1 b/server/licenses/jackson-core-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..e15f2340980bc --- /dev/null +++ b/server/licenses/jackson-core-2.17.2.jar.sha1 @@ -0,0 +1 @@ +969a35cb35c86512acbadcdbbbfb044c877db814 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 deleted file mode 100644 index ff42ed1f92cfe..0000000000000 --- a/server/licenses/jackson-dataformat-cbor-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ba5d8e6ecc62aa0e49c0ce935b8696352dbebc71 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..069e088413ef1 --- /dev/null +++ b/server/licenses/jackson-dataformat-cbor-2.17.2.jar.sha1 @@ -0,0 +1 @@ +57fa7c1b5104bbc4599278d13933a937ee058e68 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 deleted file mode 100644 index 47d19067cf2a6..0000000000000 --- a/server/licenses/jackson-dataformat-smile-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -89683ac4f0a0c2c4f69ea56b90480ed40266dac8 \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..28d8c8382aed3 --- /dev/null +++ b/server/licenses/jackson-dataformat-smile-2.17.2.jar.sha1 @@ -0,0 +1 @@ +20e956b9b6f67138edd39fab7a506ded19638bcb \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 deleted file mode 100644 index 7946e994c7104..0000000000000 --- a/server/licenses/jackson-dataformat-yaml-2.17.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b4c7b8a9ea3f398116a75c146b982b22afebc4ee \ No newline at end of file diff --git a/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 b/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 new file mode 100644 index 0000000000000..f3e25b7eb253c --- /dev/null +++ b/server/licenses/jackson-dataformat-yaml-2.17.2.jar.sha1 @@ -0,0 +1 @@ +78d2c73dbec62044d7cf3b544b2e0d24a1a093b0 \ No newline at end of file From 5ef41cf483581b86abc145091fea9dd6f636f503 Mon Sep 17 00:00:00 2001 From: Zelin Hao Date: Tue, 9 Jul 2024 
14:57:05 -0700 Subject: [PATCH 09/12] Add release notes for release 1.3.18 (#14699) Signed-off-by: Zelin Hao --- release-notes/opensearch.release-notes-1.3.18.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.18.md diff --git a/release-notes/opensearch.release-notes-1.3.18.md b/release-notes/opensearch.release-notes-1.3.18.md new file mode 100644 index 0000000000000..75c38dd285a63 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.18.md @@ -0,0 +1,4 @@ +## 2024-07-09 Version 1.3.18 Release Notes + +### Upgrades +- Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356)) From c4d960fcca1ae6c3191a5c474cbc867b4431d621 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 9 Jul 2024 21:20:17 -0400 Subject: [PATCH 10/12] Bump reactor from 3.5.19 to 3.5.20 (#14697) Signed-off-by: Andriy Redko --- CHANGELOG.md | 4 ++-- buildSrc/version.properties | 4 ++-- .../licenses/reactor-netty-core-1.1.20.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.1.21.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.1.20.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.1.21.jar.sha1 | 1 + .../licenses/reactor-netty-core-1.1.20.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.1.21.jar.sha1 | 1 + .../licenses/reactor-netty-http-1.1.20.jar.sha1 | 1 - .../licenses/reactor-netty-http-1.1.21.jar.sha1 | 1 + server/licenses/reactor-core-3.5.18.jar.sha1 | 1 - server/licenses/reactor-core-3.5.19.jar.sha1 | 1 + 12 files changed, 9 insertions(+), 9 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 delete mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 create mode 100644 plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 delete mode 100644 server/licenses/reactor-core-3.5.18.jar.sha1 create mode 100644 server/licenses/reactor-core-3.5.19.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index fe26423cde573..62bb73d80f2c1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,8 +20,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Update to Apache Lucene 9.11.0 ([#14042](https://github.com/opensearch-project/OpenSearch/pull/14042)) - Bump `netty` from 4.1.110.Final to 4.1.111.Final ([#14356](https://github.com/opensearch-project/OpenSearch/pull/14356)) - Bump `org.wiremock:wiremock-standalone` from 3.3.1 to 3.6.0 ([#14361](https://github.com/opensearch-project/OpenSearch/pull/14361)) -- Bump `reactor` from 3.5.17 to 3.5.18 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395)) -- Bump `reactor-netty` from 1.1.19 to 1.1.20 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395)) +- Bump `reactor` from 3.5.17 to 3.5.19 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), [#14697](https://github.com/opensearch-project/OpenSearch/pull/14697)) +- Bump `reactor-netty` from 1.1.19 to 1.1.21 ([#14395](https://github.com/opensearch-project/OpenSearch/pull/14395), 
[#14697](https://github.com/opensearch-project/OpenSearch/pull/14697)) - Bump `commons-net:commons-net` from 3.10.0 to 3.11.1 ([#14396](https://github.com/opensearch-project/OpenSearch/pull/14396)) - Bump `com.nimbusds:nimbus-jose-jwt` from 9.37.3 to 9.40 ([#14398](https://github.com/opensearch-project/OpenSearch/pull/14398)) - Bump `org.apache.commons:commons-configuration2` from 2.10.1 to 2.11.0 ([#14399](https://github.com/opensearch-project/OpenSearch/pull/14399)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d62f8c51e616b..855ccc1f87413 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -33,8 +33,8 @@ netty = 4.1.111.Final joda = 2.12.7 # project reactor -reactor_netty = 1.1.20 -reactor = 3.5.18 +reactor_netty = 1.1.21 +reactor = 3.5.19 # client dependencies httpclient5 = 5.2.1 diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 deleted file mode 100644 index 2f4d023c88c80..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..21c16c7430016 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.1.21.jar.sha1 @@ -0,0 +1 @@ +acb98bd08107287c454ce74e7b1ed8e7a018a662 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 deleted file mode 100644 index 6c031e00e39c1..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-http-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..648df22873d56 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-http-1.1.21.jar.sha1 @@ -0,0 +1 @@ +b83542bb35630ef815b4177e3c670f62e952e695 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 deleted file mode 100644 index 2f4d023c88c80..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1a5ef52a470a82d9313e2e1ad8ba064bdbd38948 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..21c16c7430016 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-core-1.1.21.jar.sha1 @@ -0,0 +1 @@ +acb98bd08107287c454ce74e7b1ed8e7a018a662 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 deleted file mode 100644 index 6c031e00e39c1..0000000000000 --- a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.20.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-8d4ee98405a5856cf0c9d7c1a70f3f14631e3c46 \ No newline at end of file diff --git a/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 new file mode 100644 index 0000000000000..648df22873d56 --- /dev/null +++ b/plugins/transport-reactor-netty4/licenses/reactor-netty-http-1.1.21.jar.sha1 @@ -0,0 +1 @@ +b83542bb35630ef815b4177e3c670f62e952e695 \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.18.jar.sha1 b/server/licenses/reactor-core-3.5.18.jar.sha1 deleted file mode 100644 index c503f768beafa..0000000000000 --- a/server/licenses/reactor-core-3.5.18.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a8157f7d66d71a407eb77ba12bce72a38c5b4da \ No newline at end of file diff --git a/server/licenses/reactor-core-3.5.19.jar.sha1 b/server/licenses/reactor-core-3.5.19.jar.sha1 new file mode 100644 index 0000000000000..04b59d2faae04 --- /dev/null +++ b/server/licenses/reactor-core-3.5.19.jar.sha1 @@ -0,0 +1 @@ +1d49ce1d0df79f28d3927da5f4c46a895b94335f \ No newline at end of file From b068355d0d81e031a4022efcb713c93fa4ff0b31 Mon Sep 17 00:00:00 2001 From: Shivansh Arora Date: Wed, 10 Jul 2024 12:18:49 +0530 Subject: [PATCH 11/12] Add unit tests for read flow of RemoteClusterStateService and bug fix for transient settings (#14476) Signed-off-by: Shivansh Arora --- .../PublicationTransportHandler.java | 1 - .../remote/ClusterStateDiffManifest.java | 28 +- .../remote/RemoteClusterStateService.java | 28 +- ...oteClusterStateAttributesManagerTests.java | 165 +-- .../RemoteClusterStateServiceTests.java | 1214 ++++++++++++++++- .../remote/RemoteClusterStateTestUtils.java | 227 +++ .../RemoteGlobalMetadataManagerTests.java | 125 +- .../test/TestClusterStateCustom.java | 68 + 8 files changed, 1531 insertions(+), 325 deletions(-) create mode 100644 server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java create mode 100644 test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 36eabd51ffda1..62885a12222be 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -284,7 +284,6 @@ PublishWithJoinResponse handleIncomingRemotePublishRequest(RemotePublishRequest ) ); ClusterState clusterState = remoteClusterStateService.getClusterStateUsingDiff( - request.getClusterName(), manifest, lastSeen, transportService.getLocalNode().getId() diff --git a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java index 65ae2675a95da..aca53c92781e4 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java +++ b/server/src/main/java/org/opensearch/gateway/remote/ClusterStateDiffManifest.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -152,17 +153,17 @@ public ClusterStateDiffManifest( this.settingsMetadataUpdated = settingsMetadataUpdated; this.transientSettingsMetadataUpdated = transientSettingsMetadataUpdate; this.templatesMetadataUpdated = 
templatesMetadataUpdated; - this.customMetadataUpdated = customMetadataUpdated; - this.customMetadataDeleted = customMetadataDeleted; - this.indicesUpdated = indicesUpdated; - this.indicesDeleted = indicesDeleted; + this.customMetadataUpdated = Collections.unmodifiableList(customMetadataUpdated); + this.customMetadataDeleted = Collections.unmodifiableList(customMetadataDeleted); + this.indicesUpdated = Collections.unmodifiableList(indicesUpdated); + this.indicesDeleted = Collections.unmodifiableList(indicesDeleted); this.clusterBlocksUpdated = clusterBlocksUpdated; this.discoveryNodesUpdated = discoveryNodesUpdated; - this.indicesRoutingUpdated = indicesRoutingUpdated; - this.indicesRoutingDeleted = indicesRoutingDeleted; + this.indicesRoutingUpdated = Collections.unmodifiableList(indicesRoutingUpdated); + this.indicesRoutingDeleted = Collections.unmodifiableList(indicesRoutingDeleted); this.hashesOfConsistentSettingsUpdated = hashesOfConsistentSettingsUpdated; - this.clusterStateCustomUpdated = clusterStateCustomUpdated; - this.clusterStateCustomDeleted = clusterStateCustomDeleted; + this.clusterStateCustomUpdated = Collections.unmodifiableList(clusterStateCustomUpdated); + this.clusterStateCustomDeleted = Collections.unmodifiableList(clusterStateCustomDeleted); } public ClusterStateDiffManifest(StreamInput in) throws IOException { @@ -563,7 +564,16 @@ public static class Builder { private List clusterStateCustomUpdated; private List clusterStateCustomDeleted; - public Builder() {} + public Builder() { + customMetadataUpdated = Collections.emptyList(); + customMetadataDeleted = Collections.emptyList(); + indicesUpdated = Collections.emptyList(); + indicesDeleted = Collections.emptyList(); + indicesRoutingUpdated = Collections.emptyList(); + indicesRoutingDeleted = Collections.emptyList(); + clusterStateCustomUpdated = Collections.emptyList(); + clusterStateCustomDeleted = Collections.emptyList(); + } public Builder fromStateUUID(String fromStateUUID) { this.fromStateUUID = fromStateUUID; diff --git a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java index 74abe9cd257b4..3e63f9114ea16 100644 --- a/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java +++ b/server/src/main/java/org/opensearch/gateway/remote/RemoteClusterStateService.java @@ -976,7 +976,8 @@ public ClusterState getLatestClusterState(String clusterName, String clusterUUID return getClusterStateForManifest(clusterName, clusterMetadataManifest.get(), nodeId, includeEphemeral); } - private ClusterState readClusterStateInParallel( + // package private for testing + ClusterState readClusterStateInParallel( ClusterState previousState, ClusterMetadataManifest manifest, String clusterUUID, @@ -1285,7 +1286,7 @@ public ClusterState getClusterStateForManifest( manifest.getCustomMetadataMap(), manifest.getCoordinationMetadata() != null, manifest.getSettingsMetadata() != null, - manifest.getTransientSettingsMetadata() != null, + includeEphemeral && manifest.getTransientSettingsMetadata() != null, manifest.getTemplatesMetadata() != null, includeEphemeral && manifest.getDiscoveryNodesMetadata() != null, includeEphemeral && manifest.getClusterBlocksMetadata() != null, @@ -1321,13 +1322,9 @@ public ClusterState getClusterStateForManifest( } - public ClusterState getClusterStateUsingDiff( - String clusterName, - ClusterMetadataManifest manifest, - ClusterState previousState, - String localNodeId - ) 
throws IOException { - assert manifest.getDiffManifest() != null; + public ClusterState getClusterStateUsingDiff(ClusterMetadataManifest manifest, ClusterState previousState, String localNodeId) + throws IOException { + assert manifest.getDiffManifest() != null : "Diff manifest null which is required for downloading cluster state"; ClusterStateDiffManifest diff = manifest.getDiffManifest(); List updatedIndices = diff.getIndicesUpdated().stream().map(idx -> { Optional uploadedIndexMetadataOptional = manifest.getIndices() @@ -1586,6 +1583,19 @@ private boolean isValidClusterUUID(ClusterMetadataManifest manifest) { return manifest.isClusterUUIDCommitted(); } + // package private setters which are required for injecting mock managers; these setters are not supposed to be used elsewhere + void setRemoteIndexMetadataManager(RemoteIndexMetadataManager remoteIndexMetadataManager) { + this.remoteIndexMetadataManager = remoteIndexMetadataManager; + } + + void setRemoteGlobalMetadataManager(RemoteGlobalMetadataManager remoteGlobalMetadataManager) { + this.remoteGlobalMetadataManager = remoteGlobalMetadataManager; + } + + void setRemoteClusterStateAttributesManager(RemoteClusterStateAttributesManager remoteClusterStateAttributeManager) { + this.remoteClusterStateAttributesManager = remoteClusterStateAttributeManager; + } + public void writeMetadataFailed() { getStats().stateFailed(); } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java index fe9ed57fa77b8..3f2edd1a6c5a5 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateAttributesManagerTests.java @@ -8,9 +8,7 @@ package org.opensearch.gateway.remote; -import org.opensearch.Version; import org.opensearch.action.LatchedActionListener; -import org.opensearch.cluster.AbstractNamedDiffable; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.Custom; @@ -23,11 +21,8 @@ import org.opensearch.common.util.TestCapturingListener; import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.compress.Compressor; import org.opensearch.core.compress.NoneCompressor; -import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.gateway.remote.model.RemoteClusterBlocks; import org.opensearch.gateway.remote.model.RemoteClusterStateCustoms; import org.opensearch.gateway.remote.model.RemoteDiscoveryNodes; @@ -51,6 +46,10 @@ import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTES_CURRENT_CODEC_VERSION; import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom2; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom3; +import static 
org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom4; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_EPHEMERAL_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER; @@ -338,22 +337,22 @@ public void testGetAsyncMetadataReadAction_Exception() throws IOException, Inter public void testGetUpdatedCustoms() { Map previousCustoms = Map.of( - TestCustom1.TYPE, - new TestCustom1("data1"), - TestCustom2.TYPE, - new TestCustom2("data2"), - TestCustom3.TYPE, - new TestCustom3("data3") + TestClusterStateCustom1.TYPE, + new TestClusterStateCustom1("data1"), + TestClusterStateCustom2.TYPE, + new TestClusterStateCustom2("data2"), + TestClusterStateCustom3.TYPE, + new TestClusterStateCustom3("data3") ); ClusterState previousState = ClusterState.builder(new ClusterName("test-cluster")).customs(previousCustoms).build(); Map currentCustoms = Map.of( - TestCustom2.TYPE, - new TestCustom2("data2"), - TestCustom3.TYPE, - new TestCustom3("data3-changed"), - TestCustom4.TYPE, - new TestCustom4("data4") + TestClusterStateCustom2.TYPE, + new TestClusterStateCustom2("data2"), + TestClusterStateCustom3.TYPE, + new TestClusterStateCustom3("data3-changed"), + TestClusterStateCustom4.TYPE, + new TestClusterStateCustom4("data4") ); ClusterState currentState = ClusterState.builder(new ClusterName("test-cluster")).customs(currentCustoms).build(); @@ -368,136 +367,14 @@ public void testGetUpdatedCustoms() { assertThat(customsDiff.getDeletes(), is(Collections.emptyList())); Map expectedCustoms = Map.of( - TestCustom3.TYPE, - new TestCustom3("data3-changed"), - TestCustom4.TYPE, - new TestCustom4("data4") + TestClusterStateCustom3.TYPE, + new TestClusterStateCustom3("data3-changed"), + TestClusterStateCustom4.TYPE, + new TestClusterStateCustom4("data4") ); customsDiff = remoteClusterStateAttributesManager.getUpdatedCustoms(currentState, previousState, true, false); assertThat(customsDiff.getUpserts(), is(expectedCustoms)); - assertThat(customsDiff.getDeletes(), is(List.of(TestCustom1.TYPE))); - } - - private static abstract class AbstractTestCustom extends AbstractNamedDiffable implements ClusterState.Custom { - - private final String value; - - AbstractTestCustom(String value) { - this.value = value; - } - - AbstractTestCustom(StreamInput in) throws IOException { - this.value = in.readString(); - } - - @Override - public Version getMinimalSupportedVersion() { - return Version.CURRENT; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(value); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - return builder; - } - - @Override - public boolean isPrivate() { - return true; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - AbstractTestCustom that = (AbstractTestCustom) o; - - if (!value.equals(that.value)) return false; - - return true; - } - - @Override - public int hashCode() { - return value.hashCode(); - } - } - - private static class TestCustom1 extends AbstractTestCustom { - - private static final String TYPE = "custom_1"; - - TestCustom1(String value) { - super(value); - } - - TestCustom1(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - } - 
- private static class TestCustom2 extends AbstractTestCustom { - - private static final String TYPE = "custom_2"; - - TestCustom2(String value) { - super(value); - } - - TestCustom2(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - } - - private static class TestCustom3 extends AbstractTestCustom { - - private static final String TYPE = "custom_3"; - - TestCustom3(String value) { - super(value); - } - - TestCustom3(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } - } - - private static class TestCustom4 extends AbstractTestCustom { - - private static final String TYPE = "custom_4"; - - TestCustom4(String value) { - super(value); - } - - TestCustom4(StreamInput in) throws IOException { - super(in); - } - - @Override - public String getWriteableName() { - return TYPE; - } + assertThat(customsDiff.getDeletes(), is(List.of(TestClusterStateCustom1.TYPE))); } } diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java index d983a4d8c4027..91ddd64cc2ccc 100644 --- a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateServiceTests.java @@ -9,12 +9,14 @@ package org.opensearch.gateway.remote; import org.opensearch.Version; +import org.opensearch.action.LatchedActionListener; import org.opensearch.cluster.ClusterModule; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.RepositoryCleanupInProgress; -import org.opensearch.cluster.RepositoryCleanupInProgress.Entry; +import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.DiffableStringMap; import org.opensearch.cluster.metadata.IndexGraveyard; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; @@ -22,10 +24,12 @@ import org.opensearch.cluster.metadata.TemplatesMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.remote.InternalRemoteRoutingTableService; import org.opensearch.cluster.routing.remote.NoopRemoteRoutingTableService; import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.CheckedRunnable; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; @@ -38,6 +42,7 @@ import org.opensearch.common.compress.DeflateCompressor; import org.opensearch.common.lucene.store.ByteArrayIndexInput; import org.opensearch.common.network.NetworkModule; +import org.opensearch.common.remote.AbstractRemoteWritableBlobEntity; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; @@ -45,13 +50,17 @@ import org.opensearch.core.action.ActionListener; import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; +import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.compress.Compressor; import org.opensearch.core.index.Index; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedIndexMetadata; import org.opensearch.gateway.remote.ClusterMetadataManifest.UploadedMetadataAttribute; import org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest; import org.opensearch.gateway.remote.model.RemoteClusterStateManifestInfo; -import org.opensearch.gateway.remote.model.RemoteIndexMetadata; +import org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata; +import org.opensearch.gateway.remote.model.RemoteReadResult; +import org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata; import org.opensearch.index.remote.RemoteIndexPathUploader; import org.opensearch.indices.IndicesModule; import org.opensearch.repositories.FilterRepository; @@ -61,7 +70,6 @@ import org.opensearch.repositories.blobstore.ChecksumWritableBlobStoreFormat; import org.opensearch.repositories.fs.FsRepository; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.TestCustomMetadata; import org.opensearch.test.VersionUtils; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -75,7 +83,6 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -92,23 +99,51 @@ import java.util.stream.Stream; import org.mockito.ArgumentCaptor; +import org.mockito.ArgumentMatcher; import org.mockito.ArgumentMatchers; import org.mockito.Mockito; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; import static java.util.stream.Collectors.toList; import static org.opensearch.common.util.FeatureFlags.REMOTE_PUBLICATION_EXPERIMENTAL; import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V1; +import static org.opensearch.gateway.remote.ClusterMetadataManifest.CODEC_V2; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_BLOCKS; +import static org.opensearch.gateway.remote.RemoteClusterStateAttributesManager.CLUSTER_STATE_ATTRIBUTE; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata2; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata3; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom1; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom2; +import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.TestClusterStateCustom3; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.FORMAT_PARAMS; import static org.opensearch.gateway.remote.RemoteClusterStateUtils.getFormattedIndexFileName; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocks.CLUSTER_BLOCKS_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteClusterBlocksTests.randomClusterBlocks; import static org.opensearch.gateway.remote.model.RemoteClusterMetadataManifest.MANIFEST_CURRENT_CODEC_VERSION; +import static 
org.opensearch.gateway.remote.model.RemoteClusterStateCustoms.CLUSTER_STATE_CUSTOM; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA; import static org.opensearch.gateway.remote.model.RemoteCoordinationMetadata.COORDINATION_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_DELIMITER; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.CUSTOM_METADATA; +import static org.opensearch.gateway.remote.model.RemoteCustomMetadata.readFrom; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodes.DISCOVERY_NODES; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodes.DISCOVERY_NODES_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteDiscoveryNodesTests.getDiscoveryNodes; import static org.opensearch.gateway.remote.model.RemoteGlobalMetadata.GLOBAL_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettings.HASHES_OF_CONSISTENT_SETTINGS_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteHashesOfConsistentSettingsTests.getHashesOfConsistentSettings; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX; +import static org.opensearch.gateway.remote.model.RemoteIndexMetadata.INDEX_METADATA_FORMAT; import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTINGS_METADATA_FORMAT; import static org.opensearch.gateway.remote.model.RemotePersistentSettingsMetadata.SETTING_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA; import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadata.TEMPLATES_METADATA_FORMAT; +import static org.opensearch.gateway.remote.model.RemoteTemplatesMetadataTests.getTemplatesMetadata; +import static org.opensearch.gateway.remote.model.RemoteTransientSettingsMetadata.TRANSIENT_SETTING_METADATA; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_CLUSTER_STATE_REPOSITORY_NAME_ATTRIBUTE_KEY; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_SETTINGS_ATTRIBUTE_KEY_PREFIX; import static org.opensearch.node.remotestore.RemoteStoreNodeAttribute.REMOTE_STORE_REPOSITORY_TYPE_ATTRIBUTE_KEY_FORMAT; @@ -120,11 +155,19 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class RemoteClusterStateServiceTests extends OpenSearchTestCase { @@ -135,11 +178,22 @@ public class RemoteClusterStateServiceTests extends OpenSearchTestCase { private Supplier repositoriesServiceSupplier; private RepositoriesService repositoriesService; private 
BlobStoreRepository blobStoreRepository; + private Compressor compressor; private BlobStore blobStore; private Settings settings; private boolean publicationEnabled; + private NamedWriteableRegistry namedWriteableRegistry; private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + private static final String NODE_ID = "test-node"; + private static final String COORDINATION_METADATA_FILENAME = "coordination-metadata-file__1"; + private static final String PERSISTENT_SETTINGS_FILENAME = "persistent-settings-file__1"; + private static final String TRANSIENT_SETTINGS_FILENAME = "transient-settings-file__1"; + private static final String TEMPLATES_METADATA_FILENAME = "templates-metadata-file__1"; + private static final String DISCOVERY_NODES_FILENAME = "discovery-nodes-file__1"; + private static final String CLUSTER_BLOCKS_FILENAME = "cluster-blocks-file__1"; + private static final String HASHES_OF_CONSISTENT_SETTINGS_FILENAME = "consistent-settings-hashes-file__1"; + @Before public void setup() { repositoriesServiceSupplier = mock(Supplier.class); @@ -164,6 +218,11 @@ public void setup() { .put(RemoteClusterStateService.REMOTE_CLUSTER_STATE_ENABLED_SETTING.getKey(), true) .put("node.attr." + REMOTE_STORE_ROUTING_TABLE_REPOSITORY_NAME_ATTRIBUTE_KEY, "routing_repository") .build(); + List writeableEntries = ClusterModule.getNamedWriteables(); + writeableEntries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, CustomMetadata1.TYPE, CustomMetadata1::new)); + writeableEntries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, CustomMetadata2.TYPE, CustomMetadata2::new)); + writeableEntries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, CustomMetadata3.TYPE, CustomMetadata3::new)); + namedWriteableRegistry = new NamedWriteableRegistry(writeableEntries); clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = mock(ClusterService.class); @@ -176,6 +235,7 @@ public void setup() { ).flatMap(Function.identity()).collect(toList()) ); + compressor = new DeflateCompressor(); blobStoreRepository = mock(BlobStoreRepository.class); blobStore = mock(BlobStore.class); when(blobStoreRepository.blobStore()).thenReturn(blobStore); @@ -191,7 +251,7 @@ public void setup() { () -> 0L, threadPool, List.of(new RemoteIndexPathUploader(threadPool, settings, repositoriesServiceSupplier, clusterSettings)), - writableRegistry() + namedWriteableRegistry ); } @@ -275,6 +335,7 @@ public void testWriteFullMetadataSuccess() throws IOException { assertThat(manifest.getSettingsMetadata(), notNullValue()); assertThat(manifest.getTemplatesMetadata(), notNullValue()); assertFalse(manifest.getCustomMetadataMap().isEmpty()); + assertThat(manifest.getCustomMetadataMap().containsKey(CustomMetadata1.TYPE), is(true)); assertThat(manifest.getClusterBlocksMetadata(), nullValue()); assertThat(manifest.getDiscoveryNodesMetadata(), nullValue()); assertThat(manifest.getTransientSettingsMetadata(), nullValue()); @@ -298,7 +359,12 @@ public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException writableRegistry() ); final ClusterState clusterState = generateClusterStateWithOneIndex().nodes(nodesWithLocalNodeClusterManager()) - .customs(Map.of(RepositoryCleanupInProgress.TYPE, new RepositoryCleanupInProgress(List.of(new Entry("test-repo", 10L))))) + .customs( + Map.of( + RepositoryCleanupInProgress.TYPE, + new RepositoryCleanupInProgress(List.of(new RepositoryCleanupInProgress.Entry("test-repo", 10L))) + ) + ) .build(); 
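+        // The RepositoryCleanupInProgress custom built above is a cluster state custom; with publication enabled
+        // the manifest written below should record it under clusterStateCustomMap, which the trailing assertions check.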
mockBlobStoreObjects(); remoteClusterStateService.start(); @@ -330,6 +396,7 @@ public void testWriteFullMetadataSuccessPublicationEnabled() throws IOException assertThat(manifest.getSettingsMetadata(), notNullValue()); assertThat(manifest.getTemplatesMetadata(), notNullValue()); assertFalse(manifest.getCustomMetadataMap().isEmpty()); + assertThat(manifest.getCustomMetadataMap().containsKey(CustomMetadata1.TYPE), is(true)); assertThat(manifest.getClusterStateCustomMap().size(), is(1)); assertThat(manifest.getClusterStateCustomMap().containsKey(RepositoryCleanupInProgress.TYPE), is(true)); } @@ -388,7 +455,7 @@ public void testWriteFullMetadataInParallelSuccess() throws IOException { .provideStream(0) .getInputStream() .readAllBytes(); - IndexMetadata writtenIndexMetadata = RemoteIndexMetadata.INDEX_METADATA_FORMAT.deserialize( + IndexMetadata writtenIndexMetadata = INDEX_METADATA_FORMAT.deserialize( capturedWriteContext.get("metadata").getFileName(), blobStoreRepository.getNamedXContentRegistry(), new BytesArray(writtenBytes) @@ -445,24 +512,35 @@ public void testTimeoutWhileWritingManifestFile() throws IOException { ArgumentCaptor> actionListenerArgumentCaptor = ArgumentCaptor.forClass(ActionListener.class); - doAnswer((i) -> { // For Global Metadata - actionListenerArgumentCaptor.getValue().onResponse(null); - return null; - }).doAnswer((i) -> { // For Index Metadata - actionListenerArgumentCaptor.getValue().onResponse(null); - return null; - }).doAnswer((i) -> { + doAnswer((i) -> { // For Manifest file perform No Op, so latch in code will timeout return null; }).when(container).asyncBlobUpload(any(WriteContext.class), actionListenerArgumentCaptor.capture()); remoteClusterStateService.start(); - try { - remoteClusterStateService.writeFullMetadata(clusterState, randomAlphaOfLength(10)); - } catch (Exception e) { - assertTrue(e instanceof RemoteStateTransferException); - assertTrue(e.getMessage().contains("Timed out waiting for transfer of following metadata to complete")); - } + RemoteClusterStateService spiedService = spy(remoteClusterStateService); + when( + spiedService.writeMetadataInParallel( + any(), + anyList(), + anyMap(), + anyMap(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyBoolean(), + anyMap(), + anyBoolean(), + anyList() + ) + ).thenReturn(new RemoteClusterStateUtils.UploadedMetadataResults()); + RemoteStateTransferException ex = expectThrows( + RemoteStateTransferException.class, + () -> spiedService.writeFullMetadata(clusterState, randomAlphaOfLength(10)) + ); + assertTrue(ex.getMessage().contains("Timed out waiting for transfer of manifest file to complete")); } public void testWriteFullMetadataInParallelFailureForIndexMetadata() throws IOException { @@ -658,6 +736,991 @@ public void testWriteIncrementalMetadataSuccessWhenPublicationEnabled() throws I assertThat(manifest.getIndicesRouting().size(), is(1)); } + public void testTimeoutWhileWritingMetadata() throws IOException { + AsyncMultiStreamBlobContainer container = (AsyncMultiStreamBlobContainer) mockBlobStoreObjects(AsyncMultiStreamBlobContainer.class); + doNothing().when(container).asyncBlobUpload(any(), any()); + int writeTimeout = 2; + Settings newSettings = Settings.builder() + .put("cluster.remote_store.state.global_metadata.upload_timeout", writeTimeout + "s") + .build(); + clusterSettings.applySettings(newSettings); + remoteClusterStateService.start(); + RemoteStateTransferException exception = assertThrows( + RemoteStateTransferException.class, + () -> 
remoteClusterStateService.writeMetadataInParallel( + ClusterState.EMPTY_STATE, + emptyList(), + emptyMap(), + emptyMap(), + true, + true, + true, + true, + true, + true, + emptyMap(), + true, + emptyList() + ) + ); + assertTrue(exception.getMessage().startsWith("Timed out waiting for transfer of following metadata to complete")); + } + + public void testGetClusterStateForManifest_IncludeEphemeral() throws IOException { + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + RemoteReadResult mockedResult = mock(RemoteReadResult.class); + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + RemoteClusterStateAttributesManager mockedClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockedClusterStateAttributeManager); + ArgumentCaptor> listenerArgumentCaptor = ArgumentCaptor.forClass( + LatchedActionListener.class + ); + when(mockedIndexManager.getAsyncIndexMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + when(mockedGlobalMetadataManager.getAsyncMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + when(mockedClusterStateAttributeManager.getAsyncMetadataReadAction(anyString(), any(), listenerArgumentCaptor.capture())) + .thenReturn(() -> listenerArgumentCaptor.getValue().onResponse(mockedResult)); + when(mockedResult.getComponent()).thenReturn(COORDINATION_METADATA); + RemoteClusterStateService mockService = spy(remoteClusterStateService); + mockService.getClusterStateForManifest(ClusterName.DEFAULT.value(), manifest, NODE_ID, true); + verify(mockService, times(1)).readClusterStateInParallel( + any(), + eq(manifest), + eq(manifest.getClusterUUID()), + eq(NODE_ID), + eq(manifest.getIndices()), + eq(manifest.getCustomMetadataMap()), + eq(true), + eq(true), + eq(true), + eq(true), + eq(true), + eq(true), + eq(manifest.getIndicesRouting()), + eq(true), + eq(manifest.getClusterStateCustomMap()), + eq(true) + ); + } + + public void testGetClusterStateForManifest_ExcludeEphemeral() throws IOException { + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + RemoteReadResult mockedResult = mock(RemoteReadResult.class); + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + RemoteClusterStateAttributesManager mockedClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + ArgumentCaptor> listenerArgumentCaptor = ArgumentCaptor.forClass( + LatchedActionListener.class + ); + when(mockedIndexManager.getAsyncIndexMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + 
when(mockedGlobalMetadataManager.getAsyncMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(mockedResult) + ); + when(mockedClusterStateAttributeManager.getAsyncMetadataReadAction(anyString(), any(), listenerArgumentCaptor.capture())) + .thenReturn(() -> listenerArgumentCaptor.getValue().onResponse(mockedResult)); + when(mockedResult.getComponent()).thenReturn(COORDINATION_METADATA); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockedClusterStateAttributeManager); + RemoteClusterStateService spiedService = spy(remoteClusterStateService); + spiedService.getClusterStateForManifest(ClusterName.DEFAULT.value(), manifest, NODE_ID, false); + verify(spiedService, times(1)).readClusterStateInParallel( + any(), + eq(manifest), + eq(manifest.getClusterUUID()), + eq(NODE_ID), + eq(manifest.getIndices()), + eq(manifest.getCustomMetadataMap()), + eq(true), + eq(true), + eq(false), + eq(true), + eq(false), + eq(false), + eq(emptyList()), + eq(false), + eq(emptyMap()), + eq(false) + ); + } + + public void testGetClusterStateFromManifest_CodecV1() throws IOException { + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().codecVersion(CODEC_V1).build(); + mockBlobStoreObjects(); + remoteClusterStateService.start(); + final Index index = new Index("test-index", "index-uuid"); + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID()) + .build(); + final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings) + .numberOfShards(1) + .numberOfReplicas(0) + .build(); + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager); + ArgumentCaptor> listenerArgumentCaptor = ArgumentCaptor.forClass( + LatchedActionListener.class + ); + when(mockedIndexManager.getAsyncIndexMetadataReadAction(any(), anyString(), listenerArgumentCaptor.capture())).thenReturn( + () -> listenerArgumentCaptor.getValue().onResponse(new RemoteReadResult(indexMetadata, INDEX, INDEX)) + ); + when(mockedGlobalMetadataManager.getGlobalMetadata(anyString(), eq(manifest))).thenReturn(Metadata.EMPTY_METADATA); + RemoteClusterStateService spiedService = spy(remoteClusterStateService); + spiedService.getClusterStateForManifest(ClusterName.DEFAULT.value(), manifest, NODE_ID, true); + verify(spiedService, times(1)).readClusterStateInParallel( + any(), + eq(manifest), + eq(manifest.getClusterUUID()), + eq(NODE_ID), + eq(manifest.getIndices()), + eq(emptyMap()), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(false), + eq(emptyList()), + eq(false), + eq(emptyMap()), + eq(false) + ); + verify(mockedGlobalMetadataManager, times(1)).getGlobalMetadata(eq(manifest.getClusterUUID()), eq(manifest)); + } + + public void testGetClusterStateUsingDiffFailWhenDiffManifestAbsent() { + ClusterMetadataManifest manifest = ClusterMetadataManifest.builder().build(); + ClusterState previousState = 
ClusterState.EMPTY_STATE; + AssertionError error = assertThrows( + AssertionError.class, + () -> remoteClusterStateService.getClusterStateUsingDiff(manifest, previousState, "test-node") + ); + assertEquals("Diff manifest null which is required for downloading cluster state", error.getMessage()); + } + + public void testGetClusterStateUsingDiff_NoDiff() throws IOException { + ClusterStateDiffManifest diffManifest = ClusterStateDiffManifest.builder().build(); + ClusterState clusterState = generateClusterStateWithAllAttributes().build(); + ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .diffManifest(diffManifest) + .stateUUID(clusterState.stateUUID()) + .stateVersion(clusterState.version()) + .metadataVersion(clusterState.metadata().version()) + .clusterUUID(clusterState.getMetadata().clusterUUID()) + .routingTableVersion(clusterState.routingTable().version()) + .build(); + ClusterState updatedClusterState = remoteClusterStateService.getClusterStateUsingDiff(manifest, clusterState, "test-node"); + assertEquals(clusterState.getClusterName(), updatedClusterState.getClusterName()); + assertEquals(clusterState.metadata().clusterUUID(), updatedClusterState.metadata().clusterUUID()); + assertEquals(clusterState.metadata().version(), updatedClusterState.metadata().version()); + assertEquals(clusterState.metadata().coordinationMetadata(), updatedClusterState.metadata().coordinationMetadata()); + assertEquals(clusterState.metadata().getIndices(), updatedClusterState.metadata().getIndices()); + assertEquals(clusterState.metadata().templates(), updatedClusterState.metadata().templates()); + assertEquals(clusterState.metadata().persistentSettings(), updatedClusterState.metadata().persistentSettings()); + assertEquals(clusterState.metadata().transientSettings(), updatedClusterState.metadata().transientSettings()); + assertEquals(clusterState.metadata().getCustoms(), updatedClusterState.metadata().getCustoms()); + assertEquals(clusterState.metadata().hashesOfConsistentSettings(), updatedClusterState.metadata().hashesOfConsistentSettings()); + assertEquals(clusterState.getCustoms(), updatedClusterState.getCustoms()); + assertEquals(clusterState.stateUUID(), updatedClusterState.stateUUID()); + assertEquals(clusterState.version(), updatedClusterState.version()); + assertEquals(clusterState.getRoutingTable().version(), updatedClusterState.getRoutingTable().version()); + assertEquals(clusterState.getRoutingTable().getIndicesRouting(), updatedClusterState.getRoutingTable().getIndicesRouting()); + assertEquals(clusterState.getNodes(), updatedClusterState.getNodes()); + assertEquals(clusterState.getBlocks(), updatedClusterState.getBlocks()); + } + + public void testGetClusterStateUsingDiff() throws IOException { + ClusterState clusterState = generateClusterStateWithAllAttributes().build(); + ClusterState.Builder expectedClusterStateBuilder = ClusterState.builder(clusterState); + Metadata.Builder mb = Metadata.builder(clusterState.metadata()); + ClusterStateDiffManifest.Builder diffManifestBuilder = ClusterStateDiffManifest.builder(); + ClusterMetadataManifest.Builder manifestBuilder = ClusterMetadataManifest.builder(); + BlobContainer blobContainer = mockBlobStoreObjects(); + if (randomBoolean()) { + // updated coordination metadata + CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder() + .term(clusterState.metadata().coordinationMetadata().term() + 1) + .build(); + mb.coordinationMetadata(coordinationMetadata); + diffManifestBuilder.coordinationMetadataUpdated(true); 
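+            // Every randomized branch below follows the same pattern: mutate the expected state, flag the change in
+            // the diff manifest, register the uploaded blob in the manifest, and stub blobContainer.readBlob to
+            // return the serialized entity.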
+ manifestBuilder.coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, COORDINATION_METADATA_FILENAME)); + when(blobContainer.readBlob(COORDINATION_METADATA_FILENAME)).thenAnswer(i -> { + BytesReference bytes = COORDINATION_METADATA_FORMAT.serialize( + coordinationMetadata, + COORDINATION_METADATA_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated templates + TemplatesMetadata templatesMetadata = TemplatesMetadata.builder() + .put( + IndexTemplateMetadata.builder("template" + randomAlphaOfLength(3)) + .patterns(Arrays.asList("bar-*", "foo-*")) + .settings(Settings.builder().put("random_index_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)).build()) + .build() + ) + .build(); + mb.templates(templatesMetadata); + diffManifestBuilder.templatesMetadataUpdated(true); + manifestBuilder.templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, TEMPLATES_METADATA_FILENAME)); + when(blobContainer.readBlob(TEMPLATES_METADATA_FILENAME)).thenAnswer(i -> { + BytesReference bytes = TEMPLATES_METADATA_FORMAT.serialize( + templatesMetadata, + TEMPLATES_METADATA_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated persistent settings + Settings persistentSettings = Settings.builder() + .put("random_persistent_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + mb.persistentSettings(persistentSettings); + diffManifestBuilder.settingsMetadataUpdated(true); + manifestBuilder.settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, PERSISTENT_SETTINGS_FILENAME)); + when(blobContainer.readBlob(PERSISTENT_SETTINGS_FILENAME)).thenAnswer(i -> { + BytesReference bytes = RemotePersistentSettingsMetadata.SETTINGS_METADATA_FORMAT.serialize( + persistentSettings, + PERSISTENT_SETTINGS_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated transient settings + Settings transientSettings = Settings.builder() + .put("random_transient_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + mb.transientSettings(transientSettings); + diffManifestBuilder.transientSettingsMetadataUpdate(true); + manifestBuilder.transientSettingsMetadata( + new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, TRANSIENT_SETTINGS_FILENAME) + ); + when(blobContainer.readBlob(TRANSIENT_SETTINGS_FILENAME)).thenAnswer(i -> { + BytesReference bytes = RemoteTransientSettingsMetadata.SETTINGS_METADATA_FORMAT.serialize( + transientSettings, + TRANSIENT_SETTINGS_FILENAME, + compressor, + FORMAT_PARAMS + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated customs + CustomMetadata2 addedCustom = new CustomMetadata2(randomAlphaOfLength(10)); + mb.putCustom(addedCustom.getWriteableName(), addedCustom); + diffManifestBuilder.customMetadataUpdated(Collections.singletonList(addedCustom.getWriteableName())); + manifestBuilder.customMetadataMap( + Map.of(addedCustom.getWriteableName(), new UploadedMetadataAttribute(addedCustom.getWriteableName(), "custom-md2-file__1")) + ); + when(blobContainer.readBlob("custom-md2-file__1")).thenAnswer(i -> { + ChecksumWritableBlobStoreFormat customMetadataFormat = new ChecksumWritableBlobStoreFormat<>( + "custom", + is -> readFrom(is, 
namedWriteableRegistry, addedCustom.getWriteableName()) + ); + BytesReference bytes = customMetadataFormat.serialize(addedCustom, "custom-md2-file__1", compressor); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + Set customsToRemove = clusterState.metadata().customs().keySet(); + customsToRemove.forEach(mb::removeCustom); + diffManifestBuilder.customMetadataDeleted(new ArrayList<>(customsToRemove)); + } + if (randomBoolean()) { + // updated hashes of consistent settings + DiffableStringMap hashesOfConsistentSettings = new DiffableStringMap(Map.of("secure_setting_key", "secure_setting_value")); + mb.hashesOfConsistentSettings(hashesOfConsistentSettings); + diffManifestBuilder.hashesOfConsistentSettingsUpdated(true); + manifestBuilder.hashesOfConsistentSettings( + new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS_FILENAME) + ); + when(blobContainer.readBlob(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)).thenAnswer(i -> { + BytesReference bytes = HASHES_OF_CONSISTENT_SETTINGS_FORMAT.serialize( + hashesOfConsistentSettings, + HASHES_OF_CONSISTENT_SETTINGS_FILENAME, + compressor + ); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // updated index metadata + IndexMetadata indexMetadata = new IndexMetadata.Builder("add-test-index").settings( + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_UUID, "add-test-index-uuid") + .build() + ).numberOfShards(1).numberOfReplicas(0).build(); + mb.put(indexMetadata, true); + diffManifestBuilder.indicesUpdated(Collections.singletonList(indexMetadata.getIndex().getName())); + manifestBuilder.indices( + List.of( + new UploadedIndexMetadata(indexMetadata.getIndex().getName(), indexMetadata.getIndexUUID(), "add-test-index-file__2") + ) + ); + when(blobContainer.readBlob("add-test-index-file__2")).thenAnswer(i -> { + BytesReference bytes = INDEX_METADATA_FORMAT.serialize(indexMetadata, "add-test-index-file__2", compressor, FORMAT_PARAMS); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // remove index metadata + Set indicesToDelete = clusterState.metadata().getIndices().keySet(); + indicesToDelete.forEach(mb::remove); + diffManifestBuilder.indicesDeleted(new ArrayList<>(indicesToDelete)); + } + if (randomBoolean()) { + // update nodes + DiscoveryNode node = new DiscoveryNode("node_id", buildNewFakeTransportAddress(), Version.CURRENT); + DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterState.nodes()).add(node); + expectedClusterStateBuilder.nodes(nodesBuilder.build()); + diffManifestBuilder.discoveryNodesUpdated(true); + manifestBuilder.discoveryNodesMetadata(new UploadedMetadataAttribute(DISCOVERY_NODES, DISCOVERY_NODES_FILENAME)); + when(blobContainer.readBlob(DISCOVERY_NODES_FILENAME)).thenAnswer(invocationOnMock -> { + BytesReference bytes = DISCOVERY_NODES_FORMAT.serialize(nodesBuilder.build(), DISCOVERY_NODES_FILENAME, compressor); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + } + if (randomBoolean()) { + // update blocks + ClusterBlocks newClusterBlock = randomClusterBlocks(); + expectedClusterStateBuilder.blocks(newClusterBlock); + diffManifestBuilder.clusterBlocksUpdated(true); + manifestBuilder.clusterBlocksMetadata(new UploadedMetadataAttribute(CLUSTER_BLOCKS, CLUSTER_BLOCKS_FILENAME)); + 
when(blobContainer.readBlob(CLUSTER_BLOCKS_FILENAME)).thenAnswer(invocationOnMock -> { + BytesReference bytes = CLUSTER_BLOCKS_FORMAT.serialize(newClusterBlock, CLUSTER_BLOCKS_FILENAME, compressor); + return new ByteArrayInputStream(bytes.streamInput().readAllBytes()); + }); + + } + ClusterState expectedClusterState = expectedClusterStateBuilder.metadata(mb).build(); + ClusterStateDiffManifest diffManifest = diffManifestBuilder.build(); + manifestBuilder.diffManifest(diffManifest) + .stateUUID(clusterState.stateUUID()) + .stateVersion(clusterState.version()) + .metadataVersion(clusterState.metadata().version()) + .clusterUUID(clusterState.getMetadata().clusterUUID()) + .routingTableVersion(clusterState.getRoutingTable().version()); + + remoteClusterStateService.start(); + ClusterState updatedClusterState = remoteClusterStateService.getClusterStateUsingDiff( + manifestBuilder.build(), + clusterState, + NODE_ID + ); + + assertEquals(expectedClusterState.getClusterName(), updatedClusterState.getClusterName()); + assertEquals(expectedClusterState.stateUUID(), updatedClusterState.stateUUID()); + assertEquals(expectedClusterState.version(), updatedClusterState.version()); + assertEquals(expectedClusterState.metadata().clusterUUID(), updatedClusterState.metadata().clusterUUID()); + assertEquals(expectedClusterState.getRoutingTable().version(), updatedClusterState.getRoutingTable().version()); + assertNotEquals(diffManifest.isClusterBlocksUpdated(), updatedClusterState.getBlocks().equals(clusterState.getBlocks())); + assertNotEquals(diffManifest.isDiscoveryNodesUpdated(), updatedClusterState.getNodes().equals(clusterState.getNodes())); + assertNotEquals( + diffManifest.isCoordinationMetadataUpdated(), + updatedClusterState.getMetadata().coordinationMetadata().equals(clusterState.getMetadata().coordinationMetadata()) + ); + assertNotEquals( + diffManifest.isTemplatesMetadataUpdated(), + updatedClusterState.getMetadata().templates().equals(clusterState.getMetadata().getTemplates()) + ); + assertNotEquals( + diffManifest.isSettingsMetadataUpdated(), + updatedClusterState.getMetadata().persistentSettings().equals(clusterState.getMetadata().persistentSettings()) + ); + assertNotEquals( + diffManifest.isTransientSettingsMetadataUpdated(), + updatedClusterState.getMetadata().transientSettings().equals(clusterState.getMetadata().transientSettings()) + ); + diffManifest.getIndicesUpdated().forEach(indexName -> { + IndexMetadata updatedIndexMetadata = updatedClusterState.metadata().index(indexName); + IndexMetadata originalIndexMetadata = clusterState.metadata().index(indexName); + assertNotEquals(originalIndexMetadata, updatedIndexMetadata); + }); + diffManifest.getCustomMetadataUpdated().forEach(customMetadataName -> { + Metadata.Custom updatedCustomMetadata = updatedClusterState.metadata().custom(customMetadataName); + Metadata.Custom originalCustomMetadata = clusterState.metadata().custom(customMetadataName); + assertNotEquals(originalCustomMetadata, updatedCustomMetadata); + }); + diffManifest.getClusterStateCustomUpdated().forEach(clusterStateCustomName -> { + ClusterState.Custom updateClusterStateCustom = updatedClusterState.customs().get(clusterStateCustomName); + ClusterState.Custom originalClusterStateCustom = clusterState.customs().get(clusterStateCustomName); + assertNotEquals(originalClusterStateCustom, updateClusterStateCustom); + }); + diffManifest.getIndicesRoutingUpdated().forEach(indexName -> { + IndexRoutingTable updatedIndexRoutingTable = 
updatedClusterState.getRoutingTable().getIndicesRouting().get(indexName); + IndexRoutingTable originalIndexingRoutingTable = clusterState.getRoutingTable().getIndicesRouting().get(indexName); + assertNotEquals(originalIndexingRoutingTable, updatedIndexRoutingTable); + }); + diffManifest.getIndicesDeleted() + .forEach(indexName -> { assertFalse(updatedClusterState.metadata().getIndices().containsKey(indexName)); }); + diffManifest.getCustomMetadataDeleted().forEach(customMetadataName -> { + assertFalse(updatedClusterState.metadata().customs().containsKey(customMetadataName)); + }); + diffManifest.getClusterStateCustomDeleted().forEach(clusterStateCustomName -> { + assertFalse(updatedClusterState.customs().containsKey(clusterStateCustomName)); + }); + diffManifest.getIndicesRoutingDeleted().forEach(indexName -> { + assertFalse(updatedClusterState.getRoutingTable().getIndicesRouting().containsKey(indexName)); + }); + } + + public void testReadClusterStateInParallel_TimedOut() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + BlobContainer container = mockBlobStoreObjects(); + int readTimeOut = 2; + Settings newSettings = Settings.builder().put("cluster.remote_store.state.read_timeout", readTimeOut + "s").build(); + clusterSettings.applySettings(newSettings); + when(container.readBlob(anyString())).thenAnswer(invocationOnMock -> { + Thread.sleep(readTimeOut * 1000 + 100); + return null; + }); + remoteClusterStateService.start(); + RemoteStateTransferException exception = expectThrows( + RemoteStateTransferException.class, + () -> remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + emptyList(), + emptyMap(), + true, + true, + true, + true, + true, + true, + emptyList(), + true, + emptyMap(), + true + ) + ); + assertEquals("Timed out waiting to read cluster state from remote within timeout " + readTimeOut + "s", exception.getMessage()); + } + + public void testReadClusterStateInParallel_ExceptionDuringRead() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().build(); + BlobContainer container = mockBlobStoreObjects(); + Exception mockException = new IOException("mock exception"); + when(container.readBlob(anyString())).thenThrow(mockException); + remoteClusterStateService.start(); + RemoteStateTransferException exception = expectThrows( + RemoteStateTransferException.class, + () -> remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + emptyList(), + emptyMap(), + true, + true, + true, + true, + true, + true, + emptyList(), + true, + emptyMap(), + true + ) + ); + assertEquals("Exception during reading cluster state from remote", exception.getMessage()); + assertTrue(exception.getSuppressed().length > 0); + assertEquals(mockException, exception.getSuppressed()[0]); + } + + public void testReadClusterStateInParallel_UnexpectedResult() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + // index already present in previous state + List uploadedIndexMetadataList = new ArrayList<>( + List.of(new UploadedIndexMetadata("test-index", "test-index-uuid", "test-index-file__2")) + ); + // new index 
to be added + List newIndicesToRead = List.of( + new UploadedIndexMetadata("test-index-1", "test-index-1-uuid", "test-index-1-file__2") + ); + uploadedIndexMetadataList.addAll(newIndicesToRead); + // existing custom metadata + Map uploadedCustomMetadataMap = new HashMap<>( + Map.of( + "custom_md_1", + new UploadedMetadataAttribute("custom_md_1", "test-custom1-file__1"), + "custom_md_2", + new UploadedMetadataAttribute("custom_md_2", "test-custom2-file__1") + ) + ); + // new custom metadata to be added + Map newCustomMetadataMap = Map.of( + "custom_md_3", + new UploadedMetadataAttribute("custom_md_3", "test-custom3-file__1") + ); + uploadedCustomMetadataMap.putAll(newCustomMetadataMap); + // already existing cluster state customs + Map uploadedClusterStateCustomMap = new HashMap<>( + Map.of( + "custom_1", + new UploadedMetadataAttribute("custom_1", "test-cluster-state-custom1-file__1"), + "custom_2", + new UploadedMetadataAttribute("custom_2", "test-cluster-state-custom2-file__1") + ) + ); + // new customs uploaded + Map newClusterStateCustoms = Map.of( + "custom_3", + new UploadedMetadataAttribute("custom_3", "test-cluster-state-custom3-file__1") + ); + uploadedClusterStateCustomMap.putAll(newClusterStateCustoms); + ClusterMetadataManifest manifest = ClusterMetadataManifest.builder() + .clusterUUID(previousClusterState.getMetadata().clusterUUID()) + .indices(uploadedIndexMetadataList) + .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, COORDINATION_METADATA_FILENAME)) + .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, PERSISTENT_SETTINGS_FILENAME)) + .transientSettingsMetadata(new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, TRANSIENT_SETTINGS_FILENAME)) + .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, TEMPLATES_METADATA_FILENAME)) + .hashesOfConsistentSettings( + new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS_FILENAME) + ) + .customMetadataMap(uploadedCustomMetadataMap) + .discoveryNodesMetadata(new UploadedMetadataAttribute(DISCOVERY_NODES, DISCOVERY_NODES_FILENAME)) + .clusterBlocksMetadata(new UploadedMetadataAttribute(CLUSTER_BLOCKS, CLUSTER_BLOCKS_FILENAME)) + .clusterStateCustomMetadataMap(uploadedClusterStateCustomMap) + .build(); + + RemoteReadResult mockResult = mock(RemoteReadResult.class); + RemoteIndexMetadataManager mockIndexMetadataManager = mock(RemoteIndexMetadataManager.class); + CheckedRunnable mockRunnable = mock(CheckedRunnable.class); + ArgumentCaptor> latchCapture = ArgumentCaptor.forClass(LatchedActionListener.class); + when(mockIndexMetadataManager.getAsyncIndexMetadataReadAction(anyString(), anyString(), latchCapture.capture())).thenReturn( + mockRunnable + ); + RemoteGlobalMetadataManager mockGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + when(mockGlobalMetadataManager.getAsyncMetadataReadAction(any(), anyString(), latchCapture.capture())).thenReturn(mockRunnable); + RemoteClusterStateAttributesManager mockClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + when(mockClusterStateAttributeManager.getAsyncMetadataReadAction(anyString(), any(), latchCapture.capture())).thenReturn( + mockRunnable + ); + doAnswer(invocationOnMock -> { + latchCapture.getValue().onResponse(mockResult); + return null; + }).when(mockRunnable).run(); + when(mockResult.getComponent()).thenReturn("mock-result"); + remoteClusterStateService.start(); + 
remoteClusterStateService.setRemoteIndexMetadataManager(mockIndexMetadataManager); + remoteClusterStateService.setRemoteGlobalMetadataManager(mockGlobalMetadataManager); + remoteClusterStateService.setRemoteClusterStateAttributesManager(mockClusterStateAttributeManager); + IllegalStateException exception = expectThrows( + IllegalStateException.class, + () -> remoteClusterStateService.readClusterStateInParallel( + previousClusterState, + manifest, + manifest.getClusterUUID(), + NODE_ID, + newIndicesToRead, + newCustomMetadataMap, + true, + true, + true, + true, + true, + true, + emptyList(), + true, + newClusterStateCustoms, + true + ) + ); + assertEquals("Unknown component: mock-result", exception.getMessage()); + newIndicesToRead.forEach( + uploadedIndexMetadata -> verify(mockIndexMetadataManager, times(1)).getAsyncIndexMetadataReadAction( + eq(previousClusterState.getMetadata().clusterUUID()), + eq(uploadedIndexMetadata.getUploadedFilename()), + any() + ) + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(COORDINATION_METADATA_FILENAME)), + eq(COORDINATION_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(PERSISTENT_SETTINGS_FILENAME)), + eq(SETTING_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TRANSIENT_SETTINGS_FILENAME)), + eq(TRANSIENT_SETTING_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(TEMPLATES_METADATA_FILENAME)), + eq(TEMPLATES_METADATA), + any() + ); + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)), + eq(HASHES_OF_CONSISTENT_SETTINGS), + any() + ); + newCustomMetadataMap.keySet().forEach(uploadedCustomMetadataKey -> { + verify(mockGlobalMetadataManager, times(1)).getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(newCustomMetadataMap.get(uploadedCustomMetadataKey).getUploadedFilename())), + eq(uploadedCustomMetadataKey), + any() + ); + }); + verify(mockClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(DISCOVERY_NODES), + argThat(new BlobNameMatcher(DISCOVERY_NODES_FILENAME)), + any() + ); + verify(mockClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(CLUSTER_BLOCKS), + argThat(new BlobNameMatcher(CLUSTER_BLOCKS_FILENAME)), + any() + ); + newClusterStateCustoms.keySet().forEach(uploadedClusterStateCustomMetadataKey -> { + verify(mockClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction( + eq(String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, uploadedClusterStateCustomMetadataKey)), + argThat(new BlobNameMatcher(newClusterStateCustoms.get(uploadedClusterStateCustomMetadataKey).getUploadedFilename())), + any() + ); + }); + } + + public void testReadClusterStateInParallel_Success() throws IOException { + ClusterState previousClusterState = generateClusterStateWithAllAttributes().build(); + String indexFilename = "test-index-1-file__2"; + String customMetadataFilename = "test-custom3-file__1"; + String clusterStateCustomFilename = "test-cluster-state-custom3-file__1"; + // index already present in previous state + List uploadedIndexMetadataList = new ArrayList<>( + List.of(new UploadedIndexMetadata("test-index", "test-index-uuid", "test-index-file__2")) + ); + // new index to be added + List newIndicesToRead = List.of( + new 
UploadedIndexMetadata("test-index-1", "test-index-1-uuid", indexFilename) + ); + uploadedIndexMetadataList.addAll(newIndicesToRead); + // existing custom metadata + Map uploadedCustomMetadataMap = new HashMap<>( + Map.of( + "custom_md_1", + new UploadedMetadataAttribute("custom_md_1", "test-custom1-file__1"), + "custom_md_2", + new UploadedMetadataAttribute("custom_md_2", "test-custom2-file__1") + ) + ); + // new custom metadata to be added + Map newCustomMetadataMap = Map.of( + "custom_md_3", + new UploadedMetadataAttribute("custom_md_3", customMetadataFilename) + ); + uploadedCustomMetadataMap.putAll(newCustomMetadataMap); + // already existing cluster state customs + Map uploadedClusterStateCustomMap = new HashMap<>( + Map.of( + "custom_1", + new UploadedMetadataAttribute("custom_1", "test-cluster-state-custom1-file__1"), + "custom_2", + new UploadedMetadataAttribute("custom_2", "test-cluster-state-custom2-file__1") + ) + ); + // new customs uploaded + Map newClusterStateCustoms = Map.of( + "custom_3", + new UploadedMetadataAttribute("custom_3", clusterStateCustomFilename) + ); + uploadedClusterStateCustomMap.putAll(newClusterStateCustoms); + + ClusterMetadataManifest manifest = generateClusterMetadataManifestWithAllAttributes().indices(uploadedIndexMetadataList) + .customMetadataMap(uploadedCustomMetadataMap) + .clusterStateCustomMetadataMap(uploadedClusterStateCustomMap) + .build(); + + IndexMetadata newIndexMetadata = new IndexMetadata.Builder("test-index-1").state(IndexMetadata.State.OPEN) + .settings(Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build()) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + CustomMetadata3 customMetadata3 = new CustomMetadata3("custom_md_3"); + CoordinationMetadata updatedCoordinationMetadata = CoordinationMetadata.builder() + .term(previousClusterState.metadata().coordinationMetadata().term() + 1) + .build(); + Settings updatedPersistentSettings = Settings.builder() + .put("random_persistent_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + Settings updatedTransientSettings = Settings.builder() + .put("random_transient_setting_" + randomAlphaOfLength(3), randomAlphaOfLength(5)) + .build(); + TemplatesMetadata updatedTemplateMetadata = getTemplatesMetadata(); + DiffableStringMap updatedHashesOfConsistentSettings = getHashesOfConsistentSettings(); + DiscoveryNodes updatedDiscoveryNodes = getDiscoveryNodes(); + ClusterBlocks updatedClusterBlocks = randomClusterBlocks(); + TestClusterStateCustom3 updatedClusterStateCustom3 = new TestClusterStateCustom3("custom_3"); + + RemoteIndexMetadataManager mockedIndexManager = mock(RemoteIndexMetadataManager.class); + RemoteGlobalMetadataManager mockedGlobalMetadataManager = mock(RemoteGlobalMetadataManager.class); + RemoteClusterStateAttributesManager mockedClusterStateAttributeManager = mock(RemoteClusterStateAttributesManager.class); + + when( + mockedIndexManager.getAsyncIndexMetadataReadAction( + eq(manifest.getClusterUUID()), + eq(indexFilename), + any(LatchedActionListener.class) + ) + ).thenAnswer(invocationOnMock -> { + LatchedActionListener latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class); + return (CheckedRunnable) () -> latchedActionListener.onResponse( + new RemoteReadResult(newIndexMetadata, INDEX, "test-index-1") + ); + }); + when( + mockedGlobalMetadataManager.getAsyncMetadataReadAction( + argThat(new BlobNameMatcher(customMetadataFilename)), + eq("custom_md_3"), + any() + ) + 
+        when(
+            mockedGlobalMetadataManager.getAsyncMetadataReadAction(
+                argThat(new BlobNameMatcher(COORDINATION_METADATA_FILENAME)),
+                eq(COORDINATION_METADATA),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedCoordinationMetadata, COORDINATION_METADATA, COORDINATION_METADATA)
+            );
+        });
+        when(
+            mockedGlobalMetadataManager.getAsyncMetadataReadAction(
+                argThat(new BlobNameMatcher(PERSISTENT_SETTINGS_FILENAME)),
+                eq(SETTING_METADATA),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedPersistentSettings, SETTING_METADATA, SETTING_METADATA)
+            );
+        });
+        when(
+            mockedGlobalMetadataManager.getAsyncMetadataReadAction(
+                argThat(new BlobNameMatcher(TRANSIENT_SETTINGS_FILENAME)),
+                eq(TRANSIENT_SETTING_METADATA),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedTransientSettings, TRANSIENT_SETTING_METADATA, TRANSIENT_SETTING_METADATA)
+            );
+        });
+        when(
+            mockedGlobalMetadataManager.getAsyncMetadataReadAction(
+                argThat(new BlobNameMatcher(TEMPLATES_METADATA_FILENAME)),
+                eq(TEMPLATES_METADATA),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedTemplateMetadata, TEMPLATES_METADATA, TEMPLATES_METADATA)
+            );
+        });
+        when(
+            mockedGlobalMetadataManager.getAsyncMetadataReadAction(
+                argThat(new BlobNameMatcher(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)),
+                eq(HASHES_OF_CONSISTENT_SETTINGS),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedHashesOfConsistentSettings, HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS)
+            );
+        });
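+        // Discovery nodes, cluster blocks and cluster-state customs are cluster-state attributes,
+        // so they resolve through the attributes manager rather than the global metadata manager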
+        when(
+            mockedClusterStateAttributeManager.getAsyncMetadataReadAction(
+                eq(DISCOVERY_NODES),
+                argThat(new BlobNameMatcher(DISCOVERY_NODES_FILENAME)),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedDiscoveryNodes, CLUSTER_STATE_ATTRIBUTE, DISCOVERY_NODES)
+            );
+        });
+        when(
+            mockedClusterStateAttributeManager.getAsyncMetadataReadAction(
+                eq(CLUSTER_BLOCKS),
+                argThat(new BlobNameMatcher(CLUSTER_BLOCKS_FILENAME)),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(updatedClusterBlocks, CLUSTER_STATE_ATTRIBUTE, CLUSTER_BLOCKS)
+            );
+        });
+        when(
+            mockedClusterStateAttributeManager.getAsyncMetadataReadAction(
+                eq(String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, updatedClusterStateCustom3.getWriteableName())),
+                argThat(new BlobNameMatcher(clusterStateCustomFilename)),
+                any()
+            )
+        ).thenAnswer(invocationOnMock -> {
+            LatchedActionListener<RemoteReadResult> latchedActionListener = invocationOnMock.getArgument(2, LatchedActionListener.class);
+            return (CheckedRunnable<?>) () -> latchedActionListener.onResponse(
+                new RemoteReadResult(
+                    updatedClusterStateCustom3,
+                    CLUSTER_STATE_ATTRIBUTE,
+                    String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, updatedClusterStateCustom3.getWriteableName())
+                )
+            );
+        });
+
+        remoteClusterStateService.start();
+        remoteClusterStateService.setRemoteIndexMetadataManager(mockedIndexManager);
+        remoteClusterStateService.setRemoteGlobalMetadataManager(mockedGlobalMetadataManager);
+        remoteClusterStateService.setRemoteClusterStateAttributesManager(mockedClusterStateAttributeManager);
+
+        ClusterState updatedClusterState = remoteClusterStateService.readClusterStateInParallel(
+            previousClusterState,
+            manifest,
+            manifest.getClusterUUID(),
+            NODE_ID,
+            newIndicesToRead,
+            newCustomMetadataMap,
+            true,
+            true,
+            true,
+            true,
+            true,
+            true,
+            emptyList(),
+            true,
+            newClusterStateCustoms,
+            true
+        );
+
+        assertEquals(uploadedIndexMetadataList.size(), updatedClusterState.metadata().indices().size());
+        assertTrue(updatedClusterState.metadata().indices().containsKey("test-index-1"));
+        assertEquals(newIndexMetadata, updatedClusterState.metadata().index(newIndexMetadata.getIndex()));
+        uploadedCustomMetadataMap.keySet().forEach(key -> assertTrue(updatedClusterState.metadata().customs().containsKey(key)));
+        assertEquals(customMetadata3, updatedClusterState.metadata().custom(customMetadata3.getWriteableName()));
+        assertEquals(
+            previousClusterState.metadata().coordinationMetadata().term() + 1,
+            updatedClusterState.metadata().coordinationMetadata().term()
+        );
+        assertEquals(updatedPersistentSettings, updatedClusterState.metadata().persistentSettings());
+        assertEquals(updatedTransientSettings, updatedClusterState.metadata().transientSettings());
+        assertEquals(updatedTemplateMetadata.getTemplates(), updatedClusterState.metadata().templates());
+        assertEquals(updatedHashesOfConsistentSettings, updatedClusterState.metadata().hashesOfConsistentSettings());
+        assertEquals(updatedDiscoveryNodes.getSize(), updatedClusterState.getNodes().getSize());
+        updatedDiscoveryNodes.getNodes().forEach((nodeId, node) -> assertEquals(updatedClusterState.getNodes().get(nodeId), node));
+        assertEquals(updatedDiscoveryNodes.getClusterManagerNodeId(), updatedClusterState.getNodes().getClusterManagerNodeId());
+        assertEquals(updatedClusterBlocks, updatedClusterState.blocks());
+        uploadedClusterStateCustomMap.keySet().forEach(key -> assertTrue(updatedClusterState.customs().containsKey(key)));
+        assertEquals(updatedClusterStateCustom3, updatedClusterState.custom("custom_3"));
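+        // beyond state equality, confirm each component was fetched exactly once, from the blob
+        // name recorded in the manifest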
+        newIndicesToRead.forEach(
+            uploadedIndexMetadata -> verify(mockedIndexManager, times(1)).getAsyncIndexMetadataReadAction(
+                eq(previousClusterState.getMetadata().clusterUUID()),
+                eq(uploadedIndexMetadata.getUploadedFilename()),
+                any()
+            )
+        );
+        verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction(
+            argThat(new BlobNameMatcher(COORDINATION_METADATA_FILENAME)),
+            eq(COORDINATION_METADATA),
+            any()
+        );
+        verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction(
+            argThat(new BlobNameMatcher(PERSISTENT_SETTINGS_FILENAME)),
+            eq(SETTING_METADATA),
+            any()
+        );
+        verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction(
+            argThat(new BlobNameMatcher(TRANSIENT_SETTINGS_FILENAME)),
+            eq(TRANSIENT_SETTING_METADATA),
+            any()
+        );
+        verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction(
+            argThat(new BlobNameMatcher(TEMPLATES_METADATA_FILENAME)),
+            eq(TEMPLATES_METADATA),
+            any()
+        );
+        verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction(
+            argThat(new BlobNameMatcher(HASHES_OF_CONSISTENT_SETTINGS_FILENAME)),
+            eq(HASHES_OF_CONSISTENT_SETTINGS),
+            any()
+        );
+        newCustomMetadataMap.keySet().forEach(uploadedCustomMetadataKey -> {
+            verify(mockedGlobalMetadataManager, times(1)).getAsyncMetadataReadAction(
+                argThat(new BlobNameMatcher(newCustomMetadataMap.get(uploadedCustomMetadataKey).getUploadedFilename())),
+                eq(uploadedCustomMetadataKey),
+                any()
+            );
+        });
+        verify(mockedClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction(
+            eq(DISCOVERY_NODES),
+            argThat(new BlobNameMatcher(DISCOVERY_NODES_FILENAME)),
+            any()
+        );
+        verify(mockedClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction(
+            eq(CLUSTER_BLOCKS),
+            argThat(new BlobNameMatcher(CLUSTER_BLOCKS_FILENAME)),
+            any()
+        );
+        newClusterStateCustoms.keySet().forEach(uploadedClusterStateCustomMetadataKey -> {
+            verify(mockedClusterStateAttributeManager, times(1)).getAsyncMetadataReadAction(
+                eq(String.join(CUSTOM_DELIMITER, CLUSTER_STATE_CUSTOM, uploadedClusterStateCustomMetadataKey)),
+                argThat(new BlobNameMatcher(newClusterStateCustoms.get(uploadedClusterStateCustomMetadataKey).getUploadedFilename())),
+                any()
+            );
+        });
+    }
+
     /*
      * Here we will verify the migration of manifest file from codec V0.
      *
@@ -1857,7 +2920,7 @@ private void mockObjectsForGettingPreviousClusterUUID(
         BlobContainer[] mockBlobContainerOrderedArray = new BlobContainer[mockBlobContainerOrderedList.size()];
         mockBlobContainerOrderedList.toArray(mockBlobContainerOrderedArray);
         when(blobStore.blobContainer(ArgumentMatchers.any())).thenReturn(uuidBlobContainer, mockBlobContainerOrderedArray);
-        when(blobStoreRepository.getCompressor()).thenReturn(new DeflateCompressor());
+        when(blobStoreRepository.getCompressor()).thenReturn(compressor);
     }
 
     private ClusterMetadataManifest generateV1ClusterMetadataManifest(
@@ -1986,7 +3049,7 @@ private void mockBlobContainer(
         }
         String fileName = uploadedIndexMetadata.getUploadedFilename();
         when(blobContainer.readBlob(getFormattedIndexFileName(fileName))).thenAnswer((invocationOnMock) -> {
-            BytesReference bytesIndexMetadata = RemoteIndexMetadata.INDEX_METADATA_FORMAT.serialize(
+            BytesReference bytesIndexMetadata = INDEX_METADATA_FORMAT.serialize(
                 indexMetadata,
                 fileName,
                 blobStoreRepository.getCompressor(),
@@ -2057,12 +3120,6 @@ private void mockBlobContainerForGlobalMetadata(
             .stream()
             .collect(Collectors.toMap(Map.Entry::getKey, entry -> getFileNameFromPath(entry.getValue().getUploadedFilename())));
 
-        // ChecksumBlobStoreFormat customMetadataFormat = new ChecksumBlobStoreFormat<>(
-        //     "custom",
-        //     METADATA_NAME_PLAIN_FORMAT,
-        //     null
-        // );
-
         ChecksumWritableBlobStoreFormat<Metadata.Custom> customMetadataFormat = new ChecksumWritableBlobStoreFormat<>("custom", null);
         for (Map.Entry<String, String> entry : customFileMap.entrySet()) {
             String custom = entry.getKey();
@@ -2147,32 +3204,105 @@ static ClusterState.Builder generateClusterStateWithOneIndex() {
             .routingTable(RoutingTable.builder().addAsNew(indexMetadata).version(1L).build());
     }
 
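+    // Builds a ClusterState carrying one of every remotely persisted attribute (index, metadata
+    // customs, settings, templates, hashes, discovery nodes, blocks, cluster-state customs), so a
+    // single fixture can drive the full read/write round-trip tests above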
+    static ClusterState.Builder generateClusterStateWithAllAttributes() {
+        final Index index = new Index("test-index", "index-uuid");
+        final Settings idxSettings = Settings.builder()
+            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
+            .build();
+        final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName()).settings(idxSettings)
+            .numberOfShards(1)
+            .numberOfReplicas(0)
+            .build();
+        final CoordinationMetadata coordinationMetadata = CoordinationMetadata.builder().term(1L).build();
+        final Settings settings = Settings.builder().put("mock-settings", true).build();
+        final Settings transientSettings = Settings.builder().put("mock-transient-settings", true).build();
+        final DiffableStringMap hashesOfConsistentSettings = new DiffableStringMap(emptyMap());
+        final TemplatesMetadata templatesMetadata = TemplatesMetadata.builder()
+            .put(IndexTemplateMetadata.builder("template-1").patterns(List.of("test-index*")).build())
+            .build();
+        final CustomMetadata1 customMetadata1 = new CustomMetadata1("custom-metadata-1");
+        final CustomMetadata2 customMetadata2 = new CustomMetadata2("custom-metadata-2");
+        final DiscoveryNodes nodes = nodesWithLocalNodeClusterManager();
+        final ClusterBlocks clusterBlocks = randomClusterBlocks();
+        final TestClusterStateCustom1 custom1 = new RemoteClusterStateTestUtils.TestClusterStateCustom1("custom-1");
+        final TestClusterStateCustom2 custom2 = new RemoteClusterStateTestUtils.TestClusterStateCustom2("custom-2");
+        return ClusterState.builder(ClusterName.DEFAULT)
+            .version(1L)
+            .stateUUID("state-uuid")
+            .metadata(
+                Metadata.builder()
+                    .version(randomNonNegativeLong())
+                    .put(indexMetadata, true)
+                    .clusterUUID("cluster-uuid")
+                    .coordinationMetadata(coordinationMetadata)
+                    .persistentSettings(settings)
+                    .transientSettings(transientSettings)
+                    .hashesOfConsistentSettings(hashesOfConsistentSettings)
+                    .templates(templatesMetadata)
+                    .putCustom(customMetadata1.getWriteableName(), customMetadata1)
+                    .putCustom(customMetadata2.getWriteableName(), customMetadata2)
+                    .build()
+            )
+            .routingTable(RoutingTable.builder().addAsNew(indexMetadata).version(1L).build())
+            .nodes(nodes)
+            .blocks(clusterBlocks)
+            .putCustom(custom1.getWriteableName(), custom1)
+            .putCustom(custom2.getWriteableName(), custom2);
+    }
+
+    static ClusterMetadataManifest.Builder generateClusterMetadataManifestWithAllAttributes() {
+        return ClusterMetadataManifest.builder()
+            .codecVersion(CODEC_V2)
+            .clusterUUID("cluster-uuid")
+            .indices(List.of(new UploadedIndexMetadata("test-index", "test-index-uuid", "test-index-file__2")))
+            .customMetadataMap(
+                Map.of(
+                    "custom_md_1",
+                    new UploadedMetadataAttribute("custom_md_1", "test-custom1-file__1"),
+                    "custom_md_2",
+                    new UploadedMetadataAttribute("custom_md_2", "test-custom2-file__1")
+                )
+            )
+            .coordinationMetadata(new UploadedMetadataAttribute(COORDINATION_METADATA, COORDINATION_METADATA_FILENAME))
+            .settingMetadata(new UploadedMetadataAttribute(SETTING_METADATA, PERSISTENT_SETTINGS_FILENAME))
+            .transientSettingsMetadata(new UploadedMetadataAttribute(TRANSIENT_SETTING_METADATA, TRANSIENT_SETTINGS_FILENAME))
+            .templatesMetadata(new UploadedMetadataAttribute(TEMPLATES_METADATA, TEMPLATES_METADATA_FILENAME))
+            .hashesOfConsistentSettings(
+                new UploadedMetadataAttribute(HASHES_OF_CONSISTENT_SETTINGS, HASHES_OF_CONSISTENT_SETTINGS_FILENAME)
+            )
+            .discoveryNodesMetadata(new UploadedMetadataAttribute(DISCOVERY_NODES, DISCOVERY_NODES_FILENAME))
+            .clusterBlocksMetadata(new UploadedMetadataAttribute(CLUSTER_BLOCKS, CLUSTER_BLOCKS_FILENAME))
+            .clusterStateCustomMetadataMap(
+                Map.of(
+                    "custom_1",
+                    new UploadedMetadataAttribute("custom_1", "test-cluster-state-custom1-file__1"),
+                    "custom_2",
+                    new UploadedMetadataAttribute("custom_2", "test-cluster-state-custom2-file__1")
+                )
+            );
+    }
+
     static DiscoveryNodes nodesWithLocalNodeClusterManager() {
         final DiscoveryNode localNode = new DiscoveryNode("cluster-manager-id", buildNewFakeTransportAddress(), Version.CURRENT);
         return DiscoveryNodes.builder().clusterManagerNodeId("cluster-manager-id").localNodeId("cluster-manager-id").add(localNode).build();
     }
 
-    private static class CustomMetadata1 extends TestCustomMetadata {
-        public static final String TYPE = "custom_md_1";
+    private class BlobNameMatcher implements ArgumentMatcher<AbstractRemoteWritableBlobEntity> {
+        private final String expectedBlobName;
 
-        CustomMetadata1(String data) {
-            super(data);
+        BlobNameMatcher(String expectedBlobName) {
+            this.expectedBlobName = expectedBlobName;
         }
 
         @Override
-        public String getWriteableName() {
-            return TYPE;
+        public boolean matches(AbstractRemoteWritableBlobEntity argument) {
+            return argument != null && expectedBlobName.equals(argument.getFullBlobName());
         }
 
         @Override
-        public Version getMinimalSupportedVersion() {
-            return Version.CURRENT;
-        }
-
-        @Override
-        public EnumSet<Metadata.XContentContext> context() {
-            return EnumSet.of(Metadata.XContentContext.GATEWAY);
+        public String toString() {
+            return "BlobNameMatcher[Expected blobName: " + expectedBlobName + "]";
         }
     }
-
 }
diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java
new file mode 100644
index 0000000000000..b17ffcbaac344
--- /dev/null
+++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteClusterStateTestUtils.java
@@ -0,0 +1,227 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.gateway.remote;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.metadata.Metadata;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.test.TestClusterStateCustom;
+import org.opensearch.test.TestCustomMetadata;
+
+import java.io.IOException;
+import java.util.EnumSet;
+
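+/**
+ * Shared fixtures for the remote cluster state tests: stream-constructible
+ * {@link TestCustomMetadata} and {@link TestClusterStateCustom} implementations
+ * reused across the test suites in this package.
+ */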
+public class RemoteClusterStateTestUtils {
+    public static class CustomMetadata1 extends TestCustomMetadata {
+        public static final String TYPE = "custom_md_1";
+
+        public CustomMetadata1(String data) {
+            super(data);
+        }
+
+        public CustomMetadata1(StreamInput in) throws IOException {
+            super(in.readString());
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public Version getMinimalSupportedVersion() {
+            return Version.CURRENT;
+        }
+
+        @Override
+        public EnumSet<Metadata.XContentContext> context() {
+            return EnumSet.of(Metadata.XContentContext.GATEWAY);
+        }
+    }
+
+    public static class CustomMetadata2 extends TestCustomMetadata {
+        public static final String TYPE = "custom_md_2";
+
+        public CustomMetadata2(String data) {
+            super(data);
+        }
+
+        public CustomMetadata2(StreamInput in) throws IOException {
+            super(in.readString());
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public Version getMinimalSupportedVersion() {
+            return Version.CURRENT;
+        }
+
+        @Override
+        public EnumSet<Metadata.XContentContext> context() {
+            return EnumSet.of(Metadata.XContentContext.GATEWAY);
+        }
+    }
+
+    public static class CustomMetadata3 extends TestCustomMetadata {
+        public static final String TYPE = "custom_md_3";
+
+        public CustomMetadata3(String data) {
+            super(data);
+        }
+
+        public CustomMetadata3(StreamInput in) throws IOException {
+            super(in.readString());
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public Version getMinimalSupportedVersion() {
+            return Version.CURRENT;
+        }
+
+        @Override
+        public EnumSet<Metadata.XContentContext> context() {
+            return EnumSet.of(Metadata.XContentContext.GATEWAY);
+        }
+    }
+
+    public static class CustomMetadata4 extends TestCustomMetadata {
+        public static final String TYPE = "custom_md_4";
+
+        public CustomMetadata4(String data) {
+            super(data);
+        }
+
+        public CustomMetadata4(StreamInput in) throws IOException {
+            super(in.readString());
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public Version getMinimalSupportedVersion() {
+            return Version.CURRENT;
+        }
+
+        @Override
+        public EnumSet<Metadata.XContentContext> context() {
+            return EnumSet.of(Metadata.XContentContext.GATEWAY);
+        }
+    }
+
+    public static class CustomMetadata5 extends TestCustomMetadata {
+        public static final String TYPE = "custom_md_5";
+
+        public CustomMetadata5(String data) {
+            super(data);
+        }
+
+        public CustomMetadata5(StreamInput in) throws IOException {
+            super(in.readString());
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+
+        @Override
+        public Version getMinimalSupportedVersion() {
+            return Version.CURRENT;
+        }
+
+        @Override
+        public EnumSet<Metadata.XContentContext> context() {
+            return EnumSet.of(Metadata.XContentContext.API);
+        }
+    }
+
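+    // Minimal ClusterState.Custom implementations; unlike the metadata customs above, these ride
+    // on the ClusterState itself and exercise the cluster-state attribute read/write paths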
+    public static class TestClusterStateCustom1 extends TestClusterStateCustom {
+
+        public static final String TYPE = "custom_1";
+
+        public TestClusterStateCustom1(String value) {
+            super(value);
+        }
+
+        public TestClusterStateCustom1(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+    }
+
+    public static class TestClusterStateCustom2 extends TestClusterStateCustom {
+
+        public static final String TYPE = "custom_2";
+
+        public TestClusterStateCustom2(String value) {
+            super(value);
+        }
+
+        public TestClusterStateCustom2(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+    }
+
+    public static class TestClusterStateCustom3 extends TestClusterStateCustom {
+
+        public static final String TYPE = "custom_3";
+
+        public TestClusterStateCustom3(String value) {
+            super(value);
+        }
+
+        public TestClusterStateCustom3(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+    }
+
+    public static class TestClusterStateCustom4 extends TestClusterStateCustom {
+
+        public static final String TYPE = "custom_4";
+
+        public TestClusterStateCustom4(String value) {
+            super(value);
+        }
+
+        public TestClusterStateCustom4(StreamInput in) throws IOException {
+            super(in);
+        }
+
+        @Override
+        public String getWriteableName() {
+            return TYPE;
+        }
+    }
+}
diff --git a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java
index c543f986b3e86..917794ec03c3a 100644
--- a/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java
+++ b/server/src/test/java/org/opensearch/gateway/remote/RemoteGlobalMetadataManagerTests.java
@@ -8,7 +8,6 @@
 package org.opensearch.gateway.remote;
 
-import org.opensearch.Version;
 import org.opensearch.action.LatchedActionListener;
 import org.opensearch.cluster.ClusterModule;
 import org.opensearch.cluster.ClusterName;
@@ -18,7 +17,6 @@
 import org.opensearch.cluster.metadata.DiffableStringMap;
 import org.opensearch.cluster.metadata.IndexGraveyard;
 import org.opensearch.cluster.metadata.Metadata;
-import org.opensearch.cluster.metadata.Metadata.XContentContext;
 import org.opensearch.cluster.metadata.TemplatesMetadata;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.network.NetworkModule;
@@ -43,7 +41,6 @@
 import org.opensearch.indices.IndicesModule;
 import org.opensearch.repositories.blobstore.BlobStoreRepository;
 import org.opensearch.test.OpenSearchTestCase;
-import org.opensearch.test.TestCustomMetadata;
 import org.opensearch.threadpool.TestThreadPool;
 import org.opensearch.threadpool.ThreadPool;
 import org.junit.After;
@@ -51,7 +48,6 @@
 
 import java.io.IOException;
 import java.io.InputStream;
-import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
@@ -61,6 +57,11 @@
 import static java.util.stream.Collectors.toList;
 import static org.opensearch.cluster.metadata.Metadata.isGlobalStateEquals;
 import static org.opensearch.common.blobstore.stream.write.WritePriority.URGENT;
+import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata1;
+import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata2;
+import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata3;
+import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata4;
+import static org.opensearch.gateway.remote.RemoteClusterStateTestUtils.CustomMetadata5;
 import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CLUSTER_STATE_PATH_TOKEN;
 import static org.opensearch.gateway.remote.RemoteClusterStateUtils.CUSTOM_DELIMITER;
 import static org.opensearch.gateway.remote.RemoteClusterStateUtils.DELIMITER;
@@ -699,121 +700,5 @@ public void testGetUpdatedCustoms() {
         );
         assertThat(customsDiff.getUpserts(), is(expectedUpserts));
         assertThat(customsDiff.getDeletes(), is(List.of(CustomMetadata1.TYPE)));
-
-
     }
-
-    private static class CustomMetadata1 extends TestCustomMetadata {
-        public static final String TYPE = "custom_md_1";
-
-        CustomMetadata1(String data) {
-            super(data);
-        }
-
-        @Override
-        public String getWriteableName() {
-            return TYPE;
-        }
-
-        @Override
-        public Version getMinimalSupportedVersion() {
-            return Version.CURRENT;
-        }
-
-        @Override
-        public EnumSet<Metadata.XContentContext> context() {
-            return EnumSet.of(Metadata.XContentContext.GATEWAY);
-        }
-    }
-
-    private static class CustomMetadata2 extends TestCustomMetadata {
-        public static final String TYPE = "custom_md_2";
-
-        CustomMetadata2(String data) {
-            super(data);
-        }
-
-        @Override
-        public String getWriteableName() {
-            return TYPE;
-        }
-
-        @Override
-        public Version getMinimalSupportedVersion() {
-            return Version.CURRENT;
-        }
-
-        @Override
-        public EnumSet<Metadata.XContentContext> context() {
-            return EnumSet.of(Metadata.XContentContext.GATEWAY);
-        }
-    }
-
-    private static class CustomMetadata3 extends TestCustomMetadata {
-        public static final String TYPE = "custom_md_3";
-
-        CustomMetadata3(String data) {
-            super(data);
-        }
-
-        @Override
-        public String getWriteableName() {
-            return TYPE;
-        }
-
-        @Override
-        public Version getMinimalSupportedVersion() {
-            return Version.CURRENT;
-        }
-
-        @Override
-        public EnumSet<Metadata.XContentContext> context() {
-            return EnumSet.of(Metadata.XContentContext.GATEWAY);
-        }
-    }
-
-    private static class CustomMetadata4 extends TestCustomMetadata {
-        public static final String TYPE = "custom_md_4";
-
-        CustomMetadata4(String data) {
-            super(data);
-        }
-
-        @Override
-        public String getWriteableName() {
-            return TYPE;
-        }
-
-        @Override
-        public Version getMinimalSupportedVersion() {
-            return Version.CURRENT;
-        }
-
-        @Override
-        public EnumSet<Metadata.XContentContext> context() {
-            return EnumSet.of(Metadata.XContentContext.GATEWAY);
-        }
-    }
-
-    private static class CustomMetadata5 extends TestCustomMetadata {
-        public static final String TYPE = "custom_md_5";
-
-        CustomMetadata5(String data) {
-            super(data);
-        }
-
-        @Override
-        public String getWriteableName() {
-            return TYPE;
-        }
-
-        @Override
-        public Version getMinimalSupportedVersion() {
-            return Version.CURRENT;
-        }
-
-        @Override
-        public EnumSet<Metadata.XContentContext> context() {
-            return EnumSet.of(XContentContext.API);
-        }
-    }
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java b/test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java
new file mode 100644
index 0000000000000..ac32b8d227eda
--- /dev/null
+++ b/test/framework/src/main/java/org/opensearch/test/TestClusterStateCustom.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.test;
+
+import org.opensearch.Version;
+import org.opensearch.cluster.AbstractNamedDiffable;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+
+public abstract class TestClusterStateCustom extends AbstractNamedDiffable<ClusterState.Custom> implements ClusterState.Custom {
+
+    private final String value;
+
+    protected TestClusterStateCustom(String value) {
+        this.value = value;
+    }
+
+    protected TestClusterStateCustom(StreamInput in) throws IOException {
+        this.value = in.readString();
+    }
+
+    @Override
+    public Version getMinimalSupportedVersion() {
+        return Version.CURRENT;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(value);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        return builder;
+    }
+
+    @Override
+    public boolean isPrivate() {
+        return true;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+
+        TestClusterStateCustom that = (TestClusterStateCustom) o;
+
+        if (!value.equals(that.value)) return false;
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return value.hashCode();
+    }
+}

From bda839377195fe3f665cb08b2ebf5263690e757c Mon Sep 17 00:00:00 2001
From: gaobinlong
Date: Wed, 10 Jul 2024 19:35:09 +0800
Subject: [PATCH 12/12] Update version check for the bug fix of
 match_phrase_prefix_query not working on text field with multiple values and
 index_prefixes (#14703)

Signed-off-by: Gao Binlong
---
 .../rest-api-spec/test/search/190_index_prefix_search.yml | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
index 8b031c132f979..6a946fb264560 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/190_index_prefix_search.yml
@@ -138,8 +138,8 @@ setup:
 ---
 "search index prefixes with multiple values":
   - skip:
-      version: " - 2.99.99"
-      reason: "the bug was fixed in 3.0.0"
+      version: " - 2.15.99"
+      reason: "the bug was fixed in 2.16.0"
   - do:
       search:
         rest_total_hits_as_int: true
@@ -154,8 +154,8 @@ setup:
 ---
 "search index prefixes with multiple values and custom position_increment_gap":
   - skip:
-      version: " - 2.99.99"
-      reason: "the bug was fixed in 3.0.0"
+      version: " - 2.15.99"
+      reason: "the bug was fixed in 2.16.0"
   - do:
       search:
         rest_total_hits_as_int: true