From ebd8ce5b5067ddcb24686c67f062a2d01fe82f0b Mon Sep 17 00:00:00 2001
From: Eric
Date: Wed, 8 Nov 2023 01:23:42 +0000
Subject: [PATCH] revert commits back to after 2.11 release

Signed-off-by: Eric
---
 common/build.gradle | 4 +-
 .../sql/common/setting/Settings.java | 9 +-
 .../sql/datasource/DataSourceService.java | 10 +-
 .../datasource/model/DataSourceMetadata.java | 81 +-
 .../sql/analysis/AnalyzerTestBase.java | 5 -
 .../datasource/DataSourceTableScanTest.java | 2 -
 datasources/build.gradle | 4 +-
 .../PatchDataSourceActionRequest.java | 49 -
 .../PatchDataSourceActionResponse.java | 31 -
 .../rest/RestDataSourceQueryAction.java | 66 +-
 .../service/DataSourceServiceImpl.java | 50 +-
 .../TransportCreateDataSourceAction.java | 44 +-
 .../TransportDeleteDataSourceAction.java | 3 -
 .../TransportGetDataSourceAction.java | 3 -
 .../TransportPatchDataSourceAction.java | 77 --
 .../TransportUpdateDataSourceAction.java | 3 -
 .../utils/XContentParserUtils.java | 72 --
 .../service/DataSourceServiceImplTest.java | 47 +-
 .../TransportCreateDataSourceActionTest.java | 53 +-
 .../TransportDeleteDataSourceActionTest.java | 15 -
 .../TransportGetDataSourceActionTest.java | 14 -
 .../TransportPatchDataSourceActionTest.java | 91 --
 .../TransportUpdateDataSourceActionTest.java | 14 -
 .../utils/XContentParserUtilsTest.java | 75 --
 docs/user/admin/settings.rst | 107 --
 docs/user/interfaces/asyncqueryinterface.rst | 44 -
 docs/user/ppl/admin/datasources.rst | 14 -
 integ-test/build.gradle | 2 +-
 .../sql/datasource/DataSourceAPIsIT.java | 85 --
 .../sql/legacy/SQLIntegTestCase.java | 10 -
 .../sql/ppl/InformationSchemaCommandIT.java | 2 -
 .../ppl/PrometheusDataSourceCommandsIT.java | 1 -
 .../sql/ppl/ShowDataSourcesCommandIT.java | 2 -
 legacy/build.gradle | 2 +-
 .../sql/legacy/metrics/MetricFactory.java | 13 -
 .../sql/legacy/metrics/MetricName.java | 27 +-
 .../sql/legacy/utils/MetricUtils.java | 22 -
 opensearch/build.gradle | 2 +-
 .../setting/OpenSearchSettings.java | 86 --
 .../org/opensearch/sql/plugin/SQLPlugin.java | 56 +-
 .../plugin/rest/RestQuerySettingsAction.java | 9 -
 ppl/build.gradle | 4 +-
 prometheus/build.gradle | 2 +-
 spark/build.gradle | 49 +-
 .../src/main/antlr/FlintSparkSqlExtensions.g4 | 49 -
 spark/src/main/antlr/SparkSqlBase.g4 | 6 -
 spark/src/main/antlr/SqlBaseLexer.g4 | 1 -
 spark/src/main/antlr/SqlBaseParser.g4 | 3 +-
 .../AsyncQueryExecutorServiceImpl.java | 21 +-
 ...chAsyncQueryJobMetadataStorageService.java | 161 ++-
 .../model/AsyncQueryExecutionResponse.java | 1 -
 .../spark/asyncquery/model/AsyncQueryId.java | 35 -
 .../model/AsyncQueryJobMetadata.java | 145 +--
 .../model/SparkSubmitParameters.java | 17 +-
 .../sql/spark/client/EmrClientImpl.java | 4 +-
 .../spark/client/EmrServerlessClientImpl.java | 39 +-
 .../sql/spark/client/StartJobRequest.java | 2 -
 .../cluster/ClusterManagerEventListener.java | 148 ---
 .../spark/cluster/FlintIndexRetention.java | 148 ---
 .../sql/spark/cluster/IndexCleanup.java | 64 -
 .../spark/data/constants/SparkConstants.java | 8 +-
 .../spark/dispatcher/AsyncQueryHandler.java | 50 -
 .../spark/dispatcher/BatchQueryHandler.java | 94 --
 .../sql/spark/dispatcher/IndexDMLHandler.java | 116 --
 .../dispatcher/InteractiveQueryHandler.java | 135 ---
 .../dispatcher/SparkQueryDispatcher.java | 290 +++--
 .../dispatcher/StreamingQueryHandler.java | 75 --
 .../model/DispatchQueryContext.java | 21 -
 .../model/DispatchQueryRequest.java | 3 -
 .../model/DispatchQueryResponse.java | 4 +-
 .../model/FullyQualifiedTableName.java | 22 -
 .../dispatcher/model/IndexDMLResult.java | 74 --
 .../model/IndexQueryActionType.java | 15 -
 .../dispatcher/model/IndexQueryDetails.java | 105 --
 .../sql/spark/dispatcher/model/JobType.java | 37 -
 .../session/CreateSessionRequest.java | 60 -
 .../execution/session/InteractiveSession.java | 140 ---
 .../sql/spark/execution/session/Session.java | 43 -
 .../spark/execution/session/SessionId.java | 31 -
 .../execution/session/SessionManager.java | 58 -
 .../spark/execution/session/SessionModel.java | 169 ---
 .../spark/execution/session/SessionState.java | 43 -
 .../spark/execution/session/SessionType.java | 29 -
 .../execution/statement/QueryRequest.java | 17 -
 .../spark/execution/statement/Statement.java | 103 --
 .../execution/statement/StatementId.java | 23 -
 .../execution/statement/StatementModel.java | 204 ----
 .../execution/statement/StatementState.java | 41 -
 .../execution/statestore/StateModel.java | 34 -
 .../execution/statestore/StateStore.java | 360 ------
 .../sql/spark/flint/FlintIndexMetadata.java | 14 +-
 .../spark/flint/FlintIndexMetadataReader.java | 6 +-
 .../flint/FlintIndexMetadataReaderImpl.java | 6 +-
 .../sql/spark/flint/FlintIndexState.java | 54 -
 .../sql/spark/flint/FlintIndexStateModel.java | 150 ---
 .../spark/flint/operation/FlintIndexOp.java | 111 --
 .../flint/operation/FlintIndexOpCancel.java | 76 --
 .../flint/operation/FlintIndexOpDelete.java | 39 -
 ...DefaultSparkSqlFunctionResponseHandle.java | 1 +
 .../ConcurrencyLimitExceededException.java | 13 -
 .../leasemanager/DefaultLeaseManager.java | 105 --
 .../sql/spark/leasemanager/LeaseManager.java | 19 -
 .../leasemanager/model/LeaseRequest.java | 18 -
 .../response/JobExecutionResponseReader.java | 8 +-
 .../sql/spark/response/SparkResponse.java | 8 +-
 .../rest/model/CreateAsyncQueryRequest.java | 15 +-
 .../rest/model/CreateAsyncQueryResponse.java | 2 -
 .../opensearch/sql/spark/utils/IDUtils.java | 25 -
 .../sql/spark/utils/SQLQueryUtils.java | 120 +-
 .../query_execution_request_mapping.yml | 44 -
 .../query_execution_request_settings.yml | 11 -
 ...AsyncQueryExecutorServiceImplSpecTest.java | 467 -------
 .../AsyncQueryExecutorServiceImplTest.java | 30 +-
 .../AsyncQueryExecutorServiceSpec.java | 314 -----
 .../spark/asyncquery/IndexQuerySpecTest.java | 793 ------------
 ...yncQueryJobMetadataStorageServiceTest.java | 270 ++++-
 .../client/EmrServerlessClientImplTest.java | 75 +-
 .../sql/spark/constants/TestConstants.java | 5 -
 .../spark/dispatcher/IndexDMLHandlerTest.java | 21 -
 .../dispatcher/SparkQueryDispatcherTest.java | 1074 +++++++----------
 .../session/InteractiveSessionTest.java | 231 ----
 .../execution/session/SessionManagerTest.java | 50 -
 .../execution/session/SessionStateTest.java | 20 -
 .../execution/session/SessionTypeTest.java | 20 -
 .../statement/StatementStateTest.java | 20 -
 .../execution/statement/StatementTest.java | 443 -------
 .../FlintIndexMetadataReaderImplTest.java | 61 +-
 .../sql/spark/flint/FlintIndexStateTest.java | 18 -
 .../spark/flint/IndexQueryDetailsTest.java | 106 --
 .../flint/operation/FlintIndexOpTest.java | 61 -
 .../leasemanager/DefaultLeaseManagerTest.java | 41 -
 ...AsyncQueryExecutionResponseReaderTest.java | 4 +-
 .../sql/spark/response/SparkResponseTest.java | 4 +-
 .../model/CreateAsyncQueryRequestTest.java | 52 -
 ...portCreateAsyncQueryRequestActionTest.java | 21 +-
 ...ransportGetAsyncQueryResultActionTest.java | 3 +-
 .../sql/spark/utils/SQLQueryUtilsTest.java | 206 +---
 .../0.1.1/flint_covering_index.json | 37 -
 .../flint-index-mappings/0.1.1/flint_mv.json | 30 -
 .../0.1.1/flint_skipping_index.json | 23 -
 .../flint_covering_index.json | 36 -
 .../flint-index-mappings/flint_mv.json | 42 -
 .../flint_skipping_index.json | 22 -
 .../query_execution_result_mapping.json | 44 -
 sql/build.gradle | 2 +-
 145 files changed, 1270 insertions(+), 8830 deletions(-)
 delete mode 100644 datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionRequest.java
 delete mode 100644 datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionResponse.java
 delete mode 100644 datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceAction.java
 delete mode 100644 datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceActionTest.java
 delete mode 100644 legacy/src/main/java/org/opensearch/sql/legacy/utils/MetricUtils.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryId.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/Session.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionId.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDelete.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java
 delete mode 100644 spark/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java
 delete mode 100644 spark/src/main/resources/query_execution_request_mapping.yml
 delete mode 100644 spark/src/main/resources/query_execution_request_settings.yml
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java
 delete mode 100644 spark/src/test/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequestTest.java
 delete mode 100644 spark/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json
 delete mode 100644 spark/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json
 delete mode 100644 spark/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json
 delete mode 100644 spark/src/test/resources/flint-index-mappings/flint_covering_index.json
 delete mode 100644 spark/src/test/resources/flint-index-mappings/flint_mv.json
 delete mode 100644 spark/src/test/resources/flint-index-mappings/flint_skipping_index.json
 delete mode 100644 spark/src/test/resources/query_execution_result_mapping.json

diff --git a/common/build.gradle b/common/build.gradle
index 3a04e87fe7..0561468d1f 100644
--- a/common/build.gradle
+++ b/common/build.gradle
@@ -34,7 +34,7 @@ repositories {
 dependencies {
     api "org.antlr:antlr4-runtime:4.7.1"
     api group: 'com.google.guava', name: 'guava', version: '32.0.1-jre'
-    api group: 'org.apache.logging.log4j', name: 'log4j-core', version:"${versions.log4j}"
+    api group: 'org.apache.logging.log4j', name: 'log4j-core', version:'2.20.0'
     api group: 'org.apache.commons', name: 'commons-lang3', version: '3.12.0'
     api group: 'com.squareup.okhttp3', name: 'okhttp', version: '4.9.3'
     implementation 'com.github.babbel:okhttp-aws-signer:1.0.2'
@@ -63,4 +63,4 @@ configurations.all {
     resolutionStrategy.force "org.apache.httpcomponents:httpcore:4.4.13"
     resolutionStrategy.force "joda-time:joda-time:2.10.12"
     resolutionStrategy.force "org.slf4j:slf4j-api:1.7.36"
-}
+}
\ No newline at end of file
diff --git a/common/src/main/java/org/opensearch/sql/common/setting/Settings.java b/common/src/main/java/org/opensearch/sql/common/setting/Settings.java
index c9e348dbd4..8daf0e9bf6 100644
--- a/common/src/main/java/org/opensearch/sql/common/setting/Settings.java
+++ b/common/src/main/java/org/opensearch/sql/common/setting/Settings.java
@@ -32,18 +32,11 @@ public enum Key {
     QUERY_SIZE_LIMIT("plugins.query.size_limit"),
     ENCYRPTION_MASTER_KEY("plugins.query.datasources.encryption.masterkey"),
     DATASOURCES_URI_HOSTS_DENY_LIST("plugins.query.datasources.uri.hosts.denylist"),
-    DATASOURCES_LIMIT("plugins.query.datasources.limit"),
 
     METRICS_ROLLING_WINDOW("plugins.query.metrics.rolling_window"),
     METRICS_ROLLING_INTERVAL("plugins.query.metrics.rolling_interval"),
     SPARK_EXECUTION_ENGINE_CONFIG("plugins.query.executionengine.spark.config"),
-    CLUSTER_NAME("cluster.name"),
-    SPARK_EXECUTION_SESSION_LIMIT("plugins.query.executionengine.spark.session.limit"),
-    SPARK_EXECUTION_REFRESH_JOB_LIMIT("plugins.query.executionengine.spark.refresh_job.limit"),
-    SESSION_INDEX_TTL("plugins.query.executionengine.spark.session.index.ttl"),
-    RESULT_INDEX_TTL("plugins.query.executionengine.spark.result.index.ttl"),
-    AUTO_INDEX_MANAGEMENT_ENABLED(
-        "plugins.query.executionengine.spark.auto_index_management.enabled");
+    CLUSTER_NAME("cluster.name");
 
   @Getter private final String keyValue;
diff --git a/core/src/main/java/org/opensearch/sql/datasource/DataSourceService.java b/core/src/main/java/org/opensearch/sql/datasource/DataSourceService.java
index 162fe9e8f8..6dace50f99 100644
--- a/core/src/main/java/org/opensearch/sql/datasource/DataSourceService.java
+++ b/core/src/main/java/org/opensearch/sql/datasource/DataSourceService.java
@@ -5,7 +5,6 @@
 package org.opensearch.sql.datasource;
 
-import java.util.Map;
 import java.util.Set;
 import org.opensearch.sql.datasource.model.DataSource;
 import org.opensearch.sql.datasource.model.DataSourceMetadata;
@@ -57,19 +56,12 @@ public interface DataSourceService {
   void createDataSource(DataSourceMetadata metadata);
 
   /**
-   * Updates {@link DataSource} corresponding to dataSourceMetadata (all fields needed).
+   * Updates {@link DataSource} corresponding to dataSourceMetadata.
    *
    * @param dataSourceMetadata {@link DataSourceMetadata}.
    */
   void updateDataSource(DataSourceMetadata dataSourceMetadata);
 
-  /**
-   * Patches {@link DataSource} corresponding to the given name (only fields to be changed needed).
-   *
-   * @param dataSourceData
-   */
-  void patchDataSource(Map<String, Object> dataSourceData);
-
   /**
    * Deletes {@link DataSource} corresponding to the DataSource name.
   *
diff --git a/core/src/main/java/org/opensearch/sql/datasource/model/DataSourceMetadata.java b/core/src/main/java/org/opensearch/sql/datasource/model/DataSourceMetadata.java
index 9e47f9b37e..866e9cadef 100644
--- a/core/src/main/java/org/opensearch/sql/datasource/model/DataSourceMetadata.java
+++ b/core/src/main/java/org/opensearch/sql/datasource/model/DataSourceMetadata.java
@@ -16,7 +16,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.function.Function;
+import lombok.AllArgsConstructor;
 import lombok.EqualsAndHashCode;
 import lombok.Getter;
 import lombok.Setter;
@@ -25,24 +25,11 @@
 @Getter
 @Setter
+@AllArgsConstructor
 @EqualsAndHashCode
 @JsonIgnoreProperties(ignoreUnknown = true)
 public class DataSourceMetadata {
-  public static final String DEFAULT_RESULT_INDEX = "query_execution_result";
-  public static final int MAX_RESULT_INDEX_NAME_SIZE = 255;
-  // OS doesn’t allow uppercase: https://tinyurl.com/yse2xdbx
-  public static final String RESULT_INDEX_NAME_PATTERN = "[a-z0-9_-]+";
-  public static String INVALID_RESULT_INDEX_NAME_SIZE =
-      "Result index name size must contains less than "
-          + MAX_RESULT_INDEX_NAME_SIZE
-          + " characters";
-  public static String INVALID_CHAR_IN_RESULT_INDEX_NAME =
-      "Result index name has invalid character. Valid characters are a-z, 0-9, -(hyphen) and"
-          + " _(underscore)";
-  public static String INVALID_RESULT_INDEX_PREFIX =
-      "Result index must start with " + DEFAULT_RESULT_INDEX;
-
   @JsonProperty private String name;
 
   @JsonProperty private String description;
@@ -57,31 +44,18 @@ public class DataSourceMetadata {
 
   @JsonProperty private String resultIndex;
 
-  public static Function<String, String> DATASOURCE_TO_RESULT_INDEX =
-      datasourceName -> String.format("%s_%s", DEFAULT_RESULT_INDEX, datasourceName);
-
   public DataSourceMetadata(
       String name,
-      String description,
       DataSourceType connector,
       List<String> allowedRoles,
       Map<String, String> properties,
       String resultIndex) {
     this.name = name;
-    String errorMessage = validateCustomResultIndex(resultIndex);
-    if (errorMessage != null) {
-      throw new IllegalArgumentException(errorMessage);
-    }
-    if (resultIndex == null) {
-      this.resultIndex = fromNameToCustomResultIndex();
-    } else {
-      this.resultIndex = resultIndex;
-    }
-    this.connector = connector;
-    this.description = description;
+    this.connector = connector;
+    this.description = StringUtils.EMPTY;
     this.properties = properties;
     this.allowedRoles = allowedRoles;
+    this.resultIndex = resultIndex;
   }
 
@@ -97,56 +71,9 @@ public DataSourceMetadata() {
   public static DataSourceMetadata defaultOpenSearchDataSourceMetadata() {
     return new DataSourceMetadata(
         DEFAULT_DATASOURCE_NAME,
-        StringUtils.EMPTY,
         DataSourceType.OPENSEARCH,
         Collections.emptyList(),
         ImmutableMap.of(),
         null);
   }
-
-  public String validateCustomResultIndex(String resultIndex) {
-    if (resultIndex == null) {
-      return null;
-    }
-    if (resultIndex.length() > MAX_RESULT_INDEX_NAME_SIZE) {
-      return INVALID_RESULT_INDEX_NAME_SIZE;
-    }
-    if (!resultIndex.matches(RESULT_INDEX_NAME_PATTERN)) {
-      return INVALID_CHAR_IN_RESULT_INDEX_NAME;
-    }
-    if (resultIndex != null && !resultIndex.startsWith(DEFAULT_RESULT_INDEX)) {
-      return INVALID_RESULT_INDEX_PREFIX;
-    }
-    return null;
-  }
-
-  /**
-   * Since we are using datasource name to create result index, we need to make sure that the final
-   * name is valid
-   *
-   * @param resultIndex result index name
-   * @return valid result index name
-   */
-  private String convertToValidResultIndex(String resultIndex) {
-    // Limit Length
-    if (resultIndex.length() > MAX_RESULT_INDEX_NAME_SIZE) {
-      resultIndex = resultIndex.substring(0, MAX_RESULT_INDEX_NAME_SIZE);
-    }
-
-    // Pattern Matching: Remove characters that don't match the pattern
-    StringBuilder validChars = new StringBuilder();
-    for (char c : resultIndex.toCharArray()) {
-      if (String.valueOf(c).matches(RESULT_INDEX_NAME_PATTERN)) {
-        validChars.append(c);
-      }
-    }
-    return validChars.toString();
-  }
-
-  public String fromNameToCustomResultIndex() {
-    if (name == null) {
-      throw new IllegalArgumentException("Datasource name cannot be null");
-    }
-    return convertToValidResultIndex(DATASOURCE_TO_RESULT_INDEX.apply(name.toLowerCase()));
-  }
 }
diff --git a/core/src/test/java/org/opensearch/sql/analysis/AnalyzerTestBase.java b/core/src/test/java/org/opensearch/sql/analysis/AnalyzerTestBase.java
index bfd68ee53a..508567582b 100644
--- a/core/src/test/java/org/opensearch/sql/analysis/AnalyzerTestBase.java
+++ b/core/src/test/java/org/opensearch/sql/analysis/AnalyzerTestBase.java
@@ -19,7 +19,6 @@
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
-import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.opensearch.sql.DataSourceSchemaName;
 import org.opensearch.sql.analysis.symbol.Namespace;
@@ -198,7 +197,6 @@ public Set<DataSourceMetadata> getDataSourceMetadata(boolean isDefaultDataSource
             ds ->
                 new DataSourceMetadata(
                     ds.getName(),
-                    StringUtils.EMPTY,
                     ds.getConnectorType(),
                     Collections.emptyList(),
                     ImmutableMap.of(),
@@ -233,9 +231,6 @@ public DataSource getDataSource(String dataSourceName) {
   @Override
   public void updateDataSource(DataSourceMetadata dataSourceMetadata) {}
 
-  @Override
-  public void patchDataSource(Map<String, Object> dataSourceData) {}
-
   @Override
   public void deleteDataSource(String dataSourceName) {}
diff --git a/core/src/test/java/org/opensearch/sql/planner/physical/datasource/DataSourceTableScanTest.java b/core/src/test/java/org/opensearch/sql/planner/physical/datasource/DataSourceTableScanTest.java
index 0c9449e824..4aefc5521d 100644
--- a/core/src/test/java/org/opensearch/sql/planner/physical/datasource/DataSourceTableScanTest.java
+++ b/core/src/test/java/org/opensearch/sql/planner/physical/datasource/DataSourceTableScanTest.java
@@ -18,7 +18,6 @@
 import java.util.LinkedHashMap;
 import java.util.Set;
 import java.util.stream.Collectors;
-import org.apache.commons.lang3.StringUtils;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
@@ -63,7 +62,6 @@ void testIterator() {
             dataSource ->
                 new DataSourceMetadata(
                     dataSource.getName(),
-                    StringUtils.EMPTY,
                     dataSource.getConnectorType(),
                     Collections.emptyList(),
                     ImmutableMap.of(),
diff --git a/datasources/build.gradle b/datasources/build.gradle
index c1a0b94b5c..cdd2ee813b 100644
--- a/datasources/build.gradle
+++ b/datasources/build.gradle
@@ -16,8 +16,6 @@ repositories {
 dependencies {
     implementation project(':core')
     implementation project(':protocol')
-    implementation project(':opensearch')
-    implementation project(':legacy')
     implementation group: 'org.opensearch', name: 'opensearch', version: "${opensearch_version}"
     implementation group: 'org.opensearch', name: 'opensearch-x-content', version: "${opensearch_version}"
     implementation group: 'org.opensearch', name: 'common-utils', version: "${opensearch_build}"
@@ -37,7 +35,7 @@ dependencies {
 test {
     useJUnitPlatform()
     testLogging {
-        events "skipped", "failed"
+        events "passed", "skipped", "failed"
         exceptionFormat "full"
     }
 }
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionRequest.java b/datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionRequest.java
deleted file mode 100644
index 9443ea561e..0000000000
--- a/datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionRequest.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- *
- * * Copyright OpenSearch Contributors
- * * SPDX-License-Identifier: Apache-2.0
- *
- */
-
-package org.opensearch.sql.datasources.model.transport;
-
-import static org.opensearch.sql.analysis.DataSourceSchemaIdentifierNameResolver.DEFAULT_DATASOURCE_NAME;
-import static org.opensearch.sql.datasources.utils.XContentParserUtils.CONNECTOR_FIELD;
-import static org.opensearch.sql.datasources.utils.XContentParserUtils.NAME_FIELD;
-
-import java.io.IOException;
-import java.util.Map;
-import lombok.Getter;
-import org.opensearch.action.ActionRequest;
-import org.opensearch.action.ActionRequestValidationException;
-import org.opensearch.core.common.io.stream.StreamInput;
-
-public class PatchDataSourceActionRequest extends ActionRequest {
-
-  @Getter private Map<String, Object> dataSourceData;
-
-  /** Constructor of UpdateDataSourceActionRequest from StreamInput. */
-  public PatchDataSourceActionRequest(StreamInput in) throws IOException {
-    super(in);
-  }
-
-  public PatchDataSourceActionRequest(Map<String, Object> dataSourceData) {
-    this.dataSourceData = dataSourceData;
-  }
-
-  @Override
-  public ActionRequestValidationException validate() {
-    if (this.dataSourceData.get(NAME_FIELD).equals(DEFAULT_DATASOURCE_NAME)) {
-      ActionRequestValidationException exception = new ActionRequestValidationException();
-      exception.addValidationError(
-          "Not allowed to update datasource with name : " + DEFAULT_DATASOURCE_NAME);
-      return exception;
-    } else if (this.dataSourceData.get(CONNECTOR_FIELD) != null) {
-      ActionRequestValidationException exception = new ActionRequestValidationException();
-      exception.addValidationError("Not allowed to update connector for datasource");
-      return exception;
-    } else {
-      return null;
-    }
-  }
-}
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionResponse.java b/datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionResponse.java
deleted file mode 100644
index 18873a6731..0000000000
--- a/datasources/src/main/java/org/opensearch/sql/datasources/model/transport/PatchDataSourceActionResponse.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- *
- * * Copyright OpenSearch Contributors
- * * SPDX-License-Identifier: Apache-2.0
- *
- */
-
-package org.opensearch.sql.datasources.model.transport;
-
-import java.io.IOException;
-import lombok.Getter;
-import lombok.RequiredArgsConstructor;
-import org.opensearch.core.action.ActionResponse;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-
-@RequiredArgsConstructor
-public class PatchDataSourceActionResponse extends ActionResponse {
-
-  @Getter private final String result;
-
-  public PatchDataSourceActionResponse(StreamInput in) throws IOException {
-    super(in);
-    result = in.readString();
-  }
-
-  @Override
-  public void writeTo(StreamOutput streamOutput) throws IOException {
-    streamOutput.writeString(result);
-  }
-}
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/rest/RestDataSourceQueryAction.java b/datasources/src/main/java/org/opensearch/sql/datasources/rest/RestDataSourceQueryAction.java
index 5693df3486..2947afc5b9 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/rest/RestDataSourceQueryAction.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/rest/RestDataSourceQueryAction.java
@@ -10,13 +10,15 @@
 import static org.opensearch.core.rest.RestStatus.BAD_REQUEST;
 import static org.opensearch.core.rest.RestStatus.NOT_FOUND;
 import static org.opensearch.core.rest.RestStatus.SERVICE_UNAVAILABLE;
-import static org.opensearch.rest.RestRequest.Method.*;
+import static org.opensearch.rest.RestRequest.Method.DELETE;
+import static org.opensearch.rest.RestRequest.Method.GET;
+import static org.opensearch.rest.RestRequest.Method.POST;
+import static org.opensearch.rest.RestRequest.Method.PUT;
 
 import com.google.common.collect.ImmutableList;
 import java.io.IOException;
 import java.util.List;
 import java.util.Locale;
-import java.util.Map;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.opensearch.OpenSearchException;
@@ -30,12 +32,20 @@
 import org.opensearch.sql.datasource.model.DataSourceMetadata;
 import org.opensearch.sql.datasources.exceptions.DataSourceNotFoundException;
 import org.opensearch.sql.datasources.exceptions.ErrorMessage;
-import org.opensearch.sql.datasources.model.transport.*;
-import org.opensearch.sql.datasources.transport.*;
+import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionRequest;
+import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionResponse;
+import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionRequest;
+import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionResponse;
+import org.opensearch.sql.datasources.model.transport.GetDataSourceActionRequest;
+import org.opensearch.sql.datasources.model.transport.GetDataSourceActionResponse;
+import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionRequest;
+import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionResponse;
+import org.opensearch.sql.datasources.transport.TransportCreateDataSourceAction;
+import org.opensearch.sql.datasources.transport.TransportDeleteDataSourceAction;
+import org.opensearch.sql.datasources.transport.TransportGetDataSourceAction;
+import org.opensearch.sql.datasources.transport.TransportUpdateDataSourceAction;
 import org.opensearch.sql.datasources.utils.Scheduler;
 import org.opensearch.sql.datasources.utils.XContentParserUtils;
-import org.opensearch.sql.legacy.metrics.MetricName;
-import org.opensearch.sql.legacy.utils.MetricUtils;
 
 public class RestDataSourceQueryAction extends BaseRestHandler {
@@ -88,17 +98,6 @@ public List<Route> routes() {
          */
         new Route(PUT, BASE_DATASOURCE_ACTION_URL),
 
-        /*
-         * PATCH datasources
-         * Request body:
-         * Ref
-         * [org.opensearch.sql.plugin.transport.datasource.model.PatchDataSourceActionRequest]
-         * Response body:
-         * Ref
-         * [org.opensearch.sql.plugin.transport.datasource.model.PatchDataSourceActionResponse]
-         */
-        new Route(PATCH, BASE_DATASOURCE_ACTION_URL),
-
         /*
          * DELETE datasources
          * Request body: Ref
@@ -123,8 +122,6 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
         return executeUpdateRequest(restRequest, nodeClient);
       case DELETE:
         return executeDeleteRequest(restRequest, nodeClient);
-      case PATCH:
-        return executePatchRequest(restRequest, nodeClient);
       default:
         return restChannel ->
             restChannel.sendResponse(
@@ -135,6 +132,7 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient
 
   private RestChannelConsumer executePostRequest(RestRequest restRequest, NodeClient nodeClient)
       throws IOException {
+
     DataSourceMetadata dataSourceMetadata =
         XContentParserUtils.toDataSourceMetadata(restRequest.contentParser());
     return restChannel ->
@@ -218,34 +216,6 @@ public void onFailure(Exception e) {
                     }));
   }
 
-  private RestChannelConsumer executePatchRequest(RestRequest restRequest, NodeClient nodeClient)
-      throws IOException {
-    Map<String, Object> dataSourceData = XContentParserUtils.toMap(restRequest.contentParser());
-    return restChannel ->
-        Scheduler.schedule(
-            nodeClient,
-            () ->
-                nodeClient.execute(
-                    TransportPatchDataSourceAction.ACTION_TYPE,
-                    new PatchDataSourceActionRequest(dataSourceData),
-                    new ActionListener<>() {
-                      @Override
-                      public void onResponse(
-                          PatchDataSourceActionResponse patchDataSourceActionResponse) {
-                        restChannel.sendResponse(
-                            new BytesRestResponse(
-                                RestStatus.OK,
-                                "application/json; charset=UTF-8",
-                                patchDataSourceActionResponse.getResult()));
-                      }
-
-                      @Override
-                      public void onFailure(Exception e) {
-                        handleException(e, restChannel);
-                      }
-                    }));
-  }
-
   private RestChannelConsumer executeDeleteRequest(RestRequest restRequest, NodeClient nodeClient) {
     String dataSourceName = restRequest.param("dataSourceName");
@@ -283,10 +253,8 @@ private void handleException(Exception e, RestChannel restChannel) {
     } else {
       LOG.error("Error happened during request handling", e);
       if (isClientError(e)) {
-        MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_FAILED_REQ_COUNT_CUS);
         reportError(restChannel, e, BAD_REQUEST);
       } else {
-        MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_FAILED_REQ_COUNT_SYS);
         reportError(restChannel, e, SERVICE_UNAVAILABLE);
       }
     }
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/service/DataSourceServiceImpl.java b/datasources/src/main/java/org/opensearch/sql/datasources/service/DataSourceServiceImpl.java
index 8ba618fb44..25e8006d66 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/service/DataSourceServiceImpl.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/service/DataSourceServiceImpl.java
@@ -6,11 +6,15 @@
 package org.opensearch.sql.datasources.service;
 
 import static org.opensearch.sql.analysis.DataSourceSchemaIdentifierNameResolver.DEFAULT_DATASOURCE_NAME;
-import static org.opensearch.sql.datasources.utils.XContentParserUtils.*;
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
-import java.util.*;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.Set;
 import org.opensearch.sql.common.utils.StringUtils;
 import org.opensearch.sql.datasource.DataSourceService;
 import org.opensearch.sql.datasource.model.DataSource;
@@ -96,19 +100,6 @@ public void updateDataSource(DataSourceMetadata dataSourceMetadata) {
     }
   }
 
-  @Override
-  public void patchDataSource(Map<String, Object> dataSourceData) {
-    if (!dataSourceData.get(NAME_FIELD).equals(DEFAULT_DATASOURCE_NAME)) {
-      DataSourceMetadata dataSourceMetadata =
-          getRawDataSourceMetadata((String) dataSourceData.get(NAME_FIELD));
-      replaceOldDatasourceMetadata(dataSourceData, dataSourceMetadata);
-      updateDataSource(dataSourceMetadata);
-    } else {
-      throw new UnsupportedOperationException(
-          "Not allowed to update default datasource :" + DEFAULT_DATASOURCE_NAME);
-    }
-  }
-
   @Override
   public void deleteDataSource(String dataSourceName) {
     if (dataSourceName.equals(DEFAULT_DATASOURCE_NAME)) {
@@ -145,35 +136,6 @@ private void validateDataSourceMetaData(DataSourceMetadata metadata) {
             + " Properties are required parameters.");
   }
 
-  /**
-   * Replaces the fields in the map of the given metadata.
-   *
-   * @param dataSourceData
-   * @param metadata {@link DataSourceMetadata}.
-   */
-  private void replaceOldDatasourceMetadata(
-      Map<String, Object> dataSourceData, DataSourceMetadata metadata) {
-
-    for (String key : dataSourceData.keySet()) {
-      switch (key) {
-          // Name and connector should not be modified
-        case DESCRIPTION_FIELD:
-          metadata.setDescription((String) dataSourceData.get(DESCRIPTION_FIELD));
-          break;
-        case ALLOWED_ROLES_FIELD:
-          metadata.setAllowedRoles((List<String>) dataSourceData.get(ALLOWED_ROLES_FIELD));
-          break;
-        case PROPERTIES_FIELD:
-          Map<String, String> properties = new HashMap<>(metadata.getProperties());
-          properties.putAll(((Map<String, String>) dataSourceData.get(PROPERTIES_FIELD)));
-          break;
-        case NAME_FIELD:
-        case CONNECTOR_FIELD:
-          break;
-      }
-    }
-  }
-
   @Override
   public DataSourceMetadata getRawDataSourceMetadata(String dataSourceName) {
     if (dataSourceName.equals(DEFAULT_DATASOURCE_NAME)) {
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceAction.java b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceAction.java
index 1b3e678f5d..b3c1ba4196 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceAction.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceAction.java
@@ -7,7 +7,6 @@
 
 package org.opensearch.sql.datasources.transport;
 
-import static org.opensearch.sql.common.setting.Settings.Key.DATASOURCES_LIMIT;
 import static org.opensearch.sql.protocol.response.format.JsonResponseFormatter.Style.PRETTY;
 
 import org.opensearch.action.ActionType;
@@ -20,8 +19,6 @@
 import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionRequest;
 import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionResponse;
 import org.opensearch.sql.datasources.service.DataSourceServiceImpl;
-import org.opensearch.sql.legacy.metrics.MetricName;
-import org.opensearch.sql.legacy.utils.MetricUtils;
 import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
@@ -33,7 +30,6 @@ public class TransportCreateDataSourceAction
       new ActionType<>(NAME, CreateDataSourceActionResponse::new);
 
   private DataSourceService dataSourceService;
-  private org.opensearch.sql.opensearch.setting.OpenSearchSettings settings;
 
   /**
    * TransportCreateDataSourceAction action for creating datasource.
@@ -46,15 +42,13 @@ public class TransportCreateDataSourceAction
   public TransportCreateDataSourceAction(
       TransportService transportService,
       ActionFilters actionFilters,
-      DataSourceServiceImpl dataSourceService,
-      org.opensearch.sql.opensearch.setting.OpenSearchSettings settings) {
+      DataSourceServiceImpl dataSourceService) {
     super(
         TransportCreateDataSourceAction.NAME,
         transportService,
         actionFilters,
         CreateDataSourceActionRequest::new);
     this.dataSourceService = dataSourceService;
-    this.settings = settings;
   }
 
   @Override
@@ -62,29 +56,19 @@ protected void doExecute(
       Task task,
       CreateDataSourceActionRequest request,
       ActionListener<CreateDataSourceActionResponse> actionListener) {
-    MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_CREATION_REQ_COUNT);
-    int dataSourceLimit = settings.getSettingValue(DATASOURCES_LIMIT);
-    if (dataSourceService.getDataSourceMetadata(false).size() >= dataSourceLimit) {
-      actionListener.onFailure(
-          new IllegalStateException(
-              String.format(
-                  "domain concurrent datasources can not" + " exceed %d", dataSourceLimit)));
-    } else {
-      try {
-
-        DataSourceMetadata dataSourceMetadata = request.getDataSourceMetadata();
-        dataSourceService.createDataSource(dataSourceMetadata);
-        String responseContent =
-            new JsonResponseFormatter<String>(PRETTY) {
-              @Override
-              protected Object buildJsonObject(String response) {
-                return response;
-              }
-            }.format("Created DataSource with name " + dataSourceMetadata.getName());
-        actionListener.onResponse(new CreateDataSourceActionResponse(responseContent));
-      } catch (Exception e) {
-        actionListener.onFailure(e);
-      }
+    try {
+      DataSourceMetadata dataSourceMetadata = request.getDataSourceMetadata();
+      dataSourceService.createDataSource(dataSourceMetadata);
+      String responseContent =
+          new JsonResponseFormatter<String>(PRETTY) {
+            @Override
+            protected Object buildJsonObject(String response) {
+              return response;
+            }
+          }.format("Created DataSource with name " + dataSourceMetadata.getName());
+      actionListener.onResponse(new CreateDataSourceActionResponse(responseContent));
+    } catch (Exception e) {
+      actionListener.onFailure(e);
     }
   }
 }
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceAction.java b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceAction.java
index bcc5ef650f..5578d40651 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceAction.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceAction.java
@@ -16,8 +16,6 @@
 import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionRequest;
 import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionResponse;
 import org.opensearch.sql.datasources.service.DataSourceServiceImpl;
-import org.opensearch.sql.legacy.metrics.MetricName;
-import org.opensearch.sql.legacy.utils.MetricUtils;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
 
@@ -55,7 +53,6 @@ protected void doExecute(
       Task task,
       DeleteDataSourceActionRequest request,
       ActionListener<DeleteDataSourceActionResponse> actionListener) {
-    MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_DELETE_REQ_COUNT);
     try {
       dataSourceService.deleteDataSource(request.getDataSourceName());
       actionListener.onResponse(
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceAction.java b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceAction.java
index c8d77dd2e7..34ad59c80f 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceAction.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceAction.java
@@ -18,8 +18,6 @@
 import org.opensearch.sql.datasources.model.transport.GetDataSourceActionRequest;
 import org.opensearch.sql.datasources.model.transport.GetDataSourceActionResponse;
 import org.opensearch.sql.datasources.service.DataSourceServiceImpl;
-import org.opensearch.sql.legacy.metrics.MetricName;
-import org.opensearch.sql.legacy.utils.MetricUtils;
 import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
@@ -58,7 +56,6 @@ protected void doExecute(
       Task task,
       GetDataSourceActionRequest request,
       ActionListener<GetDataSourceActionResponse> actionListener) {
-    MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_GET_REQ_COUNT);
     try {
       String responseContent;
       if (request.getDataSourceName() == null) {
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceAction.java b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceAction.java
deleted file mode 100644
index 8c9334f3a6..0000000000
--- a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceAction.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *
- * * Copyright OpenSearch Contributors
- * * SPDX-License-Identifier: Apache-2.0
- *
- */
-
-package org.opensearch.sql.datasources.transport;
-
-import static org.opensearch.sql.datasources.utils.XContentParserUtils.NAME_FIELD;
-import static org.opensearch.sql.protocol.response.format.JsonResponseFormatter.Style.PRETTY;
-
-import org.opensearch.action.ActionType;
-import org.opensearch.action.support.ActionFilters;
-import org.opensearch.action.support.HandledTransportAction;
-import org.opensearch.common.inject.Inject;
-import org.opensearch.core.action.ActionListener;
-import org.opensearch.sql.datasource.DataSourceService;
-import org.opensearch.sql.datasources.model.transport.PatchDataSourceActionRequest;
-import org.opensearch.sql.datasources.model.transport.PatchDataSourceActionResponse;
-import org.opensearch.sql.datasources.service.DataSourceServiceImpl;
-import org.opensearch.sql.legacy.metrics.MetricName;
-import org.opensearch.sql.legacy.utils.MetricUtils;
-import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
-import org.opensearch.tasks.Task;
-import org.opensearch.transport.TransportService;
-
-public class TransportPatchDataSourceAction
-    extends HandledTransportAction<PatchDataSourceActionRequest, PatchDataSourceActionResponse> {
-
-  public static final String NAME = "cluster:admin/opensearch/ql/datasources/patch";
-  public static final ActionType<PatchDataSourceActionResponse> ACTION_TYPE =
-      new ActionType<>(NAME, PatchDataSourceActionResponse::new);
-
-  private DataSourceService dataSourceService;
-
-  /**
-   * TransportPatchDataSourceAction action for updating datasource.
-   *
-   * @param transportService transportService.
-   * @param actionFilters actionFilters.
-   * @param dataSourceService dataSourceService.
-   */
-  @Inject
-  public TransportPatchDataSourceAction(
-      TransportService transportService,
-      ActionFilters actionFilters,
-      DataSourceServiceImpl dataSourceService) {
-    super(
-        TransportPatchDataSourceAction.NAME,
-        transportService,
-        actionFilters,
-        PatchDataSourceActionRequest::new);
-    this.dataSourceService = dataSourceService;
-  }
-
-  @Override
-  protected void doExecute(
-      Task task,
-      PatchDataSourceActionRequest request,
-      ActionListener<PatchDataSourceActionResponse> actionListener) {
-    MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_PATCH_REQ_COUNT);
-    try {
-      dataSourceService.patchDataSource(request.getDataSourceData());
-      String responseContent =
-          new JsonResponseFormatter<String>(PRETTY) {
-            @Override
-            protected Object buildJsonObject(String response) {
-              return response;
-            }
-          }.format("Updated DataSource with name " + request.getDataSourceData().get(NAME_FIELD));
-      actionListener.onResponse(new PatchDataSourceActionResponse(responseContent));
-    } catch (Exception e) {
-      actionListener.onFailure(e);
-    }
-  }
-}
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceAction.java b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceAction.java
index 32394ab64c..fefd0f3a01 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceAction.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceAction.java
@@ -18,8 +18,6 @@
 import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionRequest;
 import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionResponse;
 import org.opensearch.sql.datasources.service.DataSourceServiceImpl;
-import org.opensearch.sql.legacy.metrics.MetricName;
-import org.opensearch.sql.legacy.utils.MetricUtils;
 import org.opensearch.sql.protocol.response.format.JsonResponseFormatter;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
@@ -58,7 +56,6 @@ protected void doExecute(
       Task task,
       UpdateDataSourceActionRequest request,
       ActionListener<UpdateDataSourceActionResponse> actionListener) {
-    MetricUtils.incrementNumericalMetric(MetricName.DATASOURCE_PUT_REQ_COUNT);
     try {
       dataSourceService.updateDataSource(request.getDataSourceMetadata());
       String responseContent =
diff --git a/datasources/src/main/java/org/opensearch/sql/datasources/utils/XContentParserUtils.java b/datasources/src/main/java/org/opensearch/sql/datasources/utils/XContentParserUtils.java
index 6af2a5a761..261f13870a 100644
--- a/datasources/src/main/java/org/opensearch/sql/datasources/utils/XContentParserUtils.java
+++ b/datasources/src/main/java/org/opensearch/sql/datasources/utils/XContentParserUtils.java
@@ -90,59 +90,6 @@ public static DataSourceMetadata toDataSourceMetadata(XContentParser parser) thr
         name, description, connector, allowedRoles, properties, resultIndex);
   }
 
-  public static Map<String, Object> toMap(XContentParser parser) throws IOException {
-    Map<String, Object> resultMap = new HashMap<>();
-    String name;
-    String description;
-    List<String> allowedRoles = new ArrayList<>();
-    Map<String, String> properties = new HashMap<>();
-    String resultIndex;
-    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
-    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-      String fieldName = parser.currentName();
-      parser.nextToken();
-      switch (fieldName) {
-        case NAME_FIELD:
-          name = parser.textOrNull();
-          resultMap.put(NAME_FIELD, name);
-          break;
-        case DESCRIPTION_FIELD:
-          description = parser.textOrNull();
-          resultMap.put(DESCRIPTION_FIELD, description);
-          break;
-        case CONNECTOR_FIELD:
-          // no-op - datasource connector should not be modified
-          break;
-        case ALLOWED_ROLES_FIELD:
-          while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-            allowedRoles.add(parser.text());
-          }
-          resultMap.put(ALLOWED_ROLES_FIELD, allowedRoles);
-          break;
-        case PROPERTIES_FIELD:
-          ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser);
-          while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
-            String key = parser.currentName();
-            parser.nextToken();
-            String value = parser.textOrNull();
-            properties.put(key, value);
-          }
-          resultMap.put(PROPERTIES_FIELD, properties);
-          break;
-        case RESULT_INDEX_FIELD:
-          resultIndex = parser.textOrNull();
-          resultMap.put(RESULT_INDEX_FIELD, resultIndex);
-          break;
-        default:
-          throw new IllegalArgumentException("Unknown field: " + fieldName);
-      }
-    }
-    if (resultMap.get(NAME_FIELD) == null || resultMap.get(NAME_FIELD) == "") {
-      throw new IllegalArgumentException("Name is a required field.");
-    }
-    return resultMap;
-  }
-
   /**
    * Converts json string to DataSourceMetadata.
    *
@@ -162,25 +109,6 @@ public static DataSourceMetadata toDataSourceMetadata(String json) throws IOExce
     }
   }
 
-  /**
-   * Converts json string to Map.
-   *
-   * @param json jsonstring.
-   * @return DataSourceData
-   * @throws IOException IOException.
-   */
-  public static Map<String, Object> toMap(String json) throws IOException {
-    try (XContentParser parser =
-        XContentType.JSON
-            .xContent()
-            .createParser(
-                NamedXContentRegistry.EMPTY,
-                DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
-                json)) {
-      return toMap(parser);
-    }
-  }
-
   /**
    * Converts DataSourceMetadata to XContentBuilder.
    *
diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/service/DataSourceServiceImplTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/service/DataSourceServiceImplTest.java
index bf88302833..6164d8b73f 100644
--- a/datasources/src/test/java/org/opensearch/sql/datasources/service/DataSourceServiceImplTest.java
+++ b/datasources/src/test/java/org/opensearch/sql/datasources/service/DataSourceServiceImplTest.java
@@ -18,7 +18,6 @@
 import static org.mockito.Mockito.verifyNoInteractions;
 import static org.mockito.Mockito.when;
 import static org.opensearch.sql.analysis.DataSourceSchemaIdentifierNameResolver.DEFAULT_DATASOURCE_NAME;
-import static org.opensearch.sql.datasources.utils.XContentParserUtils.*;
 
 import com.google.common.collect.ImmutableMap;
 import java.util.ArrayList;
@@ -29,7 +28,6 @@
 import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
-import org.apache.commons.lang3.StringUtils;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -266,6 +264,7 @@ void testGetDataSourceMetadataSetWithDefaultDatasource() {
 
   @Test
   void testUpdateDataSourceSuccessCase() {
+
     DataSourceMetadata dataSourceMetadata =
         metadata("testDS", DataSourceType.OPENSEARCH, Collections.emptyList(), ImmutableMap.of());
     dataSourceService.updateDataSource(dataSourceMetadata);
@@ -290,46 +289,6 @@ void testUpdateDefaultDataSource() {
         unsupportedOperationException.getMessage());
   }
 
-  @Test
-  void testPatchDefaultDataSource() {
-    Map<String, Object> dataSourceData =
-        Map.of(NAME_FIELD, DEFAULT_DATASOURCE_NAME, DESCRIPTION_FIELD, "test");
-    UnsupportedOperationException unsupportedOperationException =
-        assertThrows(
-            UnsupportedOperationException.class,
-            () -> dataSourceService.patchDataSource(dataSourceData));
-    assertEquals(
-        "Not allowed to update default datasource :" + DEFAULT_DATASOURCE_NAME,
-        unsupportedOperationException.getMessage());
-  }
-
-  @Test
-  void testPatchDataSourceSuccessCase() {
-    // Tests that patch underlying implementation is to call update
-    Map<String, Object> dataSourceData =
-        new HashMap<>(
-            Map.of(
-                NAME_FIELD,
-                "testDS",
-                DESCRIPTION_FIELD,
-                "test",
-                CONNECTOR_FIELD,
-                "PROMETHEUS",
-                ALLOWED_ROLES_FIELD,
-                new ArrayList<>(),
-                PROPERTIES_FIELD,
-                Map.of(),
-                RESULT_INDEX_FIELD,
-                ""));
-    DataSourceMetadata getData =
-        metadata("testDS", DataSourceType.OPENSEARCH, Collections.emptyList(), ImmutableMap.of());
-    when(dataSourceMetadataStorage.getDataSourceMetadata("testDS"))
-        .thenReturn(Optional.ofNullable(getData));
-
-    dataSourceService.patchDataSource(dataSourceData);
-    verify(dataSourceMetadataStorage, times(1)).updateDataSourceMetadata(any());
-  }
-
   @Test
   void testDeleteDatasource() {
     dataSourceService.deleteDataSource("testDS");
@@ -383,7 +342,6 @@ void testRemovalOfAuthorizationInfo() {
     DataSourceMetadata dataSourceMetadata =
         new DataSourceMetadata(
             "testDS",
-            StringUtils.EMPTY,
             DataSourceType.PROMETHEUS,
             Collections.singletonList("prometheus_access"),
             properties,
@@ -409,7 +367,6 @@ void testRemovalOfAuthorizationInfoForAccessKeyAndSecretKye() {
     DataSourceMetadata dataSourceMetadata =
         new DataSourceMetadata(
             "testDS",
-            StringUtils.EMPTY,
             DataSourceType.PROMETHEUS,
             Collections.singletonList("prometheus_access"),
             properties,
@@ -437,7 +394,6 @@ void testRemovalOfAuthorizationInfoForGlueWithRoleARN() {
     DataSourceMetadata dataSourceMetadata =
         new DataSourceMetadata(
             "testGlue",
-            StringUtils.EMPTY,
             DataSourceType.S3GLUE,
             Collections.singletonList("glue_access"),
             properties,
@@ -502,7 +458,6 @@ void testGetRawDataSourceMetadata() {
     DataSourceMetadata dataSourceMetadata =
         new DataSourceMetadata(
             "testDS",
-            StringUtils.EMPTY,
             DataSourceType.PROMETHEUS,
             Collections.singletonList("prometheus_access"),
             properties,
diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceActionTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceActionTest.java
index 9088d3c4ad..e104672fa3 100644
--- a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceActionTest.java
+++ b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportCreateDataSourceActionTest.java
@@ -1,21 +1,14 @@
 package org.opensearch.sql.datasources.transport;
 
-import static java.util.Collections.emptyList;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-import static org.opensearch.sql.common.setting.Settings.Key.DATASOURCES_LIMIT;
 
 import java.util.HashSet;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.Answers;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Captor;
 import org.mockito.Mock;
@@ -23,15 +16,11 @@
 import org.mockito.junit.jupiter.MockitoExtension;
 import org.opensearch.action.support.ActionFilters;
 import org.opensearch.core.action.ActionListener;
-import org.opensearch.sql.common.setting.Settings;
 import org.opensearch.sql.datasource.model.DataSourceMetadata;
 import org.opensearch.sql.datasource.model.DataSourceType;
 import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionRequest;
 import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionResponse;
 import org.opensearch.sql.datasources.service.DataSourceServiceImpl;
-import org.opensearch.sql.legacy.esdomain.LocalClusterState;
-import org.opensearch.sql.legacy.metrics.Metrics;
-import org.opensearch.sql.opensearch.setting.OpenSearchSettings;
 import org.opensearch.tasks.Task;
 import org.opensearch.transport.TransportService;
 
@@ -40,13 +29,9 @@ public class TransportCreateDataSourceActionTest {
 
   @Mock private TransportService transportService;
   @Mock private TransportCreateDataSourceAction action;
-
-  @Mock(answer = Answers.RETURNS_DEEP_STUBS)
-  private DataSourceServiceImpl dataSourceService;
-
+  @Mock private DataSourceServiceImpl dataSourceService;
   @Mock private Task task;
   @Mock private ActionListener<CreateDataSourceActionResponse> actionListener;
-  @Mock private OpenSearchSettings settings;
 
   @Captor
   private ArgumentCaptor<CreateDataSourceActionResponse>
@@ -58,15 +43,7 @@ public class TransportCreateDataSourceActionTest {
   public void setUp() {
     action =
         new TransportCreateDataSourceAction(
-            transportService, new ActionFilters(new HashSet<>()), dataSourceService, settings);
-    when(dataSourceService.getDataSourceMetadata(false).size()).thenReturn(1);
-    when(settings.getSettingValue(DATASOURCES_LIMIT)).thenReturn(20);
-    // Required for metrics initialization
-    doReturn(emptyList()).when(settings).getSettings();
-    when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L);
-    when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L);
-    LocalClusterState.state().setPluginSettings(settings);
-    Metrics.getInstance().registerDefaultMetrics();
+            transportService, new ActionFilters(new HashSet<>()), dataSourceService);
   }
 
   @Test
@@ -102,30 +79,4 @@ public void testDoExecuteWithException() {
     Assertions.assertTrue(exception instanceof RuntimeException);
     Assertions.assertEquals("Error", exception.getMessage());
   }
-
-  @Test
-  public void testDataSourcesLimit() {
-    DataSourceMetadata dataSourceMetadata = new DataSourceMetadata();
-    dataSourceMetadata.setName("test_datasource");
-    dataSourceMetadata.setConnector(DataSourceType.PROMETHEUS);
-    CreateDataSourceActionRequest request = new CreateDataSourceActionRequest(dataSourceMetadata);
-    when(dataSourceService.getDataSourceMetadata(false).size()).thenReturn(1);
-    when(settings.getSettingValue(DATASOURCES_LIMIT)).thenReturn(1);
-
-    action.doExecute(
-        task,
-        request,
-        new ActionListener<CreateDataSourceActionResponse>() {
-          @Override
-          public void onResponse(CreateDataSourceActionResponse createDataSourceActionResponse) {
-            fail();
-          }
-
-          @Override
-          public void onFailure(Exception e) {
-            assertEquals("domain concurrent datasources can not exceed 1", e.getMessage());
-          }
-        });
-    verify(dataSourceService, times(0)).createDataSource(dataSourceMetadata);
-  }
 }
diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceActionTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceActionTest.java
index df066654c6..ea581de20c 100644
--- a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceActionTest.java
+++ b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportDeleteDataSourceActionTest.java
@@ -1,11 +1,8 @@
 package org.opensearch.sql.datasources.transport;
-import static java.util.Collections.emptyList; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import java.util.HashSet; import org.junit.jupiter.api.Assertions; @@ -19,13 +16,9 @@ import org.mockito.junit.jupiter.MockitoExtension; import org.opensearch.action.support.ActionFilters; import org.opensearch.core.action.ActionListener; -import org.opensearch.sql.common.setting.Settings; import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionRequest; import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionResponse; import org.opensearch.sql.datasources.service.DataSourceServiceImpl; -import org.opensearch.sql.legacy.esdomain.LocalClusterState; -import org.opensearch.sql.legacy.metrics.Metrics; -import org.opensearch.sql.opensearch.setting.OpenSearchSettings; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; @@ -38,8 +31,6 @@ public class TransportDeleteDataSourceActionTest { @Mock private Task task; @Mock private ActionListener actionListener; - @Mock private OpenSearchSettings settings; - @Captor private ArgumentCaptor deleteDataSourceActionResponseArgumentCaptor; @@ -51,12 +42,6 @@ public void setUp() { action = new TransportDeleteDataSourceAction( transportService, new ActionFilters(new HashSet<>()), dataSourceService); - // Required for metrics initialization - doReturn(emptyList()).when(settings).getSettings(); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L); - LocalClusterState.state().setPluginSettings(settings); - Metrics.getInstance().registerDefaultMetrics(); } @Test diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceActionTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceActionTest.java index 286f308402..4f04afd667 100644 --- a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceActionTest.java +++ b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportGetDataSourceActionTest.java @@ -1,7 +1,5 @@ package org.opensearch.sql.datasources.transport; -import static java.util.Collections.emptyList; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -24,15 +22,11 @@ import org.mockito.junit.jupiter.MockitoExtension; import org.opensearch.action.support.ActionFilters; import org.opensearch.core.action.ActionListener; -import org.opensearch.sql.common.setting.Settings; import org.opensearch.sql.datasource.model.DataSourceMetadata; import org.opensearch.sql.datasource.model.DataSourceType; import org.opensearch.sql.datasources.model.transport.GetDataSourceActionRequest; import org.opensearch.sql.datasources.model.transport.GetDataSourceActionResponse; import org.opensearch.sql.datasources.service.DataSourceServiceImpl; -import org.opensearch.sql.legacy.esdomain.LocalClusterState; -import org.opensearch.sql.legacy.metrics.Metrics; -import org.opensearch.sql.opensearch.setting.OpenSearchSettings; import org.opensearch.sql.protocol.response.format.JsonResponseFormatter; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; 
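Note for reviewers: the Settings/LocalClusterState/Metrics imports removed here, and the matching removals in the sibling transport-action tests above and below, all fed one copy-pasted fixture. It is annotated once below for reference; the names come from the legacy metrics classes touched later in this patch, and the enclosing mock-based test class is assumed:

    // Pre-revert fixture deleted by this patch: it bootstrapped the legacy metrics
    // registry so the transport action under test could bump datasource counters.
    doReturn(emptyList()).when(settings).getSettings(); // stub node-level plugin settings
    when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L);
    when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L);
    LocalClusterState.state().setPluginSettings(settings); // singleton read by the metrics code
    Metrics.getInstance().registerDefaultMetrics(); // registers the MetricName counters
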
@@ -51,19 +45,11 @@ public class TransportGetDataSourceActionTest { @Captor private ArgumentCaptor exceptionArgumentCaptor; - @Mock private OpenSearchSettings settings; - @BeforeEach public void setUp() { action = new TransportGetDataSourceAction( transportService, new ActionFilters(new HashSet<>()), dataSourceService); - // Required for metrics initialization - doReturn(emptyList()).when(settings).getSettings(); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L); - LocalClusterState.state().setPluginSettings(settings); - Metrics.getInstance().registerDefaultMetrics(); } @Test diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceActionTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceActionTest.java deleted file mode 100644 index a0f159bfd0..0000000000 --- a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportPatchDataSourceActionTest.java +++ /dev/null @@ -1,91 +0,0 @@ -package org.opensearch.sql.datasources.transport; - -import static java.util.Collections.emptyList; -import static org.mockito.Mockito.*; -import static org.opensearch.sql.datasources.utils.XContentParserUtils.*; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.jupiter.MockitoExtension; -import org.opensearch.action.support.ActionFilters; -import org.opensearch.core.action.ActionListener; -import org.opensearch.sql.common.setting.Settings; -import org.opensearch.sql.datasources.model.transport.PatchDataSourceActionRequest; -import org.opensearch.sql.datasources.model.transport.PatchDataSourceActionResponse; -import org.opensearch.sql.datasources.service.DataSourceServiceImpl; -import org.opensearch.sql.legacy.esdomain.LocalClusterState; -import org.opensearch.sql.legacy.metrics.Metrics; -import org.opensearch.sql.opensearch.setting.OpenSearchSettings; -import org.opensearch.tasks.Task; -import org.opensearch.transport.TransportService; - -@ExtendWith(MockitoExtension.class) -public class TransportPatchDataSourceActionTest { - - @Mock private TransportService transportService; - @Mock private TransportPatchDataSourceAction action; - @Mock private DataSourceServiceImpl dataSourceService; - @Mock private Task task; - @Mock private ActionListener actionListener; - - @Captor - private ArgumentCaptor patchDataSourceActionResponseArgumentCaptor; - - @Captor private ArgumentCaptor exceptionArgumentCaptor; - @Mock private OpenSearchSettings settings; - - @BeforeEach - public void setUp() { - action = - new TransportPatchDataSourceAction( - transportService, new ActionFilters(new HashSet<>()), dataSourceService); - // Required for metrics initialization - doReturn(emptyList()).when(settings).getSettings(); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L); - LocalClusterState.state().setPluginSettings(settings); - Metrics.getInstance().registerDefaultMetrics(); - } - - @Test - public void testDoExecute() { - Map 
dataSourceData = new HashMap<>(); - dataSourceData.put(NAME_FIELD, "test_datasource"); - dataSourceData.put(DESCRIPTION_FIELD, "test"); - - PatchDataSourceActionRequest request = new PatchDataSourceActionRequest(dataSourceData); - - action.doExecute(task, request, actionListener); - verify(dataSourceService, times(1)).patchDataSource(dataSourceData); - Mockito.verify(actionListener) - .onResponse(patchDataSourceActionResponseArgumentCaptor.capture()); - PatchDataSourceActionResponse patchDataSourceActionResponse = - patchDataSourceActionResponseArgumentCaptor.getValue(); - String responseAsJson = "\"Updated DataSource with name test_datasource\""; - Assertions.assertEquals(responseAsJson, patchDataSourceActionResponse.getResult()); - } - - @Test - public void testDoExecuteWithException() { - Map dataSourceData = new HashMap<>(); - dataSourceData.put(NAME_FIELD, "test_datasource"); - dataSourceData.put(DESCRIPTION_FIELD, "test"); - doThrow(new RuntimeException("Error")).when(dataSourceService).patchDataSource(dataSourceData); - PatchDataSourceActionRequest request = new PatchDataSourceActionRequest(dataSourceData); - action.doExecute(task, request, actionListener); - verify(dataSourceService, times(1)).patchDataSource(dataSourceData); - Mockito.verify(actionListener).onFailure(exceptionArgumentCaptor.capture()); - Exception exception = exceptionArgumentCaptor.getValue(); - Assertions.assertTrue(exception instanceof RuntimeException); - Assertions.assertEquals("Error", exception.getMessage()); - } -} diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceActionTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceActionTest.java index 4d42cdb2fa..ffcd526f87 100644 --- a/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceActionTest.java +++ b/datasources/src/test/java/org/opensearch/sql/datasources/transport/TransportUpdateDataSourceActionTest.java @@ -1,11 +1,8 @@ package org.opensearch.sql.datasources.transport; -import static java.util.Collections.emptyList; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import java.util.HashSet; import org.junit.jupiter.api.Assertions; @@ -19,15 +16,11 @@ import org.mockito.junit.jupiter.MockitoExtension; import org.opensearch.action.support.ActionFilters; import org.opensearch.core.action.ActionListener; -import org.opensearch.sql.common.setting.Settings; import org.opensearch.sql.datasource.model.DataSourceMetadata; import org.opensearch.sql.datasource.model.DataSourceType; import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionRequest; import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionResponse; import org.opensearch.sql.datasources.service.DataSourceServiceImpl; -import org.opensearch.sql.legacy.esdomain.LocalClusterState; -import org.opensearch.sql.legacy.metrics.Metrics; -import org.opensearch.sql.opensearch.setting.OpenSearchSettings; import org.opensearch.tasks.Task; import org.opensearch.transport.TransportService; @@ -39,7 +32,6 @@ public class TransportUpdateDataSourceActionTest { @Mock private DataSourceServiceImpl dataSourceService; @Mock private Task task; @Mock private ActionListener actionListener; - @Mock private OpenSearchSettings settings; @Captor private ArgumentCaptor @@ -52,12 +44,6 @@ 
public void setUp() { action = new TransportUpdateDataSourceAction( transportService, new ActionFilters(new HashSet<>()), dataSourceService); - // Required for metrics initialization - doReturn(emptyList()).when(settings).getSettings(); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L); - when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L); - LocalClusterState.state().setPluginSettings(settings); - Metrics.getInstance().registerDefaultMetrics(); } @Test diff --git a/datasources/src/test/java/org/opensearch/sql/datasources/utils/XContentParserUtilsTest.java b/datasources/src/test/java/org/opensearch/sql/datasources/utils/XContentParserUtilsTest.java index 5a1f5e155f..d134293456 100644 --- a/datasources/src/test/java/org/opensearch/sql/datasources/utils/XContentParserUtilsTest.java +++ b/datasources/src/test/java/org/opensearch/sql/datasources/utils/XContentParserUtilsTest.java @@ -1,7 +1,6 @@ package org.opensearch.sql.datasources.utils; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.opensearch.sql.datasources.utils.XContentParserUtils.*; import com.google.gson.Gson; import java.util.HashMap; @@ -44,7 +43,6 @@ public void testToDataSourceMetadataFromJson() { dataSourceMetadata.setConnector(DataSourceType.PROMETHEUS); dataSourceMetadata.setAllowedRoles(List.of("prometheus_access")); dataSourceMetadata.setProperties(Map.of("prometheus.uri", "https://localhost:9090")); - dataSourceMetadata.setResultIndex("query_execution_result2"); Gson gson = new Gson(); String json = gson.toJson(dataSourceMetadata); @@ -54,46 +52,6 @@ public void testToDataSourceMetadataFromJson() { Assertions.assertEquals("prometheus_access", retrievedMetadata.getAllowedRoles().get(0)); } - @SneakyThrows - @Test - public void testToMapFromJson() { - Map dataSourceData = - Map.of( - NAME_FIELD, - "test_DS", - DESCRIPTION_FIELD, - "test", - ALLOWED_ROLES_FIELD, - List.of("all_access"), - PROPERTIES_FIELD, - Map.of("prometheus.uri", "localhost:9090"), - CONNECTOR_FIELD, - "PROMETHEUS", - RESULT_INDEX_FIELD, - ""); - - Map dataSourceDataConnectorRemoved = - Map.of( - NAME_FIELD, - "test_DS", - DESCRIPTION_FIELD, - "test", - ALLOWED_ROLES_FIELD, - List.of("all_access"), - PROPERTIES_FIELD, - Map.of("prometheus.uri", "localhost:9090"), - RESULT_INDEX_FIELD, - ""); - - Gson gson = new Gson(); - String json = gson.toJson(dataSourceData); - - Map parsedData = XContentParserUtils.toMap(json); - - Assertions.assertEquals(parsedData, dataSourceDataConnectorRemoved); - Assertions.assertEquals("test", parsedData.get(DESCRIPTION_FIELD)); - } - @SneakyThrows @Test public void testToDataSourceMetadataFromJsonWithoutName() { @@ -113,22 +71,6 @@ public void testToDataSourceMetadataFromJsonWithoutName() { Assertions.assertEquals("name and connector are required fields.", exception.getMessage()); } - @SneakyThrows - @Test - public void testToMapFromJsonWithoutName() { - Map dataSourceData = new HashMap<>(Map.of(DESCRIPTION_FIELD, "test")); - Gson gson = new Gson(); - String json = gson.toJson(dataSourceData); - - IllegalArgumentException exception = - assertThrows( - IllegalArgumentException.class, - () -> { - XContentParserUtils.toMap(json); - }); - Assertions.assertEquals("Name is a required field.", exception.getMessage()); - } - @SneakyThrows @Test public void testToDataSourceMetadataFromJsonWithoutConnector() { @@ -164,21 +106,4 @@ public void testToDataSourceMetadataFromJsonUsingUnknownObject() { }); Assertions.assertEquals("Unknown 
field: test", exception.getMessage());
   }
-
-  @SneakyThrows
-  @Test
-  public void testToMapFromJsonUsingUnknownObject() {
-    HashMap hashMap = new HashMap<>();
-    hashMap.put("test", "test");
-    Gson gson = new Gson();
-    String json = gson.toJson(hashMap);
-
-    IllegalArgumentException exception =
-        assertThrows(
-            IllegalArgumentException.class,
-            () -> {
-              XContentParserUtils.toMap(json);
-            });
-    Assertions.assertEquals("Unknown field: test", exception.getMessage());
-  }
 }
diff --git a/docs/user/admin/settings.rst b/docs/user/admin/settings.rst
index 7e175ae719..b5da4e28e2 100644
--- a/docs/user/admin/settings.rst
+++ b/docs/user/admin/settings.rst
@@ -311,110 +311,3 @@ SQL query::
         "status": 400
     }
-
-plugins.query.executionengine.spark.session.limit
-==================================================
-
-Description
------------
-
-By default, each cluster can run a maximum of 100 sessions in parallel. You can increase the limit with this setting.
-
-1. The default value is 100.
-2. This setting is node scope.
-3. This setting can be updated dynamically.
-
-You can update the setting with a new value like this.
-
-SQL query::
-
-    sh$ curl -sS -H 'Content-Type: application/json' -X PUT localhost:9200/_cluster/settings \
-    ... -d '{"transient":{"plugins.query.executionengine.spark.session.limit":200}}'
-    {
-      "acknowledged": true,
-      "persistent": {},
-      "transient": {
-        "plugins": {
-          "query": {
-            "executionengine": {
-              "spark": {
-                "session": {
-                  "limit": "200"
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-
-
-plugins.query.executionengine.spark.refresh_job.limit
-=====================================================
-
-Description
------------
-
-By default, each cluster can run a maximum of 50 concurrent refresh jobs. You can increase the limit with this setting.
-
-1. The default value is 50.
-2. This setting is node scope.
-3. This setting can be updated dynamically.
-
-You can update the setting with a new value like this.
-
-SQL query::
-
-    sh$ curl -sS -H 'Content-Type: application/json' -X PUT localhost:9200/_cluster/settings \
-    ... -d '{"transient":{"plugins.query.executionengine.spark.refresh_job.limit":200}}'
-    {
-      "acknowledged": true,
-      "persistent": {},
-      "transient": {
-        "plugins": {
-          "query": {
-            "executionengine": {
-              "spark": {
-                "refresh_job": {
-                  "limit": "200"
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-
-
-plugins.query.datasources.limit
-===============================
-
-Description
------------
-
-By default, each cluster can have a maximum of 20 datasources. You can increase the limit with this setting.
-
-1. The default value is 20.
-2. This setting is node scope.
-3. This setting can be updated dynamically.
-
-You can update the setting with a new value like this.
-
-SQL query::
-
-    sh$ curl -sS -H 'Content-Type: application/json' -X PUT localhost:9200/_cluster/settings \
-    ... -d '{"transient":{"plugins.query.datasources.limit":25}}'
-    {
-      "acknowledged": true,
-      "persistent": {},
-      "transient": {
-        "plugins": {
-          "query": {
-            "datasources": {
-              "limit": "25"
-            }
-          }
-        }
-      }
-    }
-
diff --git a/docs/user/interfaces/asyncqueryinterface.rst b/docs/user/interfaces/asyncqueryinterface.rst
index 3fbc16d15f..a9fc77264c 100644
--- a/docs/user/interfaces/asyncqueryinterface.rst
+++ b/docs/user/interfaces/asyncqueryinterface.rst
@@ -62,50 +62,6 @@ Sample Response::
       "queryId": "00fd796ut1a7eg0q"
     }
 
-Execute query in session
-------------------------
-
-If plugins.query.executionengine.spark.session.enabled is set to true, session-based execution is enabled. Under the hood, all queries submitted to the same session are executed in the same SparkContext. A session is closed automatically if no query is submitted for 10 minutes.
-
-The async query response includes a ``sessionId`` indicating that the query was executed in a session.
-
-Sample Request::
-
-    curl --location 'http://localhost:9200/_plugins/_async_query' \
-    --header 'Content-Type: application/json' \
-    --data '{
-        "datasource" : "my_glue",
-        "lang" : "sql",
-        "query" : "select * from my_glue.default.http_logs limit 10"
-    }'
-
-Sample Response::
-
-    {
-      "queryId": "HlbM61kX6MDkAktO",
-      "sessionId": "1Giy65ZnzNlmsPAm"
-    }
-
-Users can reuse a session by passing the ``sessionId`` request parameter.
-
-Sample Request::
-
-    curl --location 'http://localhost:9200/_plugins/_async_query' \
-    --header 'Content-Type: application/json' \
-    --data '{
-        "datasource" : "my_glue",
-        "lang" : "sql",
-        "query" : "select * from my_glue.default.http_logs limit 10",
-        "sessionId" : "1Giy65ZnzNlmsPAm"
-    }'
-
-Sample Response::
-
-    {
-      "queryId": "7GC4mHhftiTejvxN",
-      "sessionId": "1Giy65ZnzNlmsPAm"
-    }
-
 Async Query Result API
 ======================================
diff --git a/docs/user/ppl/admin/datasources.rst b/docs/user/ppl/admin/datasources.rst
index 31378f6cc4..3682153b9d 100644
--- a/docs/user/ppl/admin/datasources.rst
+++ b/docs/user/ppl/admin/datasources.rst
@@ -93,19 +93,6 @@ we can remove authorization and other details in case of security disabled domai
       "allowedRoles" : ["prometheus_access"]
    }
 
-* Datasource modification PATCH API ("_plugins/_query/_datasources") ::
-
-    PATCH https://localhost:9200/_plugins/_query/_datasources
-    content-type: application/json
-    Authorization: Basic {{username}} {{password}}
-
-    {
-      "name" : "my_prometheus",
-      "allowedRoles" : ["all_access"]
-    }
-
-  **Name is required and must exist. Connector cannot be modified and will be ignored.**
-
 * Datasource Read GET API("_plugins/_query/_datasources/{{dataSourceName}}" ::
 
     GET https://localhost:9200/_plugins/_query/_datasources/my_prometheus
@@ -127,7 +114,6 @@ Each of the datasource configuration management apis are controlled by following
 * cluster:admin/opensearch/datasources/create [Create POST API]
 * cluster:admin/opensearch/datasources/read [Get GET API]
 * cluster:admin/opensearch/datasources/update [Update PUT API]
-* cluster:admin/opensearch/datasources/patch [Update PATCH API]
 * cluster:admin/opensearch/datasources/delete [Delete DELETE API]
 
 Only users mapped with roles having above actions are authorized to execute datasource management apis.
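Note for reviewers: with the PATCH endpoint and its documentation gone, a partial datasource edit has to be sent as a full update through the PUT API, which replaces the stored metadata rather than merging fields. Below is a minimal sketch under that assumption, meant to run inside a DataSourceAPIsIT-style integ test; it reuses the SQLIntegTestCase helpers and the five-argument DataSourceMetadata constructor this patch restores (the test name is hypothetical):

    @SneakyThrows
    @Test
    public void updateInsteadOfPatch() {
      // Full replacement: every field must be supplied, including ones a
      // PATCH request would previously have left untouched.
      DataSourceMetadata updateDSM =
          new DataSourceMetadata(
              "my_prometheus",
              DataSourceType.PROMETHEUS,
              ImmutableList.of("prometheus_access"),
              ImmutableMap.of("prometheus.uri", "https://localhost:9090"),
              null);
      // PUT /_plugins/_query/_datasources with the complete metadata document
      Request updateRequest = getUpdateDataSourceRequest(updateDSM);
      Response updateResponse = client().performRequest(updateRequest);
      Assert.assertEquals(200, updateResponse.getStatusLine().getStatusCode());
    }
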
diff --git a/integ-test/build.gradle b/integ-test/build.gradle index c48d43d3e5..dd646d7a66 100644 --- a/integ-test/build.gradle +++ b/integ-test/build.gradle @@ -167,7 +167,7 @@ dependencies { testImplementation group: 'org.opensearch.client', name: 'opensearch-rest-client', version: "${opensearch_version}" testImplementation group: 'org.opensearch.driver', name: 'opensearch-sql-jdbc', version: System.getProperty("jdbcDriverVersion", '1.2.0.0') testImplementation group: 'org.hamcrest', name: 'hamcrest', version: '2.1' - implementation group: 'org.apache.logging.log4j', name: 'log4j-core', version:"${versions.log4j}" + implementation group: 'org.apache.logging.log4j', name: 'log4j-core', version:'2.20.0' testImplementation project(':opensearch-sql-plugin') testImplementation project(':legacy') testImplementation('org.junit.jupiter:junit-jupiter-api:5.6.2') diff --git a/integ-test/src/test/java/org/opensearch/sql/datasource/DataSourceAPIsIT.java b/integ-test/src/test/java/org/opensearch/sql/datasource/DataSourceAPIsIT.java index c681b58eb4..8623b9fa6f 100644 --- a/integ-test/src/test/java/org/opensearch/sql/datasource/DataSourceAPIsIT.java +++ b/integ-test/src/test/java/org/opensearch/sql/datasource/DataSourceAPIsIT.java @@ -5,8 +5,6 @@ package org.opensearch.sql.datasource; -import static org.opensearch.sql.datasources.utils.XContentParserUtils.DESCRIPTION_FIELD; -import static org.opensearch.sql.datasources.utils.XContentParserUtils.NAME_FIELD; import static org.opensearch.sql.legacy.TestUtils.getResponseBody; import com.google.common.collect.ImmutableList; @@ -17,12 +15,8 @@ import java.io.IOException; import java.lang.reflect.Type; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; -import java.util.Map; import lombok.SneakyThrows; -import org.apache.commons.lang3.StringUtils; -import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Test; @@ -35,11 +29,6 @@ public class DataSourceAPIsIT extends PPLIntegTestCase { - @After - public void cleanUp() throws IOException { - wipeAllClusterSettings(); - } - @AfterClass protected static void deleteDataSourcesCreated() throws IOException { Request deleteRequest = getDeleteDataSourceRequest("create_prometheus"); @@ -57,10 +46,6 @@ protected static void deleteDataSourcesCreated() throws IOException { deleteRequest = getDeleteDataSourceRequest("Create_Prometheus"); deleteResponse = client().performRequest(deleteRequest); Assert.assertEquals(204, deleteResponse.getStatusLine().getStatusCode()); - - deleteRequest = getDeleteDataSourceRequest("duplicate_prometheus"); - deleteResponse = client().performRequest(deleteRequest); - Assert.assertEquals(204, deleteResponse.getStatusLine().getStatusCode()); } @SneakyThrows @@ -114,7 +99,6 @@ public void updateDataSourceAPITest() { DataSourceMetadata createDSM = new DataSourceMetadata( "update_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "https://localhost:9090"), @@ -128,7 +112,6 @@ public void updateDataSourceAPITest() { DataSourceMetadata updateDSM = new DataSourceMetadata( "update_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "https://randomtest.com:9090"), @@ -153,31 +136,6 @@ public void updateDataSourceAPITest() { Assert.assertEquals( "https://randomtest.com:9090", dataSourceMetadata.getProperties().get("prometheus.uri")); Assert.assertEquals("", dataSourceMetadata.getDescription()); - - // patch 
datasource - Map updateDS = - new HashMap<>(Map.of(NAME_FIELD, "update_prometheus", DESCRIPTION_FIELD, "test")); - Request patchRequest = getPatchDataSourceRequest(updateDS); - Response patchResponse = client().performRequest(patchRequest); - Assert.assertEquals(200, patchResponse.getStatusLine().getStatusCode()); - String patchResponseString = getResponseBody(patchResponse); - Assert.assertEquals("\"Updated DataSource with name update_prometheus\"", patchResponseString); - - // Datasource is not immediately updated. so introducing a sleep of 2s. - Thread.sleep(2000); - - // get datasource to validate the modification. - // get datasource - Request getRequestAfterPatch = getFetchDataSourceRequest("update_prometheus"); - Response getResponseAfterPatch = client().performRequest(getRequestAfterPatch); - Assert.assertEquals(200, getResponseAfterPatch.getStatusLine().getStatusCode()); - String getResponseStringAfterPatch = getResponseBody(getResponseAfterPatch); - DataSourceMetadata dataSourceMetadataAfterPatch = - new Gson().fromJson(getResponseStringAfterPatch, DataSourceMetadata.class); - Assert.assertEquals( - "https://randomtest.com:9090", - dataSourceMetadataAfterPatch.getProperties().get("prometheus.uri")); - Assert.assertEquals("test", dataSourceMetadataAfterPatch.getDescription()); } @SneakyThrows @@ -188,7 +146,6 @@ public void deleteDataSourceTest() { DataSourceMetadata createDSM = new DataSourceMetadata( "delete_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "https://localhost:9090"), @@ -228,7 +185,6 @@ public void getAllDataSourceTest() { DataSourceMetadata createDSM = new DataSourceMetadata( "get_all_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "https://localhost:9090"), @@ -293,45 +249,4 @@ public void issue2196() { Assert.assertNull(dataSourceMetadata.getProperties().get("prometheus.auth.password")); Assert.assertEquals("Prometheus Creation for Integ test", dataSourceMetadata.getDescription()); } - - @Test - public void datasourceLimitTest() throws InterruptedException, IOException { - DataSourceMetadata d1 = mockDataSourceMetadata("duplicate_prometheus"); - Request createRequest = getCreateDataSourceRequest(d1); - Response response = client().performRequest(createRequest); - Assert.assertEquals(201, response.getStatusLine().getStatusCode()); - // Datasource is not immediately created. so introducing a sleep of 2s. 
- Thread.sleep(2000); - - updateClusterSettings(new ClusterSetting(TRANSIENT, "plugins.query.datasources.limit", "1")); - - DataSourceMetadata d2 = mockDataSourceMetadata("d2"); - ResponseException exception = - Assert.assertThrows( - ResponseException.class, () -> client().performRequest(getCreateDataSourceRequest(d2))); - Assert.assertEquals(400, exception.getResponse().getStatusLine().getStatusCode()); - String prometheusGetResponseString = getResponseBody(exception.getResponse()); - JsonObject errorMessage = new Gson().fromJson(prometheusGetResponseString, JsonObject.class); - Assert.assertEquals( - "domain concurrent datasources can not exceed 1", - errorMessage.get("error").getAsJsonObject().get("details").getAsString()); - } - - public DataSourceMetadata mockDataSourceMetadata(String name) { - return new DataSourceMetadata( - name, - "Prometheus Creation for Integ test", - DataSourceType.PROMETHEUS, - ImmutableList.of(), - ImmutableMap.of( - "prometheus.uri", - "https://localhost:9090", - "prometheus.auth.type", - "basicauth", - "prometheus.auth.username", - "username", - "prometheus.auth.password", - "password"), - null); - } } diff --git a/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java b/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java index 303654ea37..e0d04c55b8 100644 --- a/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java +++ b/integ-test/src/test/java/org/opensearch/sql/legacy/SQLIntegTestCase.java @@ -49,7 +49,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Locale; -import java.util.Map; import javax.management.MBeanServerInvocationHandler; import javax.management.ObjectName; import javax.management.remote.JMXConnector; @@ -495,15 +494,6 @@ protected static Request getUpdateDataSourceRequest(DataSourceMetadata dataSourc return request; } - protected static Request getPatchDataSourceRequest(Map dataSourceData) { - Request request = new Request("PATCH", "/_plugins/_query/_datasources"); - request.setJsonEntity(new Gson().toJson(dataSourceData)); - RequestOptions.Builder restOptionsBuilder = RequestOptions.DEFAULT.toBuilder(); - restOptionsBuilder.addHeader("Content-Type", "application/json"); - request.setOptions(restOptionsBuilder); - return request; - } - protected static Request getFetchDataSourceRequest(String name) { Request request = new Request("GET", "/_plugins/_query/_datasources" + "/" + name); if (StringUtils.isEmpty(name)) { diff --git a/integ-test/src/test/java/org/opensearch/sql/ppl/InformationSchemaCommandIT.java b/integ-test/src/test/java/org/opensearch/sql/ppl/InformationSchemaCommandIT.java index d916bfc4db..7b694ce222 100644 --- a/integ-test/src/test/java/org/opensearch/sql/ppl/InformationSchemaCommandIT.java +++ b/integ-test/src/test/java/org/opensearch/sql/ppl/InformationSchemaCommandIT.java @@ -15,7 +15,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.io.IOException; -import org.apache.commons.lang3.StringUtils; import org.json.JSONObject; import org.junit.After; import org.junit.Assert; @@ -45,7 +44,6 @@ protected void init() throws InterruptedException, IOException { DataSourceMetadata createDSM = new DataSourceMetadata( "my_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "http://localhost:9090"), diff --git a/integ-test/src/test/java/org/opensearch/sql/ppl/PrometheusDataSourceCommandsIT.java 
b/integ-test/src/test/java/org/opensearch/sql/ppl/PrometheusDataSourceCommandsIT.java index 10fe13a8db..b81b7f9517 100644 --- a/integ-test/src/test/java/org/opensearch/sql/ppl/PrometheusDataSourceCommandsIT.java +++ b/integ-test/src/test/java/org/opensearch/sql/ppl/PrometheusDataSourceCommandsIT.java @@ -56,7 +56,6 @@ protected void init() throws InterruptedException, IOException { DataSourceMetadata createDSM = new DataSourceMetadata( "my_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "http://localhost:9090"), diff --git a/integ-test/src/test/java/org/opensearch/sql/ppl/ShowDataSourcesCommandIT.java b/integ-test/src/test/java/org/opensearch/sql/ppl/ShowDataSourcesCommandIT.java index b6a34d5c41..c3d2bf5912 100644 --- a/integ-test/src/test/java/org/opensearch/sql/ppl/ShowDataSourcesCommandIT.java +++ b/integ-test/src/test/java/org/opensearch/sql/ppl/ShowDataSourcesCommandIT.java @@ -15,7 +15,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.io.IOException; -import org.apache.commons.lang3.StringUtils; import org.json.JSONObject; import org.junit.After; import org.junit.Assert; @@ -45,7 +44,6 @@ protected void init() throws InterruptedException, IOException { DataSourceMetadata createDSM = new DataSourceMetadata( "my_prometheus", - StringUtils.EMPTY, DataSourceType.PROMETHEUS, ImmutableList.of(), ImmutableMap.of("prometheus.uri", "http://localhost:9090"), diff --git a/legacy/build.gradle b/legacy/build.gradle index 7eb5489dc2..d89f7affe7 100644 --- a/legacy/build.gradle +++ b/legacy/build.gradle @@ -89,7 +89,7 @@ dependencies { } } implementation group: 'com.google.guava', name: 'guava', version: '32.0.1-jre' - implementation group: 'org.json', name: 'json', version:'20231013' + implementation group: 'org.json', name: 'json', version:'20230227' implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.12.0' implementation group: 'org.opensearch', name: 'opensearch', version: "${opensearch_version}" // add geo module as dependency. https://github.com/opensearch-project/OpenSearch/pull/4180/. 
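Note for reviewers: the next two hunks fold the per-verb datasource counters (create/get/put/patch/delete) back into the single DATASOURCE_REQ_COUNT metric and drop the EMR job counters. A hedged sketch of a post-revert call site, using only the surviving legacy metrics API (the wrapper class here is hypothetical; the deleted MetricUtils helper below wrapped exactly this call in a try/catch):

    import org.opensearch.sql.legacy.metrics.MetricName;
    import org.opensearch.sql.legacy.metrics.Metrics;

    final class DataSourceMetricsSketch {
      private DataSourceMetricsSketch() {}

      /** Counts one datasource API request on the consolidated counter,
       *  assuming registerDefaultMetrics() has registered it. */
      static void countDataSourceRequest() {
        Metrics.getInstance().getNumericalMetric(MetricName.DATASOURCE_REQ_COUNT).increment();
      }
    }
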
diff --git a/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricFactory.java b/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricFactory.java index fc243e1b50..e4fbd173c9 100644 --- a/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricFactory.java +++ b/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricFactory.java @@ -27,19 +27,6 @@ public static Metric createMetric(MetricName name) { case PPL_REQ_COUNT_TOTAL: case PPL_FAILED_REQ_COUNT_CUS: case PPL_FAILED_REQ_COUNT_SYS: - case DATASOURCE_CREATION_REQ_COUNT: - case DATASOURCE_GET_REQ_COUNT: - case DATASOURCE_PUT_REQ_COUNT: - case DATASOURCE_PATCH_REQ_COUNT: - case DATASOURCE_DELETE_REQ_COUNT: - case DATASOURCE_FAILED_REQ_COUNT_SYS: - case DATASOURCE_FAILED_REQ_COUNT_CUS: - case EMR_GET_JOB_RESULT_FAILURE_COUNT: - case EMR_START_JOB_REQUEST_FAILURE_COUNT: - case EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT: - case EMR_BATCH_QUERY_JOBS_CREATION_COUNT: - case EMR_STREAMING_QUERY_JOBS_CREATION_COUNT: - case EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT: return new NumericMetric<>(name.getName(), new RollingCounter()); default: return new NumericMetric<>(name.getName(), new BasicCounter()); diff --git a/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricName.java b/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricName.java index 0098008e57..1c895f5d69 100644 --- a/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricName.java +++ b/legacy/src/main/java/org/opensearch/sql/legacy/metrics/MetricName.java @@ -26,19 +26,9 @@ public enum MetricName { PPL_REQ_COUNT_TOTAL("ppl_request_count"), PPL_FAILED_REQ_COUNT_SYS("ppl_failed_request_count_syserr"), PPL_FAILED_REQ_COUNT_CUS("ppl_failed_request_count_cuserr"), - DATASOURCE_CREATION_REQ_COUNT("datasource_create_request_count"), - DATASOURCE_GET_REQ_COUNT("datasource_get_request_count"), - DATASOURCE_PUT_REQ_COUNT("datasource_put_request_count"), - DATASOURCE_PATCH_REQ_COUNT("datasource_patch_request_count"), - DATASOURCE_DELETE_REQ_COUNT("datasource_delete_request_count"), + DATASOURCE_REQ_COUNT("datasource_request_count"), DATASOURCE_FAILED_REQ_COUNT_SYS("datasource_failed_request_count_syserr"), - DATASOURCE_FAILED_REQ_COUNT_CUS("datasource_failed_request_count_cuserr"), - EMR_START_JOB_REQUEST_FAILURE_COUNT("emr_start_job_request_failure_count"), - EMR_GET_JOB_RESULT_FAILURE_COUNT("emr_get_job_request_failure_count"), - EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT("emr_cancel_job_request_failure_count"), - EMR_STREAMING_QUERY_JOBS_CREATION_COUNT("emr_streaming_jobs_creation_count"), - EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT("emr_interactive_jobs_creation_count"), - EMR_BATCH_QUERY_JOBS_CREATION_COUNT("emr_batch_jobs_creation_count"); + DATASOURCE_FAILED_REQ_COUNT_CUS("datasource_failed_request_count_cuserr"); private String name; @@ -60,19 +50,6 @@ public static List getNames() { .add(PPL_REQ_COUNT_TOTAL) .add(PPL_FAILED_REQ_COUNT_SYS) .add(PPL_FAILED_REQ_COUNT_CUS) - .add(DATASOURCE_CREATION_REQ_COUNT) - .add(DATASOURCE_DELETE_REQ_COUNT) - .add(DATASOURCE_FAILED_REQ_COUNT_CUS) - .add(DATASOURCE_GET_REQ_COUNT) - .add(DATASOURCE_PATCH_REQ_COUNT) - .add(DATASOURCE_FAILED_REQ_COUNT_SYS) - .add(DATASOURCE_PUT_REQ_COUNT) - .add(EMR_GET_JOB_RESULT_FAILURE_COUNT) - .add(EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT) - .add(EMR_START_JOB_REQUEST_FAILURE_COUNT) - .add(EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT) - .add(EMR_STREAMING_QUERY_JOBS_CREATION_COUNT) - .add(EMR_BATCH_QUERY_JOBS_CREATION_COUNT) .build(); public boolean isNumerical() { diff --git 
a/legacy/src/main/java/org/opensearch/sql/legacy/utils/MetricUtils.java b/legacy/src/main/java/org/opensearch/sql/legacy/utils/MetricUtils.java deleted file mode 100644 index b7a56bde4f..0000000000 --- a/legacy/src/main/java/org/opensearch/sql/legacy/utils/MetricUtils.java +++ /dev/null @@ -1,22 +0,0 @@ -package org.opensearch.sql.legacy.utils; - -import lombok.experimental.UtilityClass; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.sql.legacy.metrics.MetricName; -import org.opensearch.sql.legacy.metrics.Metrics; - -/** Utility to add metrics. */ -@UtilityClass -public class MetricUtils { - - private static final Logger LOG = LogManager.getLogger(); - - public static void incrementNumericalMetric(MetricName metricName) { - try { - Metrics.getInstance().getNumericalMetric(metricName).increment(); - } catch (Throwable throwable) { - LOG.error("Error while adding metric: {}", throwable.getMessage()); - } - } -} diff --git a/opensearch/build.gradle b/opensearch/build.gradle index 2261a1b4a9..11f4a9be6b 100644 --- a/opensearch/build.gradle +++ b/opensearch/build.gradle @@ -35,7 +35,7 @@ dependencies { implementation group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: "${versions.jackson}" implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: "${versions.jackson_databind}" implementation group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-cbor', version: "${versions.jackson}" - implementation group: 'org.json', name: 'json', version:'20231013' + implementation group: 'org.json', name: 'json', version:'20230227' compileOnly group: 'org.opensearch.client', name: 'opensearch-rest-high-level-client', version: "${opensearch_version}" implementation group: 'org.opensearch', name:'opensearch-ml-client', version: "${opensearch_build}" diff --git a/opensearch/src/main/java/org/opensearch/sql/opensearch/setting/OpenSearchSettings.java b/opensearch/src/main/java/org/opensearch/sql/opensearch/setting/OpenSearchSettings.java index 02b28d58ce..76bda07607 100644 --- a/opensearch/src/main/java/org/opensearch/sql/opensearch/setting/OpenSearchSettings.java +++ b/opensearch/src/main/java/org/opensearch/sql/opensearch/setting/OpenSearchSettings.java @@ -6,7 +6,6 @@ package org.opensearch.sql.opensearch.setting; import static org.opensearch.common.settings.Settings.EMPTY; -import static org.opensearch.common.unit.TimeValue.timeValueDays; import static org.opensearch.sql.common.setting.Settings.Key.ENCYRPTION_MASTER_KEY; import com.google.common.annotations.VisibleForTesting; @@ -26,7 +25,6 @@ import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.Setting; import org.opensearch.common.unit.MemorySizeValue; -import org.opensearch.common.unit.TimeValue; import org.opensearch.sql.common.setting.LegacySettings; import org.opensearch.sql.common.setting.Settings; @@ -137,48 +135,6 @@ public class OpenSearchSettings extends Settings { Setting.Property.NodeScope, Setting.Property.Dynamic); - public static final Setting SPARK_EXECUTION_SESSION_LIMIT_SETTING = - Setting.intSetting( - Key.SPARK_EXECUTION_SESSION_LIMIT.getKeyValue(), - 100, - Setting.Property.NodeScope, - Setting.Property.Dynamic); - - public static final Setting SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING = - Setting.intSetting( - Key.SPARK_EXECUTION_REFRESH_JOB_LIMIT.getKeyValue(), - 50, - Setting.Property.NodeScope, - Setting.Property.Dynamic); - - public static final Setting 
SESSION_INDEX_TTL_SETTING = - Setting.positiveTimeSetting( - Key.SESSION_INDEX_TTL.getKeyValue(), - timeValueDays(14), - Setting.Property.NodeScope, - Setting.Property.Dynamic); - - public static final Setting RESULT_INDEX_TTL_SETTING = - Setting.positiveTimeSetting( - Key.RESULT_INDEX_TTL.getKeyValue(), - timeValueDays(60), - Setting.Property.NodeScope, - Setting.Property.Dynamic); - - public static final Setting AUTO_INDEX_MANAGEMENT_ENABLED_SETTING = - Setting.boolSetting( - Key.AUTO_INDEX_MANAGEMENT_ENABLED.getKeyValue(), - true, - Setting.Property.NodeScope, - Setting.Property.Dynamic); - - public static final Setting DATASOURCES_LIMIT_SETTING = - Setting.intSetting( - Key.DATASOURCES_LIMIT.getKeyValue(), - 20, - Setting.Property.NodeScope, - Setting.Property.Dynamic); - /** Construct OpenSearchSetting. The OpenSearchSetting must be singleton. */ @SuppressWarnings("unchecked") public OpenSearchSettings(ClusterSettings clusterSettings) { @@ -249,42 +205,6 @@ public OpenSearchSettings(ClusterSettings clusterSettings) { Key.SPARK_EXECUTION_ENGINE_CONFIG, SPARK_EXECUTION_ENGINE_CONFIG, new Updater(Key.SPARK_EXECUTION_ENGINE_CONFIG)); - register( - settingBuilder, - clusterSettings, - Key.SPARK_EXECUTION_SESSION_LIMIT, - SPARK_EXECUTION_SESSION_LIMIT_SETTING, - new Updater(Key.SPARK_EXECUTION_SESSION_LIMIT)); - register( - settingBuilder, - clusterSettings, - Key.SPARK_EXECUTION_REFRESH_JOB_LIMIT, - SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING, - new Updater(Key.SPARK_EXECUTION_REFRESH_JOB_LIMIT)); - register( - settingBuilder, - clusterSettings, - Key.SESSION_INDEX_TTL, - SESSION_INDEX_TTL_SETTING, - new Updater(Key.SESSION_INDEX_TTL)); - register( - settingBuilder, - clusterSettings, - Key.RESULT_INDEX_TTL, - RESULT_INDEX_TTL_SETTING, - new Updater(Key.RESULT_INDEX_TTL)); - register( - settingBuilder, - clusterSettings, - Key.AUTO_INDEX_MANAGEMENT_ENABLED, - AUTO_INDEX_MANAGEMENT_ENABLED_SETTING, - new Updater(Key.AUTO_INDEX_MANAGEMENT_ENABLED)); - register( - settingBuilder, - clusterSettings, - Key.DATASOURCES_LIMIT, - DATASOURCES_LIMIT_SETTING, - new Updater(Key.DATASOURCES_LIMIT)); registerNonDynamicSettings( settingBuilder, clusterSettings, Key.CLUSTER_NAME, ClusterName.CLUSTER_NAME_SETTING); defaultSettings = settingBuilder.build(); @@ -350,12 +270,6 @@ public static List> pluginSettings() { .add(METRICS_ROLLING_INTERVAL_SETTING) .add(DATASOURCE_URI_HOSTS_DENY_LIST) .add(SPARK_EXECUTION_ENGINE_CONFIG) - .add(SPARK_EXECUTION_SESSION_LIMIT_SETTING) - .add(SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING) - .add(SESSION_INDEX_TTL_SETTING) - .add(RESULT_INDEX_TTL_SETTING) - .add(AUTO_INDEX_MANAGEMENT_ENABLED_SETTING) - .add(DATASOURCES_LIMIT_SETTING) .build(); } diff --git a/plugin/src/main/java/org/opensearch/sql/plugin/SQLPlugin.java b/plugin/src/main/java/org/opensearch/sql/plugin/SQLPlugin.java index 20a6834f90..f3fd043b63 100644 --- a/plugin/src/main/java/org/opensearch/sql/plugin/SQLPlugin.java +++ b/plugin/src/main/java/org/opensearch/sql/plugin/SQLPlugin.java @@ -7,7 +7,6 @@ import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_ENGINE_CONFIG; import static org.opensearch.sql.datasource.model.DataSourceMetadata.defaultOpenSearchDataSourceMetadata; -import static org.opensearch.sql.spark.execution.statestore.StateStore.ALL_DATASOURCE; import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; import com.amazonaws.services.emrserverless.AWSEMRServerless; @@ -16,7 +15,6 @@ import com.google.common.collect.ImmutableSet; import java.security.AccessController; import 
java.security.PrivilegedAction; -import java.time.Clock; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -60,15 +58,20 @@ import org.opensearch.sql.datasources.auth.DataSourceUserAuthorizationHelperImpl; import org.opensearch.sql.datasources.encryptor.EncryptorImpl; import org.opensearch.sql.datasources.glue.GlueDataSourceFactory; -import org.opensearch.sql.datasources.model.transport.*; +import org.opensearch.sql.datasources.model.transport.CreateDataSourceActionResponse; +import org.opensearch.sql.datasources.model.transport.DeleteDataSourceActionResponse; +import org.opensearch.sql.datasources.model.transport.GetDataSourceActionResponse; +import org.opensearch.sql.datasources.model.transport.UpdateDataSourceActionResponse; import org.opensearch.sql.datasources.rest.RestDataSourceQueryAction; import org.opensearch.sql.datasources.service.DataSourceMetadataStorage; import org.opensearch.sql.datasources.service.DataSourceServiceImpl; import org.opensearch.sql.datasources.storage.OpenSearchDataSourceMetadataStorage; -import org.opensearch.sql.datasources.transport.*; +import org.opensearch.sql.datasources.transport.TransportCreateDataSourceAction; +import org.opensearch.sql.datasources.transport.TransportDeleteDataSourceAction; +import org.opensearch.sql.datasources.transport.TransportGetDataSourceAction; +import org.opensearch.sql.datasources.transport.TransportUpdateDataSourceAction; import org.opensearch.sql.legacy.esdomain.LocalClusterState; import org.opensearch.sql.legacy.executor.AsyncRestExecutor; -import org.opensearch.sql.legacy.metrics.GaugeMetric; import org.opensearch.sql.legacy.metrics.Metrics; import org.opensearch.sql.legacy.plugin.RestSqlAction; import org.opensearch.sql.legacy.plugin.RestSqlStatsAction; @@ -92,15 +95,11 @@ import org.opensearch.sql.spark.asyncquery.OpensearchAsyncQueryJobMetadataStorageService; import org.opensearch.sql.spark.client.EMRServerlessClient; import org.opensearch.sql.spark.client.EmrServerlessClientImpl; -import org.opensearch.sql.spark.cluster.ClusterManagerEventListener; import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier; import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplierImpl; import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher; -import org.opensearch.sql.spark.execution.session.SessionManager; -import org.opensearch.sql.spark.execution.statestore.StateStore; import org.opensearch.sql.spark.flint.FlintIndexMetadataReaderImpl; -import org.opensearch.sql.spark.leasemanager.DefaultLeaseManager; import org.opensearch.sql.spark.response.JobExecutionResponseReader; import org.opensearch.sql.spark.rest.RestAsyncQueryManagementAction; import org.opensearch.sql.spark.storage.SparkStorageFactory; @@ -181,10 +180,6 @@ public List getRestHandlers( new ActionType<>( TransportUpdateDataSourceAction.NAME, UpdateDataSourceActionResponse::new), TransportUpdateDataSourceAction.class), - new ActionHandler<>( - new ActionType<>( - TransportPatchDataSourceAction.NAME, PatchDataSourceActionResponse::new), - TransportPatchDataSourceAction.class), new ActionHandler<>( new ActionType<>( TransportDeleteDataSourceAction.NAME, DeleteDataSourceActionResponse::new), @@ -250,18 +245,7 @@ public Collection createComponents( }); injector = modules.createInjector(); - ClusterManagerEventListener clusterManagerEventListener = - new ClusterManagerEventListener( - clusterService, - threadPool, - client, - 
Clock.systemUTC(), - OpenSearchSettings.SESSION_INDEX_TTL_SETTING, - OpenSearchSettings.RESULT_INDEX_TTL_SETTING, - OpenSearchSettings.AUTO_INDEX_MANAGEMENT_ENABLED_SETTING, - environment.settings()); - return ImmutableList.of( - dataSourceService, asyncQueryExecutorService, clusterManagerEventListener, pluginSettings); + return ImmutableList.of(dataSourceService, asyncQueryExecutorService); } @Override @@ -322,10 +306,8 @@ private DataSourceServiceImpl createDataSourceService() { private AsyncQueryExecutorService createAsyncQueryExecutorService( SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier, SparkExecutionEngineConfig sparkExecutionEngineConfig) { - StateStore stateStore = new StateStore(client, clusterService); - registerStateStoreMetrics(stateStore); AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService = - new OpensearchAsyncQueryJobMetadataStorageService(stateStore); + new OpensearchAsyncQueryJobMetadataStorageService(client, clusterService); EMRServerlessClient emrServerlessClient = createEMRServerlessClient(sparkExecutionEngineConfig.getRegion()); JobExecutionResponseReader jobExecutionResponseReader = new JobExecutionResponseReader(client); @@ -336,29 +318,13 @@ private AsyncQueryExecutorService createAsyncQueryExecutorService( new DataSourceUserAuthorizationHelperImpl(client), jobExecutionResponseReader, new FlintIndexMetadataReaderImpl(client), - client, - new SessionManager(stateStore, emrServerlessClient, pluginSettings), - new DefaultLeaseManager(pluginSettings, stateStore), - stateStore); + client); return new AsyncQueryExecutorServiceImpl( asyncQueryJobMetadataStorageService, sparkQueryDispatcher, sparkExecutionEngineConfigSupplier); } - private void registerStateStoreMetrics(StateStore stateStore) { - GaugeMetric activeSessionMetric = - new GaugeMetric<>( - "active_async_query_sessions_count", - StateStore.activeSessionsCount(stateStore, ALL_DATASOURCE)); - GaugeMetric activeStatementMetric = - new GaugeMetric<>( - "active_async_query_statements_count", - StateStore.activeStatementsCount(stateStore, ALL_DATASOURCE)); - Metrics.getInstance().registerMetric(activeSessionMetric); - Metrics.getInstance().registerMetric(activeStatementMetric); - } - private EMRServerlessClient createEMRServerlessClient(String region) { return AccessController.doPrivileged( (PrivilegedAction) diff --git a/plugin/src/main/java/org/opensearch/sql/plugin/rest/RestQuerySettingsAction.java b/plugin/src/main/java/org/opensearch/sql/plugin/rest/RestQuerySettingsAction.java index f9080051b4..885c953c17 100644 --- a/plugin/src/main/java/org/opensearch/sql/plugin/rest/RestQuerySettingsAction.java +++ b/plugin/src/main/java/org/opensearch/sql/plugin/rest/RestQuerySettingsAction.java @@ -39,8 +39,6 @@ public class RestQuerySettingsAction extends BaseRestHandler { private static final String LEGACY_SQL_SETTINGS_PREFIX = "opendistro.sql."; private static final String LEGACY_PPL_SETTINGS_PREFIX = "opendistro.ppl."; private static final String LEGACY_COMMON_SETTINGS_PREFIX = "opendistro.query."; - private static final String EXECUTION_ENGINE_SETTINGS_PREFIX = "plugins.query.executionengine"; - public static final String DATASOURCES_SETTINGS_PREFIX = "plugins.query.datasources"; private static final List SETTINGS_PREFIX = ImmutableList.of( SQL_SETTINGS_PREFIX, @@ -50,9 +48,6 @@ public class RestQuerySettingsAction extends BaseRestHandler { LEGACY_PPL_SETTINGS_PREFIX, LEGACY_COMMON_SETTINGS_PREFIX); - private static final List DENY_LIST_SETTINGS_PREFIX = - 
ImmutableList.of(EXECUTION_ENGINE_SETTINGS_PREFIX, DATASOURCES_SETTINGS_PREFIX); - public static final String SETTINGS_API_ENDPOINT = "/_plugins/_query/settings"; public static final String LEGACY_SQL_SETTINGS_API_ENDPOINT = "/_opendistro/_sql/settings"; @@ -138,10 +133,6 @@ private Settings getAndFilterSettings(Map source) { } return true; }); - // Applying DenyList Filter. - settingsBuilder - .keys() - .removeIf(key -> DENY_LIST_SETTINGS_PREFIX.stream().anyMatch(key::startsWith)); return settingsBuilder.build(); } catch (IOException e) { throw new OpenSearchGenerationException("Failed to generate [" + source + "]", e); diff --git a/ppl/build.gradle b/ppl/build.gradle index 6d0a67c443..484934ddc3 100644 --- a/ppl/build.gradle +++ b/ppl/build.gradle @@ -48,8 +48,8 @@ dependencies { implementation "org.antlr:antlr4-runtime:4.7.1" implementation group: 'com.google.guava', name: 'guava', version: '32.0.1-jre' - api group: 'org.json', name: 'json', version: '20231013' - implementation group: 'org.apache.logging.log4j', name: 'log4j-core', version:"${versions.log4j}" + api group: 'org.json', name: 'json', version: '20230227' + implementation group: 'org.apache.logging.log4j', name: 'log4j-core', version:'2.20.0' api project(':common') api project(':core') api project(':protocol') diff --git a/prometheus/build.gradle b/prometheus/build.gradle index c2878ab1b4..f8c10c7f6b 100644 --- a/prometheus/build.gradle +++ b/prometheus/build.gradle @@ -22,7 +22,7 @@ dependencies { implementation group: 'com.fasterxml.jackson.core', name: 'jackson-core', version: "${versions.jackson}" implementation group: 'com.fasterxml.jackson.core', name: 'jackson-databind', version: "${versions.jackson_databind}" implementation group: 'com.fasterxml.jackson.dataformat', name: 'jackson-dataformat-cbor', version: "${versions.jackson}" - implementation group: 'org.json', name: 'json', version: '20231013' + implementation group: 'org.json', name: 'json', version: '20230227' testImplementation('org.junit.jupiter:junit-jupiter:5.6.2') testImplementation group: 'org.hamcrest', name: 'hamcrest-library', version: '2.1' diff --git a/spark/build.gradle b/spark/build.gradle index bed355b9d2..c06b5b6ecf 100644 --- a/spark/build.gradle +++ b/spark/build.gradle @@ -45,47 +45,22 @@ dependencies { api project(':core') implementation project(':protocol') implementation project(':datasources') - implementation project(':legacy') implementation group: 'org.opensearch', name: 'opensearch', version: "${opensearch_version}" - implementation group: 'org.json', name: 'json', version: '20231013' + implementation group: 'org.json', name: 'json', version: '20230227' api group: 'com.amazonaws', name: 'aws-java-sdk-emr', version: '1.12.545' api group: 'com.amazonaws', name: 'aws-java-sdk-emrserverless', version: '1.12.545' implementation group: 'commons-io', name: 'commons-io', version: '2.8.0' - testImplementation(platform("org.junit:junit-bom:5.6.2")) - - testImplementation('org.junit.jupiter:junit-jupiter') + testImplementation('org.junit.jupiter:junit-jupiter:5.6.2') testImplementation group: 'org.mockito', name: 'mockito-core', version: '5.2.0' testImplementation group: 'org.mockito', name: 'mockito-junit-jupiter', version: '5.2.0' - - testCompileOnly('junit:junit:4.13.1') { - exclude group: 'org.hamcrest', module: 'hamcrest-core' - } - testRuntimeOnly("org.junit.vintage:junit-vintage-engine") { - exclude group: 'org.hamcrest', module: 'hamcrest-core' - } - testRuntimeOnly("org.junit.platform:junit-platform-launcher") { - because 'allows tests 
to run from IDEs that bundle older version of launcher' - } - testImplementation("org.opensearch.test:framework:${opensearch_version}") - testImplementation project(':opensearch') + testImplementation 'junit:junit:4.13.1' + testImplementation "org.opensearch.test:framework:${opensearch_version}" } test { - useJUnitPlatform { - includeEngines("junit-jupiter") - } - testLogging { - events "failed" - exceptionFormat "full" - } -} -task junit4(type: Test) { - useJUnitPlatform { - includeEngines("junit-vintage") - } - systemProperty 'tests.security.manager', 'false' + useJUnitPlatform() testLogging { events "failed" exceptionFormat "full" @@ -93,8 +68,6 @@ task junit4(type: Test) { } jacocoTestReport { - dependsOn test, junit4 - executionData test, junit4 reports { html.enabled true xml.enabled true @@ -105,10 +78,9 @@ jacocoTestReport { })) } } +test.finalizedBy(project.tasks.jacocoTestReport) jacocoTestCoverageVerification { - dependsOn test, junit4 - executionData test, junit4 violationRules { rule { element = 'CLASS' @@ -120,15 +92,6 @@ jacocoTestCoverageVerification { 'org.opensearch.sql.spark.asyncquery.exceptions.*', 'org.opensearch.sql.spark.dispatcher.model.*', 'org.opensearch.sql.spark.flint.FlintIndexType', - // ignore because XContext IOException - 'org.opensearch.sql.spark.execution.statestore.StateStore', - 'org.opensearch.sql.spark.execution.session.SessionModel', - 'org.opensearch.sql.spark.execution.statement.StatementModel', - 'org.opensearch.sql.spark.flint.FlintIndexStateModel', - // TODO: add tests for purging flint indices - 'org.opensearch.sql.spark.cluster.ClusterManagerEventListener*', - 'org.opensearch.sql.spark.cluster.FlintIndexRetention', - 'org.opensearch.sql.spark.cluster.IndexCleanup' ] limit { counter = 'LINE' diff --git a/spark/src/main/antlr/FlintSparkSqlExtensions.g4 b/spark/src/main/antlr/FlintSparkSqlExtensions.g4 index e44944fcff..e8e0264f28 100644 --- a/spark/src/main/antlr/FlintSparkSqlExtensions.g4 +++ b/spark/src/main/antlr/FlintSparkSqlExtensions.g4 @@ -17,7 +17,6 @@ singleStatement statement : skippingIndexStatement | coveringIndexStatement - | materializedViewStatement ; skippingIndexStatement @@ -31,7 +30,6 @@ createSkippingIndexStatement : CREATE SKIPPING INDEX (IF NOT EXISTS)? ON tableName LEFT_PAREN indexColTypeList RIGHT_PAREN - whereClause? (WITH LEFT_PAREN propertyList RIGHT_PAREN)? ; @@ -59,7 +57,6 @@ createCoveringIndexStatement : CREATE INDEX (IF NOT EXISTS)? indexName ON tableName LEFT_PAREN indexColumns=multipartIdentifierPropertyList RIGHT_PAREN - whereClause? (WITH LEFT_PAREN propertyList RIGHT_PAREN)? ; @@ -79,52 +76,6 @@ dropCoveringIndexStatement : DROP INDEX indexName ON tableName ; -materializedViewStatement - : createMaterializedViewStatement - | refreshMaterializedViewStatement - | showMaterializedViewStatement - | describeMaterializedViewStatement - | dropMaterializedViewStatement - ; - -createMaterializedViewStatement - : CREATE MATERIALIZED VIEW (IF NOT EXISTS)? mvName=multipartIdentifier - AS query=materializedViewQuery - (WITH LEFT_PAREN propertyList RIGHT_PAREN)? 
- ; - -refreshMaterializedViewStatement - : REFRESH MATERIALIZED VIEW mvName=multipartIdentifier - ; - -showMaterializedViewStatement - : SHOW MATERIALIZED (VIEW | VIEWS) IN catalogDb=multipartIdentifier - ; - -describeMaterializedViewStatement - : (DESC | DESCRIBE) MATERIALIZED VIEW mvName=multipartIdentifier - ; - -dropMaterializedViewStatement - : DROP MATERIALIZED VIEW mvName=multipartIdentifier - ; - -/* - * Match all remaining tokens in non-greedy way - * so WITH clause won't be captured by this rule. - */ -materializedViewQuery - : .+? - ; - -whereClause - : WHERE filterCondition - ; - -filterCondition - : .+? - ; - indexColTypeList : indexColType (COMMA indexColType)* ; diff --git a/spark/src/main/antlr/SparkSqlBase.g4 b/spark/src/main/antlr/SparkSqlBase.g4 index 597a1e5856..4ac1ced5c4 100644 --- a/spark/src/main/antlr/SparkSqlBase.g4 +++ b/spark/src/main/antlr/SparkSqlBase.g4 @@ -154,7 +154,6 @@ COMMA: ','; DOT: '.'; -AS: 'AS'; CREATE: 'CREATE'; DESC: 'DESC'; DESCRIBE: 'DESCRIBE'; @@ -162,19 +161,14 @@ DROP: 'DROP'; EXISTS: 'EXISTS'; FALSE: 'FALSE'; IF: 'IF'; -IN: 'IN'; INDEX: 'INDEX'; INDEXES: 'INDEXES'; -MATERIALIZED: 'MATERIALIZED'; NOT: 'NOT'; ON: 'ON'; PARTITION: 'PARTITION'; REFRESH: 'REFRESH'; SHOW: 'SHOW'; TRUE: 'TRUE'; -VIEW: 'VIEW'; -VIEWS: 'VIEWS'; -WHERE: 'WHERE'; WITH: 'WITH'; diff --git a/spark/src/main/antlr/SqlBaseLexer.g4 b/spark/src/main/antlr/SqlBaseLexer.g4 index e8b5cb012f..d9128de0f5 100644 --- a/spark/src/main/antlr/SqlBaseLexer.g4 +++ b/spark/src/main/antlr/SqlBaseLexer.g4 @@ -447,7 +447,6 @@ PIPE: '|'; CONCAT_PIPE: '||'; HAT: '^'; COLON: ':'; -DOUBLE_COLON: '::'; ARROW: '->'; FAT_ARROW : '=>'; HENT_START: '/*+'; diff --git a/spark/src/main/antlr/SqlBaseParser.g4 b/spark/src/main/antlr/SqlBaseParser.g4 index 84a31dafed..6a6d39e96c 100644 --- a/spark/src/main/antlr/SqlBaseParser.g4 +++ b/spark/src/main/antlr/SqlBaseParser.g4 @@ -957,7 +957,6 @@ primaryExpression | CASE whenClause+ (ELSE elseExpression=expression)? END #searchedCase | CASE value=expression whenClause+ (ELSE elseExpression=expression)? END #simpleCase | name=(CAST | TRY_CAST) LEFT_PAREN expression AS dataType RIGHT_PAREN #cast - | primaryExpression DOUBLE_COLON dataType #castByColon | STRUCT LEFT_PAREN (argument+=namedExpression (COMMA argument+=namedExpression)*)? RIGHT_PAREN #struct | FIRST LEFT_PAREN expression (IGNORE NULLS)? RIGHT_PAREN #first | ANY_VALUE LEFT_PAREN expression (IGNORE NULLS)? RIGHT_PAREN #any_value @@ -968,6 +967,7 @@ primaryExpression | qualifiedName DOT ASTERISK #star | LEFT_PAREN namedExpression (COMMA namedExpression)+ RIGHT_PAREN #rowConstructor | LEFT_PAREN query RIGHT_PAREN #subqueryExpression + | IDENTIFIER_KW LEFT_PAREN expression RIGHT_PAREN #identifierClause | functionName LEFT_PAREN (setQuantifier? argument+=functionArgument (COMMA argument+=functionArgument)*)? RIGHT_PAREN (FILTER LEFT_PAREN WHERE where=booleanExpression RIGHT_PAREN)? @@ -1196,7 +1196,6 @@ qualifiedNameList functionName : IDENTIFIER_KW LEFT_PAREN expression RIGHT_PAREN - | identFunc=IDENTIFIER_KW // IDENTIFIER itself is also a valid function name. 
| qualifiedName | FILTER | LEFT diff --git a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java index 1c0979dffb..13db103f4b 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java +++ b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImpl.java @@ -65,17 +65,14 @@ public CreateAsyncQueryResponse createAsyncQuery( createAsyncQueryRequest.getLang(), sparkExecutionEngineConfig.getExecutionRoleARN(), sparkExecutionEngineConfig.getClusterName(), - sparkExecutionEngineConfig.getSparkSubmitParameters(), - createAsyncQueryRequest.getSessionId())); + sparkExecutionEngineConfig.getSparkSubmitParameters())); asyncQueryJobMetadataStorageService.storeJobMetadata( new AsyncQueryJobMetadata( - dispatchQueryResponse.getQueryId(), sparkExecutionEngineConfig.getApplicationId(), dispatchQueryResponse.getJobId(), - dispatchQueryResponse.getResultIndex(), - dispatchQueryResponse.getSessionId())); - return new CreateAsyncQueryResponse( - dispatchQueryResponse.getQueryId().getId(), dispatchQueryResponse.getSessionId()); + dispatchQueryResponse.isDropIndexQuery(), + dispatchQueryResponse.getResultIndex())); + return new CreateAsyncQueryResponse(dispatchQueryResponse.getJobId()); } @Override @@ -84,7 +81,6 @@ public AsyncQueryExecutionResponse getAsyncQueryResults(String queryId) { Optional jobMetadata = asyncQueryJobMetadataStorageService.getJobMetadata(queryId); if (jobMetadata.isPresent()) { - String sessionId = jobMetadata.get().getSessionId(); JSONObject jsonObject = sparkQueryDispatcher.getQueryResponse(jobMetadata.get()); if (JobRunState.SUCCESS.toString().equals(jsonObject.getString(STATUS_FIELD))) { DefaultSparkSqlFunctionResponseHandle sparkSqlFunctionResponseHandle = @@ -94,18 +90,13 @@ public AsyncQueryExecutionResponse getAsyncQueryResults(String queryId) { result.add(sparkSqlFunctionResponseHandle.next()); } return new AsyncQueryExecutionResponse( - JobRunState.SUCCESS.toString(), - sparkSqlFunctionResponseHandle.schema(), - result, - null, - sessionId); + JobRunState.SUCCESS.toString(), sparkSqlFunctionResponseHandle.schema(), result, null); } else { return new AsyncQueryExecutionResponse( jsonObject.optString(STATUS_FIELD, JobRunState.FAILED.toString()), null, null, - jsonObject.optString(ERROR_FIELD, ""), - sessionId); + jsonObject.optString(ERROR_FIELD, "")); } } throw new AsyncQueryNotFoundException(String.format("QueryId: %s not found", queryId)); diff --git a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryJobMetadataStorageService.java b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryJobMetadataStorageService.java index 6de8c35f03..a95a6ffe45 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryJobMetadataStorageService.java +++ b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryJobMetadataStorageService.java @@ -7,31 +7,166 @@ package org.opensearch.sql.spark.asyncquery; -import static org.opensearch.sql.spark.execution.statestore.StateStore.createJobMetaData; - +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; import java.util.Optional; -import lombok.RequiredArgsConstructor; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; +import 
org.apache.commons.io.IOUtils; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.DocWriteRequest; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.indices.create.CreateIndexRequest; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.action.support.WriteRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.QueryBuilders; +import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.execution.statestore.StateStore; /** Opensearch implementation of {@link AsyncQueryJobMetadataStorageService} */ -@RequiredArgsConstructor public class OpensearchAsyncQueryJobMetadataStorageService implements AsyncQueryJobMetadataStorageService { - private final StateStore stateStore; + public static final String JOB_METADATA_INDEX = ".ql-job-metadata"; + private static final String JOB_METADATA_INDEX_MAPPING_FILE_NAME = + "job-metadata-index-mapping.yml"; + private static final String JOB_METADATA_INDEX_SETTINGS_FILE_NAME = + "job-metadata-index-settings.yml"; + private static final Logger LOG = LogManager.getLogger(); + private final Client client; + private final ClusterService clusterService; + + /** + * This class implements JobMetadataStorageService interface using OpenSearch as underlying + * storage. + * + * @param client opensearch NodeClient. + * @param clusterService ClusterService. 
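+   * <p>A minimal usage sketch (hypothetical wiring; the real instances are injected by
+   * the plugin at startup):
+   *
+   * <pre>{@code
+   * AsyncQueryJobMetadataStorageService service =
+   *     new OpensearchAsyncQueryJobMetadataStorageService(client, clusterService);
+   * service.storeJobMetadata(new AsyncQueryJobMetadata(applicationId, jobId, resultIndex));
+   * Optional<AsyncQueryJobMetadata> saved = service.getJobMetadata(jobId);
+   * }</pre>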
+ */ + public OpensearchAsyncQueryJobMetadataStorageService( + Client client, ClusterService clusterService) { + this.client = client; + this.clusterService = clusterService; + } @Override public void storeJobMetadata(AsyncQueryJobMetadata asyncQueryJobMetadata) { - AsyncQueryId queryId = asyncQueryJobMetadata.getQueryId(); - createJobMetaData(stateStore, queryId.getDataSourceName()).apply(asyncQueryJobMetadata); + if (!this.clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX)) { + createJobMetadataIndex(); + } + IndexRequest indexRequest = new IndexRequest(JOB_METADATA_INDEX); + indexRequest.id(asyncQueryJobMetadata.getJobId()); + indexRequest.opType(DocWriteRequest.OpType.CREATE); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + ActionFuture indexResponseActionFuture; + IndexResponse indexResponse; + try (ThreadContext.StoredContext storedContext = + client.threadPool().getThreadContext().stashContext()) { + indexRequest.source(AsyncQueryJobMetadata.convertToXContent(asyncQueryJobMetadata)); + indexResponseActionFuture = client.index(indexRequest); + indexResponse = indexResponseActionFuture.actionGet(); + } catch (Exception e) { + throw new RuntimeException(e); + } + + if (indexResponse.getResult().equals(DocWriteResponse.Result.CREATED)) { + LOG.debug("JobMetadata : {} successfully created", asyncQueryJobMetadata.getJobId()); + } else { + throw new RuntimeException( + "Saving job metadata information failed with result : " + + indexResponse.getResult().getLowercase()); + } } @Override - public Optional getJobMetadata(String qid) { - AsyncQueryId queryId = new AsyncQueryId(qid); - return StateStore.getJobMetaData(stateStore, queryId.getDataSourceName()) - .apply(queryId.docId()); + public Optional getJobMetadata(String jobId) { + if (!this.clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX)) { + createJobMetadataIndex(); + return Optional.empty(); + } + return searchInJobMetadataIndex(QueryBuilders.termQuery("jobId.keyword", jobId)).stream() + .findFirst(); + } + + private void createJobMetadataIndex() { + try { + InputStream mappingFileStream = + OpensearchAsyncQueryJobMetadataStorageService.class + .getClassLoader() + .getResourceAsStream(JOB_METADATA_INDEX_MAPPING_FILE_NAME); + InputStream settingsFileStream = + OpensearchAsyncQueryJobMetadataStorageService.class + .getClassLoader() + .getResourceAsStream(JOB_METADATA_INDEX_SETTINGS_FILE_NAME); + CreateIndexRequest createIndexRequest = new CreateIndexRequest(JOB_METADATA_INDEX); + createIndexRequest + .mapping(IOUtils.toString(mappingFileStream, StandardCharsets.UTF_8), XContentType.YAML) + .settings( + IOUtils.toString(settingsFileStream, StandardCharsets.UTF_8), XContentType.YAML); + ActionFuture createIndexResponseActionFuture; + try (ThreadContext.StoredContext ignored = + client.threadPool().getThreadContext().stashContext()) { + createIndexResponseActionFuture = client.admin().indices().create(createIndexRequest); + } + CreateIndexResponse createIndexResponse = createIndexResponseActionFuture.actionGet(); + if (createIndexResponse.isAcknowledged()) { + LOG.info("Index: {} creation Acknowledged", JOB_METADATA_INDEX); + } else { + throw new RuntimeException("Index creation is not acknowledged."); + } + } catch (Throwable e) { + throw new RuntimeException( + "Internal server error while creating" + + JOB_METADATA_INDEX + + " index:: " + + e.getMessage()); + } + } + + private List searchInJobMetadataIndex(QueryBuilder query) { + SearchRequest searchRequest = new SearchRequest(); 
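+    // Only a single hit is expected per jobId, so cap the fetch at one document and let
+    // the caller pick the first match.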
+ searchRequest.indices(JOB_METADATA_INDEX); + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); + searchSourceBuilder.query(query); + searchSourceBuilder.size(1); + searchRequest.source(searchSourceBuilder); + // https://github.com/opensearch-project/sql/issues/1801. + searchRequest.preference("_primary_first"); + ActionFuture searchResponseActionFuture; + try (ThreadContext.StoredContext ignored = + client.threadPool().getThreadContext().stashContext()) { + searchResponseActionFuture = client.search(searchRequest); + } + SearchResponse searchResponse = searchResponseActionFuture.actionGet(); + if (searchResponse.status().getStatus() != 200) { + throw new RuntimeException( + "Fetching job metadata information failed with status : " + searchResponse.status()); + } else { + List list = new ArrayList<>(); + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + String sourceAsString = searchHit.getSourceAsString(); + AsyncQueryJobMetadata asyncQueryJobMetadata; + try { + asyncQueryJobMetadata = AsyncQueryJobMetadata.toJobMetadata(sourceAsString); + } catch (IOException e) { + throw new RuntimeException(e); + } + list.add(asyncQueryJobMetadata); + } + return list; + } } } diff --git a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java index e5d9cffd5f..d2e54af004 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java +++ b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryExecutionResponse.java @@ -19,5 +19,4 @@ public class AsyncQueryExecutionResponse { private final ExecutionEngine.Schema schema; private final List results; private final String error; - private final String sessionId; } diff --git a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryId.java b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryId.java deleted file mode 100644 index b99ebe0e8c..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryId.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.asyncquery.model; - -import static org.opensearch.sql.spark.utils.IDUtils.decode; -import static org.opensearch.sql.spark.utils.IDUtils.encode; - -import lombok.Data; - -/** Async query id. */ -@Data -public class AsyncQueryId { - private final String id; - - public static AsyncQueryId newAsyncQueryId(String datasourceName) { - return new AsyncQueryId(encode(datasourceName)); - } - - public String getDataSourceName() { - return decode(id); - } - - /** OpenSearch DocId. 
*/ - public String docId() { - return "qid" + id; - } - - @Override - public String toString() { - return "asyncQueryId=" + id; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java index d1357f364d..b470ef989f 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java +++ b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/AsyncQueryJobMetadata.java @@ -8,77 +8,34 @@ package org.opensearch.sql.spark.asyncquery.model; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.opensearch.sql.spark.execution.statement.StatementModel.QUERY_ID; import com.google.gson.Gson; import java.io.IOException; +import lombok.AllArgsConstructor; import lombok.Data; import lombok.EqualsAndHashCode; -import lombok.SneakyThrows; +import org.opensearch.common.xcontent.XContentFactory; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.core.xcontent.DeprecationHandler; +import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.sql.spark.execution.statestore.StateModel; /** This class models all the metadata required for a job. */ @Data -@EqualsAndHashCode(callSuper = false) -public class AsyncQueryJobMetadata extends StateModel { - public static final String TYPE_JOBMETA = "jobmeta"; +@AllArgsConstructor +@EqualsAndHashCode +public class AsyncQueryJobMetadata { + private String applicationId; + private String jobId; + private boolean isDropIndexQuery; + private String resultIndex; - private final AsyncQueryId queryId; - private final String applicationId; - private final String jobId; - private final String resultIndex; - // optional sessionId. - private final String sessionId; - - @EqualsAndHashCode.Exclude private final long seqNo; - @EqualsAndHashCode.Exclude private final long primaryTerm; - - public AsyncQueryJobMetadata( - AsyncQueryId queryId, String applicationId, String jobId, String resultIndex) { - this( - queryId, - applicationId, - jobId, - resultIndex, - null, - SequenceNumbers.UNASSIGNED_SEQ_NO, - SequenceNumbers.UNASSIGNED_PRIMARY_TERM); - } - - public AsyncQueryJobMetadata( - AsyncQueryId queryId, - String applicationId, - String jobId, - String resultIndex, - String sessionId) { - this( - queryId, - applicationId, - jobId, - resultIndex, - sessionId, - SequenceNumbers.UNASSIGNED_SEQ_NO, - SequenceNumbers.UNASSIGNED_PRIMARY_TERM); - } - - public AsyncQueryJobMetadata( - AsyncQueryId queryId, - String applicationId, - String jobId, - String resultIndex, - String sessionId, - long seqNo, - long primaryTerm) { - this.queryId = queryId; + public AsyncQueryJobMetadata(String applicationId, String jobId, String resultIndex) { this.applicationId = applicationId; this.jobId = jobId; + this.isDropIndexQuery = false; this.resultIndex = resultIndex; - this.sessionId = sessionId; - this.seqNo = seqNo; - this.primaryTerm = primaryTerm; } @Override @@ -89,34 +46,38 @@ public String toString() { /** * Converts JobMetadata to XContentBuilder. * + * @param metadata metadata. * @return XContentBuilder {@link XContentBuilder} * @throws Exception Exception. 
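+   * <p>The emitted document has this shape (illustrative values only):
+   * <pre>{@code
+   * {"jobId": "job-123", "applicationId": "app-456", "isDropIndexQuery": false, "resultIndex": null}
+   * }</pre>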
*/ - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder - .startObject() - .field(QUERY_ID, queryId.getId()) - .field("type", TYPE_JOBMETA) - .field("jobId", jobId) - .field("applicationId", applicationId) - .field("resultIndex", resultIndex) - .field("sessionId", sessionId) - .endObject(); + public static XContentBuilder convertToXContent(AsyncQueryJobMetadata metadata) throws Exception { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.field("jobId", metadata.getJobId()); + builder.field("applicationId", metadata.getApplicationId()); + builder.field("isDropIndexQuery", metadata.isDropIndexQuery()); + builder.field("resultIndex", metadata.getResultIndex()); + builder.endObject(); return builder; } - /** copy builder. update seqNo and primaryTerm */ - public static AsyncQueryJobMetadata copy( - AsyncQueryJobMetadata copy, long seqNo, long primaryTerm) { - return new AsyncQueryJobMetadata( - copy.getQueryId(), - copy.getApplicationId(), - copy.getJobId(), - copy.getResultIndex(), - copy.getSessionId(), - seqNo, - primaryTerm); + /** + * Converts json string to DataSourceMetadata. + * + * @param json jsonstring. + * @return jobmetadata {@link AsyncQueryJobMetadata} + * @throws java.io.IOException IOException. + */ + public static AsyncQueryJobMetadata toJobMetadata(String json) throws IOException { + try (XContentParser parser = + XContentType.JSON + .xContent() + .createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + json)) { + return toJobMetadata(parser); + } } /** @@ -126,23 +87,16 @@ public static AsyncQueryJobMetadata copy( * @return JobMetadata {@link AsyncQueryJobMetadata} * @throws IOException IOException. 
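+   * <p>Parsing is strict: an unknown field raises {@link IllegalArgumentException}, and both
+   * {@code jobId} and {@code applicationId} must be present.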
*/ - @SneakyThrows - public static AsyncQueryJobMetadata fromXContent( - XContentParser parser, long seqNo, long primaryTerm) { - AsyncQueryId queryId = null; + public static AsyncQueryJobMetadata toJobMetadata(XContentParser parser) throws IOException { String jobId = null; String applicationId = null; boolean isDropIndexQuery = false; String resultIndex = null; - String sessionId = null; - ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser); - while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { + ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { String fieldName = parser.currentName(); parser.nextToken(); switch (fieldName) { - case QUERY_ID: - queryId = new AsyncQueryId(parser.textOrNull()); - break; case "jobId": jobId = parser.textOrNull(); break; @@ -155,11 +109,6 @@ public static AsyncQueryJobMetadata fromXContent( case "resultIndex": resultIndex = parser.textOrNull(); break; - case "sessionId": - sessionId = parser.textOrNull(); - break; - case "type": - break; default: throw new IllegalArgumentException("Unknown field: " + fieldName); } @@ -167,12 +116,6 @@ public static AsyncQueryJobMetadata fromXContent( if (jobId == null || applicationId == null) { throw new IllegalArgumentException("jobId and applicationId are required fields."); } - return new AsyncQueryJobMetadata( - queryId, applicationId, jobId, resultIndex, sessionId, seqNo, primaryTerm); - } - - @Override - public String getId() { - return queryId.docId(); + return new AsyncQueryJobMetadata(applicationId, jobId, isDropIndexQuery, resultIndex); } } diff --git a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/SparkSubmitParameters.java b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/SparkSubmitParameters.java index 9a73b0f364..0609d8903c 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/SparkSubmitParameters.java +++ b/spark/src/main/java/org/opensearch/sql/spark/asyncquery/model/SparkSubmitParameters.java @@ -12,7 +12,6 @@ import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_INDEX_STORE_OPENSEARCH_URI; import static org.opensearch.sql.datasources.glue.GlueDataSourceFactory.GLUE_ROLE_ARN; import static org.opensearch.sql.spark.data.constants.SparkConstants.*; -import static org.opensearch.sql.spark.execution.statestore.StateStore.DATASOURCE_TO_REQUEST_INDEX; import java.net.URI; import java.net.URISyntaxException; @@ -31,7 +30,6 @@ public class SparkSubmitParameters { public static final String SPACE = " "; public static final String EQUALS = "="; - public static final String FLINT_BASIC_AUTH = "basic"; private final String className; private final Map config; @@ -41,7 +39,7 @@ public class SparkSubmitParameters { public static class Builder { - private String className; + private final String className; private final Map config; private String extraParameters; @@ -72,11 +70,6 @@ public static Builder builder() { return new Builder(); } - public Builder className(String className) { - this.className = className; - return this; - } - public Builder dataSource(DataSourceMetadata metadata) { if (DataSourceType.S3GLUE.equals(metadata.getConnector())) { String roleArn = metadata.getProperties().get(GLUE_ROLE_ARN); @@ -115,7 +108,7 @@ private void setFlintIndexStoreAuthProperties( Supplier password, Supplier region) { if (AuthenticationType.get(authType).equals(AuthenticationType.BASICAUTH)) { - 
config.put(FLINT_INDEX_STORE_AUTH_KEY, FLINT_BASIC_AUTH); + config.put(FLINT_INDEX_STORE_AUTH_KEY, authType); config.put(FLINT_INDEX_STORE_AUTH_USERNAME, userName.get()); config.put(FLINT_INDEX_STORE_AUTH_PASSWORD, password.get()); } else if (AuthenticationType.get(authType).equals(AuthenticationType.AWSSIGV4AUTH)) { @@ -148,12 +141,6 @@ public Builder extraParameters(String params) { return this; } - public Builder sessionExecution(String sessionId, String datasourceName) { - config.put(FLINT_JOB_REQUEST_INDEX, DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - config.put(FLINT_JOB_SESSION_ID, sessionId); - return this; - } - public SparkSubmitParameters build() { return new SparkSubmitParameters(className, config, extraParameters); } diff --git a/spark/src/main/java/org/opensearch/sql/spark/client/EmrClientImpl.java b/spark/src/main/java/org/opensearch/sql/spark/client/EmrClientImpl.java index 87f35bbc1e..4e66cd9a00 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/client/EmrClientImpl.java +++ b/spark/src/main/java/org/opensearch/sql/spark/client/EmrClientImpl.java @@ -5,7 +5,7 @@ package org.opensearch.sql.spark.client; -import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_RESPONSE_BUFFER_INDEX_NAME; import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_SQL_APPLICATION_JAR; import com.amazonaws.services.elasticmapreduce.AmazonElasticMapReduce; @@ -74,7 +74,7 @@ void runEmrApplication(String query) { flint.getFlintIntegrationJar(), sparkApplicationJar, query, - DEFAULT_RESULT_INDEX, + SPARK_RESPONSE_BUFFER_INDEX_NAME, flint.getFlintHost(), flint.getFlintPort(), flint.getFlintScheme(), diff --git a/spark/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java b/spark/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java index d7f558a020..335f3b6fc8 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java +++ b/spark/src/main/java/org/opensearch/sql/spark/client/EmrServerlessClientImpl.java @@ -5,7 +5,7 @@ package org.opensearch.sql.spark.client; -import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_RESPONSE_BUFFER_INDEX_NAME; import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_SQL_APPLICATION_JAR; import com.amazonaws.services.emrserverless.AWSEMRServerless; @@ -22,8 +22,6 @@ import java.security.PrivilegedAction; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.sql.legacy.metrics.MetricName; -import org.opensearch.sql.legacy.utils.MetricUtils; public class EmrServerlessClientImpl implements EMRServerlessClient { @@ -38,7 +36,7 @@ public EmrServerlessClientImpl(AWSEMRServerless emrServerless) { public String startJobRun(StartJobRequest startJobRequest) { String resultIndex = startJobRequest.getResultIndex() == null - ? DEFAULT_RESULT_INDEX + ? 
SPARK_RESPONSE_BUFFER_INDEX_NAME : startJobRequest.getResultIndex(); StartJobRunRequest request = new StartJobRunRequest() @@ -54,19 +52,9 @@ public String startJobRun(StartJobRequest startJobRequest) { .withEntryPoint(SPARK_SQL_APPLICATION_JAR) .withEntryPointArguments(startJobRequest.getQuery(), resultIndex) .withSparkSubmitParameters(startJobRequest.getSparkSubmitParams()))); - StartJobRunResult startJobRunResult = AccessController.doPrivileged( - (PrivilegedAction) - () -> { - try { - return emrServerless.startJobRun(request); - } catch (Throwable t) { - MetricUtils.incrementNumericalMetric( - MetricName.EMR_START_JOB_REQUEST_FAILURE_COUNT); - throw t; - } - }); + (PrivilegedAction) () -> emrServerless.startJobRun(request)); logger.info("Job Run ID: " + startJobRunResult.getJobRunId()); return startJobRunResult.getJobRunId(); } @@ -77,16 +65,7 @@ public GetJobRunResult getJobRunResult(String applicationId, String jobId) { new GetJobRunRequest().withApplicationId(applicationId).withJobRunId(jobId); GetJobRunResult getJobRunResult = AccessController.doPrivileged( - (PrivilegedAction) - () -> { - try { - return emrServerless.getJobRun(request); - } catch (Throwable t) { - MetricUtils.incrementNumericalMetric( - MetricName.EMR_GET_JOB_RESULT_FAILURE_COUNT); - throw t; - } - }); + (PrivilegedAction) () -> emrServerless.getJobRun(request)); logger.info("Job Run state: " + getJobRunResult.getJobRun().getState()); return getJobRunResult; } @@ -99,15 +78,7 @@ public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { CancelJobRunResult cancelJobRunResult = AccessController.doPrivileged( (PrivilegedAction) - () -> { - try { - return emrServerless.cancelJobRun(cancelJobRunRequest); - } catch (Throwable t) { - MetricUtils.incrementNumericalMetric( - MetricName.EMR_CANCEL_JOB_REQUEST_FAILURE_COUNT); - throw t; - } - }); + () -> emrServerless.cancelJobRun(cancelJobRunRequest)); logger.info(String.format("Job : %s cancelled", cancelJobRunResult.getJobRunId())); return cancelJobRunResult; } catch (ValidationException e) { diff --git a/spark/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java b/spark/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java index f57c8facee..c4382239a1 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java +++ b/spark/src/main/java/org/opensearch/sql/spark/client/StartJobRequest.java @@ -7,14 +7,12 @@ import java.util.Map; import lombok.Data; -import lombok.EqualsAndHashCode; /** * This POJO carries all the fields required for emr serverless job submission. Used as model in * {@link EMRServerlessClient} interface. 
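+ * <p>Lombok's {@code @Data} already generates value-based {@code equals} and {@code hashCode}
+ * for this POJO, so no separate {@code @EqualsAndHashCode} is needed.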
*/ @Data -@EqualsAndHashCode public class StartJobRequest { public static final Long DEFAULT_JOB_TIMEOUT = 120L; diff --git a/spark/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java b/spark/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java deleted file mode 100644 index 3d004b548f..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/cluster/ClusterManagerEventListener.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.cluster; - -import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_REQUEST_BUFFER_INDEX_NAME; - -import com.google.common.annotations.VisibleForTesting; -import java.time.Clock; -import java.time.Duration; -import java.util.Arrays; -import java.util.List; -import org.opensearch.client.Client; -import org.opensearch.cluster.LocalNodeClusterManagerListener; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.lifecycle.LifecycleListener; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.threadpool.Scheduler.Cancellable; -import org.opensearch.threadpool.ThreadPool; - -public class ClusterManagerEventListener implements LocalNodeClusterManagerListener { - - private Cancellable flintIndexRetentionCron; - private ClusterService clusterService; - private ThreadPool threadPool; - private Client client; - private Clock clock; - private Duration sessionTtlDuration; - private Duration resultTtlDuration; - private boolean isAutoIndexManagementEnabled; - - public ClusterManagerEventListener( - ClusterService clusterService, - ThreadPool threadPool, - Client client, - Clock clock, - Setting sessionTtl, - Setting resultTtl, - Setting isAutoIndexManagementEnabledSetting, - Settings settings) { - this.clusterService = clusterService; - this.threadPool = threadPool; - this.client = client; - this.clusterService.addLocalNodeClusterManagerListener(this); - this.clock = clock; - - this.sessionTtlDuration = toDuration(sessionTtl.get(settings)); - this.resultTtlDuration = toDuration(resultTtl.get(settings)); - - clusterService - .getClusterSettings() - .addSettingsUpdateConsumer( - sessionTtl, - it -> { - this.sessionTtlDuration = toDuration(it); - cancel(flintIndexRetentionCron); - reInitializeFlintIndexRetention(); - }); - - clusterService - .getClusterSettings() - .addSettingsUpdateConsumer( - resultTtl, - it -> { - this.resultTtlDuration = toDuration(it); - cancel(flintIndexRetentionCron); - reInitializeFlintIndexRetention(); - }); - - isAutoIndexManagementEnabled = isAutoIndexManagementEnabledSetting.get(settings); - clusterService - .getClusterSettings() - .addSettingsUpdateConsumer( - isAutoIndexManagementEnabledSetting, - it -> { - if (isAutoIndexManagementEnabled != it) { - this.isAutoIndexManagementEnabled = it; - if (it) { - onClusterManager(); - } else { - offClusterManager(); - } - } - }); - } - - @Override - public void onClusterManager() { - - if (isAutoIndexManagementEnabled && flintIndexRetentionCron == null) { - reInitializeFlintIndexRetention(); - - clusterService.addLifecycleListener( - new LifecycleListener() { - @Override - public void beforeStop() { - cancel(flintIndexRetentionCron); - flintIndexRetentionCron = null; - } - }); - } - } - - private void 
reInitializeFlintIndexRetention() { - IndexCleanup indexCleanup = new IndexCleanup(client, clusterService); - flintIndexRetentionCron = - threadPool.scheduleWithFixedDelay( - new FlintIndexRetention( - sessionTtlDuration, - resultTtlDuration, - clock, - indexCleanup, - SPARK_REQUEST_BUFFER_INDEX_NAME + "*", - DataSourceMetadata.DEFAULT_RESULT_INDEX + "*"), - TimeValue.timeValueHours(24), - executorName()); - } - - @Override - public void offClusterManager() { - cancel(flintIndexRetentionCron); - flintIndexRetentionCron = null; - } - - private void cancel(Cancellable cron) { - if (cron != null) { - cron.cancel(); - } - } - - @VisibleForTesting - public List getFlintIndexRetentionCron() { - return Arrays.asList(flintIndexRetentionCron); - } - - private String executorName() { - return ThreadPool.Names.GENERIC; - } - - public static Duration toDuration(TimeValue timeValue) { - return Duration.ofMillis(timeValue.millis()); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java b/spark/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java deleted file mode 100644 index 3ca56ca173..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/cluster/FlintIndexRetention.java +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.cluster; - -import static org.opensearch.sql.spark.execution.session.SessionModel.LAST_UPDATE_TIME; -import static org.opensearch.sql.spark.execution.statement.StatementModel.SUBMIT_TIME; - -import java.time.Clock; -import java.time.Duration; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.common.CheckedConsumer; -import org.opensearch.common.time.FormatNames; -import org.opensearch.core.action.ActionListener; -import org.opensearch.index.IndexNotFoundException; -import org.opensearch.index.query.QueryBuilders; - -public class FlintIndexRetention implements Runnable { - private static final Logger LOG = LogManager.getLogger(FlintIndexRetention.class); - - static final String SESSION_INDEX_NOT_EXIST_MSG = "Checkpoint index does not exist."; - - static final String RESULT_INDEX_NOT_EXIST_MSG = "Result index does not exist."; - - // timestamp field in result index - static final String UPDATE_TIME_FIELD = "updateTime"; - - private final Duration defaultSessionTtl; - private final Duration defaultResultTtl; - private final Clock clock; - private final IndexCleanup indexCleanup; - private final String sessionIndexNameRegex; - private final String resultIndexNameRegex; - - public FlintIndexRetention( - Duration defaultSessionTtl, - Duration defaultResultTtl, - Clock clock, - IndexCleanup indexCleanup, - String sessionIndexNameRegex, - String resultIndexNameRegex) { - this.defaultSessionTtl = defaultSessionTtl; - this.defaultResultTtl = defaultResultTtl; - this.clock = clock; - this.indexCleanup = indexCleanup; - this.sessionIndexNameRegex = sessionIndexNameRegex; - this.resultIndexNameRegex = resultIndexNameRegex; - } - - @Override - public void run() { - purgeSessionIndex(); - } - - private void purgeSessionIndex() { - purgeIndex( - sessionIndexNameRegex, - defaultSessionTtl, - LAST_UPDATE_TIME, - this::handleSessionPurgeResponse, - this::handleSessionPurgeError); - } - - private void handleSessionPurgeResponse(Long response) { - purgeStatementIndex(); - } - - private void handleSessionPurgeError(Exception exception) { - 
handlePurgeError(SESSION_INDEX_NOT_EXIST_MSG, "session index", exception); - purgeStatementIndex(); - } - - private void purgeStatementIndex() { - purgeIndex( - sessionIndexNameRegex, - defaultSessionTtl, - SUBMIT_TIME, - this::handleStatementPurgeResponse, - this::handleStatementPurgeError); - } - - private void handleStatementPurgeResponse(Long response) { - purgeResultIndex(); - } - - private void handleStatementPurgeError(Exception exception) { - handlePurgeError(SESSION_INDEX_NOT_EXIST_MSG, "session index", exception); - purgeResultIndex(); - } - - private void purgeResultIndex() { - purgeIndex( - resultIndexNameRegex, - defaultResultTtl, - UPDATE_TIME_FIELD, - this::handleResultPurgeResponse, - this::handleResultPurgeError); - } - - private void handleResultPurgeResponse(Long response) { - LOG.debug("purge result index done"); - } - - private void handleResultPurgeError(Exception exception) { - handlePurgeError(RESULT_INDEX_NOT_EXIST_MSG, "result index", exception); - } - - private void handlePurgeError(String notExistMsg, String indexType, Exception exception) { - if (exception instanceof IndexNotFoundException) { - LOG.debug(notExistMsg); - } else { - LOG.error("delete docs by query fails for " + indexType, exception); - } - } - - private void purgeIndex( - String indexName, - Duration ttl, - String timeStampField, - CheckedConsumer successHandler, - CheckedConsumer errorHandler) { - indexCleanup.deleteDocsByQuery( - indexName, - QueryBuilders.boolQuery() - .filter( - QueryBuilders.rangeQuery(timeStampField) - .lte(clock.millis() - ttl.toMillis()) - .format(FormatNames.EPOCH_MILLIS.getSnakeCaseName())), - ActionListener.wrap( - response -> { - try { - successHandler.accept(response); - } catch (Exception e) { - LOG.error("Error handling response for index " + indexName, e); - } - }, - ex -> { - try { - errorHandler.accept(ex); - } catch (Exception e) { - LOG.error("Error handling error for index " + indexName, e); - } - })); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java b/spark/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java deleted file mode 100644 index 562f12b69e..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/cluster/IndexCleanup.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.cluster; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.support.IndicesOptions; -import org.opensearch.client.Client; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.core.action.ActionListener; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.reindex.DeleteByQueryAction; -import org.opensearch.index.reindex.DeleteByQueryRequest; - -/** Clean up the old docs for indices. 
*/ -public class IndexCleanup { - private static final Logger LOG = LogManager.getLogger(IndexCleanup.class); - - private final Client client; - private final ClusterService clusterService; - - public IndexCleanup(Client client, ClusterService clusterService) { - this.client = client; - this.clusterService = clusterService; - } - - /** - * Delete docs based on query request - * - * @param indexName index name - * @param queryForDeleteByQueryRequest query request - * @param listener action listener - */ - public void deleteDocsByQuery( - String indexName, QueryBuilder queryForDeleteByQueryRequest, ActionListener listener) { - DeleteByQueryRequest deleteRequest = - new DeleteByQueryRequest(indexName) - .setQuery(queryForDeleteByQueryRequest) - .setIndicesOptions(IndicesOptions.LENIENT_EXPAND_OPEN) - .setRefresh(true); - - try (ThreadContext.StoredContext context = - client.threadPool().getThreadContext().stashContext()) { - client.execute( - DeleteByQueryAction.INSTANCE, - deleteRequest, - ActionListener.wrap( - response -> { - long deleted = response.getDeleted(); - if (deleted > 0) { - // if 0 docs get deleted, it means our query cannot find any matching doc - // or the index does not exist at all - LOG.info("{} docs are deleted for index:{}", deleted, indexName); - } - listener.onResponse(response.getDeleted()); - }, - listener::onFailure)); - } - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java b/spark/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java index 3a243cb5b3..284afcc0a9 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java +++ b/spark/src/main/java/org/opensearch/sql/spark/data/constants/SparkConstants.java @@ -20,11 +20,13 @@ public class SparkConstants { // EMR-S will download JAR to local maven public static final String SPARK_SQL_APPLICATION_JAR = "file:///home/hadoop/.ivy2/jars/org.opensearch_opensearch-spark-sql-application_2.12-0.1.0-SNAPSHOT.jar"; - public static final String SPARK_REQUEST_BUFFER_INDEX_NAME = ".query_execution_request"; + public static final String SPARK_RESPONSE_BUFFER_INDEX_NAME = ".query_execution_result"; // TODO should be replaced with mvn jar. public static final String FLINT_INTEGRATION_JAR = "s3://spark-datasource/flint-spark-integration-assembly-0.1.0-SNAPSHOT.jar"; // TODO should be replaced with mvn jar. 
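+  // Interim S3 location for the Flint catalog jar, used until a published Maven artifact
+  // replaces it (see TODO above).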
+ public static final String FLINT_CATALOG_JAR = + "s3://flint-data-dp-eu-west-1-beta/code/flint/flint-catalog.jar"; public static final String FLINT_DEFAULT_HOST = "localhost"; public static final String FLINT_DEFAULT_PORT = "9200"; public static final String FLINT_DEFAULT_SCHEME = "http"; @@ -84,8 +86,4 @@ public class SparkConstants { public static final String EMR_ASSUME_ROLE_CREDENTIALS_PROVIDER = "com.amazonaws.emr.AssumeRoleAWSCredentialsProvider"; public static final String JAVA_HOME_LOCATION = "/usr/lib/jvm/java-17-amazon-corretto.x86_64/"; - - public static final String FLINT_JOB_REQUEST_INDEX = "spark.flint.job.requestIndex"; - public static final String FLINT_JOB_SESSION_ID = "spark.flint.job.sessionId"; - public static final String FLINT_SESSION_CLASS_NAME = "org.apache.spark.sql.FlintREPL"; } diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java deleted file mode 100644 index b3d2cdd289..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/AsyncQueryHandler.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher; - -import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD; -import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD; -import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD; - -import com.amazonaws.services.emrserverless.model.JobRunState; -import org.json.JSONObject; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; - -/** Process async query request. 
*/ -public abstract class AsyncQueryHandler { - - public JSONObject getQueryResponse(AsyncQueryJobMetadata asyncQueryJobMetadata) { - JSONObject result = getResponseFromResultIndex(asyncQueryJobMetadata); - if (result.has(DATA_FIELD)) { - JSONObject items = result.getJSONObject(DATA_FIELD); - - // If items have STATUS_FIELD, use it; otherwise, mark failed - String status = items.optString(STATUS_FIELD, JobRunState.FAILED.toString()); - result.put(STATUS_FIELD, status); - - // If items have ERROR_FIELD, use it; otherwise, set empty string - String error = items.optString(ERROR_FIELD, ""); - result.put(ERROR_FIELD, error); - return result; - } else { - return getResponseFromExecutor(asyncQueryJobMetadata); - } - } - - protected abstract JSONObject getResponseFromResultIndex( - AsyncQueryJobMetadata asyncQueryJobMetadata); - - protected abstract JSONObject getResponseFromExecutor( - AsyncQueryJobMetadata asyncQueryJobMetadata); - - public abstract String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata); - - public abstract DispatchQueryResponse submit( - DispatchQueryRequest request, DispatchQueryContext context); -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java deleted file mode 100644 index de25f1188c..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/BatchQueryHandler.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher; - -import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD; -import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD; -import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY; - -import com.amazonaws.services.emrserverless.model.GetJobRunResult; -import java.util.Map; -import lombok.RequiredArgsConstructor; -import org.json.JSONObject; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.sql.legacy.metrics.MetricName; -import org.opensearch.sql.legacy.utils.MetricUtils; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.asyncquery.model.SparkSubmitParameters; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.client.StartJobRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; -import org.opensearch.sql.spark.dispatcher.model.JobType; -import org.opensearch.sql.spark.leasemanager.LeaseManager; -import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; -import org.opensearch.sql.spark.response.JobExecutionResponseReader; - -@RequiredArgsConstructor -public class BatchQueryHandler extends AsyncQueryHandler { - private final EMRServerlessClient emrServerlessClient; - private final JobExecutionResponseReader jobExecutionResponseReader; - protected final LeaseManager leaseManager; - - @Override - protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata asyncQueryJobMetadata) { - // either empty json when the result is not available or data with status - // Fetch from Result Index - return jobExecutionResponseReader.getResultFromOpensearchIndex( - asyncQueryJobMetadata.getJobId(), 
asyncQueryJobMetadata.getResultIndex()); - } - - @Override - protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata asyncQueryJobMetadata) { - JSONObject result = new JSONObject(); - // make call to EMR Serverless when related result index documents are not available - GetJobRunResult getJobRunResult = - emrServerlessClient.getJobRunResult( - asyncQueryJobMetadata.getApplicationId(), asyncQueryJobMetadata.getJobId()); - String jobState = getJobRunResult.getJobRun().getState(); - result.put(STATUS_FIELD, jobState); - result.put(ERROR_FIELD, ""); - return result; - } - - @Override - public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) { - emrServerlessClient.cancelJobRun( - asyncQueryJobMetadata.getApplicationId(), asyncQueryJobMetadata.getJobId()); - return asyncQueryJobMetadata.getQueryId().getId(); - } - - @Override - public DispatchQueryResponse submit( - DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) { - leaseManager.borrow(new LeaseRequest(JobType.BATCH, dispatchQueryRequest.getDatasource())); - - String jobName = dispatchQueryRequest.getClusterName() + ":" + "non-index-query"; - Map tags = context.getTags(); - DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata(); - - tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText()); - StartJobRequest startJobRequest = - new StartJobRequest( - dispatchQueryRequest.getQuery(), - jobName, - dispatchQueryRequest.getApplicationId(), - dispatchQueryRequest.getExecutionRoleARN(), - SparkSubmitParameters.Builder.builder() - .dataSource(context.getDataSourceMetadata()) - .extraParameters(dispatchQueryRequest.getExtraSparkSubmitParams()) - .build() - .toString(), - tags, - false, - dataSourceMetadata.getResultIndex()); - String jobId = emrServerlessClient.startJobRun(startJobRequest); - MetricUtils.incrementNumericalMetric(MetricName.EMR_BATCH_QUERY_JOBS_CREATION_COUNT); - return new DispatchQueryResponse( - context.getQueryId(), jobId, dataSourceMetadata.getResultIndex(), null); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java deleted file mode 100644 index 3ab5439ad5..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandler.java +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher; - -import static org.opensearch.sql.spark.execution.statestore.StateStore.createIndexDMLResult; - -import com.amazonaws.services.emrserverless.model.JobRunState; -import lombok.RequiredArgsConstructor; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.json.JSONObject; -import org.opensearch.client.Client; -import org.opensearch.sql.datasource.DataSourceService; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.sql.datasources.auth.DataSourceUserAuthorizationHelperImpl; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; -import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; -import 
org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexMetadata; -import org.opensearch.sql.spark.flint.FlintIndexMetadataReader; -import org.opensearch.sql.spark.flint.operation.FlintIndexOp; -import org.opensearch.sql.spark.flint.operation.FlintIndexOpCancel; -import org.opensearch.sql.spark.flint.operation.FlintIndexOpDelete; -import org.opensearch.sql.spark.response.JobExecutionResponseReader; - -/** Handle Index DML query. includes * DROP * ALT? */ -@RequiredArgsConstructor -public class IndexDMLHandler extends AsyncQueryHandler { - private static final Logger LOG = LogManager.getLogger(); - - public static final String DROP_INDEX_JOB_ID = "dropIndexJobId"; - - private final EMRServerlessClient emrServerlessClient; - - private final DataSourceService dataSourceService; - - private final DataSourceUserAuthorizationHelperImpl dataSourceUserAuthorizationHelper; - - private final JobExecutionResponseReader jobExecutionResponseReader; - - private final FlintIndexMetadataReader flintIndexMetadataReader; - - private final Client client; - - private final StateStore stateStore; - - public static boolean isIndexDMLQuery(String jobId) { - return DROP_INDEX_JOB_ID.equalsIgnoreCase(jobId); - } - - @Override - public DispatchQueryResponse submit( - DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) { - DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata(); - IndexQueryDetails indexDetails = context.getIndexQueryDetails(); - FlintIndexMetadata indexMetadata = flintIndexMetadataReader.getFlintIndexMetadata(indexDetails); - // if index is created without auto refresh. there is no job to cancel. - String status = JobRunState.FAILED.toString(); - String error = ""; - long startTime = 0L; - try { - FlintIndexOp jobCancelOp = - new FlintIndexOpCancel( - stateStore, dispatchQueryRequest.getDatasource(), emrServerlessClient); - jobCancelOp.apply(indexMetadata); - - FlintIndexOp indexDeleteOp = - new FlintIndexOpDelete(stateStore, dispatchQueryRequest.getDatasource()); - indexDeleteOp.apply(indexMetadata); - status = JobRunState.SUCCESS.toString(); - } catch (Exception e) { - error = e.getMessage(); - LOG.error(e); - } - - AsyncQueryId asyncQueryId = AsyncQueryId.newAsyncQueryId(dataSourceMetadata.getName()); - IndexDMLResult indexDMLResult = - new IndexDMLResult( - asyncQueryId.getId(), - status, - error, - dispatchQueryRequest.getDatasource(), - System.currentTimeMillis() - startTime, - System.currentTimeMillis()); - String resultIndex = dataSourceMetadata.getResultIndex(); - createIndexDMLResult(stateStore, resultIndex).apply(indexDMLResult); - - return new DispatchQueryResponse(asyncQueryId, DROP_INDEX_JOB_ID, resultIndex, null); - } - - @Override - protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata asyncQueryJobMetadata) { - String queryId = asyncQueryJobMetadata.getQueryId().getId(); - return jobExecutionResponseReader.getResultWithQueryId( - queryId, asyncQueryJobMetadata.getResultIndex()); - } - - @Override - protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata asyncQueryJobMetadata) { - throw new IllegalStateException("[BUG] can't fetch result of index DML query form server"); - } - - @Override - public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) { - throw new IllegalArgumentException("can't cancel index DML query"); - } -} diff --git 
a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java deleted file mode 100644 index 5aa82432bb..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/InteractiveQueryHandler.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher; - -import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD; -import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_SESSION_CLASS_NAME; -import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD; -import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY; - -import java.util.Map; -import java.util.Optional; -import lombok.RequiredArgsConstructor; -import org.json.JSONObject; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.sql.legacy.metrics.MetricName; -import org.opensearch.sql.legacy.utils.MetricUtils; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.asyncquery.model.SparkSubmitParameters; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; -import org.opensearch.sql.spark.dispatcher.model.JobType; -import org.opensearch.sql.spark.execution.session.CreateSessionRequest; -import org.opensearch.sql.spark.execution.session.Session; -import org.opensearch.sql.spark.execution.session.SessionId; -import org.opensearch.sql.spark.execution.session.SessionManager; -import org.opensearch.sql.spark.execution.statement.QueryRequest; -import org.opensearch.sql.spark.execution.statement.Statement; -import org.opensearch.sql.spark.execution.statement.StatementId; -import org.opensearch.sql.spark.execution.statement.StatementState; -import org.opensearch.sql.spark.leasemanager.LeaseManager; -import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; -import org.opensearch.sql.spark.response.JobExecutionResponseReader; - -@RequiredArgsConstructor -public class InteractiveQueryHandler extends AsyncQueryHandler { - private final SessionManager sessionManager; - private final JobExecutionResponseReader jobExecutionResponseReader; - private final LeaseManager leaseManager; - - @Override - protected JSONObject getResponseFromResultIndex(AsyncQueryJobMetadata asyncQueryJobMetadata) { - String queryId = asyncQueryJobMetadata.getQueryId().getId(); - return jobExecutionResponseReader.getResultWithQueryId( - queryId, asyncQueryJobMetadata.getResultIndex()); - } - - @Override - protected JSONObject getResponseFromExecutor(AsyncQueryJobMetadata asyncQueryJobMetadata) { - JSONObject result = new JSONObject(); - String queryId = asyncQueryJobMetadata.getQueryId().getId(); - Statement statement = getStatementByQueryId(asyncQueryJobMetadata.getSessionId(), queryId); - StatementState statementState = statement.getStatementState(); - result.put(STATUS_FIELD, statementState.getState()); - result.put(ERROR_FIELD, Optional.of(statement.getStatementModel().getError()).orElse("")); - return result; - } - - @Override - public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) { - String queryId = asyncQueryJobMetadata.getQueryId().getId(); - 
getStatementByQueryId(asyncQueryJobMetadata.getSessionId(), queryId).cancel(); - return queryId; - } - - @Override - public DispatchQueryResponse submit( - DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) { - Session session = null; - String jobName = dispatchQueryRequest.getClusterName() + ":" + "non-index-query"; - Map tags = context.getTags(); - DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata(); - - // todo, manage lease lifecycle - leaseManager.borrow( - new LeaseRequest(JobType.INTERACTIVE, dispatchQueryRequest.getDatasource())); - - if (dispatchQueryRequest.getSessionId() != null) { - // get session from request - SessionId sessionId = new SessionId(dispatchQueryRequest.getSessionId()); - Optional createdSession = sessionManager.getSession(sessionId); - if (createdSession.isPresent()) { - session = createdSession.get(); - } - } - if (session == null || !session.isReady()) { - // create session if not exist or session dead/fail - tags.put(JOB_TYPE_TAG_KEY, JobType.INTERACTIVE.getText()); - session = - sessionManager.createSession( - new CreateSessionRequest( - jobName, - dispatchQueryRequest.getApplicationId(), - dispatchQueryRequest.getExecutionRoleARN(), - SparkSubmitParameters.Builder.builder() - .className(FLINT_SESSION_CLASS_NAME) - .dataSource(dataSourceMetadata) - .extraParameters(dispatchQueryRequest.getExtraSparkSubmitParams()), - tags, - dataSourceMetadata.getResultIndex(), - dataSourceMetadata.getName())); - MetricUtils.incrementNumericalMetric(MetricName.EMR_INTERACTIVE_QUERY_JOBS_CREATION_COUNT); - } - session.submit( - new QueryRequest( - context.getQueryId(), - dispatchQueryRequest.getLangType(), - dispatchQueryRequest.getQuery())); - return new DispatchQueryResponse( - context.getQueryId(), - session.getSessionModel().getJobId(), - dataSourceMetadata.getResultIndex(), - session.getSessionId().getSessionId()); - } - - private Statement getStatementByQueryId(String sid, String qid) { - SessionId sessionId = new SessionId(sid); - Optional session = sessionManager.getSession(sessionId); - if (session.isPresent()) { - // todo, statementId == jobId if statement running in session. - StatementId statementId = new StatementId(qid); - Optional statement = session.get().get(statementId); - if (statement.isPresent()) { - return statement.get(); - } else { - throw new IllegalArgumentException("no statement found. " + statementId); - } - } else { - throw new IllegalArgumentException("no session found. 
" + sessionId); - } - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java index 0aa183335e..347e154885 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java +++ b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcher.java @@ -5,28 +5,42 @@ package org.opensearch.sql.spark.dispatcher; +import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD; + +import com.amazonaws.services.emrserverless.model.CancelJobRunResult; +import com.amazonaws.services.emrserverless.model.GetJobRunResult; +import com.amazonaws.services.emrserverless.model.JobRunState; +import java.nio.charset.StandardCharsets; +import java.util.Base64; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ExecutionException; import lombok.AllArgsConstructor; +import lombok.Getter; +import lombok.RequiredArgsConstructor; +import org.apache.commons.lang3.RandomStringUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.json.JSONArray; import org.json.JSONObject; +import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.sql.datasource.DataSourceService; import org.opensearch.sql.datasource.model.DataSourceMetadata; import org.opensearch.sql.datasources.auth.DataSourceUserAuthorizationHelperImpl; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; +import org.opensearch.sql.spark.asyncquery.model.SparkSubmitParameters; import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; +import org.opensearch.sql.spark.client.StartJobRequest; import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; -import org.opensearch.sql.spark.execution.session.SessionManager; -import org.opensearch.sql.spark.execution.statestore.StateStore; +import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; +import org.opensearch.sql.spark.dispatcher.model.IndexDetails; +import org.opensearch.sql.spark.flint.FlintIndexMetadata; import org.opensearch.sql.spark.flint.FlintIndexMetadataReader; -import org.opensearch.sql.spark.leasemanager.LeaseManager; import org.opensearch.sql.spark.response.JobExecutionResponseReader; import org.opensearch.sql.spark.rest.model.LangType; import org.opensearch.sql.spark.utils.SQLQueryUtils; @@ -36,10 +50,12 @@ public class SparkQueryDispatcher { private static final Logger LOG = LogManager.getLogger(); + public static final String INDEX_TAG_KEY = "index"; public static final String DATASOURCE_TAG_KEY = "datasource"; - public static final String CLUSTER_NAME_TAG_KEY = "domain_ident"; - public static final String JOB_TYPE_TAG_KEY = "type"; + public static final String SCHEMA_TAG_KEY = "schema"; + public static final String TABLE_TAG_KEY = "table"; + public 
static final String CLUSTER_NAME_TAG_KEY = "cluster"; private EMRServerlessClient emrServerlessClient; @@ -53,100 +69,167 @@ public class SparkQueryDispatcher { private Client client; - private SessionManager sessionManager; - - private LeaseManager leaseManager; - - private StateStore stateStore; - public DispatchQueryResponse dispatch(DispatchQueryRequest dispatchQueryRequest) { - DataSourceMetadata dataSourceMetadata = - this.dataSourceService.getRawDataSourceMetadata(dispatchQueryRequest.getDatasource()); - dataSourceUserAuthorizationHelper.authorizeDataSource(dataSourceMetadata); - - AsyncQueryHandler asyncQueryHandler = - sessionManager.isEnabled() - ? new InteractiveQueryHandler(sessionManager, jobExecutionResponseReader, leaseManager) - : new BatchQueryHandler(emrServerlessClient, jobExecutionResponseReader, leaseManager); - DispatchQueryContext.DispatchQueryContextBuilder contextBuilder = - DispatchQueryContext.builder() - .dataSourceMetadata(dataSourceMetadata) - .tags(getDefaultTagsForJobSubmission(dispatchQueryRequest)) - .queryId(AsyncQueryId.newAsyncQueryId(dataSourceMetadata.getName())); - - // override asyncQueryHandler with specific. - if (LangType.SQL.equals(dispatchQueryRequest.getLangType()) - && SQLQueryUtils.isFlintExtensionQuery(dispatchQueryRequest.getQuery())) { - IndexQueryDetails indexQueryDetails = - SQLQueryUtils.extractIndexDetails(dispatchQueryRequest.getQuery()); - fillMissingDetails(dispatchQueryRequest, indexQueryDetails); - contextBuilder.indexQueryDetails(indexQueryDetails); - - if (IndexQueryActionType.DROP.equals(indexQueryDetails.getIndexQueryActionType())) { - asyncQueryHandler = createIndexDMLHandler(); - } else if (IndexQueryActionType.CREATE.equals(indexQueryDetails.getIndexQueryActionType()) - && indexQueryDetails.isAutoRefresh()) { - asyncQueryHandler = - new StreamingQueryHandler( - emrServerlessClient, jobExecutionResponseReader, leaseManager); - } else if (IndexQueryActionType.REFRESH.equals(indexQueryDetails.getIndexQueryActionType())) { - // manual refresh should be handled by batch handler - asyncQueryHandler = - new BatchQueryHandler(emrServerlessClient, jobExecutionResponseReader, leaseManager); - } + if (LangType.SQL.equals(dispatchQueryRequest.getLangType())) { + return handleSQLQuery(dispatchQueryRequest); + } else { + // Since we don't need any extra handling for PPL, we are treating it as normal dispatch + // Query. + return handleNonIndexQuery(dispatchQueryRequest); } - return asyncQueryHandler.submit(dispatchQueryRequest, contextBuilder.build()); } public JSONObject getQueryResponse(AsyncQueryJobMetadata asyncQueryJobMetadata) { - if (asyncQueryJobMetadata.getSessionId() != null) { - return new InteractiveQueryHandler(sessionManager, jobExecutionResponseReader, leaseManager) - .getQueryResponse(asyncQueryJobMetadata); - } else if (IndexDMLHandler.isIndexDMLQuery(asyncQueryJobMetadata.getJobId())) { - return createIndexDMLHandler().getQueryResponse(asyncQueryJobMetadata); + // todo. refactor query process logic in plugin. 
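// A minimal sketch of the job-id round trip, assuming only the DropIndexResult
// API defined at the bottom of this class: the drop-index fast path below never
// calls EMR Serverless, because toJobId() Base64-encodes a 10-character random
// prefix plus the status into the job id, and fromJobId() decodes it back out.
//
//   String jobId = new DropIndexResult(JobRunState.SUCCESS.toString()).toJobId();
//   DropIndexResult decoded = DropIndexResult.fromJobId(jobId);
//   String status = decoded.getStatus();    // "SUCCESS"
//   JSONObject response = decoded.result(); // STATUS_FIELD set; empty schema/result (for SUCCESS)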
+ if (asyncQueryJobMetadata.isDropIndexQuery()) { + return DropIndexResult.fromJobId(asyncQueryJobMetadata.getJobId()).result(); + } + + // either empty json when the result is not available or data with status + // Fetch from Result Index + JSONObject result = + jobExecutionResponseReader.getResultFromOpensearchIndex( + asyncQueryJobMetadata.getJobId(), asyncQueryJobMetadata.getResultIndex()); + + // if result index document has a status, we are gonna use the status directly; otherwise, we + // will use emr-s job status. + // That a job is successful does not mean there is no error in execution. For example, even if + // result + // index mapping is incorrect, we still write query result and let the job finish. + // That a job is running does not mean the status is running. For example, index/streaming Query + // is a + // long-running job which runs forever. But we need to return success from the result index + // immediately. + if (result.has(DATA_FIELD)) { + JSONObject items = result.getJSONObject(DATA_FIELD); + + // If items have STATUS_FIELD, use it; otherwise, mark failed + String status = items.optString(STATUS_FIELD, JobRunState.FAILED.toString()); + result.put(STATUS_FIELD, status); + + // If items have ERROR_FIELD, use it; otherwise, set empty string + String error = items.optString(ERROR_FIELD, ""); + result.put(ERROR_FIELD, error); } else { - return new BatchQueryHandler(emrServerlessClient, jobExecutionResponseReader, leaseManager) - .getQueryResponse(asyncQueryJobMetadata); + // make call to EMR Serverless when related result index documents are not available + GetJobRunResult getJobRunResult = + emrServerlessClient.getJobRunResult( + asyncQueryJobMetadata.getApplicationId(), asyncQueryJobMetadata.getJobId()); + String jobState = getJobRunResult.getJobRun().getState(); + result.put(STATUS_FIELD, jobState); + result.put(ERROR_FIELD, ""); } + + return result; } public String cancelJob(AsyncQueryJobMetadata asyncQueryJobMetadata) { - AsyncQueryHandler queryHandler; - if (asyncQueryJobMetadata.getSessionId() != null) { - queryHandler = - new InteractiveQueryHandler(sessionManager, jobExecutionResponseReader, leaseManager); - } else if (IndexDMLHandler.isIndexDMLQuery(asyncQueryJobMetadata.getJobId())) { - queryHandler = createIndexDMLHandler(); + CancelJobRunResult cancelJobRunResult = + emrServerlessClient.cancelJobRun( + asyncQueryJobMetadata.getApplicationId(), asyncQueryJobMetadata.getJobId()); + return cancelJobRunResult.getJobRunId(); + } + + private DispatchQueryResponse handleSQLQuery(DispatchQueryRequest dispatchQueryRequest) { + if (SQLQueryUtils.isIndexQuery(dispatchQueryRequest.getQuery())) { + IndexDetails indexDetails = + SQLQueryUtils.extractIndexDetails(dispatchQueryRequest.getQuery()); + if (indexDetails.isDropIndex()) { + return handleDropIndexQuery(dispatchQueryRequest, indexDetails); + } else { + return handleIndexQuery(dispatchQueryRequest, indexDetails); + } } else { - queryHandler = - new BatchQueryHandler(emrServerlessClient, jobExecutionResponseReader, leaseManager); + return handleNonIndexQuery(dispatchQueryRequest); } - return queryHandler.cancelJob(asyncQueryJobMetadata); } - private IndexDMLHandler createIndexDMLHandler() { - return new IndexDMLHandler( - emrServerlessClient, - dataSourceService, - dataSourceUserAuthorizationHelper, - jobExecutionResponseReader, - flintIndexMetadataReader, - client, - stateStore); + private DispatchQueryResponse handleIndexQuery( + DispatchQueryRequest dispatchQueryRequest, IndexDetails indexDetails) { + 
FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
+    DataSourceMetadata dataSourceMetadata =
+        this.dataSourceService.getRawDataSourceMetadata(dispatchQueryRequest.getDatasource());
+    dataSourceUserAuthorizationHelper.authorizeDataSource(dataSourceMetadata);
+    String jobName = dispatchQueryRequest.getClusterName() + ":" + "index-query";
+    Map<String, String> tags = getDefaultTagsForJobSubmission(dispatchQueryRequest);
+    tags.put(INDEX_TAG_KEY, indexDetails.getIndexName());
+    tags.put(TABLE_TAG_KEY, fullyQualifiedTableName.getTableName());
+    tags.put(SCHEMA_TAG_KEY, fullyQualifiedTableName.getSchemaName());
+    StartJobRequest startJobRequest =
+        new StartJobRequest(
+            dispatchQueryRequest.getQuery(),
+            jobName,
+            dispatchQueryRequest.getApplicationId(),
+            dispatchQueryRequest.getExecutionRoleARN(),
+            SparkSubmitParameters.Builder.builder()
+                .dataSource(
+                    dataSourceService.getRawDataSourceMetadata(
+                        dispatchQueryRequest.getDatasource()))
+                .structuredStreaming(indexDetails.getAutoRefresh())
+                .extraParameters(dispatchQueryRequest.getExtraSparkSubmitParams())
+                .build()
+                .toString(),
+            tags,
+            indexDetails.getAutoRefresh(),
+            dataSourceMetadata.getResultIndex());
+    String jobId = emrServerlessClient.startJobRun(startJobRequest);
+    return new DispatchQueryResponse(jobId, false, dataSourceMetadata.getResultIndex());
   }

-  // TODO: Revisit this logic.
-  // Currently, if the datasource is not provided in the query, Spark assumes it to be
-  // the catalog. This is required to handle the drop index case properly when the
-  // datasource name is not provided.
-  private static void fillMissingDetails(
-      DispatchQueryRequest dispatchQueryRequest, IndexQueryDetails indexQueryDetails) {
-    if (indexQueryDetails.getFullyQualifiedTableName() != null
-        && indexQueryDetails.getFullyQualifiedTableName().getDatasourceName() == null) {
-      indexQueryDetails
-          .getFullyQualifiedTableName()
-          .setDatasourceName(dispatchQueryRequest.getDatasource());
+  private DispatchQueryResponse handleNonIndexQuery(DispatchQueryRequest dispatchQueryRequest) {
+    DataSourceMetadata dataSourceMetadata =
+        this.dataSourceService.getRawDataSourceMetadata(dispatchQueryRequest.getDatasource());
+    dataSourceUserAuthorizationHelper.authorizeDataSource(dataSourceMetadata);
+    String jobName = dispatchQueryRequest.getClusterName() + ":" + "non-index-query";
+    Map<String, String> tags = getDefaultTagsForJobSubmission(dispatchQueryRequest);
+    StartJobRequest startJobRequest =
+        new StartJobRequest(
+            dispatchQueryRequest.getQuery(),
+            jobName,
+            dispatchQueryRequest.getApplicationId(),
+            dispatchQueryRequest.getExecutionRoleARN(),
+            SparkSubmitParameters.Builder.builder()
+                .dataSource(
+                    dataSourceService.getRawDataSourceMetadata(
+                        dispatchQueryRequest.getDatasource()))
+                .extraParameters(dispatchQueryRequest.getExtraSparkSubmitParams())
+                .build()
+                .toString(),
+            tags,
+            false,
+            dataSourceMetadata.getResultIndex());
+    String jobId = emrServerlessClient.startJobRun(startJobRequest);
+    return new DispatchQueryResponse(jobId, false, dataSourceMetadata.getResultIndex());
+  }
+
+  private DispatchQueryResponse handleDropIndexQuery(
+      DispatchQueryRequest dispatchQueryRequest, IndexDetails indexDetails) {
+    DataSourceMetadata dataSourceMetadata =
+        this.dataSourceService.getRawDataSourceMetadata(dispatchQueryRequest.getDatasource());
+    dataSourceUserAuthorizationHelper.authorizeDataSource(dataSourceMetadata);
+    FlintIndexMetadata indexMetadata = flintIndexMetadataReader.getFlintIndexMetadata(indexDetails);
+    // if index is created 
without auto refresh. there is no job to cancel. + String status = JobRunState.FAILED.toString(); + try { + if (indexMetadata.isAutoRefresh()) { + emrServerlessClient.cancelJobRun( + dispatchQueryRequest.getApplicationId(), indexMetadata.getJobId()); + } + } finally { + String indexName = indexDetails.openSearchIndexName(); + try { + AcknowledgedResponse response = + client.admin().indices().delete(new DeleteIndexRequest().indices(indexName)).get(); + if (!response.isAcknowledged()) { + LOG.error("failed to delete index"); + } + status = JobRunState.SUCCESS.toString(); + } catch (InterruptedException | ExecutionException e) { + LOG.error("failed to delete index"); + } } + return new DispatchQueryResponse( + new DropIndexResult(status).toJobId(), true, dataSourceMetadata.getResultIndex()); } private static Map getDefaultTagsForJobSubmission( @@ -156,4 +239,39 @@ private static Map getDefaultTagsForJobSubmission( tags.put(DATASOURCE_TAG_KEY, dispatchQueryRequest.getDatasource()); return tags; } + + @Getter + @RequiredArgsConstructor + public static class DropIndexResult { + private static final int PREFIX_LEN = 10; + + private final String status; + + public static DropIndexResult fromJobId(String jobId) { + String status = new String(Base64.getDecoder().decode(jobId)).substring(PREFIX_LEN); + return new DropIndexResult(status); + } + + public String toJobId() { + String queryId = RandomStringUtils.randomAlphanumeric(PREFIX_LEN) + status; + return Base64.getEncoder().encodeToString(queryId.getBytes(StandardCharsets.UTF_8)); + } + + public JSONObject result() { + JSONObject result = new JSONObject(); + if (JobRunState.SUCCESS.toString().equalsIgnoreCase(status)) { + result.put(STATUS_FIELD, status); + // todo. refactor response handling. + JSONObject dummyData = new JSONObject(); + dummyData.put("result", new JSONArray()); + dummyData.put("schema", new JSONArray()); + dummyData.put("applicationId", "fakeDropIndexApplicationId"); + result.put(DATA_FIELD, dummyData); + } else { + result.put(STATUS_FIELD, status); + result.put(ERROR_FIELD, "failed to drop index"); + } + return result; + } + } } diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java deleted file mode 100644 index 6a4045b85a..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/StreamingQueryHandler.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher; - -import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.INDEX_TAG_KEY; -import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY; - -import java.util.Map; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.sql.legacy.metrics.MetricName; -import org.opensearch.sql.legacy.utils.MetricUtils; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; -import org.opensearch.sql.spark.asyncquery.model.SparkSubmitParameters; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.client.StartJobRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryContext; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest; -import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; -import 
org.opensearch.sql.spark.dispatcher.model.JobType;
-import org.opensearch.sql.spark.leasemanager.LeaseManager;
-import org.opensearch.sql.spark.leasemanager.model.LeaseRequest;
-import org.opensearch.sql.spark.response.JobExecutionResponseReader;
-
-/** Handle Streaming Query. */
-public class StreamingQueryHandler extends BatchQueryHandler {
-  private final EMRServerlessClient emrServerlessClient;
-
-  public StreamingQueryHandler(
-      EMRServerlessClient emrServerlessClient,
-      JobExecutionResponseReader jobExecutionResponseReader,
-      LeaseManager leaseManager) {
-    super(emrServerlessClient, jobExecutionResponseReader, leaseManager);
-    this.emrServerlessClient = emrServerlessClient;
-  }
-
-  @Override
-  public DispatchQueryResponse submit(
-      DispatchQueryRequest dispatchQueryRequest, DispatchQueryContext context) {
-
-    leaseManager.borrow(new LeaseRequest(JobType.STREAMING, dispatchQueryRequest.getDatasource()));
-
-    String jobName = dispatchQueryRequest.getClusterName() + ":" + "index-query";
-    IndexQueryDetails indexQueryDetails = context.getIndexQueryDetails();
-    Map<String, String> tags = context.getTags();
-    tags.put(INDEX_TAG_KEY, indexQueryDetails.openSearchIndexName());
-    DataSourceMetadata dataSourceMetadata = context.getDataSourceMetadata();
-    tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText());
-    StartJobRequest startJobRequest =
-        new StartJobRequest(
-            dispatchQueryRequest.getQuery(),
-            jobName,
-            dispatchQueryRequest.getApplicationId(),
-            dispatchQueryRequest.getExecutionRoleARN(),
-            SparkSubmitParameters.Builder.builder()
-                .dataSource(dataSourceMetadata)
-                .structuredStreaming(true)
-                .extraParameters(dispatchQueryRequest.getExtraSparkSubmitParams())
-                .build()
-                .toString(),
-            tags,
-            indexQueryDetails.isAutoRefresh(),
-            dataSourceMetadata.getResultIndex());
-    String jobId = emrServerlessClient.startJobRun(startJobRequest);
-    MetricUtils.incrementNumericalMetric(MetricName.EMR_STREAMING_QUERY_JOBS_CREATION_COUNT);
-    return new DispatchQueryResponse(
-        AsyncQueryId.newAsyncQueryId(dataSourceMetadata.getName()),
-        jobId,
-        dataSourceMetadata.getResultIndex(),
-        null);
-  }
-}
diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java
deleted file mode 100644
index d3400d86bf..0000000000
--- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryContext.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.sql.spark.dispatcher.model;
-
-import java.util.Map;
-import lombok.Builder;
-import lombok.Getter;
-import org.opensearch.sql.datasource.model.DataSourceMetadata;
-import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId;
-
-@Getter
-@Builder
-public class DispatchQueryContext {
-  private final AsyncQueryId queryId;
-  private final DataSourceMetadata dataSourceMetadata;
-  private final Map<String, String> tags;
-  private final IndexQueryDetails indexQueryDetails;
-}
diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java
index 6aa28227a1..823a4570ce 100644
--- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java
+++ b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryRequest.java
@@ -23,7 +23,4 @@ public class DispatchQueryRequest {
   /** Optional extra Spark 
submit parameters to include in final request */ private String extraSparkSubmitParams; - - /** Optional sessionId. */ - private String sessionId; } diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java index b20648cdfd..9ee5f156f2 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java +++ b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/DispatchQueryResponse.java @@ -2,13 +2,11 @@ import lombok.AllArgsConstructor; import lombok.Data; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; @Data @AllArgsConstructor public class DispatchQueryResponse { - private AsyncQueryId queryId; private String jobId; + private boolean isDropIndexQuery; private String resultIndex; - private String sessionId; } diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java index fc1513241f..5a9fe4d31f 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java +++ b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/FullyQualifiedTableName.java @@ -5,9 +5,6 @@ package org.opensearch.sql.spark.dispatcher.model; -import static org.apache.commons.lang3.StringUtils.strip; -import static org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails.STRIP_CHARS; - import java.util.Arrays; import lombok.Data; import lombok.NoArgsConstructor; @@ -43,23 +40,4 @@ public FullyQualifiedTableName(String fullyQualifiedName) { tableName = parts[0]; } } - - /** - * Convert qualified name to Flint name concat by underscore. - * - * @return Flint name - */ - public String toFlintName() { - StringBuilder builder = new StringBuilder(); - if (datasourceName != null) { - builder.append(strip(datasourceName, STRIP_CHARS)).append("_"); - } - if (schemaName != null) { - builder.append(strip(schemaName, STRIP_CHARS)).append("_"); - } - if (tableName != null) { - builder.append(strip(tableName, STRIP_CHARS)); - } - return builder.toString(); - } } diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java deleted file mode 100644 index b01ecf55ba..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexDMLResult.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher.model; - -import static org.opensearch.sql.spark.execution.session.SessionModel.DATASOURCE_NAME; - -import com.google.common.collect.ImmutableList; -import java.io.IOException; -import lombok.Data; -import lombok.EqualsAndHashCode; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.sql.spark.execution.statestore.StateModel; - -/** Plugin create Index DML result. 
*/ -@Data -@EqualsAndHashCode(callSuper = false) -public class IndexDMLResult extends StateModel { - private static final String QUERY_ID = "queryId"; - private static final String QUERY_RUNTIME = "queryRunTime"; - private static final String UPDATE_TIME = "updateTime"; - private static final String DOC_ID_PREFIX = "index"; - - private final String queryId; - private final String status; - private final String error; - private final String datasourceName; - private final Long queryRunTime; - private final Long updateTime; - - public static IndexDMLResult copy(IndexDMLResult copy, long seqNo, long primaryTerm) { - return new IndexDMLResult( - copy.queryId, - copy.status, - copy.error, - copy.datasourceName, - copy.queryRunTime, - copy.updateTime); - } - - @Override - public String getId() { - return DOC_ID_PREFIX + queryId; - } - - @Override - public long getSeqNo() { - return SequenceNumbers.UNASSIGNED_SEQ_NO; - } - - @Override - public long getPrimaryTerm() { - return SequenceNumbers.UNASSIGNED_PRIMARY_TERM; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder - .startObject() - .field(QUERY_ID, queryId) - .field("status", status) - .field("error", error) - .field(DATASOURCE_NAME, datasourceName) - .field(QUERY_RUNTIME, queryRunTime) - .field(UPDATE_TIME, updateTime) - .field("result", ImmutableList.of()) - .field("schema", ImmutableList.of()) - .endObject(); - return builder; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java deleted file mode 100644 index 2c96511d2a..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryActionType.java +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher.model; - -/** Enum for Index Action in the given query.* */ -public enum IndexQueryActionType { - CREATE, - REFRESH, - DESCRIBE, - SHOW, - DROP -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java deleted file mode 100644 index 576b0772d2..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/IndexQueryDetails.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher.model; - -import static org.apache.commons.lang3.StringUtils.strip; - -import lombok.EqualsAndHashCode; -import lombok.Getter; -import org.apache.commons.lang3.StringUtils; -import org.opensearch.sql.spark.flint.FlintIndexType; - -/** Index details in an async query. */ -@Getter -@EqualsAndHashCode -public class IndexQueryDetails { - - public static final String STRIP_CHARS = "`"; - - private String indexName; - private FullyQualifiedTableName fullyQualifiedTableName; - // by default, auto_refresh = false; - private boolean autoRefresh; - private IndexQueryActionType indexQueryActionType; - // materialized view special case where - // table name and mv name are combined. 
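// A worked example of the naming scheme implemented by openSearchIndexName()
// below, with hypothetical names and the index-type suffixes left symbolic:
// a covering index `idx` on table my_glue.default.http_logs becomes
// flint_my_glue_default_http_logs_idx_<covering suffix>; a skipping index on
// the same table becomes flint_my_glue_default_http_logs_<skipping suffix>;
// and a materialized view my_glue.default.my_view becomes
// flint_my_glue_default_my_view. Backticks are stripped and the final name is
// lower-cased.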
- private String mvName; - private FlintIndexType indexType; - - private IndexQueryDetails() {} - - public static IndexQueryDetailsBuilder builder() { - return new IndexQueryDetailsBuilder(); - } - - // Builder class - public static class IndexQueryDetailsBuilder { - private final IndexQueryDetails indexQueryDetails; - - public IndexQueryDetailsBuilder() { - indexQueryDetails = new IndexQueryDetails(); - } - - public IndexQueryDetailsBuilder indexName(String indexName) { - indexQueryDetails.indexName = indexName; - return this; - } - - public IndexQueryDetailsBuilder fullyQualifiedTableName(FullyQualifiedTableName tableName) { - indexQueryDetails.fullyQualifiedTableName = tableName; - return this; - } - - public IndexQueryDetailsBuilder autoRefresh(Boolean autoRefresh) { - indexQueryDetails.autoRefresh = autoRefresh; - return this; - } - - public IndexQueryDetailsBuilder indexQueryActionType( - IndexQueryActionType indexQueryActionType) { - indexQueryDetails.indexQueryActionType = indexQueryActionType; - return this; - } - - public IndexQueryDetailsBuilder mvName(String mvName) { - indexQueryDetails.mvName = mvName; - return this; - } - - public IndexQueryDetailsBuilder indexType(FlintIndexType indexType) { - indexQueryDetails.indexType = indexType; - return this; - } - - public IndexQueryDetails build() { - return indexQueryDetails; - } - } - - public String openSearchIndexName() { - FullyQualifiedTableName fullyQualifiedTableName = getFullyQualifiedTableName(); - String indexName = StringUtils.EMPTY; - switch (getIndexType()) { - case COVERING: - indexName = - "flint_" - + fullyQualifiedTableName.toFlintName() - + "_" - + strip(getIndexName(), STRIP_CHARS) - + "_" - + getIndexType().getSuffix(); - break; - case SKIPPING: - indexName = - "flint_" + fullyQualifiedTableName.toFlintName() + "_" + getIndexType().getSuffix(); - break; - case MATERIALIZED_VIEW: - indexName = "flint_" + new FullyQualifiedTableName(mvName).toFlintName(); - break; - } - return indexName.toLowerCase(); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java b/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java deleted file mode 100644 index 01f5f422e9..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/dispatcher/model/JobType.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.dispatcher.model; - -public enum JobType { - INTERACTIVE("interactive"), - STREAMING("streaming"), - BATCH("batch"); - - private String text; - - JobType(String text) { - this.text = text; - } - - public String getText() { - return this.text; - } - - /** - * Get JobType from text. - * - * @param text text. - * @return JobType {@link JobType}. 
- */ - public static JobType fromString(String text) { - for (JobType JobType : JobType.values()) { - if (JobType.text.equalsIgnoreCase(text)) { - return JobType; - } - } - throw new IllegalArgumentException("No JobType with text " + text + " found"); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java deleted file mode 100644 index b2201fbd01..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/CreateSessionRequest.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import java.util.Map; -import lombok.Data; -import org.opensearch.sql.spark.asyncquery.model.SparkSubmitParameters; -import org.opensearch.sql.spark.client.StartJobRequest; - -@Data -public class CreateSessionRequest { - private final String jobName; - private final String applicationId; - private final String executionRoleArn; - private final SparkSubmitParameters.Builder sparkSubmitParametersBuilder; - private final Map tags; - private final String resultIndex; - private final String datasourceName; - - public StartJobRequest getStartJobRequest() { - return new InteractiveSessionStartJobRequest( - "select 1", - jobName, - applicationId, - executionRoleArn, - sparkSubmitParametersBuilder.build().toString(), - tags, - resultIndex); - } - - static class InteractiveSessionStartJobRequest extends StartJobRequest { - public InteractiveSessionStartJobRequest( - String query, - String jobName, - String applicationId, - String executionRoleArn, - String sparkSubmitParams, - Map tags, - String resultIndex) { - super( - query, - jobName, - applicationId, - executionRoleArn, - sparkSubmitParams, - tags, - false, - resultIndex); - } - - /** Interactive query keep running. 
*/ - @Override - public Long executionTimeout() { - return 0L; - } - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java deleted file mode 100644 index 3221b33b2c..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/InteractiveSession.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import static org.opensearch.sql.spark.execution.session.SessionModel.initInteractiveSession; -import static org.opensearch.sql.spark.execution.session.SessionState.DEAD; -import static org.opensearch.sql.spark.execution.session.SessionState.END_STATE; -import static org.opensearch.sql.spark.execution.session.SessionState.FAIL; -import static org.opensearch.sql.spark.execution.statement.StatementId.newStatementId; -import static org.opensearch.sql.spark.execution.statestore.StateStore.createSession; -import static org.opensearch.sql.spark.execution.statestore.StateStore.getSession; - -import java.util.Optional; -import lombok.Builder; -import lombok.Getter; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.index.engine.VersionConflictEngineException; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.execution.statement.QueryRequest; -import org.opensearch.sql.spark.execution.statement.Statement; -import org.opensearch.sql.spark.execution.statement.StatementId; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.rest.model.LangType; - -/** - * Interactive session. - * - *
<p>
ENTRY_STATE: not_started - */ -@Getter -@Builder -public class InteractiveSession implements Session { - private static final Logger LOG = LogManager.getLogger(); - - public static final String SESSION_ID_TAG_KEY = "sid"; - - private final SessionId sessionId; - private final StateStore stateStore; - private final EMRServerlessClient serverlessClient; - private SessionModel sessionModel; - - @Override - public void open(CreateSessionRequest createSessionRequest) { - try { - // append session id; - createSessionRequest - .getSparkSubmitParametersBuilder() - .sessionExecution(sessionId.getSessionId(), createSessionRequest.getDatasourceName()); - createSessionRequest.getTags().put(SESSION_ID_TAG_KEY, sessionId.getSessionId()); - String jobID = serverlessClient.startJobRun(createSessionRequest.getStartJobRequest()); - String applicationId = createSessionRequest.getStartJobRequest().getApplicationId(); - - sessionModel = - initInteractiveSession( - applicationId, jobID, sessionId, createSessionRequest.getDatasourceName()); - createSession(stateStore, sessionModel.getDatasourceName()).apply(sessionModel); - } catch (VersionConflictEngineException e) { - String errorMsg = "session already exist. " + sessionId; - LOG.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - } - - /** todo. StatementSweeper will delete doc. */ - @Override - public void close() { - Optional model = - getSession(stateStore, sessionModel.getDatasourceName()).apply(sessionModel.getId()); - if (model.isEmpty()) { - throw new IllegalStateException("session does not exist. " + sessionModel.getSessionId()); - } else { - serverlessClient.cancelJobRun(sessionModel.getApplicationId(), sessionModel.getJobId()); - } - } - - /** Submit statement. If submit successfully, Statement in waiting state. */ - public StatementId submit(QueryRequest request) { - Optional model = - getSession(stateStore, sessionModel.getDatasourceName()).apply(sessionModel.getId()); - if (model.isEmpty()) { - throw new IllegalStateException("session does not exist. 
" + sessionModel.getSessionId()); - } else { - sessionModel = model.get(); - if (!END_STATE.contains(sessionModel.getSessionState())) { - String qid = request.getQueryId().getId(); - StatementId statementId = newStatementId(qid); - Statement st = - Statement.builder() - .sessionId(sessionId) - .applicationId(sessionModel.getApplicationId()) - .jobId(sessionModel.getJobId()) - .stateStore(stateStore) - .statementId(statementId) - .langType(LangType.SQL) - .datasourceName(sessionModel.getDatasourceName()) - .query(request.getQuery()) - .queryId(qid) - .build(); - st.open(); - return statementId; - } else { - String errMsg = - String.format( - "can't submit statement, session should not be in end state, " - + "current session state is: %s", - sessionModel.getSessionState().getSessionState()); - LOG.debug(errMsg); - throw new IllegalStateException(errMsg); - } - } - } - - @Override - public Optional get(StatementId stID) { - return StateStore.getStatement(stateStore, sessionModel.getDatasourceName()) - .apply(stID.getId()) - .map( - model -> - Statement.builder() - .sessionId(sessionId) - .applicationId(model.getApplicationId()) - .jobId(model.getJobId()) - .statementId(model.getStatementId()) - .langType(model.getLangType()) - .query(model.getQuery()) - .queryId(model.getQueryId()) - .stateStore(stateStore) - .statementModel(model) - .build()); - } - - @Override - public boolean isReady() { - return sessionModel.getSessionState() != DEAD && sessionModel.getSessionState() != FAIL; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/Session.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/Session.java deleted file mode 100644 index d3d3411ded..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/Session.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import java.util.Optional; -import org.opensearch.sql.spark.execution.statement.QueryRequest; -import org.opensearch.sql.spark.execution.statement.Statement; -import org.opensearch.sql.spark.execution.statement.StatementId; - -/** Session define the statement execution context. Each session is binding to one Spark Job. */ -public interface Session { - /** open session. */ - void open(CreateSessionRequest createSessionRequest); - - /** close session. */ - void close(); - - /** - * submit {@link QueryRequest}. - * - * @param request {@link QueryRequest} - * @return {@link StatementId} - */ - StatementId submit(QueryRequest request); - - /** - * get {@link Statement}. - * - * @param stID {@link StatementId} - * @return {@link Statement} - */ - Optional get(StatementId stID); - - SessionModel getSessionModel(); - - SessionId getSessionId(); - - /** return true if session is ready to use. 
*/ - boolean isReady(); -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionId.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionId.java deleted file mode 100644 index c85e4dd35c..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionId.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import static org.opensearch.sql.spark.utils.IDUtils.decode; -import static org.opensearch.sql.spark.utils.IDUtils.encode; - -import lombok.Data; - -@Data -public class SessionId { - public static final int PREFIX_LEN = 10; - - private final String sessionId; - - public static SessionId newSessionId(String datasourceName) { - return new SessionId(encode(datasourceName)); - } - - public String getDataSourceName() { - return decode(sessionId); - } - - @Override - public String toString() { - return "sessionId=" + sessionId; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java deleted file mode 100644 index 0f0a4ce373..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionManager.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import static org.opensearch.sql.spark.execution.session.SessionId.newSessionId; - -import java.util.Optional; -import lombok.RequiredArgsConstructor; -import org.opensearch.sql.common.setting.Settings; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.execution.statestore.StateStore; - -/** - * Singleton Class - * - *
<p>
todo. add Session cache and Session sweeper.
- */
-@RequiredArgsConstructor
-public class SessionManager {
-  private final StateStore stateStore;
-  private final EMRServerlessClient emrServerlessClient;
-  private final Settings settings;
-
-  public Session createSession(CreateSessionRequest request) {
-    InteractiveSession session =
-        InteractiveSession.builder()
-            .sessionId(newSessionId(request.getDatasourceName()))
-            .stateStore(stateStore)
-            .serverlessClient(emrServerlessClient)
-            .build();
-    session.open(request);
-    return session;
-  }
-
-  public Optional<Session> getSession(SessionId sid) {
-    Optional<SessionModel> model =
-        StateStore.getSession(stateStore, sid.getDataSourceName()).apply(sid.getSessionId());
-    if (model.isPresent()) {
-      InteractiveSession session =
-          InteractiveSession.builder()
-              .sessionId(sid)
-              .stateStore(stateStore)
-              .serverlessClient(emrServerlessClient)
-              .sessionModel(model.get())
-              .build();
-      return Optional.ofNullable(session);
-    }
-    return Optional.empty();
-  }
-
-  // todo, keep this only for testing; remove it later.
-  public boolean isEnabled() {
-    return true;
-  }
-}
diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java
deleted file mode 100644
index 806cdb083e..0000000000
--- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionModel.java
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.sql.spark.execution.session;
-
-import static org.opensearch.sql.spark.execution.session.SessionState.NOT_STARTED;
-import static org.opensearch.sql.spark.execution.session.SessionType.INTERACTIVE;
-
-import java.io.IOException;
-import lombok.Builder;
-import lombok.Data;
-import lombok.SneakyThrows;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.core.xcontent.XContentParserUtils;
-import org.opensearch.index.seqno.SequenceNumbers;
-import org.opensearch.sql.spark.execution.statestore.StateModel;
-
-/** Session data in flint.ql.sessions index. 
*/ -@Data -@Builder -public class SessionModel extends StateModel { - public static final String VERSION = "version"; - public static final String TYPE = "type"; - public static final String SESSION_TYPE = "sessionType"; - public static final String SESSION_ID = "sessionId"; - public static final String SESSION_STATE = "state"; - public static final String DATASOURCE_NAME = "dataSourceName"; - public static final String LAST_UPDATE_TIME = "lastUpdateTime"; - public static final String APPLICATION_ID = "applicationId"; - public static final String JOB_ID = "jobId"; - public static final String ERROR = "error"; - public static final String UNKNOWN = "unknown"; - public static final String SESSION_DOC_TYPE = "session"; - - private final String version; - private final SessionType sessionType; - private final SessionId sessionId; - private final SessionState sessionState; - private final String applicationId; - private final String jobId; - private final String datasourceName; - private final String error; - private final long lastUpdateTime; - - private final long seqNo; - private final long primaryTerm; - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder - .startObject() - .field(VERSION, version) - .field(TYPE, SESSION_DOC_TYPE) - .field(SESSION_TYPE, sessionType.getSessionType()) - .field(SESSION_ID, sessionId.getSessionId()) - .field(SESSION_STATE, sessionState.getSessionState()) - .field(DATASOURCE_NAME, datasourceName) - .field(APPLICATION_ID, applicationId) - .field(JOB_ID, jobId) - .field(LAST_UPDATE_TIME, lastUpdateTime) - .field(ERROR, error) - .endObject(); - return builder; - } - - public static SessionModel of(SessionModel copy, long seqNo, long primaryTerm) { - return builder() - .version(copy.version) - .sessionType(copy.sessionType) - .sessionId(new SessionId(copy.sessionId.getSessionId())) - .sessionState(copy.sessionState) - .datasourceName(copy.datasourceName) - .applicationId(copy.getApplicationId()) - .jobId(copy.jobId) - .error(UNKNOWN) - .lastUpdateTime(copy.getLastUpdateTime()) - .seqNo(seqNo) - .primaryTerm(primaryTerm) - .build(); - } - - public static SessionModel copyWithState( - SessionModel copy, SessionState state, long seqNo, long primaryTerm) { - return builder() - .version(copy.version) - .sessionType(copy.sessionType) - .sessionId(new SessionId(copy.sessionId.getSessionId())) - .sessionState(state) - .datasourceName(copy.datasourceName) - .applicationId(copy.getApplicationId()) - .jobId(copy.jobId) - .error(UNKNOWN) - .lastUpdateTime(copy.getLastUpdateTime()) - .seqNo(seqNo) - .primaryTerm(primaryTerm) - .build(); - } - - @SneakyThrows - public static SessionModel fromXContent(XContentParser parser, long seqNo, long primaryTerm) { - SessionModelBuilder builder = new SessionModelBuilder(); - XContentParserUtils.ensureExpectedToken( - XContentParser.Token.START_OBJECT, parser.currentToken(), parser); - while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { - String fieldName = parser.currentName(); - parser.nextToken(); - switch (fieldName) { - case VERSION: - builder.version(parser.text()); - break; - case SESSION_TYPE: - builder.sessionType(SessionType.fromString(parser.text())); - break; - case SESSION_ID: - builder.sessionId(new SessionId(parser.text())); - break; - case SESSION_STATE: - builder.sessionState(SessionState.fromString(parser.text())); - break; - case DATASOURCE_NAME: - builder.datasourceName(parser.text()); - break; - case ERROR: - 
builder.error(parser.text()); - break; - case APPLICATION_ID: - builder.applicationId(parser.text()); - break; - case JOB_ID: - builder.jobId(parser.text()); - break; - case LAST_UPDATE_TIME: - builder.lastUpdateTime(parser.longValue()); - break; - case TYPE: - // do nothing. - break; - } - } - builder.seqNo(seqNo); - builder.primaryTerm(primaryTerm); - return builder.build(); - } - - public static SessionModel initInteractiveSession( - String applicationId, String jobId, SessionId sid, String datasourceName) { - return builder() - .version("1.0") - .sessionType(INTERACTIVE) - .sessionId(sid) - .sessionState(NOT_STARTED) - .datasourceName(datasourceName) - .applicationId(applicationId) - .jobId(jobId) - .error(UNKNOWN) - .lastUpdateTime(System.currentTimeMillis()) - .seqNo(SequenceNumbers.UNASSIGNED_SEQ_NO) - .primaryTerm(SequenceNumbers.UNASSIGNED_PRIMARY_TERM) - .build(); - } - - @Override - public String getId() { - return sessionId.getSessionId(); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java deleted file mode 100644 index bd5d14c603..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionState.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import com.google.common.collect.ImmutableList; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.stream.Collectors; -import lombok.Getter; - -@Getter -public enum SessionState { - NOT_STARTED("not_started"), - RUNNING("running"), - DEAD("dead"), - FAIL("fail"); - - public static List END_STATE = ImmutableList.of(DEAD, FAIL); - - private final String sessionState; - - SessionState(String sessionState) { - this.sessionState = sessionState; - } - - private static Map STATES = - Arrays.stream(SessionState.values()) - .collect(Collectors.toMap(t -> t.name().toLowerCase(), t -> t)); - - public static SessionState fromString(String key) { - for (SessionState ss : SessionState.values()) { - if (ss.getSessionState().toLowerCase(Locale.ROOT).equals(key)) { - return ss; - } - } - throw new IllegalArgumentException("Invalid session state: " + key); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java b/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java deleted file mode 100644 index 10b9ce7bd5..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/session/SessionType.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import java.util.Locale; -import lombok.Getter; - -@Getter -public enum SessionType { - INTERACTIVE("interactive"); - - private final String sessionType; - - SessionType(String sessionType) { - this.sessionType = sessionType; - } - - public static SessionType fromString(String key) { - for (SessionType sType : SessionType.values()) { - if (sType.getSessionType().toLowerCase(Locale.ROOT).equals(key)) { - return sType; - } - } - throw new IllegalArgumentException("Invalid session type: " + key); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java 
b/spark/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java deleted file mode 100644 index c365265224..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/QueryRequest.java +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import lombok.Data; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; -import org.opensearch.sql.spark.rest.model.LangType; - -@Data -public class QueryRequest { - private final AsyncQueryId queryId; - private final LangType langType; - private final String query; -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java b/spark/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java deleted file mode 100644 index 94c1f79511..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/Statement.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import static org.opensearch.sql.spark.execution.statement.StatementModel.submitStatement; -import static org.opensearch.sql.spark.execution.statestore.StateStore.createStatement; -import static org.opensearch.sql.spark.execution.statestore.StateStore.getStatement; -import static org.opensearch.sql.spark.execution.statestore.StateStore.updateStatementState; - -import lombok.Builder; -import lombok.Getter; -import lombok.Setter; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.index.engine.DocumentMissingException; -import org.opensearch.index.engine.VersionConflictEngineException; -import org.opensearch.sql.spark.execution.session.SessionId; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.rest.model.LangType; - -/** Statement represent query to execute in session. One statement map to one session. */ -@Getter -@Builder -public class Statement { - private static final Logger LOG = LogManager.getLogger(); - - private final SessionId sessionId; - private final String applicationId; - private final String jobId; - private final StatementId statementId; - private final LangType langType; - private final String datasourceName; - private final String query; - private final String queryId; - private final StateStore stateStore; - - @Setter private StatementModel statementModel; - - /** Open a statement. */ - public void open() { - try { - statementModel = - submitStatement( - sessionId, - applicationId, - jobId, - statementId, - langType, - datasourceName, - query, - queryId); - statementModel = createStatement(stateStore, datasourceName).apply(statementModel); - } catch (VersionConflictEngineException e) { - String errorMsg = "statement already exist. " + statementId; - LOG.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - } - - /** Cancel a statement. */ - public void cancel() { - StatementState statementState = statementModel.getStatementState(); - - if (statementState.equals(StatementState.SUCCESS) - || statementState.equals(StatementState.FAILED) - || statementState.equals(StatementState.CANCELLED)) { - String errorMsg = - String.format( - "can't cancel statement in %s state. 
statement: %s.", - statementState.getState(), statementId); - LOG.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - try { - this.statementModel = - updateStatementState(stateStore, statementModel.getDatasourceName()) - .apply(this.statementModel, StatementState.CANCELLED); - } catch (DocumentMissingException e) { - String errorMsg = - String.format("cancel statement failed. no statement found. statement: %s.", statementId); - LOG.error(errorMsg); - throw new IllegalStateException(errorMsg); - } catch (VersionConflictEngineException e) { - this.statementModel = - getStatement(stateStore, statementModel.getDatasourceName()) - .apply(statementModel.getId()) - .orElse(this.statementModel); - String errorMsg = - String.format( - "cancel statement failed. current statementState: %s " + "statement: %s.", - this.statementModel.getStatementState(), statementId); - LOG.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - } - - public StatementState getStatementState() { - return statementModel.getStatementState(); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java b/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java deleted file mode 100644 index 33284c4b3d..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementId.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import lombok.Data; - -@Data -public class StatementId { - private final String id; - - // construct statementId from queryId. - public static StatementId newStatementId(String qid) { - return new StatementId(qid); - } - - @Override - public String toString() { - return "statementId=" + id; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java b/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java deleted file mode 100644 index adc147c905..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementModel.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import static org.opensearch.sql.spark.execution.session.SessionModel.APPLICATION_ID; -import static org.opensearch.sql.spark.execution.session.SessionModel.DATASOURCE_NAME; -import static org.opensearch.sql.spark.execution.session.SessionModel.JOB_ID; -import static org.opensearch.sql.spark.execution.statement.StatementState.WAITING; - -import java.io.IOException; -import lombok.Builder; -import lombok.Data; -import lombok.SneakyThrows; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.sql.spark.execution.session.SessionId; -import org.opensearch.sql.spark.execution.statestore.StateModel; -import org.opensearch.sql.spark.rest.model.LangType; - -/** Statement data in flint.ql.sessions index. 
*/ -@Data -@Builder -public class StatementModel extends StateModel { - public static final String VERSION = "version"; - public static final String TYPE = "type"; - public static final String STATEMENT_STATE = "state"; - public static final String STATEMENT_ID = "statementId"; - public static final String SESSION_ID = "sessionId"; - public static final String LANG = "lang"; - public static final String QUERY = "query"; - public static final String QUERY_ID = "queryId"; - public static final String SUBMIT_TIME = "submitTime"; - public static final String ERROR = "error"; - public static final String UNKNOWN = ""; - public static final String STATEMENT_DOC_TYPE = "statement"; - - private final String version; - private final StatementState statementState; - private final StatementId statementId; - private final SessionId sessionId; - private final String applicationId; - private final String jobId; - private final LangType langType; - private final String datasourceName; - private final String query; - private final String queryId; - private final long submitTime; - private final String error; - - private final long seqNo; - private final long primaryTerm; - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder - .startObject() - .field(VERSION, version) - .field(TYPE, STATEMENT_DOC_TYPE) - .field(STATEMENT_STATE, statementState.getState()) - .field(STATEMENT_ID, statementId.getId()) - .field(SESSION_ID, sessionId.getSessionId()) - .field(APPLICATION_ID, applicationId) - .field(JOB_ID, jobId) - .field(LANG, langType.getText()) - .field(DATASOURCE_NAME, datasourceName) - .field(QUERY, query) - .field(QUERY_ID, queryId) - .field(SUBMIT_TIME, submitTime) - .field(ERROR, error) - .endObject(); - return builder; - } - - public static StatementModel copy(StatementModel copy, long seqNo, long primaryTerm) { - return builder() - .version("1.0") - .statementState(copy.statementState) - .statementId(copy.statementId) - .sessionId(copy.sessionId) - .applicationId(copy.applicationId) - .jobId(copy.jobId) - .langType(copy.langType) - .datasourceName(copy.datasourceName) - .query(copy.query) - .queryId(copy.queryId) - .submitTime(copy.submitTime) - .error(copy.error) - .seqNo(seqNo) - .primaryTerm(primaryTerm) - .build(); - } - - public static StatementModel copyWithState( - StatementModel copy, StatementState state, long seqNo, long primaryTerm) { - return builder() - .version("1.0") - .statementState(state) - .statementId(copy.statementId) - .sessionId(copy.sessionId) - .applicationId(copy.applicationId) - .jobId(copy.jobId) - .langType(copy.langType) - .datasourceName(copy.datasourceName) - .query(copy.query) - .queryId(copy.queryId) - .submitTime(copy.submitTime) - .error(copy.error) - .seqNo(seqNo) - .primaryTerm(primaryTerm) - .build(); - } - - @SneakyThrows - public static StatementModel fromXContent(XContentParser parser, long seqNo, long primaryTerm) { - StatementModel.StatementModelBuilder builder = StatementModel.builder(); - XContentParserUtils.ensureExpectedToken( - XContentParser.Token.START_OBJECT, parser.currentToken(), parser); - while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { - String fieldName = parser.currentName(); - parser.nextToken(); - switch (fieldName) { - case VERSION: - builder.version(parser.text()); - break; - case TYPE: - // do nothing - break; - case STATEMENT_STATE: - builder.statementState(StatementState.fromString(parser.text())); - break; - case STATEMENT_ID: - 
builder.statementId(new StatementId(parser.text())); - break; - case SESSION_ID: - builder.sessionId(new SessionId(parser.text())); - break; - case APPLICATION_ID: - builder.applicationId(parser.text()); - break; - case JOB_ID: - builder.jobId(parser.text()); - break; - case LANG: - builder.langType(LangType.fromString(parser.text())); - break; - case DATASOURCE_NAME: - builder.datasourceName(parser.text()); - break; - case QUERY: - builder.query(parser.text()); - break; - case QUERY_ID: - builder.queryId(parser.text()); - break; - case SUBMIT_TIME: - builder.submitTime(parser.longValue()); - break; - case ERROR: - builder.error(parser.text()); - break; - } - } - builder.seqNo(seqNo); - builder.primaryTerm(primaryTerm); - return builder.build(); - } - - public static StatementModel submitStatement( - SessionId sid, - String applicationId, - String jobId, - StatementId statementId, - LangType langType, - String datasourceName, - String query, - String queryId) { - return builder() - .version("1.0") - .statementState(WAITING) - .statementId(statementId) - .sessionId(sid) - .applicationId(applicationId) - .jobId(jobId) - .langType(langType) - .datasourceName(datasourceName) - .query(query) - .queryId(queryId) - .submitTime(System.currentTimeMillis()) - .error(UNKNOWN) - .seqNo(SequenceNumbers.UNASSIGNED_SEQ_NO) - .primaryTerm(SequenceNumbers.UNASSIGNED_PRIMARY_TERM) - .build(); - } - - @Override - public String getId() { - return statementId.getId(); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java b/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java deleted file mode 100644 index 48978ff8f9..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statement/StatementState.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import java.util.Arrays; -import java.util.Locale; -import java.util.Map; -import java.util.stream.Collectors; -import lombok.Getter; - -/** {@link Statement} State. 
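fromXContent above walks the document field by field, routing each known key into the builder and ignoring the doc-type marker. The same idiom, sketched standalone with a plain Map standing in for the XContentParser (all names here are illustrative):

    import java.util.Map;

    // Sketch of the fromXContent idiom: route known fields, skip the doc
    // type marker, and ignore anything unexpected.
    final class StatementDoc {
      final String state;
      final String query;
      final long submitTime;

      private StatementDoc(String state, String query, long submitTime) {
        this.state = state; this.query = query; this.submitTime = submitTime;
      }

      static StatementDoc fromMap(Map<String, Object> doc) {
        String state = null, query = null;
        long submitTime = 0L;
        for (Map.Entry<String, Object> e : doc.entrySet()) {
          switch (e.getKey()) {
            case "state": state = (String) e.getValue(); break;
            case "query": query = (String) e.getValue(); break;
            case "submitTime": submitTime = ((Number) e.getValue()).longValue(); break;
            case "type": break; // doc type marker, not a field of the model
            default: break;     // unknown fields are skipped, as in the parser above
          }
        }
        return new StatementDoc(state, query, submitTime);
      }
    }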
*/ -@Getter -public enum StatementState { - WAITING("waiting"), - RUNNING("running"), - SUCCESS("success"), - FAILED("failed"), - CANCELLED("cancelled"); - - private final String state; - - StatementState(String state) { - this.state = state; - } - - private static Map STATES = - Arrays.stream(StatementState.values()) - .collect(Collectors.toMap(t -> t.name().toLowerCase(), t -> t)); - - public static StatementState fromString(String key) { - for (StatementState ss : StatementState.values()) { - if (ss.getState().toLowerCase(Locale.ROOT).equals(key)) { - return ss; - } - } - throw new IllegalArgumentException("Invalid statement state: " + key); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java b/spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java deleted file mode 100644 index fe105cc8e4..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateModel.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statestore; - -import org.opensearch.core.xcontent.ToXContentObject; -import org.opensearch.core.xcontent.XContentParser; - -public abstract class StateModel implements ToXContentObject { - public static final String VERSION_1_0 = "1.0"; - public static final String TYPE = "type"; - public static final String STATE = "state"; - public static final String LAST_UPDATE_TIME = "lastUpdateTime"; - - public abstract String getId(); - - public abstract long getSeqNo(); - - public abstract long getPrimaryTerm(); - - public interface CopyBuilder { - T of(T copy, long seqNo, long primaryTerm); - } - - public interface StateCopyBuilder { - T of(T copy, S state, long seqNo, long primaryTerm); - } - - public interface FromXContent { - T fromXContent(XContentParser parser, long seqNo, long primaryTerm); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java b/spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java deleted file mode 100644 index e99087b24d..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/execution/statestore/StateStore.java +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statestore; - -import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_REQUEST_BUFFER_INDEX_NAME; -import static org.opensearch.sql.spark.execution.statestore.StateModel.STATE; - -import com.google.common.annotations.VisibleForTesting; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.StandardCharsets; -import java.util.Locale; -import java.util.Optional; -import java.util.function.BiFunction; -import java.util.function.Function; -import java.util.function.Supplier; -import lombok.RequiredArgsConstructor; -import org.apache.commons.io.IOUtils; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.action.DocWriteResponse; -import org.opensearch.action.admin.indices.create.CreateIndexRequest; -import org.opensearch.action.admin.indices.create.CreateIndexResponse; -import org.opensearch.action.get.GetRequest; -import org.opensearch.action.get.GetResponse; -import org.opensearch.action.index.IndexRequest; -import org.opensearch.action.index.IndexResponse; -import 
org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.update.UpdateRequest; -import org.opensearch.action.update.UpdateResponse; -import org.opensearch.client.Client; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.action.ActionFuture; -import org.opensearch.common.util.concurrent.ThreadContext; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.dispatcher.model.IndexDMLResult; -import org.opensearch.sql.spark.execution.session.SessionModel; -import org.opensearch.sql.spark.execution.session.SessionState; -import org.opensearch.sql.spark.execution.session.SessionType; -import org.opensearch.sql.spark.execution.statement.StatementModel; -import org.opensearch.sql.spark.execution.statement.StatementState; -import org.opensearch.sql.spark.flint.FlintIndexState; -import org.opensearch.sql.spark.flint.FlintIndexStateModel; - -/** - * State Store maintain the state of Session and Statement. State State create/update/get doc on - * index regardless user FGAC permissions. - */ -@RequiredArgsConstructor -public class StateStore { - public static String SETTINGS_FILE_NAME = "query_execution_request_settings.yml"; - public static String MAPPING_FILE_NAME = "query_execution_request_mapping.yml"; - public static Function DATASOURCE_TO_REQUEST_INDEX = - datasourceName -> - String.format( - "%s_%s", SPARK_REQUEST_BUFFER_INDEX_NAME, datasourceName.toLowerCase(Locale.ROOT)); - public static String ALL_DATASOURCE = "*"; - - private static final Logger LOG = LogManager.getLogger(); - - private final Client client; - private final ClusterService clusterService; - - @VisibleForTesting - public T create( - T st, StateModel.CopyBuilder builder, String indexName) { - try { - if (!this.clusterService.state().routingTable().hasIndex(indexName)) { - createIndex(indexName); - } - IndexRequest indexRequest = - new IndexRequest(indexName) - .id(st.getId()) - .source(st.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - .setIfSeqNo(st.getSeqNo()) - .setIfPrimaryTerm(st.getPrimaryTerm()) - .create(true) - .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - try (ThreadContext.StoredContext ignored = - client.threadPool().getThreadContext().stashContext()) { - IndexResponse indexResponse = client.index(indexRequest).actionGet(); - if (indexResponse.getResult().equals(DocWriteResponse.Result.CREATED)) { - LOG.debug("Successfully created doc. id: {}", st.getId()); - return builder.of(st, indexResponse.getSeqNo(), indexResponse.getPrimaryTerm()); - } else { - throw new RuntimeException( - String.format( - Locale.ROOT, - "Failed create doc. 
id: %s, error: %s", - st.getId(), - indexResponse.getResult().getLowercase())); - } - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @VisibleForTesting - public Optional get( - String sid, StateModel.FromXContent builder, String indexName) { - try { - if (!this.clusterService.state().routingTable().hasIndex(indexName)) { - createIndex(indexName); - return Optional.empty(); - } - GetRequest getRequest = new GetRequest().index(indexName).id(sid).refresh(true); - try (ThreadContext.StoredContext ignored = - client.threadPool().getThreadContext().stashContext()) { - GetResponse getResponse = client.get(getRequest).actionGet(); - if (getResponse.isExists()) { - XContentParser parser = - XContentType.JSON - .xContent() - .createParser( - NamedXContentRegistry.EMPTY, - LoggingDeprecationHandler.INSTANCE, - getResponse.getSourceAsString()); - parser.nextToken(); - return Optional.of( - builder.fromXContent(parser, getResponse.getSeqNo(), getResponse.getPrimaryTerm())); - } else { - return Optional.empty(); - } - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @VisibleForTesting - public T updateState( - T st, S state, StateModel.StateCopyBuilder builder, String indexName) { - try { - T model = builder.of(st, state, st.getSeqNo(), st.getPrimaryTerm()); - UpdateRequest updateRequest = - new UpdateRequest() - .index(indexName) - .id(model.getId()) - .setIfSeqNo(model.getSeqNo()) - .setIfPrimaryTerm(model.getPrimaryTerm()) - .doc(model.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)) - .fetchSource(true) - .setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL); - try (ThreadContext.StoredContext ignored = - client.threadPool().getThreadContext().stashContext()) { - UpdateResponse updateResponse = client.update(updateRequest).actionGet(); - LOG.debug("Successfully update doc. id: {}", st.getId()); - return builder.of(model, state, updateResponse.getSeqNo(), updateResponse.getPrimaryTerm()); - } - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - private void createIndex(String indexName) { - try { - CreateIndexRequest createIndexRequest = new CreateIndexRequest(indexName); - createIndexRequest - .mapping(loadConfigFromResource(MAPPING_FILE_NAME), XContentType.YAML) - .settings(loadConfigFromResource(SETTINGS_FILE_NAME), XContentType.YAML); - ActionFuture createIndexResponseActionFuture; - try (ThreadContext.StoredContext ignored = - client.threadPool().getThreadContext().stashContext()) { - createIndexResponseActionFuture = client.admin().indices().create(createIndexRequest); - } - CreateIndexResponse createIndexResponse = createIndexResponseActionFuture.actionGet(); - if (createIndexResponse.isAcknowledged()) { - LOG.info("Index: {} creation Acknowledged", indexName); - } else { - throw new RuntimeException("Index creation is not acknowledged."); - } - } catch (Throwable e) { - throw new RuntimeException( - "Internal server error while creating" + indexName + " index:: " + e.getMessage()); - } - } - - private long count(String indexName, QueryBuilder query) { - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(query); - searchSourceBuilder.size(0); - - // https://github.com/opensearch-project/sql/issues/1801. 
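The store keeps one request index per datasource and creates it lazily on first use. A minimal sketch of that naming-plus-ensure pattern; the base index name below is a placeholder, not necessarily the value of SPARK_REQUEST_BUFFER_INDEX_NAME:

    import java.util.Locale;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Function;

    // Sketch of the per-datasource request index convention: one state
    // index per datasource, created lazily on the first write.
    class RequestIndexNames {
      static final String BASE = ".query_request_buffer"; // illustrative only
      static final Function<String, String> FOR_DATASOURCE =
          ds -> String.format(Locale.ROOT, "%s_%s", BASE, ds.toLowerCase(Locale.ROOT));

      private final Set<String> created = ConcurrentHashMap.newKeySet();

      // Returns true the first time an index name is seen, standing in for
      // the hasIndex(...) check followed by createIndex(...) above.
      boolean ensureCreated(String datasource) {
        return created.add(FOR_DATASOURCE.apply(datasource));
      }
    }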
- SearchRequest searchRequest = - new SearchRequest() - .indices(indexName) - .preference("_primary_first") - .source(searchSourceBuilder); - - ActionFuture searchResponseActionFuture; - try (ThreadContext.StoredContext ignored = - client.threadPool().getThreadContext().stashContext()) { - searchResponseActionFuture = client.search(searchRequest); - } - SearchResponse searchResponse = searchResponseActionFuture.actionGet(); - if (searchResponse.status().getStatus() != 200) { - throw new RuntimeException( - "Fetching job metadata information failed with status : " + searchResponse.status()); - } else { - return searchResponse.getHits().getTotalHits().value; - } - } - - private String loadConfigFromResource(String fileName) throws IOException { - InputStream fileStream = StateStore.class.getClassLoader().getResourceAsStream(fileName); - return IOUtils.toString(fileStream, StandardCharsets.UTF_8); - } - - /** Helper Functions */ - public static Function createStatement( - StateStore stateStore, String datasourceName) { - return (st) -> - stateStore.create( - st, StatementModel::copy, DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function> getStatement( - StateStore stateStore, String datasourceName) { - return (docId) -> - stateStore.get( - docId, StatementModel::fromXContent, DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static BiFunction updateStatementState( - StateStore stateStore, String datasourceName) { - return (old, state) -> - stateStore.updateState( - old, - state, - StatementModel::copyWithState, - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function createSession( - StateStore stateStore, String datasourceName) { - return (session) -> - stateStore.create( - session, SessionModel::of, DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function> getSession( - StateStore stateStore, String datasourceName) { - return (docId) -> - stateStore.get( - docId, SessionModel::fromXContent, DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static BiFunction updateSessionState( - StateStore stateStore, String datasourceName) { - return (old, state) -> - stateStore.updateState( - old, - state, - SessionModel::copyWithState, - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function createJobMetaData( - StateStore stateStore, String datasourceName) { - return (jobMetadata) -> - stateStore.create( - jobMetadata, - AsyncQueryJobMetadata::copy, - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function> getJobMetaData( - StateStore stateStore, String datasourceName) { - return (docId) -> - stateStore.get( - docId, - AsyncQueryJobMetadata::fromXContent, - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Supplier activeSessionsCount(StateStore stateStore, String datasourceName) { - return () -> - stateStore.count( - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName), - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery(SessionModel.TYPE, SessionModel.SESSION_DOC_TYPE)) - .must( - QueryBuilders.termQuery( - SessionModel.SESSION_TYPE, SessionType.INTERACTIVE.getSessionType())) - .must( - QueryBuilders.termQuery( - SessionModel.SESSION_STATE, SessionState.RUNNING.getSessionState()))); - } - - public static BiFunction - updateFlintIndexState(StateStore stateStore, String datasourceName) { - return (old, state) -> - stateStore.updateState( - old, - state, - FlintIndexStateModel::copyWithState, - 
DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function> getFlintIndexState( - StateStore stateStore, String datasourceName) { - return (docId) -> - stateStore.get( - docId, - FlintIndexStateModel::fromXContent, - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function createFlintIndexState( - StateStore stateStore, String datasourceName) { - return (st) -> - stateStore.create( - st, FlintIndexStateModel::copy, DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName)); - } - - public static Function createIndexDMLResult( - StateStore stateStore, String indexName) { - return (result) -> stateStore.create(result, IndexDMLResult::copy, indexName); - } - - public static Supplier activeRefreshJobCount(StateStore stateStore, String datasourceName) { - return () -> - stateStore.count( - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName), - QueryBuilders.boolQuery() - .must( - QueryBuilders.termQuery( - SessionModel.TYPE, FlintIndexStateModel.FLINT_INDEX_DOC_TYPE)) - .must(QueryBuilders.termQuery(STATE, FlintIndexState.REFRESHING.getState()))); - } - - public static Supplier activeStatementsCount(StateStore stateStore, String datasourceName) { - return () -> - stateStore.count( - DATASOURCE_TO_REQUEST_INDEX.apply(datasourceName), - QueryBuilders.boolQuery() - .must( - QueryBuilders.termQuery(StatementModel.TYPE, StatementModel.STATEMENT_DOC_TYPE)) - .should( - QueryBuilders.termsQuery( - StatementModel.STATEMENT_STATE, - StatementState.RUNNING.getState(), - StatementState.WAITING.getState()))); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java index 1721263bf8..81b7fa1693 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java +++ b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadata.java @@ -7,7 +7,6 @@ import java.util.Locale; import java.util.Map; -import java.util.Optional; import lombok.Data; @Data @@ -20,13 +19,8 @@ public class FlintIndexMetadata { public static final String AUTO_REFRESH = "auto_refresh"; public static final String AUTO_REFRESH_DEFAULT = "false"; - public static final String APP_ID = "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID"; - public static final String FLINT_INDEX_STATE_DOC_ID = "latestId"; - private final String jobId; private final boolean autoRefresh; - private final String appId; - private final String latestId; public static FlintIndexMetadata fromMetatdata(Map metaMap) { Map propertiesMap = (Map) metaMap.get(PROPERTIES_KEY); @@ -38,12 +32,6 @@ public static FlintIndexMetadata fromMetatdata(Map metaMap) { !((String) options.getOrDefault(AUTO_REFRESH, AUTO_REFRESH_DEFAULT)) .toLowerCase(Locale.ROOT) .equalsIgnoreCase(AUTO_REFRESH_DEFAULT); - String appId = (String) envMap.getOrDefault(APP_ID, null); - String latestId = (String) metaMap.getOrDefault(FLINT_INDEX_STATE_DOC_ID, null); - return new FlintIndexMetadata(jobId, autoRefresh, appId, latestId); - } - - public Optional getLatestId() { - return Optional.ofNullable(latestId); + return new FlintIndexMetadata(jobId, autoRefresh); } } diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReader.java b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReader.java index d4a8e7ddbf..e4a5e92035 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReader.java +++ 
b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReader.java @@ -1,6 +1,6 @@ package org.opensearch.sql.spark.flint; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.dispatcher.model.IndexDetails; /** Interface for FlintIndexMetadataReader */ public interface FlintIndexMetadataReader { @@ -8,8 +8,8 @@ public interface FlintIndexMetadataReader { /** * Given Index details, get the streaming job Id. * - * @param indexQueryDetails indexDetails. + * @param indexDetails indexDetails. * @return FlintIndexMetadata. */ - FlintIndexMetadata getFlintIndexMetadata(IndexQueryDetails indexQueryDetails); + FlintIndexMetadata getFlintIndexMetadata(IndexDetails indexDetails); } diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImpl.java b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImpl.java index a16d0b9138..5f712e65cd 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImpl.java +++ b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImpl.java @@ -5,7 +5,7 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.dispatcher.model.IndexDetails; /** Implementation of {@link FlintIndexMetadataReader} */ @AllArgsConstructor @@ -14,8 +14,8 @@ public class FlintIndexMetadataReaderImpl implements FlintIndexMetadataReader { private final Client client; @Override - public FlintIndexMetadata getFlintIndexMetadata(IndexQueryDetails indexQueryDetails) { - String indexName = indexQueryDetails.openSearchIndexName(); + public FlintIndexMetadata getFlintIndexMetadata(IndexDetails indexDetails) { + String indexName = indexDetails.openSearchIndexName(); GetMappingsResponse mappingsResponse = client.admin().indices().prepareGetMappings(indexName).get(); try { diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java deleted file mode 100644 index 0ab4d92c17..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexState.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint; - -import java.util.Arrays; -import java.util.Locale; -import java.util.Map; -import java.util.stream.Collectors; -import lombok.Getter; - -/** Flint index state. */ -@Getter -public enum FlintIndexState { - // stable state - EMPTY("empty"), - // transitioning state - CREATING("creating"), - // transitioning state - REFRESHING("refreshing"), - // transitioning state - CANCELLING("cancelling"), - // stable state - ACTIVE("active"), - // transitioning state - DELETING("deleting"), - // stable state - DELETED("deleted"), - // stable state - FAILED("failed"), - // unknown state, if some state update in Spark side, not reflect in here. 
- UNKNOWN("unknown"); - - private final String state; - - FlintIndexState(String state) { - this.state = state; - } - - private static Map STATES = - Arrays.stream(FlintIndexState.values()) - .collect(Collectors.toMap(t -> t.name().toLowerCase(), t -> t)); - - public static FlintIndexState fromString(String key) { - for (FlintIndexState ss : FlintIndexState.values()) { - if (ss.getState().toLowerCase(Locale.ROOT).equals(key)) { - return ss; - } - } - return UNKNOWN; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java b/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java deleted file mode 100644 index bb73f439a2..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/FlintIndexStateModel.java +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint; - -import static org.opensearch.sql.spark.execution.session.SessionModel.APPLICATION_ID; -import static org.opensearch.sql.spark.execution.session.SessionModel.DATASOURCE_NAME; -import static org.opensearch.sql.spark.execution.session.SessionModel.JOB_ID; -import static org.opensearch.sql.spark.execution.statement.StatementModel.ERROR; -import static org.opensearch.sql.spark.execution.statement.StatementModel.VERSION; - -import java.io.IOException; -import lombok.Builder; -import lombok.EqualsAndHashCode; -import lombok.Getter; -import lombok.SneakyThrows; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.core.xcontent.XContentParserUtils; -import org.opensearch.sql.spark.execution.statestore.StateModel; - -/** Flint Index Model maintain the index state. 
*/ -@Getter -@Builder -@EqualsAndHashCode(callSuper = false) -public class FlintIndexStateModel extends StateModel { - public static final String FLINT_INDEX_DOC_TYPE = "flintindexstate"; - public static final String LATEST_ID = "latestId"; - public static final String DOC_ID_PREFIX = "flint"; - - private final FlintIndexState indexState; - private final String applicationId; - private final String jobId; - private final String latestId; - private final String datasourceName; - private final long lastUpdateTime; - private final String error; - - @EqualsAndHashCode.Exclude private final long seqNo; - @EqualsAndHashCode.Exclude private final long primaryTerm; - - public FlintIndexStateModel( - FlintIndexState indexState, - String applicationId, - String jobId, - String latestId, - String datasourceName, - long lastUpdateTime, - String error, - long seqNo, - long primaryTerm) { - this.indexState = indexState; - this.applicationId = applicationId; - this.jobId = jobId; - this.latestId = latestId; - this.datasourceName = datasourceName; - this.lastUpdateTime = lastUpdateTime; - this.error = error; - this.seqNo = seqNo; - this.primaryTerm = primaryTerm; - } - - public static FlintIndexStateModel copy(FlintIndexStateModel copy, long seqNo, long primaryTerm) { - return new FlintIndexStateModel( - copy.indexState, - copy.applicationId, - copy.jobId, - copy.latestId, - copy.datasourceName, - copy.lastUpdateTime, - copy.error, - seqNo, - primaryTerm); - } - - public static FlintIndexStateModel copyWithState( - FlintIndexStateModel copy, FlintIndexState state, long seqNo, long primaryTerm) { - return new FlintIndexStateModel( - state, - copy.applicationId, - copy.jobId, - copy.latestId, - copy.datasourceName, - copy.lastUpdateTime, - copy.error, - seqNo, - primaryTerm); - } - - @SneakyThrows - public static FlintIndexStateModel fromXContent( - XContentParser parser, long seqNo, long primaryTerm) { - FlintIndexStateModelBuilder builder = FlintIndexStateModel.builder(); - XContentParserUtils.ensureExpectedToken( - XContentParser.Token.START_OBJECT, parser.currentToken(), parser); - while (!XContentParser.Token.END_OBJECT.equals(parser.nextToken())) { - String fieldName = parser.currentName(); - parser.nextToken(); - switch (fieldName) { - case STATE: - builder.indexState(FlintIndexState.fromString(parser.text())); - case APPLICATION_ID: - builder.applicationId(parser.text()); - break; - case JOB_ID: - builder.jobId(parser.text()); - break; - case LATEST_ID: - builder.latestId(parser.text()); - break; - case DATASOURCE_NAME: - builder.datasourceName(parser.text()); - break; - case LAST_UPDATE_TIME: - builder.lastUpdateTime(parser.longValue()); - break; - case ERROR: - builder.error(parser.text()); - break; - } - } - builder.seqNo(seqNo); - builder.primaryTerm(primaryTerm); - return builder.build(); - } - - @Override - public String getId() { - return latestId; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder - .startObject() - .field(VERSION, VERSION_1_0) - .field(TYPE, FLINT_INDEX_DOC_TYPE) - .field(STATE, indexState.getState()) - .field(APPLICATION_ID, applicationId) - .field(JOB_ID, jobId) - .field(LATEST_ID, latestId) - .field(DATASOURCE_NAME, datasourceName) - .field(LAST_UPDATE_TIME, lastUpdateTime) - .field(ERROR, error) - .endObject(); - return builder; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java 
b/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java deleted file mode 100644 index fb44b27568..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOp.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint.operation; - -import static org.opensearch.sql.spark.execution.statestore.StateStore.getFlintIndexState; -import static org.opensearch.sql.spark.execution.statestore.StateStore.updateFlintIndexState; - -import java.util.Locale; -import java.util.Optional; -import lombok.RequiredArgsConstructor; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexMetadata; -import org.opensearch.sql.spark.flint.FlintIndexState; -import org.opensearch.sql.spark.flint.FlintIndexStateModel; - -/** Flint Index Operation. */ -@RequiredArgsConstructor -public abstract class FlintIndexOp { - private static final Logger LOG = LogManager.getLogger(); - - private final StateStore stateStore; - private final String datasourceName; - - /** Apply operation on {@link FlintIndexMetadata} */ - public void apply(FlintIndexMetadata metadata) { - // todo, remove this logic after IndexState feature is enabled in Flint. - Optional latestId = metadata.getLatestId(); - if (latestId.isEmpty()) { - // take action without occ. - FlintIndexStateModel fakeModel = - new FlintIndexStateModel( - FlintIndexState.REFRESHING, - metadata.getAppId(), - metadata.getJobId(), - "", - datasourceName, - System.currentTimeMillis(), - "", - SequenceNumbers.UNASSIGNED_SEQ_NO, - SequenceNumbers.UNASSIGNED_PRIMARY_TERM); - runOp(fakeModel); - } else { - Optional flintIndexOptional = - getFlintIndexState(stateStore, datasourceName).apply(latestId.get()); - if (flintIndexOptional.isEmpty()) { - String errorMsg = String.format(Locale.ROOT, "no state found. docId: %s", latestId.get()); - LOG.error(errorMsg); - throw new IllegalStateException(errorMsg); - } - FlintIndexStateModel flintIndex = flintIndexOptional.get(); - - // 1.validate state. - FlintIndexState currentState = flintIndex.getIndexState(); - if (!validate(currentState)) { - String errorMsg = - String.format(Locale.ROOT, "validate failed. unexpected state: [%s]", currentState); - LOG.debug(errorMsg); - return; - } - - // 2.begin, move to transitioning state - FlintIndexState transitioningState = transitioningState(); - try { - flintIndex = - updateFlintIndexState(stateStore, datasourceName) - .apply(flintIndex, transitioningState()); - } catch (Exception e) { - String errorMsg = - String.format( - Locale.ROOT, "begin failed. target transitioning state: [%s]", transitioningState); - LOG.error(errorMsg, e); - throw new IllegalStateException(errorMsg, e); - } - - // 3.runOp - runOp(flintIndex); - - // 4.commit, move to stable state - FlintIndexState stableState = stableState(); - try { - updateFlintIndexState(stateStore, datasourceName).apply(flintIndex, stableState); - } catch (Exception e) { - String errorMsg = - String.format(Locale.ROOT, "commit failed. target stable state: [%s]", stableState); - LOG.error(errorMsg, e); - throw new IllegalStateException(errorMsg, e); - } - } - } - - /** - * Validate expected state. - * - *

return true if validate. - */ - abstract boolean validate(FlintIndexState state); - - /** get transitioningState */ - abstract FlintIndexState transitioningState(); - - abstract void runOp(FlintIndexStateModel flintIndex); - - /** get stableState */ - abstract FlintIndexState stableState(); -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java b/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java deleted file mode 100644 index ba067e5c03..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpCancel.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint.operation; - -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import lombok.SneakyThrows; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexState; -import org.opensearch.sql.spark.flint.FlintIndexStateModel; - -/** Cancel refreshing job. */ -public class FlintIndexOpCancel extends FlintIndexOp { - private static final Logger LOG = LogManager.getLogger(); - - private final EMRServerlessClient emrServerlessClient; - - public FlintIndexOpCancel( - StateStore stateStore, String datasourceName, EMRServerlessClient emrServerlessClient) { - super(stateStore, datasourceName); - this.emrServerlessClient = emrServerlessClient; - } - - public boolean validate(FlintIndexState state) { - return state == FlintIndexState.REFRESHING || state == FlintIndexState.CANCELLING; - } - - @Override - FlintIndexState transitioningState() { - return FlintIndexState.CANCELLING; - } - - /** cancel EMR-S job, wait cancelled state upto 15s. */ - @SneakyThrows - @Override - void runOp(FlintIndexStateModel flintIndexStateModel) { - String applicationId = flintIndexStateModel.getApplicationId(); - String jobId = flintIndexStateModel.getJobId(); - try { - emrServerlessClient.cancelJobRun( - flintIndexStateModel.getApplicationId(), flintIndexStateModel.getJobId()); - } catch (IllegalArgumentException e) { - // handle job does not exist case. - LOG.error(e); - return; - } - - // pull job state until timeout or cancelled. 
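The cancel operation polls the job state a bounded number of times and times out if the terminal state never appears. The same loop sketched standalone, assuming a Supplier-based probe (hypothetical names):

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import java.util.function.Supplier;

    // Sketch of the bounded poll used after cancelJobRun: probe the job
    // state a fixed number of times, sleeping between attempts, and time
    // out if the terminal state is never observed.
    class JobCancelWaiter {
      static void waitForCancelled(Supplier<String> jobState, int attempts)
          throws InterruptedException, TimeoutException {
        for (int i = 0; i < attempts; i++) {
          if ("Cancelled".equalsIgnoreCase(jobState.get())) {
            return;
          }
          TimeUnit.SECONDS.sleep(1);
        }
        throw new TimeoutException("cancel job timeout");
      }
    }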
- String jobRunState = ""; - int count = 3; - while (count-- != 0) { - jobRunState = - emrServerlessClient.getJobRunResult(applicationId, jobId).getJobRun().getState(); - if (jobRunState.equalsIgnoreCase("Cancelled")) { - break; - } - TimeUnit.SECONDS.sleep(1); - } - if (!jobRunState.equalsIgnoreCase("Cancelled")) { - String errMsg = "cancel job timeout"; - LOG.error(errMsg); - throw new TimeoutException(errMsg); - } - } - - @Override - FlintIndexState stableState() { - return FlintIndexState.ACTIVE; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDelete.java b/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDelete.java deleted file mode 100644 index d8b275c621..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpDelete.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint.operation; - -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexState; -import org.opensearch.sql.spark.flint.FlintIndexStateModel; - -/** Flint Index Logical delete operation. Change state to DELETED. */ -public class FlintIndexOpDelete extends FlintIndexOp { - - public FlintIndexOpDelete(StateStore stateStore, String datasourceName) { - super(stateStore, datasourceName); - } - - public boolean validate(FlintIndexState state) { - return state == FlintIndexState.ACTIVE - || state == FlintIndexState.EMPTY - || state == FlintIndexState.DELETING; - } - - @Override - FlintIndexState transitioningState() { - return FlintIndexState.DELETING; - } - - @Override - void runOp(FlintIndexStateModel flintIndex) { - // logically delete, do nothing. - } - - @Override - FlintIndexState stableState() { - return FlintIndexState.DELETED; - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/functions/response/DefaultSparkSqlFunctionResponseHandle.java b/spark/src/main/java/org/opensearch/sql/spark/functions/response/DefaultSparkSqlFunctionResponseHandle.java index 422d1caaf1..8fc417cd80 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/functions/response/DefaultSparkSqlFunctionResponseHandle.java +++ b/spark/src/main/java/org/opensearch/sql/spark/functions/response/DefaultSparkSqlFunctionResponseHandle.java @@ -48,6 +48,7 @@ private void constructIteratorAndSchema(JSONObject responseObject) { List result = new ArrayList<>(); List columnList; JSONObject items = responseObject.getJSONObject("data"); + logger.info("Spark Application ID: " + items.getString("applicationId")); columnList = getColumnList(items.getJSONArray("schema")); for (int i = 0; i < items.getJSONArray("result").length(); i++) { JSONObject row = diff --git a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java b/spark/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java deleted file mode 100644 index ab6305c835..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/ConcurrencyLimitExceededException.java +++ /dev/null @@ -1,13 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.leasemanager; - -/** Concurrency limit exceeded. 
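Both operations above share one template-method skeleton: validate the current state, persist a transitioning state, run the side effect, then commit the stable state. A compact sketch of that shape with illustrative names, not the plugin's API:

    // Template-method sketch of a Flint index operation's lifecycle.
    abstract class IndexOp {
      enum State { ACTIVE, CANCELLING, DELETING, DELETED, REFRESHING }

      final State apply(State current) {
        if (!validate(current)) {
          return current;                           // 1. unexpected state: bail out early
        }
        State transitioning = transitioningState(); // 2. begin: persisted with OCC in the real flow
        runOp(transitioning);                       // 3. perform the side effect
        return stableState();                       // 4. commit the stable state
      }

      abstract boolean validate(State current);
      abstract State transitioningState();
      abstract void runOp(State transitioning);
      abstract State stableState();
    }

    class DeleteOp extends IndexOp {
      boolean validate(State s) { return s == State.ACTIVE; }
      State transitioningState() { return State.DELETING; }
      void runOp(State transitioning) { /* logical delete: nothing to do */ }
      State stableState() { return State.DELETED; }
    }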
*/ -public class ConcurrencyLimitExceededException extends RuntimeException { - public ConcurrencyLimitExceededException(String message) { - super(message); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java b/spark/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java deleted file mode 100644 index 375fa7b11e..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManager.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.leasemanager; - -import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_REFRESH_JOB_LIMIT; -import static org.opensearch.sql.common.setting.Settings.Key.SPARK_EXECUTION_SESSION_LIMIT; -import static org.opensearch.sql.spark.execution.statestore.StateStore.ALL_DATASOURCE; -import static org.opensearch.sql.spark.execution.statestore.StateStore.activeRefreshJobCount; -import static org.opensearch.sql.spark.execution.statestore.StateStore.activeSessionsCount; - -import java.util.Arrays; -import java.util.List; -import java.util.Locale; -import java.util.function.Predicate; -import lombok.RequiredArgsConstructor; -import org.opensearch.sql.common.setting.Settings; -import org.opensearch.sql.spark.dispatcher.model.JobType; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; - -/** - * Default Lease Manager - *

  • QueryHandler borrows a lease before executing the query. - *
  • LeaseManagerService checks each request against the domain-level concurrency limit. - *
  • LeaseManagerService running on data node and check limit based on cluster settings. - */ -public class DefaultLeaseManager implements LeaseManager { - - private final List> concurrentLimitRules; - private final Settings settings; - private final StateStore stateStore; - - public DefaultLeaseManager(Settings settings, StateStore stateStore) { - this.settings = settings; - this.stateStore = stateStore; - this.concurrentLimitRules = - Arrays.asList( - new ConcurrentSessionRule(settings, stateStore), - new ConcurrentRefreshJobRule(settings, stateStore)); - } - - @Override - public void borrow(LeaseRequest request) { - for (Rule rule : concurrentLimitRules) { - if (!rule.test(request)) { - throw new ConcurrencyLimitExceededException(rule.description()); - } - } - } - - interface Rule extends Predicate { - String description(); - } - - @RequiredArgsConstructor - public static class ConcurrentSessionRule implements Rule { - private final Settings settings; - private final StateStore stateStore; - - @Override - public String description() { - return String.format( - Locale.ROOT, "domain concurrent active session can not exceed %d", sessionMaxLimit()); - } - - @Override - public boolean test(LeaseRequest leaseRequest) { - if (leaseRequest.getJobType() != JobType.INTERACTIVE) { - return true; - } - return activeSessionsCount(stateStore, ALL_DATASOURCE).get() < sessionMaxLimit(); - } - - public int sessionMaxLimit() { - return settings.getSettingValue(SPARK_EXECUTION_SESSION_LIMIT); - } - } - - @RequiredArgsConstructor - public static class ConcurrentRefreshJobRule implements Rule { - private final Settings settings; - private final StateStore stateStore; - - @Override - public String description() { - return String.format( - Locale.ROOT, "domain concurrent refresh job can not exceed %d", refreshJobLimit()); - } - - @Override - public boolean test(LeaseRequest leaseRequest) { - if (leaseRequest.getJobType() == JobType.INTERACTIVE) { - return true; - } - return activeRefreshJobCount(stateStore, ALL_DATASOURCE).get() < refreshJobLimit(); - } - - public int refreshJobLimit() { - return settings.getSettingValue(SPARK_EXECUTION_REFRESH_JOB_LIMIT); - } - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java b/spark/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java deleted file mode 100644 index 6cc74ecdc5..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/LeaseManager.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.leasemanager; - -import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; - -/** Lease manager */ -public interface LeaseManager { - - /** - * Borrow from LeaseManager. If no exception, lease successfully. 
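borrow() above is an admission check: each rule is a predicate with a human-readable description, and the first failing rule aborts the request. Sketched standalone with counters standing in for the StateStore counts (names are illustrative):

    import java.util.List;
    import java.util.concurrent.atomic.AtomicInteger;
    import java.util.function.Predicate;

    // Sketch of predicate-based admission rules: borrow() throws on the
    // first rule that fails, using the rule's own description as message.
    class LeaseRules {
      interface Rule extends Predicate<String> { String description(); }

      static Rule maxActive(AtomicInteger active, int limit) {
        return new Rule() {
          public boolean test(String jobType) { return active.get() < limit; }
          public String description() { return "active jobs cannot exceed " + limit; }
        };
      }

      static void borrow(List<Rule> rules, String jobType) {
        for (Rule r : rules) {
          if (!r.test(jobType)) {
            throw new IllegalStateException(r.description());
          }
        }
      }
    }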
- * - * @throws ConcurrencyLimitExceededException - */ - void borrow(LeaseRequest request); -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java b/spark/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java deleted file mode 100644 index 190c033198..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/leasemanager/model/LeaseRequest.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.leasemanager.model; - -import lombok.Getter; -import lombok.RequiredArgsConstructor; -import org.opensearch.sql.spark.dispatcher.model.JobType; - -/** Lease Request. */ -@Getter -@RequiredArgsConstructor -public class LeaseRequest { - private final JobType jobType; - private final String datasourceName; -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java b/spark/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java index e4773310f0..d3cbd68dce 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java +++ b/spark/src/main/java/org/opensearch/sql/spark/response/JobExecutionResponseReader.java @@ -5,9 +5,9 @@ package org.opensearch.sql.spark.response; -import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD; import static org.opensearch.sql.spark.data.constants.SparkConstants.JOB_ID_FIELD; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_RESPONSE_BUFFER_INDEX_NAME; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -39,13 +39,9 @@ public JSONObject getResultFromOpensearchIndex(String jobId, String resultIndex) return searchInSparkIndex(QueryBuilders.termQuery(JOB_ID_FIELD, jobId), resultIndex); } - public JSONObject getResultWithQueryId(String queryId, String resultIndex) { - return searchInSparkIndex(QueryBuilders.termQuery("queryId", queryId), resultIndex); - } - private JSONObject searchInSparkIndex(QueryBuilder query, String resultIndex) { SearchRequest searchRequest = new SearchRequest(); - String searchResultIndex = resultIndex == null ? DEFAULT_RESULT_INDEX : resultIndex; + String searchResultIndex = resultIndex == null ? 
SPARK_RESPONSE_BUFFER_INDEX_NAME : resultIndex; searchRequest.indices(searchResultIndex); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(query); diff --git a/spark/src/main/java/org/opensearch/sql/spark/response/SparkResponse.java b/spark/src/main/java/org/opensearch/sql/spark/response/SparkResponse.java index e225804043..496caba2c9 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/response/SparkResponse.java +++ b/spark/src/main/java/org/opensearch/sql/spark/response/SparkResponse.java @@ -5,7 +5,7 @@ package org.opensearch.sql.spark.response; -import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_RESPONSE_BUFFER_INDEX_NAME; import com.google.common.annotations.VisibleForTesting; import lombok.Data; @@ -51,7 +51,7 @@ public JSONObject getResultFromOpensearchIndex() { private JSONObject searchInSparkIndex(QueryBuilder query) { SearchRequest searchRequest = new SearchRequest(); - searchRequest.indices(DEFAULT_RESULT_INDEX); + searchRequest.indices(SPARK_RESPONSE_BUFFER_INDEX_NAME); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.query(query); searchRequest.source(searchSourceBuilder); @@ -65,7 +65,7 @@ private JSONObject searchInSparkIndex(QueryBuilder query) { if (searchResponse.status().getStatus() != 200) { throw new RuntimeException( "Fetching result from " - + DEFAULT_RESULT_INDEX + + SPARK_RESPONSE_BUFFER_INDEX_NAME + " index failed with status : " + searchResponse.status()); } else { @@ -80,7 +80,7 @@ private JSONObject searchInSparkIndex(QueryBuilder query) { @VisibleForTesting void deleteInSparkIndex(String id) { - DeleteRequest deleteRequest = new DeleteRequest(DEFAULT_RESULT_INDEX); + DeleteRequest deleteRequest = new DeleteRequest(SPARK_RESPONSE_BUFFER_INDEX_NAME); deleteRequest.id(id); ActionFuture deleteResponseActionFuture; try { diff --git a/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java b/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java index 6acf6bc9a8..8802630d9f 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java +++ b/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequest.java @@ -6,7 +6,6 @@ package org.opensearch.sql.spark.rest.model; import static org.opensearch.core.xcontent.XContentParserUtils.ensureExpectedToken; -import static org.opensearch.sql.spark.execution.session.SessionModel.SESSION_ID; import java.io.IOException; import lombok.Data; @@ -19,8 +18,6 @@ public class CreateAsyncQueryRequest { private String query; private String datasource; private LangType lang; - // optional sessionId - private String sessionId; public CreateAsyncQueryRequest(String query, String datasource, LangType lang) { this.query = Validate.notNull(query, "Query can't be null"); @@ -28,19 +25,11 @@ public CreateAsyncQueryRequest(String query, String datasource, LangType lang) { this.lang = Validate.notNull(lang, "lang can't be null"); } - public CreateAsyncQueryRequest(String query, String datasource, LangType lang, String sessionId) { - this.query = Validate.notNull(query, "Query can't be null"); - this.datasource = Validate.notNull(datasource, "Datasource can't be null"); - this.lang = Validate.notNull(lang, "lang can't be null"); - this.sessionId = sessionId; - } - public static CreateAsyncQueryRequest 
fromXContentParser(XContentParser parser) throws IOException { String query = null; LangType lang = null; String datasource = null; - String sessionId = null; ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser); while (parser.nextToken() != XContentParser.Token.END_OBJECT) { String fieldName = parser.currentName(); @@ -52,12 +41,10 @@ public static CreateAsyncQueryRequest fromXContentParser(XContentParser parser) lang = LangType.fromString(langString); } else if (fieldName.equals("datasource")) { datasource = parser.textOrNull(); - } else if (fieldName.equals(SESSION_ID)) { - sessionId = parser.textOrNull(); } else { throw new IllegalArgumentException("Unknown field: " + fieldName); } } - return new CreateAsyncQueryRequest(query, datasource, lang, sessionId); + return new CreateAsyncQueryRequest(query, datasource, lang); } } diff --git a/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java b/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java index 2f918308c4..8cfe57c2a6 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java +++ b/spark/src/main/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryResponse.java @@ -12,6 +12,4 @@ @AllArgsConstructor public class CreateAsyncQueryResponse { private String queryId; - // optional sessionId - private String sessionId; } diff --git a/spark/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java b/spark/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java deleted file mode 100644 index 438d2342b4..0000000000 --- a/spark/src/main/java/org/opensearch/sql/spark/utils/IDUtils.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.utils; - -import java.nio.charset.StandardCharsets; -import java.util.Base64; -import lombok.experimental.UtilityClass; -import org.apache.commons.lang3.RandomStringUtils; - -@UtilityClass -public class IDUtils { - public static final int PREFIX_LEN = 10; - - public static String decode(String id) { - return new String(Base64.getDecoder().decode(id)).substring(PREFIX_LEN); - } - - public static String encode(String datasourceName) { - String randomId = RandomStringUtils.randomAlphanumeric(PREFIX_LEN) + datasourceName; - return Base64.getEncoder().encodeToString(randomId.getBytes(StandardCharsets.UTF_8)); - } -} diff --git a/spark/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java b/spark/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java index c1f3f02576..f6b75d49ef 100644 --- a/spark/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java +++ b/spark/src/main/java/org/opensearch/sql/spark/utils/SQLQueryUtils.java @@ -20,8 +20,7 @@ import org.opensearch.sql.spark.antlr.parser.SqlBaseParser; import org.opensearch.sql.spark.antlr.parser.SqlBaseParserBaseVisitor; import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.dispatcher.model.IndexDetails; import org.opensearch.sql.spark.flint.FlintIndexType; /** @@ -43,7 +42,7 @@ public static FullyQualifiedTableName extractFullyQualifiedTableName(String sqlQ return sparkSqlTableNameVisitor.getFullyQualifiedTableName(); } - public static IndexQueryDetails extractIndexDetails(String sqlQuery) { + public static 
IndexDetails extractIndexDetails(String sqlQuery) { FlintSparkSqlExtensionsParser flintSparkSqlExtensionsParser = new FlintSparkSqlExtensionsParser( new CommonTokenStream( @@ -53,10 +52,10 @@ public static IndexQueryDetails extractIndexDetails(String sqlQuery) { flintSparkSqlExtensionsParser.statement(); FlintSQLIndexDetailsVisitor flintSQLIndexDetailsVisitor = new FlintSQLIndexDetailsVisitor(); statementContext.accept(flintSQLIndexDetailsVisitor); - return flintSQLIndexDetailsVisitor.getIndexQueryDetailsBuilder().build(); + return flintSQLIndexDetailsVisitor.getIndexDetails(); } - public static boolean isFlintExtensionQuery(String sqlQuery) { + public static boolean isIndexQuery(String sqlQuery) { FlintSparkSqlExtensionsParser flintSparkSqlExtensionsParser = new FlintSparkSqlExtensionsParser( new CommonTokenStream( @@ -118,29 +117,29 @@ public Void visitCreateTableHeader(SqlBaseParser.CreateTableHeaderContext ctx) { public static class FlintSQLIndexDetailsVisitor extends FlintSparkSqlExtensionsBaseVisitor { - @Getter private final IndexQueryDetails.IndexQueryDetailsBuilder indexQueryDetailsBuilder; + @Getter private final IndexDetails indexDetails; public FlintSQLIndexDetailsVisitor() { - this.indexQueryDetailsBuilder = new IndexQueryDetails.IndexQueryDetailsBuilder(); + this.indexDetails = new IndexDetails(); } @Override public Void visitIndexName(FlintSparkSqlExtensionsParser.IndexNameContext ctx) { - indexQueryDetailsBuilder.indexName(ctx.getText()); + indexDetails.setIndexName(ctx.getText()); return super.visitIndexName(ctx); } @Override public Void visitTableName(FlintSparkSqlExtensionsParser.TableNameContext ctx) { - indexQueryDetailsBuilder.fullyQualifiedTableName(new FullyQualifiedTableName(ctx.getText())); + indexDetails.setFullyQualifiedTableName(new FullyQualifiedTableName(ctx.getText())); return super.visitTableName(ctx); } @Override public Void visitCreateSkippingIndexStatement( FlintSparkSqlExtensionsParser.CreateSkippingIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.CREATE); - indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); + indexDetails.setDropIndex(false); + indexDetails.setIndexType(FlintIndexType.SKIPPING); visitPropertyList(ctx.propertyList()); return super.visitCreateSkippingIndexStatement(ctx); } @@ -148,113 +147,28 @@ public Void visitCreateSkippingIndexStatement( @Override public Void visitCreateCoveringIndexStatement( FlintSparkSqlExtensionsParser.CreateCoveringIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.CREATE); - indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + indexDetails.setDropIndex(false); + indexDetails.setIndexType(FlintIndexType.COVERING); visitPropertyList(ctx.propertyList()); return super.visitCreateCoveringIndexStatement(ctx); } - @Override - public Void visitCreateMaterializedViewStatement( - FlintSparkSqlExtensionsParser.CreateMaterializedViewStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.CREATE); - indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); - indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); - visitPropertyList(ctx.propertyList()); - return super.visitCreateMaterializedViewStatement(ctx); - } - @Override public Void visitDropCoveringIndexStatement( FlintSparkSqlExtensionsParser.DropCoveringIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DROP); - 
indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); + indexDetails.setDropIndex(true); + indexDetails.setIndexType(FlintIndexType.COVERING); return super.visitDropCoveringIndexStatement(ctx); } @Override public Void visitDropSkippingIndexStatement( FlintSparkSqlExtensionsParser.DropSkippingIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DROP); - indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); + indexDetails.setDropIndex(true); + indexDetails.setIndexType(FlintIndexType.SKIPPING); return super.visitDropSkippingIndexStatement(ctx); } - @Override - public Void visitDropMaterializedViewStatement( - FlintSparkSqlExtensionsParser.DropMaterializedViewStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DROP); - indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); - indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); - return super.visitDropMaterializedViewStatement(ctx); - } - - @Override - public Void visitDescribeCoveringIndexStatement( - FlintSparkSqlExtensionsParser.DescribeCoveringIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DESCRIBE); - indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); - return super.visitDescribeCoveringIndexStatement(ctx); - } - - @Override - public Void visitDescribeSkippingIndexStatement( - FlintSparkSqlExtensionsParser.DescribeSkippingIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DESCRIBE); - indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); - return super.visitDescribeSkippingIndexStatement(ctx); - } - - @Override - public Void visitDescribeMaterializedViewStatement( - FlintSparkSqlExtensionsParser.DescribeMaterializedViewStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.DESCRIBE); - indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); - indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); - return super.visitDescribeMaterializedViewStatement(ctx); - } - - @Override - public Void visitShowCoveringIndexStatement( - FlintSparkSqlExtensionsParser.ShowCoveringIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.SHOW); - indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); - return super.visitShowCoveringIndexStatement(ctx); - } - - @Override - public Void visitShowMaterializedViewStatement( - FlintSparkSqlExtensionsParser.ShowMaterializedViewStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.SHOW); - indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); - return super.visitShowMaterializedViewStatement(ctx); - } - - @Override - public Void visitRefreshCoveringIndexStatement( - FlintSparkSqlExtensionsParser.RefreshCoveringIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.REFRESH); - indexQueryDetailsBuilder.indexType(FlintIndexType.COVERING); - return super.visitRefreshCoveringIndexStatement(ctx); - } - - @Override - public Void visitRefreshSkippingIndexStatement( - FlintSparkSqlExtensionsParser.RefreshSkippingIndexStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.REFRESH); - indexQueryDetailsBuilder.indexType(FlintIndexType.SKIPPING); - return super.visitRefreshSkippingIndexStatement(ctx); - } - - @Override - public Void visitRefreshMaterializedViewStatement( 
- FlintSparkSqlExtensionsParser.RefreshMaterializedViewStatementContext ctx) { - indexQueryDetailsBuilder.indexQueryActionType(IndexQueryActionType.REFRESH); - indexQueryDetailsBuilder.indexType(FlintIndexType.MATERIALIZED_VIEW); - indexQueryDetailsBuilder.mvName(ctx.mvName.getText()); - return super.visitRefreshMaterializedViewStatement(ctx); - } - @Override public Void visitPropertyList(FlintSparkSqlExtensionsParser.PropertyListContext ctx) { if (ctx != null) { @@ -266,7 +180,7 @@ public Void visitPropertyList(FlintSparkSqlExtensionsParser.PropertyListContext // https://github.com/apache/spark/blob/v3.5.0/sql/api/src/main/scala/org/apache/spark/sql/catalyst/util/SparkParserUtils.scala#L35 to unescape string literal if (propertyKey(property.key).toLowerCase(Locale.ROOT).contains("auto_refresh")) { if (propertyValue(property.value).toLowerCase(Locale.ROOT).contains("true")) { - indexQueryDetailsBuilder.autoRefresh(true); + indexDetails.setAutoRefresh(true); } } }); diff --git a/spark/src/main/resources/query_execution_request_mapping.yml b/spark/src/main/resources/query_execution_request_mapping.yml deleted file mode 100644 index 682534d338..0000000000 --- a/spark/src/main/resources/query_execution_request_mapping.yml +++ /dev/null @@ -1,44 +0,0 @@ ---- -## -# Copyright OpenSearch Contributors -# SPDX-License-Identifier: Apache-2.0 -## - -# Schema file for the .ql-job-metadata index -# Also "dynamic" is set to "false" so that other fields can be added. -dynamic: false -properties: - version: - type: keyword - type: - type: keyword - state: - type: keyword - statementId: - type: keyword - applicationId: - type: keyword - sessionId: - type: keyword - sessionType: - type: keyword - error: - type: text - lang: - type: keyword - query: - type: text - dataSourceName: - type: keyword - submitTime: - type: date - format: strict_date_time||epoch_millis - jobId: - type: keyword - lastUpdateTime: - type: date - format: strict_date_time||epoch_millis - queryId: - type: keyword - excludeJobIds: - type: keyword diff --git a/spark/src/main/resources/query_execution_request_settings.yml b/spark/src/main/resources/query_execution_request_settings.yml deleted file mode 100644 index da2bf07bf1..0000000000 --- a/spark/src/main/resources/query_execution_request_settings.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -## -# Copyright OpenSearch Contributors -# SPDX-License-Identifier: Apache-2.0 -## - -# Settings file for the .ql-job-metadata index -index: - number_of_shards: "1" - auto_expand_replicas: "0-2" - number_of_replicas: "0" diff --git a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java deleted file mode 100644 index 56ee56ea5e..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplSpecTest.java +++ /dev/null @@ -1,467 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.asyncquery; - -import static org.opensearch.sql.spark.data.constants.SparkConstants.DEFAULT_CLASS_NAME; -import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_REQUEST_INDEX; -import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_JOB_SESSION_ID; -import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_SESSION_CLASS_NAME; -import static 
org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_REQUEST_BUFFER_INDEX_NAME; -import static org.opensearch.sql.spark.execution.session.SessionModel.SESSION_DOC_TYPE; -import static org.opensearch.sql.spark.execution.statement.StatementModel.SESSION_ID; -import static org.opensearch.sql.spark.execution.statement.StatementModel.STATEMENT_DOC_TYPE; -import static org.opensearch.sql.spark.execution.statestore.StateStore.getStatement; -import static org.opensearch.sql.spark.execution.statestore.StateStore.updateStatementState; - -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import java.util.HashMap; -import java.util.Map; -import java.util.Optional; -import org.apache.commons.lang3.StringUtils; -import org.junit.Ignore; -import org.junit.Test; -import org.junit.jupiter.api.Disabled; -import org.opensearch.core.common.Strings; -import org.opensearch.index.query.QueryBuilders; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.sql.datasource.model.DataSourceType; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; -import org.opensearch.sql.spark.execution.session.SessionId; -import org.opensearch.sql.spark.execution.session.SessionState; -import org.opensearch.sql.spark.execution.statement.StatementModel; -import org.opensearch.sql.spark.execution.statement.StatementState; -import org.opensearch.sql.spark.leasemanager.ConcurrencyLimitExceededException; -import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; -import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; -import org.opensearch.sql.spark.rest.model.LangType; - -public class AsyncQueryExecutorServiceImplSpecTest extends AsyncQueryExecutorServiceSpec { - - @Disabled("batch query is unsupported") - public void withoutSessionCreateAsyncQueryThenGetResultThenCancel() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // disable session - enableSession(false); - - // 1. create async query. - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertFalse(clusterService().state().routingTable().hasIndex(SPARK_REQUEST_BUFFER_INDEX_NAME)); - emrsClient.startJobRunCalled(1); - - // 2. fetch async query result. - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("RUNNING", asyncQueryResults.getStatus()); - emrsClient.getJobRunResultCalled(1); - - // 3. cancel async query. - String cancelQueryId = asyncQueryExecutorService.cancelQuery(response.getQueryId()); - assertEquals(response.getQueryId(), cancelQueryId); - emrsClient.cancelJobRunCalled(1); - } - - @Disabled("batch query is unsupported") - public void sessionLimitNotImpactBatchQuery() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // disable session - enableSession(false); - setSessionLimit(0); - - // 1. create async query. 
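// Editor's illustration (not new test code): what this disabled batch test pins down.
// With enableSession(false) and setSessionLimit(0), the session cap must not gate batch
// submissions; both createAsyncQuery calls below are expected to reach EMR-S, which the
// LocalEMRSClient stub verifies via startJobRunCalled(1) and then startJobRunCalled(2).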
- CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - emrsClient.startJobRunCalled(1); - - CreateAsyncQueryResponse resp2 = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - emrsClient.startJobRunCalled(2); - } - - @Disabled("batch query is unsupported") - public void createAsyncQueryCreateJobWithCorrectParameters() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - enableSession(false); - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - String params = emrsClient.getJobRequest().getSparkSubmitParams(); - assertNull(response.getSessionId()); - assertTrue(params.contains(String.format("--class %s", DEFAULT_CLASS_NAME))); - assertFalse( - params.contains( - String.format("%s=%s", FLINT_JOB_REQUEST_INDEX, SPARK_REQUEST_BUFFER_INDEX_NAME))); - assertFalse( - params.contains(String.format("%s=%s", FLINT_JOB_SESSION_ID, response.getSessionId()))); - - // enable session - enableSession(true); - response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - params = emrsClient.getJobRequest().getSparkSubmitParams(); - assertTrue(params.contains(String.format("--class %s", FLINT_SESSION_CLASS_NAME))); - assertTrue( - params.contains( - String.format("%s=%s", FLINT_JOB_REQUEST_INDEX, SPARK_REQUEST_BUFFER_INDEX_NAME))); - assertTrue( - params.contains(String.format("%s=%s", FLINT_JOB_SESSION_ID, response.getSessionId()))); - } - - @Test - public void withSessionCreateAsyncQueryThenGetResultThenCancel() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // 1. create async query. - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertNotNull(response.getSessionId()); - Optional statementModel = - getStatement(stateStore, DATASOURCE).apply(response.getQueryId()); - assertTrue(statementModel.isPresent()); - assertEquals(StatementState.WAITING, statementModel.get().getStatementState()); - - // 2. fetch async query result. - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertTrue(Strings.isEmpty(asyncQueryResults.getError())); - assertEquals(StatementState.WAITING.getState(), asyncQueryResults.getStatus()); - - // 3. cancel async query. - String cancelQueryId = asyncQueryExecutorService.cancelQuery(response.getQueryId()); - assertEquals(response.getQueryId(), cancelQueryId); - } - - @Test - public void reuseSessionWhenCreateAsyncQuery() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - // 1. create async query. - CreateAsyncQueryResponse first = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertNotNull(first.getSessionId()); - - // 2. 
reuse session id - CreateAsyncQueryResponse second = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest( - "select 1", DATASOURCE, LangType.SQL, first.getSessionId())); - - assertEquals(first.getSessionId(), second.getSessionId()); - assertNotEquals(first.getQueryId(), second.getQueryId()); - // one session doc. - assertEquals( - 1, - search( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery("type", SESSION_DOC_TYPE)) - .must(QueryBuilders.termQuery(SESSION_ID, first.getSessionId())))); - // two statement docs has same sessionId. - assertEquals( - 2, - search( - QueryBuilders.boolQuery() - .must(QueryBuilders.termQuery("type", STATEMENT_DOC_TYPE)) - .must(QueryBuilders.termQuery(SESSION_ID, first.getSessionId())))); - - Optional firstModel = - getStatement(stateStore, DATASOURCE).apply(first.getQueryId()); - assertTrue(firstModel.isPresent()); - assertEquals(StatementState.WAITING, firstModel.get().getStatementState()); - assertEquals(first.getQueryId(), firstModel.get().getStatementId().getId()); - assertEquals(first.getQueryId(), firstModel.get().getQueryId()); - Optional secondModel = - getStatement(stateStore, DATASOURCE).apply(second.getQueryId()); - assertEquals(StatementState.WAITING, secondModel.get().getStatementState()); - assertEquals(second.getQueryId(), secondModel.get().getStatementId().getId()); - assertEquals(second.getQueryId(), secondModel.get().getQueryId()); - } - - @Disabled("batch query is unsupported") - public void batchQueryHasTimeout() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - enableSession(false); - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - - assertEquals(120L, (long) emrsClient.getJobRequest().executionTimeout()); - } - - @Test - public void interactiveQueryNoTimeout() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertEquals(0L, (long) emrsClient.getJobRequest().executionTimeout()); - } - - @Ignore( - "flaky test, java.lang.IllegalArgumentException: Right now only AES/GCM/NoPadding is" - + " supported") - @Test - public void datasourceWithBasicAuth() { - Map properties = new HashMap<>(); - properties.put("glue.auth.type", "iam_role"); - properties.put( - "glue.auth.role_arn", "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole"); - properties.put("glue.indexstore.opensearch.uri", "http://localhost:9200"); - properties.put("glue.indexstore.opensearch.auth", "basicauth"); - properties.put("glue.indexstore.opensearch.auth.username", "username"); - properties.put("glue.indexstore.opensearch.auth.password", "password"); - - dataSourceService.createDataSource( - new DataSourceMetadata( - "mybasicauth", - StringUtils.EMPTY, - DataSourceType.S3GLUE, - ImmutableList.of(), - properties, - null)); - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", "mybasicauth", LangType.SQL, 
null)); - String params = emrsClient.getJobRequest().getSparkSubmitParams(); - assertTrue(params.contains(String.format("--conf spark.datasource.flint.auth=basic"))); - assertTrue( - params.contains(String.format("--conf spark.datasource.flint.auth.username=username"))); - assertTrue( - params.contains(String.format("--conf spark.datasource.flint.auth.password=password"))); - } - - @Test - public void withSessionCreateAsyncQueryFailed() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - // 1. create async query. - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("myselect 1", DATASOURCE, LangType.SQL, null)); - assertNotNull(response.getSessionId()); - Optional statementModel = - getStatement(stateStore, DATASOURCE).apply(response.getQueryId()); - assertTrue(statementModel.isPresent()); - assertEquals(StatementState.WAITING, statementModel.get().getStatementState()); - - // 2. fetch async query result. not result write to DEFAULT_RESULT_INDEX yet. - // mock failed statement. - StatementModel submitted = statementModel.get(); - StatementModel mocked = - StatementModel.builder() - .version("1.0") - .statementState(submitted.getStatementState()) - .statementId(submitted.getStatementId()) - .sessionId(submitted.getSessionId()) - .applicationId(submitted.getApplicationId()) - .jobId(submitted.getJobId()) - .langType(submitted.getLangType()) - .datasourceName(submitted.getDatasourceName()) - .query(submitted.getQuery()) - .queryId(submitted.getQueryId()) - .submitTime(submitted.getSubmitTime()) - .error("mock error") - .seqNo(submitted.getSeqNo()) - .primaryTerm(submitted.getPrimaryTerm()) - .build(); - updateStatementState(stateStore, DATASOURCE).apply(mocked, StatementState.FAILED); - - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals(StatementState.FAILED.getState(), asyncQueryResults.getStatus()); - assertEquals("mock error", asyncQueryResults.getError()); - } - - // https://github.com/opensearch-project/sql/issues/2344 - @Test - public void createSessionMoreThanLimitFailed() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - // only allow one session in domain. - setSessionLimit(1); - - // 1. create async query. - CreateAsyncQueryResponse first = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertNotNull(first.getSessionId()); - setSessionState(first.getSessionId(), SessionState.RUNNING); - - // 2. create async query without session. 
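A minimal sketch of the guard this next assertion exercises, assuming hypothetical helper names (sessionLimit, activeSessionCount) rather than the actual DefaultLeaseManager internals:

    // Hedged sketch only; helper names are assumptions, not the real lease-manager API.
    void borrow(LeaseRequest request) {
      long limit = sessionLimit();                   // backed by SPARK_EXECUTION_SESSION_LIMIT_SETTING
      long active = activeSessionCount(stateStore);  // active session docs in the request index
      if (active >= limit) {
        throw new ConcurrencyLimitExceededException(
            "domain concurrent active session can not exceed " + limit);
      }
    }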
- ConcurrencyLimitExceededException exception = - assertThrows( - ConcurrencyLimitExceededException.class, - () -> - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null))); - assertEquals("domain concurrent active session can not exceed 1", exception.getMessage()); - } - - // https://github.com/opensearch-project/sql/issues/2360 - @Test - public void recreateSessionIfNotReady() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - // 1. create async query. - CreateAsyncQueryResponse first = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertNotNull(first.getSessionId()); - - // set sessionState to FAIL - setSessionState(first.getSessionId(), SessionState.FAIL); - - // 2. reuse session id - CreateAsyncQueryResponse second = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest( - "select 1", DATASOURCE, LangType.SQL, first.getSessionId())); - - assertNotEquals(first.getSessionId(), second.getSessionId()); - - // set sessionState to FAIL - setSessionState(second.getSessionId(), SessionState.DEAD); - - // 3. reuse session id - CreateAsyncQueryResponse third = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest( - "select 1", DATASOURCE, LangType.SQL, second.getSessionId())); - assertNotEquals(second.getSessionId(), third.getSessionId()); - } - - @Test - public void submitQueryInInvalidSessionWillCreateNewSession() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - // 1. 
create async query with invalid sessionId - SessionId invalidSessionId = SessionId.newSessionId(DATASOURCE); - CreateAsyncQueryResponse asyncQuery = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest( - "select 1", DATASOURCE, LangType.SQL, invalidSessionId.getSessionId())); - assertNotNull(asyncQuery.getSessionId()); - assertNotEquals(invalidSessionId.getSessionId(), asyncQuery.getSessionId()); - } - - @Test - public void datasourceNameIncludeUppercase() { - dataSourceService.createDataSource( - new DataSourceMetadata( - "TESTS3", - StringUtils.EMPTY, - DataSourceType.S3GLUE, - ImmutableList.of(), - ImmutableMap.of( - "glue.auth.type", - "iam_role", - "glue.auth.role_arn", - "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole", - "glue.indexstore.opensearch.uri", - "http://localhost:9200", - "glue.indexstore.opensearch.auth", - "noauth"), - null)); - - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // enable session - enableSession(true); - - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", "TESTS3", LangType.SQL, null)); - String params = emrsClient.getJobRequest().getSparkSubmitParams(); - - assertNotNull(response.getSessionId()); - assertTrue( - params.contains( - "--conf spark.sql.catalog.TESTS3=org.opensearch.sql.FlintDelegatingSessionCatalog")); - } - - @Test - public void concurrentSessionLimitIsDomainLevel() { - LocalEMRSClient emrsClient = new LocalEMRSClient(); - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // only allow one session in domain. - setSessionLimit(1); - - // 1. create async query. - CreateAsyncQueryResponse first = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DATASOURCE, LangType.SQL, null)); - assertNotNull(first.getSessionId()); - setSessionState(first.getSessionId(), SessionState.RUNNING); - - // 2. create async query without session. 
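// Note the datasource switch in the step below: the first session was created against
// DATASOURCE ("mys3") while the second request targets DSOTHER ("mytest") and is still
// rejected, i.e. the session cap is enforced domain-wide, not per datasource.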
- ConcurrencyLimitExceededException exception = - assertThrows( - ConcurrencyLimitExceededException.class, - () -> - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest("select 1", DSOTHER, LangType.SQL, null))); - assertEquals("domain concurrent active session can not exceed 1", exception.getMessage()); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java index efb965e9f3..01bccd9030 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceImplTest.java @@ -11,7 +11,6 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; -import static org.opensearch.sql.spark.asyncquery.OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest.DS_NAME; import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME; @@ -30,7 +29,6 @@ import org.mockito.junit.jupiter.MockitoExtension; import org.opensearch.sql.spark.asyncquery.exceptions.AsyncQueryNotFoundException; import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; import org.opensearch.sql.spark.config.SparkExecutionEngineConfigSupplier; @@ -49,7 +47,6 @@ public class AsyncQueryExecutorServiceImplTest { private AsyncQueryExecutorService jobExecutorService; @Mock private SparkExecutionEngineConfigSupplier sparkExecutionEngineConfigSupplier; - private final AsyncQueryId QUERY_ID = AsyncQueryId.newAsyncQueryId(DS_NAME); @BeforeEach void setUp() { @@ -81,12 +78,11 @@ void testCreateAsyncQuery() { LangType.SQL, "arn:aws:iam::270824043731:role/emr-job-execution-role", TEST_CLUSTER_NAME))) - .thenReturn(new DispatchQueryResponse(QUERY_ID, EMR_JOB_ID, null, null)); + .thenReturn(new DispatchQueryResponse(EMR_JOB_ID, false, null)); CreateAsyncQueryResponse createAsyncQueryResponse = jobExecutorService.createAsyncQuery(createAsyncQueryRequest); verify(asyncQueryJobMetadataStorageService, times(1)) - .storeJobMetadata( - new AsyncQueryJobMetadata(QUERY_ID, "00fd775baqpu4g0p", EMR_JOB_ID, null)); + .storeJobMetadata(new AsyncQueryJobMetadata("00fd775baqpu4g0p", EMR_JOB_ID, null)); verify(sparkExecutionEngineConfigSupplier, times(1)).getSparkExecutionEngineConfig(); verify(sparkQueryDispatcher, times(1)) .dispatch( @@ -97,7 +93,7 @@ void testCreateAsyncQuery() { LangType.SQL, "arn:aws:iam::270824043731:role/emr-job-execution-role", TEST_CLUSTER_NAME)); - Assertions.assertEquals(QUERY_ID.getId(), createAsyncQueryResponse.getQueryId()); + Assertions.assertEquals(EMR_JOB_ID, createAsyncQueryResponse.getQueryId()); } @Test @@ -111,7 +107,7 @@ void testCreateAsyncQueryWithExtraSparkSubmitParameter() { "--conf spark.dynamicAllocation.enabled=false", TEST_CLUSTER_NAME)); when(sparkQueryDispatcher.dispatch(any())) - .thenReturn(new DispatchQueryResponse(QUERY_ID, EMR_JOB_ID, null, null)); + .thenReturn(new DispatchQueryResponse(EMR_JOB_ID, false, null)); 
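For readers tracking the signature changes in these hunks: the revert drops the separate AsyncQueryId and goes back to identifying queries by the EMR-S job id. A sketch of the assumed post-revert shapes (the boolean's meaning is inferred from context and is an assumption):

    // Inferred from the hunks above only; not authoritative.
    DispatchQueryResponse response =
        new DispatchQueryResponse(EMR_JOB_ID, false /* assumed drop-index flag */, null);
    AsyncQueryJobMetadata metadata =
        new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null /* result index */);
    // With no separate AsyncQueryId, createAsyncQueryResponse.getQueryId() equals EMR_JOB_ID.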
jobExecutorService.createAsyncQuery( new CreateAsyncQueryRequest( @@ -143,13 +139,11 @@ void testGetAsyncQueryResultsWithJobNotFoundException() { @Test void testGetAsyncQueryResultsWithInProgressJob() { when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID)) - .thenReturn( - Optional.of( - new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null))); + .thenReturn(Optional.of(new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null))); JSONObject jobResult = new JSONObject(); jobResult.put("status", JobRunState.PENDING.toString()); when(sparkQueryDispatcher.getQueryResponse( - new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null))) + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null))) .thenReturn(jobResult); AsyncQueryExecutionResponse asyncQueryExecutionResponse = jobExecutorService.getAsyncQueryResults(EMR_JOB_ID); @@ -163,13 +157,11 @@ void testGetAsyncQueryResultsWithInProgressJob() { @Test void testGetAsyncQueryResultsWithSuccessJob() throws IOException { when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID)) - .thenReturn( - Optional.of( - new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null))); + .thenReturn(Optional.of(new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null))); JSONObject jobResult = new JSONObject(getJson("select_query_response.json")); jobResult.put("status", JobRunState.SUCCESS.toString()); when(sparkQueryDispatcher.getQueryResponse( - new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null))) + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null))) .thenReturn(jobResult); AsyncQueryExecutionResponse asyncQueryExecutionResponse = @@ -216,11 +208,9 @@ void testCancelJobWithJobNotFound() { @Test void testCancelJob() { when(asyncQueryJobMetadataStorageService.getJobMetadata(EMR_JOB_ID)) - .thenReturn( - Optional.of( - new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null))); + .thenReturn(Optional.of(new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null))); when(sparkQueryDispatcher.cancelJob( - new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null))) + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null))) .thenReturn(EMR_JOB_ID); String jobId = jobExecutorService.cancelQuery(EMR_JOB_ID); Assertions.assertEquals(EMR_JOB_ID, jobId); diff --git a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java deleted file mode 100644 index 663e5db852..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/AsyncQueryExecutorServiceSpec.java +++ /dev/null @@ -1,314 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.asyncquery; - -import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.DATASOURCE_URI_HOSTS_DENY_LIST; -import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING; -import static org.opensearch.sql.opensearch.setting.OpenSearchSettings.SPARK_EXECUTION_SESSION_LIMIT_SETTING; -import static org.opensearch.sql.spark.execution.statestore.StateStore.DATASOURCE_TO_REQUEST_INDEX; -import static org.opensearch.sql.spark.execution.statestore.StateStore.getSession; -import static org.opensearch.sql.spark.execution.statestore.StateStore.updateSessionState; - -import 
com.amazonaws.services.emrserverless.model.CancelJobRunResult; -import com.amazonaws.services.emrserverless.model.GetJobRunResult; -import com.amazonaws.services.emrserverless.model.JobRun; -import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.io.Resources; -import java.net.URL; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Optional; -import lombok.Getter; -import lombok.SneakyThrows; -import org.apache.commons.lang3.StringUtils; -import org.junit.After; -import org.junit.Before; -import org.opensearch.action.admin.indices.create.CreateIndexRequest; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.client.node.NodeClient; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.index.query.QueryBuilder; -import org.opensearch.plugins.Plugin; -import org.opensearch.search.builder.SearchSourceBuilder; -import org.opensearch.sql.datasource.model.DataSourceMetadata; -import org.opensearch.sql.datasource.model.DataSourceType; -import org.opensearch.sql.datasources.auth.DataSourceUserAuthorizationHelperImpl; -import org.opensearch.sql.datasources.encryptor.EncryptorImpl; -import org.opensearch.sql.datasources.glue.GlueDataSourceFactory; -import org.opensearch.sql.datasources.service.DataSourceMetadataStorage; -import org.opensearch.sql.datasources.service.DataSourceServiceImpl; -import org.opensearch.sql.datasources.storage.OpenSearchDataSourceMetadataStorage; -import org.opensearch.sql.legacy.esdomain.LocalClusterState; -import org.opensearch.sql.legacy.metrics.Metrics; -import org.opensearch.sql.opensearch.setting.OpenSearchSettings; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.client.StartJobRequest; -import org.opensearch.sql.spark.config.SparkExecutionEngineConfig; -import org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher; -import org.opensearch.sql.spark.execution.session.SessionManager; -import org.opensearch.sql.spark.execution.session.SessionModel; -import org.opensearch.sql.spark.execution.session.SessionState; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexMetadataReaderImpl; -import org.opensearch.sql.spark.leasemanager.DefaultLeaseManager; -import org.opensearch.sql.spark.response.JobExecutionResponseReader; -import org.opensearch.sql.storage.DataSourceFactory; -import org.opensearch.test.OpenSearchIntegTestCase; - -public class AsyncQueryExecutorServiceSpec extends OpenSearchIntegTestCase { - public static final String DATASOURCE = "mys3"; - public static final String DSOTHER = "mytest"; - - protected ClusterService clusterService; - protected org.opensearch.sql.common.setting.Settings pluginSettings; - protected NodeClient client; - protected DataSourceServiceImpl dataSourceService; - protected StateStore stateStore; - protected ClusterSettings clusterSettings; - - @Override - protected Collection> nodePlugins() { - return Arrays.asList(TestSettingPlugin.class); - } - - public static class TestSettingPlugin extends Plugin { 
- @Override - public List> getSettings() { - return OpenSearchSettings.pluginSettings(); - } - } - - @Before - public void setup() { - clusterService = clusterService(); - clusterSettings = clusterService.getClusterSettings(); - pluginSettings = new OpenSearchSettings(clusterSettings); - LocalClusterState.state().setClusterService(clusterService); - LocalClusterState.state().setPluginSettings((OpenSearchSettings) pluginSettings); - Metrics.getInstance().registerDefaultMetrics(); - client = (NodeClient) cluster().client(); - client - .admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder() - .putList(DATASOURCE_URI_HOSTS_DENY_LIST.getKey(), Collections.emptyList()) - .build()) - .get(); - dataSourceService = createDataSourceService(); - DataSourceMetadata dm = - new DataSourceMetadata( - DATASOURCE, - StringUtils.EMPTY, - DataSourceType.S3GLUE, - ImmutableList.of(), - ImmutableMap.of( - "glue.auth.type", - "iam_role", - "glue.auth.role_arn", - "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole", - "glue.indexstore.opensearch.uri", - "http://localhost:9200", - "glue.indexstore.opensearch.auth", - "noauth"), - null); - dataSourceService.createDataSource(dm); - DataSourceMetadata otherDm = - new DataSourceMetadata( - DSOTHER, - StringUtils.EMPTY, - DataSourceType.S3GLUE, - ImmutableList.of(), - ImmutableMap.of( - "glue.auth.type", - "iam_role", - "glue.auth.role_arn", - "arn:aws:iam::924196221507:role/FlintOpensearchServiceRole", - "glue.indexstore.opensearch.uri", - "http://localhost:9200", - "glue.indexstore.opensearch.auth", - "noauth"), - null); - dataSourceService.createDataSource(otherDm); - stateStore = new StateStore(client, clusterService); - createIndexWithMappings(dm.getResultIndex(), loadResultIndexMappings()); - createIndexWithMappings(otherDm.getResultIndex(), loadResultIndexMappings()); - } - - @After - public void clean() { - client - .admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().putNull(SPARK_EXECUTION_SESSION_LIMIT_SETTING.getKey()).build()) - .get(); - client - .admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().putNull(SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING.getKey()).build()) - .get(); - client - .admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().putNull(DATASOURCE_URI_HOSTS_DENY_LIST.getKey()).build()) - .get(); - } - - private DataSourceServiceImpl createDataSourceService() { - String masterKey = "a57d991d9b573f75b9bba1df"; - DataSourceMetadataStorage dataSourceMetadataStorage = - new OpenSearchDataSourceMetadataStorage( - client, clusterService, new EncryptorImpl(masterKey)); - return new DataSourceServiceImpl( - new ImmutableSet.Builder() - .add(new GlueDataSourceFactory(pluginSettings)) - .build(), - dataSourceMetadataStorage, - meta -> {}); - } - - protected AsyncQueryExecutorService createAsyncQueryExecutorService( - EMRServerlessClient emrServerlessClient) { - StateStore stateStore = new StateStore(client, clusterService); - AsyncQueryJobMetadataStorageService asyncQueryJobMetadataStorageService = - new OpensearchAsyncQueryJobMetadataStorageService(stateStore); - JobExecutionResponseReader jobExecutionResponseReader = new JobExecutionResponseReader(client); - SparkQueryDispatcher sparkQueryDispatcher = - new SparkQueryDispatcher( - emrServerlessClient, - this.dataSourceService, - new DataSourceUserAuthorizationHelperImpl(client), - jobExecutionResponseReader, - new 
FlintIndexMetadataReaderImpl(client), - client, - new SessionManager(stateStore, emrServerlessClient, pluginSettings), - new DefaultLeaseManager(pluginSettings, stateStore), - stateStore); - return new AsyncQueryExecutorServiceImpl( - asyncQueryJobMetadataStorageService, - sparkQueryDispatcher, - this::sparkExecutionEngineConfig); - } - - public static class LocalEMRSClient implements EMRServerlessClient { - - private int startJobRunCalled = 0; - private int cancelJobRunCalled = 0; - private int getJobResult = 0; - - @Getter private StartJobRequest jobRequest; - - @Override - public String startJobRun(StartJobRequest startJobRequest) { - jobRequest = startJobRequest; - startJobRunCalled++; - return "jobId"; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - getJobResult++; - JobRun jobRun = new JobRun(); - jobRun.setState("RUNNING"); - return new GetJobRunResult().withJobRun(jobRun); - } - - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - cancelJobRunCalled++; - return new CancelJobRunResult().withJobRunId(jobId); - } - - public void startJobRunCalled(int expectedTimes) { - assertEquals(expectedTimes, startJobRunCalled); - } - - public void cancelJobRunCalled(int expectedTimes) { - assertEquals(expectedTimes, cancelJobRunCalled); - } - - public void getJobRunResultCalled(int expectedTimes) { - assertEquals(expectedTimes, getJobResult); - } - } - - public SparkExecutionEngineConfig sparkExecutionEngineConfig() { - return new SparkExecutionEngineConfig("appId", "us-west-2", "roleArn", "", "myCluster"); - } - - public void enableSession(boolean enabled) { - // doNothing - } - - public void setSessionLimit(long limit) { - client - .admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder().put(SPARK_EXECUTION_SESSION_LIMIT_SETTING.getKey(), limit).build()) - .get(); - } - - public void setConcurrentRefreshJob(long limit) { - client - .admin() - .cluster() - .prepareUpdateSettings() - .setTransientSettings( - Settings.builder() - .put(SPARK_EXECUTION_REFRESH_JOB_LIMIT_SETTING.getKey(), limit) - .build()) - .get(); - } - - int search(QueryBuilder query) { - SearchRequest searchRequest = new SearchRequest(); - searchRequest.indices(DATASOURCE_TO_REQUEST_INDEX.apply(DATASOURCE)); - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(query); - searchRequest.source(searchSourceBuilder); - SearchResponse searchResponse = client.search(searchRequest).actionGet(); - - return searchResponse.getHits().getHits().length; - } - - void setSessionState(String sessionId, SessionState sessionState) { - Optional model = getSession(stateStore, DATASOURCE).apply(sessionId); - SessionModel updated = - updateSessionState(stateStore, DATASOURCE).apply(model.get(), sessionState); - assertEquals(sessionState, updated.getSessionState()); - } - - @SneakyThrows - public String loadResultIndexMappings() { - URL url = Resources.getResource("query_execution_result_mapping.json"); - return Resources.toString(url, Charsets.UTF_8); - } - - public void createIndexWithMappings(String indexName, String metadata) { - CreateIndexRequest request = new CreateIndexRequest(indexName); - request.mapping(metadata, XContentType.JSON); - client().admin().indices().create(request).actionGet(); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java 
deleted file mode 100644 index 45a83b3296..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/IndexQuerySpecTest.java +++ /dev/null @@ -1,793 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.asyncquery; - -import com.amazonaws.services.emrserverless.model.CancelJobRunResult; -import com.amazonaws.services.emrserverless.model.GetJobRunResult; -import com.amazonaws.services.emrserverless.model.JobRun; -import com.google.common.base.Charsets; -import com.google.common.collect.ImmutableList; -import com.google.common.io.Resources; -import java.net.URL; -import java.util.Optional; -import lombok.RequiredArgsConstructor; -import lombok.SneakyThrows; -import org.junit.Assert; -import org.junit.Test; -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryExecutionResponse; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexState; -import org.opensearch.sql.spark.flint.FlintIndexStateModel; -import org.opensearch.sql.spark.flint.FlintIndexType; -import org.opensearch.sql.spark.leasemanager.ConcurrencyLimitExceededException; -import org.opensearch.sql.spark.rest.model.CreateAsyncQueryRequest; -import org.opensearch.sql.spark.rest.model.CreateAsyncQueryResponse; -import org.opensearch.sql.spark.rest.model.LangType; - -public class IndexQuerySpecTest extends AsyncQueryExecutorServiceSpec { - - public final FlintDatasetMock LEGACY_SKIPPING = - new FlintDatasetMock( - "DROP SKIPPING INDEX ON mys3.default.http_logs", - FlintIndexType.SKIPPING, - "flint_mys3_default_http_logs_skipping_index") - .isLegacy(true); - public final FlintDatasetMock LEGACY_COVERING = - new FlintDatasetMock( - "DROP INDEX covering ON mys3.default.http_logs", - FlintIndexType.COVERING, - "flint_mys3_default_http_logs_covering_index") - .isLegacy(true); - public final FlintDatasetMock LEGACY_MV = - new FlintDatasetMock( - "DROP MATERIALIZED VIEW mv", FlintIndexType.MATERIALIZED_VIEW, "flint_mv") - .isLegacy(true); - - public final FlintDatasetMock SKIPPING = - new FlintDatasetMock( - "DROP SKIPPING INDEX ON mys3.default.http_logs", - FlintIndexType.SKIPPING, - "flint_mys3_default_http_logs_skipping_index") - .latestId("skippingindexid"); - public final FlintDatasetMock COVERING = - new FlintDatasetMock( - "DROP INDEX covering ON mys3.default.http_logs", - FlintIndexType.COVERING, - "flint_mys3_default_http_logs_covering_index") - .latestId("coveringid"); - public final FlintDatasetMock MV = - new FlintDatasetMock( - "DROP MATERIALIZED VIEW mv", FlintIndexType.MATERIALIZED_VIEW, "flint_mv") - .latestId("mvid"); - - /** - * Happy case. expectation is - * - *

    (1) Drop Index response is SUCCESS - */ - @Test - public void legacyBasicDropAndFetchAndCancel() { - ImmutableList.of(LEGACY_SKIPPING, LEGACY_COVERING) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); - } - }; - - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - - // 1.drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - assertNotNull(response.getQueryId()); - assertTrue(clusterService.state().routingTable().hasIndex(mockDS.indexName)); - - // 2.fetch result - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("SUCCESS", asyncQueryResults.getStatus()); - assertNull(asyncQueryResults.getError()); - emrsClient.cancelJobRunCalled(1); - - // 3.cancel - IllegalArgumentException exception = - assertThrows( - IllegalArgumentException.class, - () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); - assertEquals("can't cancel index DML query", exception.getMessage()); - }); - } - - /** - * Legacy Test, without state index support. Not EMR-S job running. expectation is - * - *

    (1) Drop Index response is SUCCESS - */ - @Test - public void legacyDropIndexNoJobRunning() { - ImmutableList.of(LEGACY_SKIPPING, LEGACY_COVERING, LEGACY_MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - throw new IllegalArgumentException("Job run is not in a cancellable state"); - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - - // 1.drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2.fetch result. - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("SUCCESS", asyncQueryResults.getStatus()); - assertNull(asyncQueryResults.getError()); - }); - } - - /** - * Legacy Test, without state index support. Cancel EMR-S job call timeout. expectation is - * - *

    (1) Drop Index response is FAILED - */ - @Test - public void legacyDropIndexCancelJobTimeout() { - ImmutableList.of(LEGACY_SKIPPING, LEGACY_COVERING, LEGACY_MV) - .forEach( - mockDS -> { - // Mock EMR-S always return running. - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - return new GetJobRunResult().withJobRun(new JobRun().withState("Running")); - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("FAILED", asyncQueryResults.getStatus()); - assertEquals("cancel job timeout", asyncQueryResults.getError()); - }); - } - - /** - * Happy case. expectation is - * - *

    (1) Drop Index response is SUCCESS (2) change index state to: DELETED - */ - @Test - public void dropAndFetchAndCancel() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); - } - }; - - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.refreshing(); - - // 1.drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - assertNotNull(response.getQueryId()); - assertTrue(clusterService.state().routingTable().hasIndex(mockDS.indexName)); - - // assert state is DELETED - flintIndexJob.assertState(FlintIndexState.DELETED); - - // 2.fetch result - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("SUCCESS", asyncQueryResults.getStatus()); - assertNull(asyncQueryResults.getError()); - emrsClient.cancelJobRunCalled(1); - - // 3.cancel - IllegalArgumentException exception = - assertThrows( - IllegalArgumentException.class, - () -> asyncQueryExecutorService.cancelQuery(response.getQueryId())); - assertEquals("can't cancel index DML query", exception.getMessage()); - }); - } - - /** - * Cancel EMR-S job, but not job running. expectation is - * - *

    (1) Drop Index response is SUCCESS (2) change index state to: DELETED - */ - @Test - public void dropIndexNoJobRunning() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - // Mock EMR-S job is not running - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - throw new IllegalArgumentException("Job run is not in a cancellable state"); - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state in refresh state. - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.refreshing(); - - // 1.drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2.fetch result. - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("SUCCESS", asyncQueryResults.getStatus()); - assertNull(asyncQueryResults.getError()); - - flintIndexJob.assertState(FlintIndexState.DELETED); - }); - } - - /** - * Cancel EMR-S job call timeout, expectation is - * - *

    (1) Drop Index response is failed, (2) change index state to: CANCELLING - */ - @Test - public void dropIndexCancelJobTimeout() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - // Mock EMR-S always return running. - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - return new GetJobRunResult().withJobRun(new JobRun().withState("Running")); - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.refreshing(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("FAILED", asyncQueryResults.getStatus()); - assertEquals("cancel job timeout", asyncQueryResults.getError()); - - flintIndexJob.assertState(FlintIndexState.CANCELLING); - }); - } - - /** - * Drop Index operation is retryable, expectation is - * - *

    (1) call EMR-S (2) change index state to: DELETED - */ - @Test - public void dropIndexWithIndexInCancellingState() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - return new GetJobRunResult().withJobRun(new JobRun().withState("Cancelled")); - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.cancelling(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - assertEquals( - "SUCCESS", - asyncQueryExecutorService - .getAsyncQueryResults(response.getQueryId()) - .getStatus()); - - flintIndexJob.assertState(FlintIndexState.DELETED); - }); - } - - /** - * No Job running, expectation is - * - *

    (1) not call EMR-S (2) change index state to: DELETED - */ - @Test - public void dropIndexWithIndexInActiveState() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - Assert.fail("should not call cancelJobRun"); - return null; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - Assert.fail("should not call getJobRunResult"); - return null; - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.active(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - assertEquals( - "SUCCESS", - asyncQueryExecutorService - .getAsyncQueryResults(response.getQueryId()) - .getStatus()); - - flintIndexJob.assertState(FlintIndexState.DELETED); - }); - } - - @Test - public void dropIndexWithIndexInDeletingState() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - Assert.fail("should not call cancelJobRun"); - return null; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - Assert.fail("should not call getJobRunResult"); - return null; - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.deleted(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - assertEquals( - "SUCCESS", - asyncQueryExecutorService - .getAsyncQueryResults(response.getQueryId()) - .getStatus()); - - flintIndexJob.assertState(FlintIndexState.DELETED); - }); - } - - @Test - public void dropIndexWithIndexInDeletedState() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - Assert.fail("should not call cancelJobRun"); - return null; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - Assert.fail("should not call getJobRunResult"); - return null; - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - flintIndexJob.deleting(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. 
fetch result - assertEquals( - "SUCCESS", - asyncQueryExecutorService - .getAsyncQueryResults(response.getQueryId()) - .getStatus()); - - flintIndexJob.assertState(FlintIndexState.DELETED); - }); - } - - /** - * No Job running, expectation is - * - *

    (1) not call EMR-S (2) change index state to: DELETED - */ - @Test - public void dropIndexWithIndexInEmptyState() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - Assert.fail("should not call cancelJobRun"); - return null; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - Assert.fail("should not call getJobRunResult"); - return null; - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(mockDS.latestId); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - assertEquals( - "SUCCESS", - asyncQueryExecutorService - .getAsyncQueryResults(response.getQueryId()) - .getStatus()); - - flintIndexJob.assertState(FlintIndexState.DELETED); - }); - } - - /** - * No Job running, expectation is - * - *

    (1) not call EMR-S (2) change index state to: DELETED - */ - @Test - public void edgeCaseNoIndexStateDoc() { - ImmutableList.of(SKIPPING, COVERING, MV) - .forEach( - mockDS -> { - LocalEMRSClient emrsClient = - new LocalEMRSClient() { - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - Assert.fail("should not call cancelJobRun"); - return null; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - Assert.fail("should not call getJobRunResult"); - return null; - } - }; - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(emrsClient); - - // Mock flint index - mockDS.createIndex(); - - // 1. drop index - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(mockDS.query, DATASOURCE, LangType.SQL, null)); - - // 2. fetch result - AsyncQueryExecutionResponse asyncQueryResults = - asyncQueryExecutorService.getAsyncQueryResults(response.getQueryId()); - assertEquals("FAILED", asyncQueryResults.getStatus()); - assertTrue(asyncQueryResults.getError().contains("no state found")); - }); - } - - @Test - public void concurrentRefreshJobLimitNotApplied() { - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(new LocalEMRSClient()); - - // Mock flint index - COVERING.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(COVERING.latestId); - flintIndexJob.refreshing(); - - // query with auto refresh - String query = - "CREATE INDEX covering ON mys3.default.http_logs(l_orderkey, " - + "l_quantity) WITH (auto_refresh = true)"; - CreateAsyncQueryResponse response = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(query, DATASOURCE, LangType.SQL, null)); - assertNull(response.getSessionId()); - } - - @Test - public void concurrentRefreshJobLimitAppliedToDDLWithAuthRefresh() { - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(new LocalEMRSClient()); - - setConcurrentRefreshJob(1); - - // Mock flint index - COVERING.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(COVERING.latestId); - flintIndexJob.refreshing(); - - // query with auto_refresh = true. - String query = - "CREATE INDEX covering ON mys3.default.http_logs(l_orderkey, " - + "l_quantity) WITH (auto_refresh = true)"; - ConcurrencyLimitExceededException exception = - assertThrows( - ConcurrencyLimitExceededException.class, - () -> - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(query, DATASOURCE, LangType.SQL, null))); - assertEquals("domain concurrent refresh job can not exceed 1", exception.getMessage()); - } - - @Test - public void concurrentRefreshJobLimitAppliedToRefresh() { - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(new LocalEMRSClient()); - - setConcurrentRefreshJob(1); - - // Mock flint index - COVERING.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(COVERING.latestId); - flintIndexJob.refreshing(); - - // query with auto_refresh = true. 
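// (Carried-over comment: the line above still says "auto_refresh = true", but this test
// submits a manual REFRESH statement. Taken together, the deleted limit tests assert the
// refresh-job cap applies to auto_refresh DDL and to REFRESH, while the plain CREATE INDEX
// DDL in the following test is exempt.)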
- String query = "REFRESH INDEX covering ON mys3.default.http_logs"; - ConcurrencyLimitExceededException exception = - assertThrows( - ConcurrencyLimitExceededException.class, - () -> - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(query, DATASOURCE, LangType.SQL, null))); - assertEquals("domain concurrent refresh job can not exceed 1", exception.getMessage()); - } - - @Test - public void concurrentRefreshJobLimitNotAppliedToDDL() { - String query = "CREATE INDEX covering ON mys3.default.http_logs(l_orderkey, l_quantity)"; - - AsyncQueryExecutorService asyncQueryExecutorService = - createAsyncQueryExecutorService(new LocalEMRSClient()); - - setConcurrentRefreshJob(1); - - // Mock flint index - COVERING.createIndex(); - // Mock index state - MockFlintSparkJob flintIndexJob = new MockFlintSparkJob(COVERING.latestId); - flintIndexJob.refreshing(); - - CreateAsyncQueryResponse asyncQueryResponse = - asyncQueryExecutorService.createAsyncQuery( - new CreateAsyncQueryRequest(query, DATASOURCE, LangType.SQL, null)); - assertNotNull(asyncQueryResponse.getSessionId()); - } - - public class MockFlintSparkJob { - - private FlintIndexStateModel stateModel; - - public MockFlintSparkJob(String latestId) { - assertNotNull(latestId); - stateModel = - new FlintIndexStateModel( - FlintIndexState.EMPTY, - "mockAppId", - "mockJobId", - latestId, - DATASOURCE, - System.currentTimeMillis(), - "", - SequenceNumbers.UNASSIGNED_SEQ_NO, - SequenceNumbers.UNASSIGNED_PRIMARY_TERM); - stateModel = StateStore.createFlintIndexState(stateStore, DATASOURCE).apply(stateModel); - } - - public void refreshing() { - stateModel = - StateStore.updateFlintIndexState(stateStore, DATASOURCE) - .apply(stateModel, FlintIndexState.REFRESHING); - } - - public void cancelling() { - stateModel = - StateStore.updateFlintIndexState(stateStore, DATASOURCE) - .apply(stateModel, FlintIndexState.CANCELLING); - } - - public void active() { - stateModel = - StateStore.updateFlintIndexState(stateStore, DATASOURCE) - .apply(stateModel, FlintIndexState.ACTIVE); - } - - public void deleting() { - stateModel = - StateStore.updateFlintIndexState(stateStore, DATASOURCE) - .apply(stateModel, FlintIndexState.DELETING); - } - - public void deleted() { - stateModel = - StateStore.updateFlintIndexState(stateStore, DATASOURCE) - .apply(stateModel, FlintIndexState.DELETED); - } - - void assertState(FlintIndexState expected) { - Optional stateModelOpt = - StateStore.getFlintIndexState(stateStore, DATASOURCE).apply(stateModel.getId()); - assertTrue((stateModelOpt.isPresent())); - assertEquals(expected, stateModelOpt.get().getIndexState()); - } - } - - @RequiredArgsConstructor - public class FlintDatasetMock { - private final String query; - private final FlintIndexType indexType; - private final String indexName; - private boolean isLegacy = false; - private String latestId; - - FlintDatasetMock isLegacy(boolean isLegacy) { - this.isLegacy = isLegacy; - return this; - } - - FlintDatasetMock latestId(String latestId) { - this.latestId = latestId; - return this; - } - - public void createIndex() { - String pathPrefix = isLegacy ? 
"flint-index-mappings" : "flint-index-mappings/0.1.1"; - switch (indexType) { - case SKIPPING: - createIndexWithMappings( - indexName, loadMappings(pathPrefix + "/" + "flint_skipping_index.json")); - break; - case COVERING: - createIndexWithMappings( - indexName, loadMappings(pathPrefix + "/" + "flint_covering_index.json")); - break; - case MATERIALIZED_VIEW: - createIndexWithMappings(indexName, loadMappings(pathPrefix + "/" + "flint_mv.json")); - break; - } - } - - @SneakyThrows - public void deleteIndex() { - client().admin().indices().delete(new DeleteIndexRequest().indices(indexName)).get(); - } - } - - @SneakyThrows - public static String loadMappings(String path) { - URL url = Resources.getResource(path); - return Resources.toString(url, Charsets.UTF_8); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest.java b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest.java index cf838db829..7288fd3fc2 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/asyncquery/OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest.java @@ -5,68 +5,242 @@ package org.opensearch.sql.spark.asyncquery; +import static org.opensearch.sql.spark.asyncquery.OpensearchAsyncQueryJobMetadataStorageService.JOB_METADATA_INDEX; import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID; import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; import java.util.Optional; -import org.junit.Before; -import org.junit.Test; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; +import org.apache.lucene.search.TotalHits; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Answers; +import org.mockito.ArgumentMatchers; +import org.mockito.InjectMocks; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.indices.create.CreateIndexResponse; +import org.opensearch.action.index.IndexResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.action.ActionFuture; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.test.OpenSearchIntegTestCase; -public class OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest - extends OpenSearchIntegTestCase { +@ExtendWith(MockitoExtension.class) +public class OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest { - public static final String DS_NAME = "mys3"; - private static final String MOCK_SESSION_ID = "sessionId"; - private static final String MOCK_RESULT_INDEX = "resultIndex"; + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private Client client; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private ClusterService clusterService; + + @Mock(answer = Answers.RETURNS_DEEP_STUBS) + private SearchResponse searchResponse; + + @Mock private ActionFuture 
+  @Mock private ActionFuture<SearchResponse> searchResponseActionFuture;
+  @Mock private ActionFuture<CreateIndexResponse> createIndexResponseActionFuture;
+  @Mock private ActionFuture<IndexResponse> indexResponseActionFuture;
+  @Mock private IndexResponse indexResponse;
+  @Mock private SearchHit searchHit;
+
+  @InjectMocks
   private OpensearchAsyncQueryJobMetadataStorageService opensearchJobMetadataStorageService;
 
-  @Before
-  public void setup() {
-    opensearchJobMetadataStorageService =
-        new OpensearchAsyncQueryJobMetadataStorageService(
-            new StateStore(client(), clusterService()));
+  @Test
+  public void testStoreJobMetadata() {
+
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(Boolean.FALSE);
+    Mockito.when(client.admin().indices().create(ArgumentMatchers.any()))
+        .thenReturn(createIndexResponseActionFuture);
+    Mockito.when(createIndexResponseActionFuture.actionGet())
+        .thenReturn(new CreateIndexResponse(true, true, JOB_METADATA_INDEX));
+    Mockito.when(client.index(ArgumentMatchers.any())).thenReturn(indexResponseActionFuture);
+    Mockito.when(indexResponseActionFuture.actionGet()).thenReturn(indexResponse);
+    Mockito.when(indexResponse.getResult()).thenReturn(DocWriteResponse.Result.CREATED);
+    AsyncQueryJobMetadata asyncQueryJobMetadata =
+        new AsyncQueryJobMetadata(EMR_JOB_ID, EMRS_APPLICATION_ID, null);
+
+    this.opensearchJobMetadataStorageService.storeJobMetadata(asyncQueryJobMetadata);
+
+    Mockito.verify(client.admin().indices(), Mockito.times(1)).create(ArgumentMatchers.any());
+    Mockito.verify(client, Mockito.times(1)).index(ArgumentMatchers.any());
+    Mockito.verify(client.threadPool().getThreadContext(), Mockito.times(2)).stashContext();
   }
 
   @Test
-  public void testStoreJobMetadata() {
-    AsyncQueryJobMetadata expected =
-        new AsyncQueryJobMetadata(
-            AsyncQueryId.newAsyncQueryId(DS_NAME),
-            EMR_JOB_ID,
-            EMRS_APPLICATION_ID,
-            MOCK_RESULT_INDEX);
-
-    opensearchJobMetadataStorageService.storeJobMetadata(expected);
-    Optional<AsyncQueryJobMetadata> actual =
-        opensearchJobMetadataStorageService.getJobMetadata(expected.getQueryId().getId());
-
-    assertTrue(actual.isPresent());
-    assertEquals(expected, actual.get());
-    assertEquals(expected, actual.get());
-    assertNull(actual.get().getSessionId());
+  public void testStoreJobMetadataWithOutCreatingIndex() {
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(Boolean.TRUE);
+    Mockito.when(client.index(ArgumentMatchers.any())).thenReturn(indexResponseActionFuture);
+    Mockito.when(indexResponseActionFuture.actionGet()).thenReturn(indexResponse);
+    Mockito.when(indexResponse.getResult()).thenReturn(DocWriteResponse.Result.CREATED);
+    AsyncQueryJobMetadata asyncQueryJobMetadata =
+        new AsyncQueryJobMetadata(EMR_JOB_ID, EMRS_APPLICATION_ID, null);
+
+    this.opensearchJobMetadataStorageService.storeJobMetadata(asyncQueryJobMetadata);
+
+    Mockito.verify(client.admin().indices(), Mockito.times(0)).create(ArgumentMatchers.any());
+    Mockito.verify(client, Mockito.times(1)).index(ArgumentMatchers.any());
+    Mockito.verify(client.threadPool().getThreadContext(), Mockito.times(1)).stashContext();
+  }
+
+  @Test
+  public void testStoreJobMetadataWithException() {
+
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(Boolean.FALSE);
+    Mockito.when(client.admin().indices().create(ArgumentMatchers.any()))
+        .thenReturn(createIndexResponseActionFuture);
+    Mockito.when(createIndexResponseActionFuture.actionGet())
+        .thenReturn(new CreateIndexResponse(true, true, JOB_METADATA_INDEX));
+    Mockito.when(client.index(ArgumentMatchers.any()))
+        .thenThrow(new RuntimeException("error while indexing"));
+
+    AsyncQueryJobMetadata asyncQueryJobMetadata =
+        new AsyncQueryJobMetadata(EMR_JOB_ID, EMRS_APPLICATION_ID, null);
+    RuntimeException runtimeException =
+        Assertions.assertThrows(
+            RuntimeException.class,
+            () -> this.opensearchJobMetadataStorageService.storeJobMetadata(asyncQueryJobMetadata));
+    Assertions.assertEquals(
+        "java.lang.RuntimeException: error while indexing", runtimeException.getMessage());
+
+    Mockito.verify(client.admin().indices(), Mockito.times(1)).create(ArgumentMatchers.any());
+    Mockito.verify(client, Mockito.times(1)).index(ArgumentMatchers.any());
+    Mockito.verify(client.threadPool().getThreadContext(), Mockito.times(2)).stashContext();
+  }
+
+  @Test
+  public void testStoreJobMetadataWithIndexCreationFailed() {
+
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(Boolean.FALSE);
+    Mockito.when(client.admin().indices().create(ArgumentMatchers.any()))
+        .thenReturn(createIndexResponseActionFuture);
+    Mockito.when(createIndexResponseActionFuture.actionGet())
+        .thenReturn(new CreateIndexResponse(false, false, JOB_METADATA_INDEX));
+
+    AsyncQueryJobMetadata asyncQueryJobMetadata =
+        new AsyncQueryJobMetadata(EMR_JOB_ID, EMRS_APPLICATION_ID, null);
+    RuntimeException runtimeException =
+        Assertions.assertThrows(
+            RuntimeException.class,
+            () -> this.opensearchJobMetadataStorageService.storeJobMetadata(asyncQueryJobMetadata));
+    Assertions.assertEquals(
+        "Internal server error while creating.ql-job-metadata index:: "
+            + "Index creation is not acknowledged.",
+        runtimeException.getMessage());
+
+    Mockito.verify(client.admin().indices(), Mockito.times(1)).create(ArgumentMatchers.any());
+    Mockito.verify(client.threadPool().getThreadContext(), Mockito.times(1)).stashContext();
+  }
+
+  @Test
+  public void testStoreJobMetadataFailedWithNotFoundResponse() {
+
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(Boolean.FALSE);
+    Mockito.when(client.admin().indices().create(ArgumentMatchers.any()))
+        .thenReturn(createIndexResponseActionFuture);
+    Mockito.when(createIndexResponseActionFuture.actionGet())
+        .thenReturn(new CreateIndexResponse(true, true, JOB_METADATA_INDEX));
+    Mockito.when(client.index(ArgumentMatchers.any())).thenReturn(indexResponseActionFuture);
+    Mockito.when(indexResponseActionFuture.actionGet()).thenReturn(indexResponse);
+    Mockito.when(indexResponse.getResult()).thenReturn(DocWriteResponse.Result.NOT_FOUND);
+
+    AsyncQueryJobMetadata asyncQueryJobMetadata =
+        new AsyncQueryJobMetadata(EMR_JOB_ID, EMRS_APPLICATION_ID, null);
+    RuntimeException runtimeException =
+        Assertions.assertThrows(
+            RuntimeException.class,
+            () -> this.opensearchJobMetadataStorageService.storeJobMetadata(asyncQueryJobMetadata));
+    Assertions.assertEquals(
+        "Saving job metadata information failed with result : not_found",
+        runtimeException.getMessage());
+
+    Mockito.verify(client.admin().indices(), Mockito.times(1)).create(ArgumentMatchers.any());
+    Mockito.verify(client, Mockito.times(1)).index(ArgumentMatchers.any());
+    Mockito.verify(client.threadPool().getThreadContext(), Mockito.times(2)).stashContext();
+  }
+
+  @Test
+  public void testGetJobMetadata() {
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(true);
+    Mockito.when(client.search(ArgumentMatchers.any())).thenReturn(searchResponseActionFuture);
+    Mockito.when(searchResponseActionFuture.actionGet()).thenReturn(searchResponse);
+    Mockito.when(searchResponse.status()).thenReturn(RestStatus.OK);
+    Mockito.when(searchResponse.getHits())
+        .thenReturn(
+            new SearchHits(
+                new SearchHit[] {searchHit}, new TotalHits(21, TotalHits.Relation.EQUAL_TO), 1.0F));
+    AsyncQueryJobMetadata asyncQueryJobMetadata =
+        new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null);
+    Mockito.when(searchHit.getSourceAsString()).thenReturn(asyncQueryJobMetadata.toString());
+
+    Optional<AsyncQueryJobMetadata> jobMetadataOptional =
+        opensearchJobMetadataStorageService.getJobMetadata(EMR_JOB_ID);
+    Assertions.assertTrue(jobMetadataOptional.isPresent());
+    Assertions.assertEquals(EMR_JOB_ID, jobMetadataOptional.get().getJobId());
+    Assertions.assertEquals(EMRS_APPLICATION_ID, jobMetadataOptional.get().getApplicationId());
+  }
+
+  @Test
+  public void testGetJobMetadataWith404SearchResponse() {
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(true);
+    Mockito.when(client.search(ArgumentMatchers.any())).thenReturn(searchResponseActionFuture);
+    Mockito.when(searchResponseActionFuture.actionGet()).thenReturn(searchResponse);
+    Mockito.when(searchResponse.status()).thenReturn(RestStatus.NOT_FOUND);
+
+    RuntimeException runtimeException =
+        Assertions.assertThrows(
+            RuntimeException.class,
+            () -> opensearchJobMetadataStorageService.getJobMetadata(EMR_JOB_ID));
+    Assertions.assertEquals(
+        "Fetching job metadata information failed with status : NOT_FOUND",
+        runtimeException.getMessage());
   }
 
   @Test
-  public void testStoreJobMetadataWithResultExtraData() {
-    AsyncQueryJobMetadata expected =
-        new AsyncQueryJobMetadata(
-            AsyncQueryId.newAsyncQueryId(DS_NAME),
-            EMR_JOB_ID,
-            EMRS_APPLICATION_ID,
-            MOCK_RESULT_INDEX,
-            MOCK_SESSION_ID);
-
-    opensearchJobMetadataStorageService.storeJobMetadata(expected);
-    Optional<AsyncQueryJobMetadata> actual =
-        opensearchJobMetadataStorageService.getJobMetadata(expected.getQueryId().getId());
-
-    assertTrue(actual.isPresent());
-    assertEquals(expected, actual.get());
-    assertEquals("resultIndex", actual.get().getResultIndex());
-    assertEquals(MOCK_SESSION_ID, actual.get().getSessionId());
+  public void testGetJobMetadataWithParsingFailed() {
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(true);
+    Mockito.when(client.search(ArgumentMatchers.any())).thenReturn(searchResponseActionFuture);
+    Mockito.when(searchResponseActionFuture.actionGet()).thenReturn(searchResponse);
+    Mockito.when(searchResponse.status()).thenReturn(RestStatus.OK);
+    Mockito.when(searchResponse.getHits())
+        .thenReturn(
+            new SearchHits(
+                new SearchHit[] {searchHit}, new TotalHits(21, TotalHits.Relation.EQUAL_TO), 1.0F));
+    Mockito.when(searchHit.getSourceAsString()).thenReturn("..tesJOBs");
+
+    Assertions.assertThrows(
+        RuntimeException.class,
+        () -> opensearchJobMetadataStorageService.getJobMetadata(EMR_JOB_ID));
+  }
+
+  @Test
+  public void testGetJobMetadataWithNoIndex() {
+    Mockito.when(clusterService.state().routingTable().hasIndex(JOB_METADATA_INDEX))
+        .thenReturn(Boolean.FALSE);
+    Mockito.when(client.admin().indices().create(ArgumentMatchers.any()))
+        .thenReturn(createIndexResponseActionFuture);
+    Mockito.when(createIndexResponseActionFuture.actionGet())
+        .thenReturn(new CreateIndexResponse(true, true, JOB_METADATA_INDEX));
+    Mockito.when(client.index(ArgumentMatchers.any())).thenReturn(indexResponseActionFuture);
+
+    Optional<AsyncQueryJobMetadata> jobMetadata =
+        opensearchJobMetadataStorageService.getJobMetadata(EMR_JOB_ID);
+
+    Assertions.assertFalse(jobMetadata.isPresent());
   }
 }
diff --git a/spark/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java b/spark/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java
index 8129c3b0e0..f874b351a9 100644
--- a/spark/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java
+++ b/spark/src/test/java/org/opensearch/sql/spark/client/EmrServerlessClientImplTest.java
@@ -4,19 +4,13 @@
 package org.opensearch.sql.spark.client;
 
-import static java.util.Collections.emptyList;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
-import static org.opensearch.sql.spark.constants.TestConstants.DEFAULT_RESULT_INDEX;
 import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID;
 import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE;
 import static org.opensearch.sql.spark.constants.TestConstants.EMRS_JOB_NAME;
 import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID;
-import static org.opensearch.sql.spark.constants.TestConstants.ENTRY_POINT_START_JAR;
 import static org.opensearch.sql.spark.constants.TestConstants.QUERY;
 import static org.opensearch.sql.spark.constants.TestConstants.SPARK_SUBMIT_PARAMETERS;
 
@@ -24,41 +18,19 @@
 import com.amazonaws.services.emrserverless.model.CancelJobRunResult;
 import com.amazonaws.services.emrserverless.model.GetJobRunResult;
 import com.amazonaws.services.emrserverless.model.JobRun;
-import com.amazonaws.services.emrserverless.model.StartJobRunRequest;
 import com.amazonaws.services.emrserverless.model.StartJobRunResult;
 import com.amazonaws.services.emrserverless.model.ValidationException;
 import java.util.HashMap;
-import java.util.List;
 import org.junit.jupiter.api.Assertions;
-import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
-import org.opensearch.sql.common.setting.Settings;
-import org.opensearch.sql.legacy.esdomain.LocalClusterState;
-import org.opensearch.sql.legacy.metrics.Metrics;
-import org.opensearch.sql.opensearch.setting.OpenSearchSettings;
 
 @ExtendWith(MockitoExtension.class)
 public class EmrServerlessClientImplTest {
 
   @Mock private AWSEMRServerless emrServerless;
 
-  @Mock private OpenSearchSettings settings;
-
-  @Captor private ArgumentCaptor<StartJobRunRequest> startJobRunRequestArgumentCaptor;
-
-  @BeforeEach
-  public void setUp() {
-    doReturn(emptyList()).when(settings).getSettings();
-    when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_INTERVAL)).thenReturn(3600L);
-    when(settings.getSettingValue(Settings.Key.METRICS_ROLLING_WINDOW)).thenReturn(600L);
-    LocalClusterState.state().setPluginSettings(settings);
-    Metrics.getInstance().registerDefaultMetrics();
-  }
-
   @Test
   void testStartJobRun() {
     StartJobRunResult response = new StartJobRunResult();
@@ -74,35 +46,7 @@ void testStartJobRun() {
             SPARK_SUBMIT_PARAMETERS,
             new HashMap<>(),
             false,
-            DEFAULT_RESULT_INDEX));
-    verify(emrServerless, times(1)).startJobRun(startJobRunRequestArgumentCaptor.capture());
-    StartJobRunRequest startJobRunRequest = startJobRunRequestArgumentCaptor.getValue();
-    Assertions.assertEquals(EMRS_APPLICATION_ID, startJobRunRequest.getApplicationId());
-    Assertions.assertEquals(EMRS_EXECUTION_ROLE, startJobRunRequest.getExecutionRoleArn());
-    Assertions.assertEquals(
-        ENTRY_POINT_START_JAR, startJobRunRequest.getJobDriver().getSparkSubmit().getEntryPoint());
-    Assertions.assertEquals(
-        List.of(QUERY, DEFAULT_RESULT_INDEX),
-        startJobRunRequest.getJobDriver().getSparkSubmit().getEntryPointArguments());
-  }
-
-  @Test
-  void testStartJobRunWithErrorMetric() {
-    doThrow(new RuntimeException()).when(emrServerless).startJobRun(any());
-    EmrServerlessClientImpl emrServerlessClient = new EmrServerlessClientImpl(emrServerless);
-    Assertions.assertThrows(
-        RuntimeException.class,
-        () ->
-            emrServerlessClient.startJobRun(
-                new StartJobRequest(
-                    QUERY,
-                    EMRS_JOB_NAME,
-                    EMRS_APPLICATION_ID,
-                    EMRS_EXECUTION_ROLE,
-                    SPARK_SUBMIT_PARAMETERS,
-                    new HashMap<>(),
-                    false,
-                    null)));
+            null));
   }
 
   @Test
@@ -134,15 +78,6 @@ void testGetJobRunState() {
     emrServerlessClient.getJobRunResult(EMRS_APPLICATION_ID, "123");
   }
 
-  @Test
-  void testGetJobRunStateWithErrorMetric() {
-    doThrow(new RuntimeException()).when(emrServerless).getJobRun(any());
-    EmrServerlessClientImpl emrServerlessClient = new EmrServerlessClientImpl(emrServerless);
-    Assertions.assertThrows(
-        RuntimeException.class,
-        () -> emrServerlessClient.getJobRunResult(EMRS_APPLICATION_ID, "123"));
-  }
-
   @Test
   void testCancelJobRun() {
     when(emrServerless.cancelJobRun(any()))
@@ -153,14 +88,6 @@ void testCancelJobRun() {
     Assertions.assertEquals(EMR_JOB_ID, cancelJobRunResult.getJobRunId());
   }
 
-  @Test
-  void testCancelJobRunWithErrorMetric() {
-    doThrow(new RuntimeException()).when(emrServerless).cancelJobRun(any());
-    EmrServerlessClientImpl emrServerlessClient = new EmrServerlessClientImpl(emrServerless);
-    Assertions.assertThrows(
-        RuntimeException.class, () -> emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, "123"));
-  }
-
   @Test
   void testCancelJobRunWithValidationException() {
     doThrow(new ValidationException("Error")).when(emrServerless).cancelJobRun(any());
diff --git a/spark/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java b/spark/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java
index cc13061323..abae0377a2 100644
--- a/spark/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java
+++ b/spark/src/test/java/org/opensearch/sql/spark/constants/TestConstants.java
@@ -16,9 +16,4 @@ public class TestConstants {
   public static final String EMRS_JOB_NAME = "job_name";
   public static final String SPARK_SUBMIT_PARAMETERS = "--conf org.flint.sql.SQLJob";
   public static final String TEST_CLUSTER_NAME = "TEST_CLUSTER";
-  public static final String MOCK_SESSION_ID = "s-0123456";
-  public static final String MOCK_STATEMENT_ID = "st-0123456";
-  public static final String ENTRY_POINT_START_JAR =
-      "file:///home/hadoop/.ivy2/jars/org.opensearch_opensearch-spark-sql-application_2.12-0.1.0-SNAPSHOT.jar";
-  public static final String DEFAULT_RESULT_INDEX = "query_execution_result_ds1";
 }
diff --git a/spark/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java b/spark/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java
deleted file mode 100644
index 8419d50ae1..0000000000
--- a/spark/src/test/java/org/opensearch/sql/spark/dispatcher/IndexDMLHandlerTest.java
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright OpenSearch Contributors
- * SPDX-License-Identifier: Apache-2.0
- */
-
-package org.opensearch.sql.spark.dispatcher;
-
-import static org.junit.jupiter.api.Assertions.*;
-
-import org.junit.jupiter.api.Test;
-
-class IndexDMLHandlerTest {
-  @Test
-  public void getResponseFromExecutor() {
-    assertThrows(
-        IllegalStateException.class,
-        () ->
-            new IndexDMLHandler(null, null, null, null, null, null, null)
-                .getResponseFromExecutor(null));
-  }
-}
diff --git a/spark/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java b/spark/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java
index 2a76eabe6a..ab9761da36 100644
--- a/spark/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java
+++ b/spark/src/test/java/org/opensearch/sql/spark/dispatcher/SparkQueryDispatcherTest.java
@@ -5,25 +5,19 @@
 package org.opensearch.sql.spark.dispatcher;
 
-import static org.mockito.Answers.RETURNS_DEEP_STUBS;
 import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.ArgumentMatchers.argThat;
-import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doNothing;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.doThrow;
-import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoInteractions;
 import static org.mockito.Mockito.when;
-import static org.opensearch.sql.spark.asyncquery.OpensearchAsyncQueryAsyncQueryJobMetadataStorageServiceTest.DS_NAME;
 import static org.opensearch.sql.spark.constants.TestConstants.EMRS_APPLICATION_ID;
 import static org.opensearch.sql.spark.constants.TestConstants.EMRS_EXECUTION_ROLE;
 import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID;
-import static org.opensearch.sql.spark.constants.TestConstants.MOCK_SESSION_ID;
-import static org.opensearch.sql.spark.constants.TestConstants.MOCK_STATEMENT_ID;
 import static org.opensearch.sql.spark.constants.TestConstants.TEST_CLUSTER_NAME;
 import static org.opensearch.sql.spark.data.constants.SparkConstants.DATA_FIELD;
 import static org.opensearch.sql.spark.data.constants.SparkConstants.ERROR_FIELD;
@@ -31,10 +25,6 @@
 import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AUTH_USERNAME;
 import static org.opensearch.sql.spark.data.constants.SparkConstants.FLINT_INDEX_STORE_AWSREGION_KEY;
 import static org.opensearch.sql.spark.data.constants.SparkConstants.STATUS_FIELD;
-import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.CLUSTER_NAME_TAG_KEY;
-import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.DATASOURCE_TAG_KEY;
-import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.INDEX_TAG_KEY;
-import static org.opensearch.sql.spark.dispatcher.SparkQueryDispatcher.JOB_TYPE_TAG_KEY;
 
 import com.amazonaws.services.emrserverless.model.CancelJobRunResult;
 import com.amazonaws.services.emrserverless.model.GetJobRunResult;
@@ -44,37 +34,31 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
-import java.util.Optional;
+import java.util.concurrent.ExecutionException;
 import org.json.JSONObject;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.extension.ExtendWith;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
+import org.mockito.Answers;
 import org.mockito.Mock;
 import org.mockito.junit.jupiter.MockitoExtension;
+import org.opensearch.action.support.master.AcknowledgedResponse;
 import org.opensearch.client.Client;
 import org.opensearch.sql.datasource.DataSourceService;
 import org.opensearch.sql.datasource.model.DataSourceMetadata;
 import org.opensearch.sql.datasource.model.DataSourceType;
 import org.opensearch.sql.datasources.auth.DataSourceUserAuthorizationHelperImpl;
-import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId;
 import org.opensearch.sql.spark.asyncquery.model.AsyncQueryJobMetadata;
 import org.opensearch.sql.spark.client.EMRServerlessClient;
 import org.opensearch.sql.spark.client.StartJobRequest;
 import org.opensearch.sql.spark.dispatcher.model.DispatchQueryRequest;
 import org.opensearch.sql.spark.dispatcher.model.DispatchQueryResponse;
-import org.opensearch.sql.spark.dispatcher.model.JobType;
-import org.opensearch.sql.spark.execution.session.Session;
-import org.opensearch.sql.spark.execution.session.SessionId;
-import org.opensearch.sql.spark.execution.session.SessionManager;
-import org.opensearch.sql.spark.execution.statement.Statement;
-import org.opensearch.sql.spark.execution.statement.StatementId;
-import org.opensearch.sql.spark.execution.statement.StatementState;
-import org.opensearch.sql.spark.execution.statestore.StateStore;
+import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName;
+import org.opensearch.sql.spark.dispatcher.model.IndexDetails;
+import org.opensearch.sql.spark.flint.FlintIndexMetadata;
 import org.opensearch.sql.spark.flint.FlintIndexMetadataReader;
-import org.opensearch.sql.spark.leasemanager.LeaseManager;
+import org.opensearch.sql.spark.flint.FlintIndexType;
 import org.opensearch.sql.spark.response.JobExecutionResponseReader;
 import org.opensearch.sql.spark.rest.model.LangType;
 
@@ -87,27 +71,13 @@ public class SparkQueryDispatcherTest {
 
   @Mock private DataSourceUserAuthorizationHelperImpl dataSourceUserAuthorizationHelper;
   @Mock private FlintIndexMetadataReader flintIndexMetadataReader;
 
-  @Mock(answer = RETURNS_DEEP_STUBS)
+  @Mock(answer = Answers.RETURNS_DEEP_STUBS)
   private Client openSearchClient;
 
-  @Mock private SessionManager sessionManager;
-
-  @Mock private LeaseManager leaseManager;
-
-  @Mock(answer = RETURNS_DEEP_STUBS)
-  private Session session;
-
-  @Mock(answer = RETURNS_DEEP_STUBS)
-  private Statement statement;
-
-  @Mock private StateStore stateStore;
+  @Mock private FlintIndexMetadata flintIndexMetadata;
 
   private SparkQueryDispatcher sparkQueryDispatcher;
 
-  private final AsyncQueryId QUERY_ID = AsyncQueryId.newAsyncQueryId(DS_NAME);
-
-  @Captor ArgumentCaptor<StartJobRequest> startJobRequestArgumentCaptor;
-
   @BeforeEach
   void setUp() {
     sparkQueryDispatcher =
@@ -117,34 +87,28 @@ void setUp() {
             dataSourceUserAuthorizationHelper,
             jobExecutionResponseReader,
             flintIndexMetadataReader,
-            openSearchClient,
-            sessionManager,
-            leaseManager,
-            stateStore);
+            openSearchClient);
   }
 
   @Test
   void testDispatchSelectQuery() {
     HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
+    tags.put("datasource", "my_glue");
+    tags.put("cluster", TEST_CLUSTER_NAME);
     String query = "select * from my_glue.default.http_logs";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "sigv4",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
-              }
-            });
     when(emrServerlessClient.startJobRun(
            new StartJobRequest(
                query,
                "TEST_CLUSTER:non-index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                constructExpectedSparkSubmitParameterString(
+                    "sigv4",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                      }
+                    }),
                tags,
                false,
                any())))
        .thenReturn(EMR_JOB_ID);
@@ -161,45 +125,48 @@ void testDispatchSelectQuery() {
             LangType.SQL,
             EMRS_EXECUTION_ROLE,
             TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
+    verify(emrServerlessClient, times(1))
+        .startJobRun(
+            new StartJobRequest(
+                query,
+                "TEST_CLUSTER:non-index-query",
+                EMRS_APPLICATION_ID,
+                EMRS_EXECUTION_ROLE,
+                constructExpectedSparkSubmitParameterString(
+                    "sigv4",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                      }
+                    }),
+                tags,
+                false,
+                any()));
     Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
+    Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery());
     verifyNoInteractions(flintIndexMetadataReader);
   }
 
   @Test
   void testDispatchSelectQueryWithBasicAuthIndexStoreDatasource() {
     HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
+    tags.put("datasource", "my_glue");
+    tags.put("cluster", TEST_CLUSTER_NAME);
     String query = "select * from my_glue.default.http_logs";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "basic",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AUTH_USERNAME, "username");
-                put(FLINT_INDEX_STORE_AUTH_PASSWORD, "password");
-              }
-            });
     when(emrServerlessClient.startJobRun(
            new StartJobRequest(
                query,
                "TEST_CLUSTER:non-index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                constructExpectedSparkSubmitParameterString(
+                    "basicauth",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AUTH_USERNAME, "username");
+                        put(FLINT_INDEX_STORE_AUTH_PASSWORD, "password");
+                      }
+                    }),
                tags,
                false,
                any())))
        .thenReturn(EMR_JOB_ID);
@@ -216,43 +183,47 @@ void testDispatchSelectQueryWithBasicAuthIndexStoreDatasource() {
             LangType.SQL,
             EMRS_EXECUTION_ROLE,
             TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
+    verify(emrServerlessClient, times(1))
+        .startJobRun(
+            new StartJobRequest(
+                query,
+                "TEST_CLUSTER:non-index-query",
+                EMRS_APPLICATION_ID,
+                EMRS_EXECUTION_ROLE,
+                constructExpectedSparkSubmitParameterString(
+                    "basicauth",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AUTH_USERNAME, "username");
+                        put(FLINT_INDEX_STORE_AUTH_PASSWORD, "password");
+                      }
+                    }),
+                tags,
+                false,
+                any()));
     Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
+    Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery());
     verifyNoInteractions(flintIndexMetadataReader);
   }
 
   @Test
   void testDispatchSelectQueryWithNoAuthIndexStoreDatasource() {
     HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
+    tags.put("datasource", "my_glue");
+    tags.put("cluster", TEST_CLUSTER_NAME);
     String query = "select * from my_glue.default.http_logs";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "noauth",
-            new HashMap<>() {
-              {
-              }
-            });
     when(emrServerlessClient.startJobRun(
            new StartJobRequest(
                query,
                "TEST_CLUSTER:non-index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                constructExpectedSparkSubmitParameterString(
+                    "noauth",
+                    new HashMap<>() {
+                      {
+                      }
+                    }),
                tags,
                false,
                any())))
        .thenReturn(EMR_JOB_ID);
@@ -269,109 +240,52 @@ void testDispatchSelectQueryWithNoAuthIndexStoreDatasource() {
             LangType.SQL,
             EMRS_EXECUTION_ROLE,
             TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
+    verify(emrServerlessClient, times(1))
+        .startJobRun(
+            new StartJobRequest(
+                query,
+                "TEST_CLUSTER:non-index-query",
+                EMRS_APPLICATION_ID,
+                EMRS_EXECUTION_ROLE,
+                constructExpectedSparkSubmitParameterString(
+                    "noauth",
+                    new HashMap<>() {
+                      {
+                      }
+                    }),
+                tags,
+                false,
+                any()));
     Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
+    Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery());
     verifyNoInteractions(flintIndexMetadataReader);
   }
 
-  @Test
-  void testDispatchSelectQueryCreateNewSession() {
-    String query = "select * from my_glue.default.http_logs";
-    DispatchQueryRequest queryRequest = dispatchQueryRequestWithSessionId(query, null);
-
-    doReturn(true).when(sessionManager).isEnabled();
-    doReturn(session).when(sessionManager).createSession(any());
-    doReturn(new SessionId(MOCK_SESSION_ID)).when(session).getSessionId();
-    doReturn(new StatementId(MOCK_STATEMENT_ID)).when(session).submit(any());
-    when(session.getSessionModel().getJobId()).thenReturn(EMR_JOB_ID);
-    DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata();
-    when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata);
-    doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata);
-    DispatchQueryResponse dispatchQueryResponse = sparkQueryDispatcher.dispatch(queryRequest);
-
-    verifyNoInteractions(emrServerlessClient);
-    verify(sessionManager, never()).getSession(any());
-    Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
-    Assertions.assertEquals(MOCK_SESSION_ID, dispatchQueryResponse.getSessionId());
-  }
-
-  @Test
-  void testDispatchSelectQueryReuseSession() {
-    String query = "select * from my_glue.default.http_logs";
-    DispatchQueryRequest queryRequest = dispatchQueryRequestWithSessionId(query, MOCK_SESSION_ID);
-
-    doReturn(true).when(sessionManager).isEnabled();
-    doReturn(Optional.of(session))
-        .when(sessionManager)
-        .getSession(eq(new SessionId(MOCK_SESSION_ID)));
-    doReturn(new SessionId(MOCK_SESSION_ID)).when(session).getSessionId();
-    doReturn(new StatementId(MOCK_STATEMENT_ID)).when(session).submit(any());
-    when(session.getSessionModel().getJobId()).thenReturn(EMR_JOB_ID);
-    when(session.isReady()).thenReturn(true);
-    DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata();
when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); - doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); - DispatchQueryResponse dispatchQueryResponse = sparkQueryDispatcher.dispatch(queryRequest); - - verifyNoInteractions(emrServerlessClient); - verify(sessionManager, never()).createSession(any()); - Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); - Assertions.assertEquals(MOCK_SESSION_ID, dispatchQueryResponse.getSessionId()); - } - - @Test - void testDispatchSelectQueryFailedCreateSession() { - String query = "select * from my_glue.default.http_logs"; - DispatchQueryRequest queryRequest = dispatchQueryRequestWithSessionId(query, null); - - doReturn(true).when(sessionManager).isEnabled(); - doThrow(RuntimeException.class).when(sessionManager).createSession(any()); - DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); - when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); - doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); - Assertions.assertThrows( - RuntimeException.class, () -> sparkQueryDispatcher.dispatch(queryRequest)); - - verifyNoInteractions(emrServerlessClient); - } - @Test void testDispatchIndexQuery() { HashMap tags = new HashMap<>(); - tags.put(DATASOURCE_TAG_KEY, "my_glue"); - tags.put(INDEX_TAG_KEY, "flint_my_glue_default_http_logs_elb_and_requesturi_index"); - tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); - tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); + tags.put("datasource", "my_glue"); + tags.put("table", "http_logs"); + tags.put("index", "elb_and_requestUri"); + tags.put("cluster", TEST_CLUSTER_NAME); + tags.put("schema", "default"); String query = "CREATE INDEX elb_and_requestUri ON my_glue.default.http_logs(l_orderkey, l_quantity) WITH" + " (auto_refresh = true)"; - String sparkSubmitParameters = - withStructuredStreaming( - constructExpectedSparkSubmitParameterString( - "sigv4", - new HashMap<>() { - { - put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1"); - } - })); when(emrServerlessClient.startJobRun( new StartJobRequest( query, "TEST_CLUSTER:index-query", EMRS_APPLICATION_ID, EMRS_EXECUTION_ROLE, - sparkSubmitParameters, + withStructuredStreaming( + constructExpectedSparkSubmitParameterString( + "sigv4", + new HashMap<>() { + { + put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1"); + } + })), tags, true, any()))) @@ -388,44 +302,49 @@ void testDispatchIndexQuery() { LangType.SQL, EMRS_EXECUTION_ROLE, TEST_CLUSTER_NAME)); - verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); - StartJobRequest expected = - new StartJobRequest( - query, - "TEST_CLUSTER:index-query", - EMRS_APPLICATION_ID, - EMRS_EXECUTION_ROLE, - sparkSubmitParameters, - tags, - true, - null); - Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); + verify(emrServerlessClient, times(1)) + .startJobRun( + new StartJobRequest( + query, + "TEST_CLUSTER:index-query", + EMRS_APPLICATION_ID, + EMRS_EXECUTION_ROLE, + withStructuredStreaming( + constructExpectedSparkSubmitParameterString( + "sigv4", + new HashMap<>() { + { + put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1"); + } + })), + tags, + true, + any())); Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); + Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery()); verifyNoInteractions(flintIndexMetadataReader); } @Test void 
  void testDispatchWithPPLQuery() {
    HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
+    tags.put("datasource", "my_glue");
+    tags.put("cluster", TEST_CLUSTER_NAME);
+
     String query = "source = my_glue.default.http_logs";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "sigv4",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
-              }
-            });
     when(emrServerlessClient.startJobRun(
            new StartJobRequest(
                query,
                "TEST_CLUSTER:non-index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                constructExpectedSparkSubmitParameterString(
+                    "sigv4",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                      }
+                    }),
                tags,
                false,
                any())))
        .thenReturn(EMR_JOB_ID);
@@ -442,162 +361,50 @@ void testDispatchWithPPLQuery() {
             LangType.PPL,
             EMRS_EXECUTION_ROLE,
             TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
-    Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
-    verifyNoInteractions(flintIndexMetadataReader);
-  }
-
-  @Test
-  void testDispatchQueryWithoutATableAndDataSourceName() {
-    HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
-    String query = "show tables";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "sigv4",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
-              }
-            });
-    when(emrServerlessClient.startJobRun(
+    verify(emrServerlessClient, times(1))
+        .startJobRun(
            new StartJobRequest(
                query,
                "TEST_CLUSTER:non-index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                constructExpectedSparkSubmitParameterString(
+                    "sigv4",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                      }
+                    }),
                tags,
                false,
-                any())))
-        .thenReturn(EMR_JOB_ID);
-    DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata();
-    when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata);
-    doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata);
-    DispatchQueryResponse dispatchQueryResponse =
-        sparkQueryDispatcher.dispatch(
-            new DispatchQueryRequest(
-                EMRS_APPLICATION_ID,
-                query,
-                "my_glue",
-                LangType.SQL,
-                EMRS_EXECUTION_ROLE,
-                TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
+                any()));
     Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
+    Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery());
     verifyNoInteractions(flintIndexMetadataReader);
   }
 
   @Test
-  void testDispatchIndexQueryWithoutADatasourceName() {
+  void testDispatchQueryWithoutATableAndDataSourceName() {
     HashMap<String, String> tags = new HashMap<>();
tags.put(DATASOURCE_TAG_KEY, "my_glue"); - tags.put(INDEX_TAG_KEY, "flint_my_glue_default_http_logs_elb_and_requesturi_index"); - tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); - tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); - String query = - "CREATE INDEX elb_and_requestUri ON default.http_logs(l_orderkey, l_quantity) WITH" - + " (auto_refresh = true)"; - String sparkSubmitParameters = - withStructuredStreaming( - constructExpectedSparkSubmitParameterString( - "sigv4", - new HashMap<>() { - { - put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1"); - } - })); - when(emrServerlessClient.startJobRun( - new StartJobRequest( - query, - "TEST_CLUSTER:index-query", - EMRS_APPLICATION_ID, - EMRS_EXECUTION_ROLE, - sparkSubmitParameters, - tags, - true, - any()))) - .thenReturn(EMR_JOB_ID); - DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); - when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); - doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); - DispatchQueryResponse dispatchQueryResponse = - sparkQueryDispatcher.dispatch( - new DispatchQueryRequest( - EMRS_APPLICATION_ID, - query, - "my_glue", - LangType.SQL, - EMRS_EXECUTION_ROLE, - TEST_CLUSTER_NAME)); - verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); - StartJobRequest expected = - new StartJobRequest( - query, - "TEST_CLUSTER:index-query", - EMRS_APPLICATION_ID, - EMRS_EXECUTION_ROLE, - sparkSubmitParameters, - tags, - true, - null); - Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); - Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); - verifyNoInteractions(flintIndexMetadataReader); - } + tags.put("datasource", "my_glue"); + tags.put("cluster", TEST_CLUSTER_NAME); - @Test - void testDispatchMaterializedViewQuery() { - HashMap tags = new HashMap<>(); - tags.put(DATASOURCE_TAG_KEY, "my_glue"); - tags.put(INDEX_TAG_KEY, "flint_mv_1"); - tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME); - tags.put(JOB_TYPE_TAG_KEY, JobType.STREAMING.getText()); - String query = - "CREATE MATERIALIZED VIEW mv_1 AS query=select * from my_glue.default.logs WITH" - + " (auto_refresh = true)"; - String sparkSubmitParameters = - withStructuredStreaming( - constructExpectedSparkSubmitParameterString( - "sigv4", - new HashMap<>() { - { - put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1"); - } - })); + String query = "show tables"; when(emrServerlessClient.startJobRun( new StartJobRequest( query, - "TEST_CLUSTER:index-query", + "TEST_CLUSTER:non-index-query", EMRS_APPLICATION_ID, EMRS_EXECUTION_ROLE, - sparkSubmitParameters, + constructExpectedSparkSubmitParameterString( + "sigv4", + new HashMap<>() { + { + put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1"); + } + }), tags, - true, + false, any()))) .thenReturn(EMR_JOB_ID); DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); @@ -612,100 +419,56 @@ void testDispatchMaterializedViewQuery() { LangType.SQL, EMRS_EXECUTION_ROLE, TEST_CLUSTER_NAME)); - verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture()); - StartJobRequest expected = - new StartJobRequest( - query, - "TEST_CLUSTER:index-query", - EMRS_APPLICATION_ID, - EMRS_EXECUTION_ROLE, - sparkSubmitParameters, - tags, - true, - null); - Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue()); - Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId()); - 
-    verifyNoInteractions(flintIndexMetadataReader);
-  }
-
-  @Test
-  void testDispatchShowMVQuery() {
-    HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
-    String query = "SHOW MATERIALIZED VIEW IN mys3.default";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "sigv4",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
-              }
-            });
-    when(emrServerlessClient.startJobRun(
+    verify(emrServerlessClient, times(1))
+        .startJobRun(
            new StartJobRequest(
                query,
-                "TEST_CLUSTER:index-query",
+                "TEST_CLUSTER:non-index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                constructExpectedSparkSubmitParameterString(
+                    "sigv4",
+                    new HashMap<>() {
+                      {
+                        put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                      }
+                    }),
                tags,
                false,
-                any())))
-        .thenReturn(EMR_JOB_ID);
-    DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata();
-    when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata);
-    doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata);
-    DispatchQueryResponse dispatchQueryResponse =
-        sparkQueryDispatcher.dispatch(
-            new DispatchQueryRequest(
-                EMRS_APPLICATION_ID,
-                query,
-                "my_glue",
-                LangType.SQL,
-                EMRS_EXECUTION_ROLE,
-                TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
+                any()));
     Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
+    Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery());
     verifyNoInteractions(flintIndexMetadataReader);
   }
 
   @Test
-  void testRefreshIndexQuery() {
+  void testDispatchIndexQueryWithoutADatasourceName() {
     HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
-    String query = "REFRESH SKIPPING INDEX ON my_glue.default.http_logs";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "sigv4",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
-              }
-            });
+    tags.put("datasource", "my_glue");
+    tags.put("table", "http_logs");
+    tags.put("index", "elb_and_requestUri");
+    tags.put("cluster", TEST_CLUSTER_NAME);
+    tags.put("schema", "default");
+
+    String query =
+        "CREATE INDEX elb_and_requestUri ON default.http_logs(l_orderkey, l_quantity) WITH"
+            + " (auto_refresh = true)";
     when(emrServerlessClient.startJobRun(
            new StartJobRequest(
                query,
-                "TEST_CLUSTER:non-index-query",
+                "TEST_CLUSTER:index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                withStructuredStreaming(
+                    constructExpectedSparkSubmitParameterString(
+                        "sigv4",
+                        new HashMap<>() {
+                          {
+                            put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                          }
+                        })),
                tags,
-                false,
+                true,
                any())))
        .thenReturn(EMR_JOB_ID);
     DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata();
@@ -720,73 +483,26 @@ void testRefreshIndexQuery() {
             LangType.SQL,
             EMRS_EXECUTION_ROLE,
             TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
-    Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
-    verifyNoInteractions(flintIndexMetadataReader);
-  }
-
-  @Test
-  void testDispatchDescribeIndexQuery() {
-    HashMap<String, String> tags = new HashMap<>();
-    tags.put(DATASOURCE_TAG_KEY, "my_glue");
-    tags.put(CLUSTER_NAME_TAG_KEY, TEST_CLUSTER_NAME);
-    tags.put(JOB_TYPE_TAG_KEY, JobType.BATCH.getText());
-    String query = "DESCRIBE SKIPPING INDEX ON mys3.default.http_logs";
-    String sparkSubmitParameters =
-        constructExpectedSparkSubmitParameterString(
-            "sigv4",
-            new HashMap<>() {
-              {
-                put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
-              }
-            });
-    when(emrServerlessClient.startJobRun(
+    verify(emrServerlessClient, times(1))
+        .startJobRun(
            new StartJobRequest(
                query,
                "TEST_CLUSTER:index-query",
                EMRS_APPLICATION_ID,
                EMRS_EXECUTION_ROLE,
-                sparkSubmitParameters,
+                withStructuredStreaming(
+                    constructExpectedSparkSubmitParameterString(
+                        "sigv4",
+                        new HashMap<>() {
+                          {
+                            put(FLINT_INDEX_STORE_AWSREGION_KEY, "eu-west-1");
+                          }
+                        })),
                tags,
-                false,
-                any())))
-        .thenReturn(EMR_JOB_ID);
-    DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata();
-    when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata);
-    doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata);
-    DispatchQueryResponse dispatchQueryResponse =
-        sparkQueryDispatcher.dispatch(
-            new DispatchQueryRequest(
-                EMRS_APPLICATION_ID,
-                query,
-                "my_glue",
-                LangType.SQL,
-                EMRS_EXECUTION_ROLE,
-                TEST_CLUSTER_NAME));
-    verify(emrServerlessClient, times(1)).startJobRun(startJobRequestArgumentCaptor.capture());
-    StartJobRequest expected =
-        new StartJobRequest(
-            query,
-            "TEST_CLUSTER:non-index-query",
-            EMRS_APPLICATION_ID,
-            EMRS_EXECUTION_ROLE,
-            sparkSubmitParameters,
-            tags,
-            false,
-            null);
-    Assertions.assertEquals(expected, startJobRequestArgumentCaptor.getValue());
+                true,
+                any()));
     Assertions.assertEquals(EMR_JOB_ID, dispatchQueryResponse.getJobId());
+    Assertions.assertFalse(dispatchQueryResponse.isDropIndexQuery());
     verifyNoInteractions(flintIndexMetadataReader);
   }
 
@@ -841,68 +557,10 @@ void testCancelJob() {
             new CancelJobRunResult()
                 .withJobRunId(EMR_JOB_ID)
                 .withApplicationId(EMRS_APPLICATION_ID));
-    String queryId = sparkQueryDispatcher.cancelJob(asyncQueryJobMetadata());
-    Assertions.assertEquals(QUERY_ID.getId(), queryId);
-  }
-
-  @Test
-  void testCancelQueryWithSession() {
-    doReturn(Optional.of(session)).when(sessionManager).getSession(new SessionId(MOCK_SESSION_ID));
-    doReturn(Optional.of(statement)).when(session).get(any());
-    doNothing().when(statement).cancel();
-
-    String queryId =
+    String jobId =
         sparkQueryDispatcher.cancelJob(
-            asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID));
-
-    verifyNoInteractions(emrServerlessClient);
-    verify(statement, times(1)).cancel();
-    Assertions.assertEquals(MOCK_STATEMENT_ID, queryId);
-  }
-
-  @Test
-  void testCancelQueryWithInvalidSession() {
-    doReturn(Optional.empty()).when(sessionManager).getSession(new SessionId("invalid"));
-
-    IllegalArgumentException exception =
-        Assertions.assertThrows(
-            IllegalArgumentException.class,
-            () ->
-                sparkQueryDispatcher.cancelJob(
asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, "invalid"))); - - verifyNoInteractions(emrServerlessClient); - verifyNoInteractions(session); - Assertions.assertEquals( - "no session found. " + new SessionId("invalid"), exception.getMessage()); - } - - @Test - void testCancelQueryWithInvalidStatementId() { - doReturn(Optional.of(session)).when(sessionManager).getSession(new SessionId(MOCK_SESSION_ID)); - - IllegalArgumentException exception = - Assertions.assertThrows( - IllegalArgumentException.class, - () -> - sparkQueryDispatcher.cancelJob( - asyncQueryJobMetadataWithSessionId("invalid", MOCK_SESSION_ID))); - - verifyNoInteractions(emrServerlessClient); - verifyNoInteractions(statement); - Assertions.assertEquals( - "no statement found. " + new StatementId("invalid"), exception.getMessage()); - } - - @Test - void testCancelQueryWithNoSessionId() { - when(emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID)) - .thenReturn( - new CancelJobRunResult() - .withJobRunId(EMR_JOB_ID) - .withApplicationId(EMRS_APPLICATION_ID)); - String queryId = sparkQueryDispatcher.cancelJob(asyncQueryJobMetadata()); - Assertions.assertEquals(QUERY_ID.getId(), queryId); + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null)); + Assertions.assertEquals(EMR_JOB_ID, jobId); } @Test @@ -913,67 +571,22 @@ void testGetQueryResponse() { // simulate result index is not created yet when(jobExecutionResponseReader.getResultFromOpensearchIndex(EMR_JOB_ID, null)) .thenReturn(new JSONObject()); - JSONObject result = sparkQueryDispatcher.getQueryResponse(asyncQueryJobMetadata()); - Assertions.assertEquals("PENDING", result.get("status")); - } - - @Test - void testGetQueryResponseWithSession() { - doReturn(Optional.of(session)).when(sessionManager).getSession(new SessionId(MOCK_SESSION_ID)); - doReturn(Optional.of(statement)).when(session).get(any()); - when(statement.getStatementModel().getError()).thenReturn("mock error"); - doReturn(StatementState.WAITING).when(statement).getStatementState(); - - doReturn(new JSONObject()) - .when(jobExecutionResponseReader) - .getResultWithQueryId(eq(MOCK_STATEMENT_ID), any()); JSONObject result = sparkQueryDispatcher.getQueryResponse( - asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID)); - - verifyNoInteractions(emrServerlessClient); - Assertions.assertEquals("waiting", result.get("status")); - } - - @Test - void testGetQueryResponseWithInvalidSession() { - doReturn(Optional.empty()).when(sessionManager).getSession(eq(new SessionId(MOCK_SESSION_ID))); - doReturn(new JSONObject()) - .when(jobExecutionResponseReader) - .getResultWithQueryId(eq(MOCK_STATEMENT_ID), any()); - IllegalArgumentException exception = - Assertions.assertThrows( - IllegalArgumentException.class, - () -> - sparkQueryDispatcher.getQueryResponse( - asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID))); - - verifyNoInteractions(emrServerlessClient); - Assertions.assertEquals( - "no session found. 
" + new SessionId(MOCK_SESSION_ID), exception.getMessage()); - } - - @Test - void testGetQueryResponseWithStatementNotExist() { - doReturn(Optional.of(session)).when(sessionManager).getSession(new SessionId(MOCK_SESSION_ID)); - doReturn(Optional.empty()).when(session).get(any()); - doReturn(new JSONObject()) - .when(jobExecutionResponseReader) - .getResultWithQueryId(eq(MOCK_STATEMENT_ID), any()); - - IllegalArgumentException exception = - Assertions.assertThrows( - IllegalArgumentException.class, - () -> - sparkQueryDispatcher.getQueryResponse( - asyncQueryJobMetadataWithSessionId(MOCK_STATEMENT_ID, MOCK_SESSION_ID))); - verifyNoInteractions(emrServerlessClient); - Assertions.assertEquals( - "no statement found. " + new StatementId(MOCK_STATEMENT_ID), exception.getMessage()); + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null)); + Assertions.assertEquals("PENDING", result.get("status")); } @Test void testGetQueryResponseWithSuccess() { + SparkQueryDispatcher sparkQueryDispatcher = + new SparkQueryDispatcher( + emrServerlessClient, + dataSourceService, + dataSourceUserAuthorizationHelper, + jobExecutionResponseReader, + flintIndexMetadataReader, + openSearchClient); JSONObject queryResult = new JSONObject(); Map resultMap = new HashMap<>(); resultMap.put(STATUS_FIELD, "SUCCESS"); @@ -981,7 +594,9 @@ void testGetQueryResponseWithSuccess() { queryResult.put(DATA_FIELD, resultMap); when(jobExecutionResponseReader.getResultFromOpensearchIndex(EMR_JOB_ID, null)) .thenReturn(queryResult); - JSONObject result = sparkQueryDispatcher.getQueryResponse(asyncQueryJobMetadata()); + JSONObject result = + sparkQueryDispatcher.getQueryResponse( + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, EMR_JOB_ID, null)); verify(jobExecutionResponseReader, times(1)).getResultFromOpensearchIndex(EMR_JOB_ID, null); Assertions.assertEquals( new HashSet<>(Arrays.asList(DATA_FIELD, STATUS_FIELD, ERROR_FIELD)), result.keySet()); @@ -998,6 +613,226 @@ void testGetQueryResponseWithSuccess() { verifyNoInteractions(emrServerlessClient); } + // todo. refactor query process logic in plugin. 
+ @Test + void testGetQueryResponseOfDropIndex() { + SparkQueryDispatcher sparkQueryDispatcher = + new SparkQueryDispatcher( + emrServerlessClient, + dataSourceService, + dataSourceUserAuthorizationHelper, + jobExecutionResponseReader, + flintIndexMetadataReader, + openSearchClient); + + String jobId = + new SparkQueryDispatcher.DropIndexResult(JobRunState.SUCCESS.toString()).toJobId(); + + JSONObject result = + sparkQueryDispatcher.getQueryResponse( + new AsyncQueryJobMetadata(EMRS_APPLICATION_ID, jobId, true, null)); + verify(jobExecutionResponseReader, times(0)) + .getResultFromOpensearchIndex(anyString(), anyString()); + Assertions.assertEquals("SUCCESS", result.get(STATUS_FIELD)); + } + + @Test + void testDropIndexQuery() throws ExecutionException, InterruptedException { + String query = "DROP INDEX size_year ON my_glue.default.http_logs"; + when(flintIndexMetadataReader.getFlintIndexMetadata( + new IndexDetails( + "size_year", + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.COVERING))) + .thenReturn(flintIndexMetadata); + when(flintIndexMetadata.getJobId()).thenReturn(EMR_JOB_ID); + // auto_refresh == true + when(flintIndexMetadata.isAutoRefresh()).thenReturn(true); + + when(emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID)) + .thenReturn( + new CancelJobRunResult() + .withJobRunId(EMR_JOB_ID) + .withApplicationId(EMRS_APPLICATION_ID)); + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); + doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); + + AcknowledgedResponse acknowledgedResponse = mock(AcknowledgedResponse.class); + when(openSearchClient.admin().indices().delete(any()).get()).thenReturn(acknowledgedResponse); + when(acknowledgedResponse.isAcknowledged()).thenReturn(true); + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch( + new DispatchQueryRequest( + EMRS_APPLICATION_ID, + query, + "my_glue", + LangType.SQL, + EMRS_EXECUTION_ROLE, + TEST_CLUSTER_NAME)); + verify(emrServerlessClient, times(1)).cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID); + verify(dataSourceUserAuthorizationHelper, times(1)).authorizeDataSource(dataSourceMetadata); + verify(flintIndexMetadataReader, times(1)) + .getFlintIndexMetadata( + new IndexDetails( + "size_year", + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.COVERING)); + + SparkQueryDispatcher.DropIndexResult dropIndexResult = + SparkQueryDispatcher.DropIndexResult.fromJobId(dispatchQueryResponse.getJobId()); + Assertions.assertEquals(JobRunState.SUCCESS.toString(), dropIndexResult.getStatus()); + Assertions.assertTrue(dispatchQueryResponse.isDropIndexQuery()); + } + + @Test + void testDropSkippingIndexQuery() throws ExecutionException, InterruptedException { + String query = "DROP SKIPPING INDEX ON my_glue.default.http_logs"; + when(flintIndexMetadataReader.getFlintIndexMetadata( + new IndexDetails( + null, + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING))) + .thenReturn(flintIndexMetadata); + when(flintIndexMetadata.getJobId()).thenReturn(EMR_JOB_ID); + when(flintIndexMetadata.isAutoRefresh()).thenReturn(true); + + when(emrServerlessClient.cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID)) + .thenReturn( + new CancelJobRunResult() + .withJobRunId(EMR_JOB_ID) + .withApplicationId(EMRS_APPLICATION_ID)); + 
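        // Editor's note: the stubbing of openSearchClient.admin().indices().delete(any()).get()
        // a few lines below chains through several intermediate objects; that only works if
        // openSearchClient is a deep-stub mock (e.g. @Mock(answer = Answers.RETURNS_DEEP_STUBS)
        // in Mockito), so each intermediate call returns an auto-created mock. That this test
        // class is configured that way is an assumption inferred from the chained when(...).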
DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); + doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); + AcknowledgedResponse acknowledgedResponse = mock(AcknowledgedResponse.class); + when(openSearchClient.admin().indices().delete(any()).get()).thenReturn(acknowledgedResponse); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch( + new DispatchQueryRequest( + EMRS_APPLICATION_ID, + query, + "my_glue", + LangType.SQL, + EMRS_EXECUTION_ROLE, + TEST_CLUSTER_NAME)); + verify(emrServerlessClient, times(1)).cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID); + verify(dataSourceUserAuthorizationHelper, times(1)).authorizeDataSource(dataSourceMetadata); + verify(flintIndexMetadataReader, times(1)) + .getFlintIndexMetadata( + new IndexDetails( + null, + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING)); + SparkQueryDispatcher.DropIndexResult dropIndexResult = + SparkQueryDispatcher.DropIndexResult.fromJobId(dispatchQueryResponse.getJobId()); + Assertions.assertEquals(JobRunState.SUCCESS.toString(), dropIndexResult.getStatus()); + Assertions.assertTrue(dispatchQueryResponse.isDropIndexQuery()); + } + + @Test + void testDropSkippingIndexQueryAutoRefreshFalse() + throws ExecutionException, InterruptedException { + String query = "DROP SKIPPING INDEX ON my_glue.default.http_logs"; + when(flintIndexMetadataReader.getFlintIndexMetadata( + new IndexDetails( + null, + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING))) + .thenReturn(flintIndexMetadata); + when(flintIndexMetadata.isAutoRefresh()).thenReturn(false); + + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); + doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); + AcknowledgedResponse acknowledgedResponse = mock(AcknowledgedResponse.class); + when(openSearchClient.admin().indices().delete(any()).get()).thenReturn(acknowledgedResponse); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch( + new DispatchQueryRequest( + EMRS_APPLICATION_ID, + query, + "my_glue", + LangType.SQL, + EMRS_EXECUTION_ROLE, + TEST_CLUSTER_NAME)); + verify(emrServerlessClient, times(0)).cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID); + verify(dataSourceUserAuthorizationHelper, times(1)).authorizeDataSource(dataSourceMetadata); + verify(flintIndexMetadataReader, times(1)) + .getFlintIndexMetadata( + new IndexDetails( + null, + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING)); + SparkQueryDispatcher.DropIndexResult dropIndexResult = + SparkQueryDispatcher.DropIndexResult.fromJobId(dispatchQueryResponse.getJobId()); + Assertions.assertEquals(JobRunState.SUCCESS.toString(), dropIndexResult.getStatus()); + Assertions.assertTrue(dispatchQueryResponse.isDropIndexQuery()); + } + + @Test + void testDropSkippingIndexQueryDeleteIndexException() + throws ExecutionException, InterruptedException { + String query = "DROP SKIPPING INDEX ON my_glue.default.http_logs"; + when(flintIndexMetadataReader.getFlintIndexMetadata( + new IndexDetails( + null, + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING))) + 
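        // Editor's note: reading IndexDetails positionally here as (indexName,
        // fullyQualifiedTableName, autoRefresh = false, isDropIndex = true, indexType)
        // is an assumption inferred from how these drop-index tests construct it.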
.thenReturn(flintIndexMetadata); + when(flintIndexMetadata.isAutoRefresh()).thenReturn(false); + + DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); + when(dataSourceService.getRawDataSourceMetadata("my_glue")).thenReturn(dataSourceMetadata); + doNothing().when(dataSourceUserAuthorizationHelper).authorizeDataSource(dataSourceMetadata); + + when(openSearchClient.admin().indices().delete(any()).get()) + .thenThrow(ExecutionException.class); + + DispatchQueryResponse dispatchQueryResponse = + sparkQueryDispatcher.dispatch( + new DispatchQueryRequest( + EMRS_APPLICATION_ID, + query, + "my_glue", + LangType.SQL, + EMRS_EXECUTION_ROLE, + TEST_CLUSTER_NAME)); + verify(emrServerlessClient, times(0)).cancelJobRun(EMRS_APPLICATION_ID, EMR_JOB_ID); + verify(dataSourceUserAuthorizationHelper, times(1)).authorizeDataSource(dataSourceMetadata); + verify(flintIndexMetadataReader, times(1)) + .getFlintIndexMetadata( + new IndexDetails( + null, + new FullyQualifiedTableName("my_glue.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING)); + SparkQueryDispatcher.DropIndexResult dropIndexResult = + SparkQueryDispatcher.DropIndexResult.fromJobId(dispatchQueryResponse.getJobId()); + Assertions.assertEquals(JobRunState.FAILED.toString(), dropIndexResult.getStatus()); + Assertions.assertEquals( + "{\"error\":\"failed to drop index\",\"status\":\"FAILED\"}", + dropIndexResult.result().toString()); + Assertions.assertTrue(dispatchQueryResponse.isDropIndexQuery()); + } + @Test void testDispatchQueryWithExtraSparkSubmitParameters() { DataSourceMetadata dataSourceMetadata = constructMyGlueDataSourceMetadata(); @@ -1070,8 +905,8 @@ private String constructExpectedSparkSubmitParameterString( + " --conf" + " spark.hive.metastore.glue.role.arn=arn:aws:iam::924196221507:role/FlintOpensearchServiceRole" + " --conf spark.sql.catalog.my_glue=org.opensearch.sql.FlintDelegatingSessionCatalog " - + " --conf spark.flint.datasource.name=my_glue " - + authParamConfigBuilder; + + authParamConfigBuilder + + " --conf spark.flint.datasource.name=my_glue "; } private String withStructuredStreaming(String parameters) { @@ -1162,29 +997,6 @@ private DispatchQueryRequest constructDispatchQueryRequest( langType, EMRS_EXECUTION_ROLE, TEST_CLUSTER_NAME, - extraParameters, - null); - } - - private DispatchQueryRequest dispatchQueryRequestWithSessionId(String query, String sessionId) { - return new DispatchQueryRequest( - EMRS_APPLICATION_ID, - query, - "my_glue", - LangType.SQL, - EMRS_EXECUTION_ROLE, - TEST_CLUSTER_NAME, - null, - sessionId); - } - - private AsyncQueryJobMetadata asyncQueryJobMetadata() { - return new AsyncQueryJobMetadata(QUERY_ID, EMRS_APPLICATION_ID, EMR_JOB_ID, null); - } - - private AsyncQueryJobMetadata asyncQueryJobMetadataWithSessionId( - String statementId, String sessionId) { - return new AsyncQueryJobMetadata( - new AsyncQueryId(statementId), EMRS_APPLICATION_ID, EMR_JOB_ID, null, sessionId); + extraParameters); } } diff --git a/spark/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java b/spark/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java deleted file mode 100644 index 14ccaf7708..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/execution/session/InteractiveSessionTest.java +++ /dev/null @@ -1,231 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import static 
org.opensearch.sql.spark.execution.session.InteractiveSessionTest.TestSession.testSession; -import static org.opensearch.sql.spark.execution.session.SessionManagerTest.sessionSetting; -import static org.opensearch.sql.spark.execution.session.SessionState.NOT_STARTED; -import static org.opensearch.sql.spark.execution.statestore.StateStore.DATASOURCE_TO_REQUEST_INDEX; -import static org.opensearch.sql.spark.execution.statestore.StateStore.getSession; - -import com.amazonaws.services.emrserverless.model.CancelJobRunResult; -import com.amazonaws.services.emrserverless.model.GetJobRunResult; -import java.util.HashMap; -import java.util.Optional; -import lombok.RequiredArgsConstructor; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.delete.DeleteRequest; -import org.opensearch.sql.spark.asyncquery.model.SparkSubmitParameters; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.client.StartJobRequest; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.test.OpenSearchIntegTestCase; - -/** mock-maker-inline does not work with OpenSearchTestCase. */ -public class InteractiveSessionTest extends OpenSearchIntegTestCase { - - private static final String DS_NAME = "mys3"; - private static final String indexName = DATASOURCE_TO_REQUEST_INDEX.apply(DS_NAME); - - private TestEMRServerlessClient emrsClient; - private StartJobRequest startJobRequest; - private StateStore stateStore; - - @Before - public void setup() { - emrsClient = new TestEMRServerlessClient(); - startJobRequest = new StartJobRequest("", "", "appId", "", "", new HashMap<>(), false, ""); - stateStore = new StateStore(client(), clusterService()); - } - - @After - public void clean() { - if (clusterService().state().routingTable().hasIndex(indexName)) { - client().admin().indices().delete(new DeleteIndexRequest(indexName)).actionGet(); - } - } - - @Test - public void openCloseSession() { - InteractiveSession session = - InteractiveSession.builder() - .sessionId(SessionId.newSessionId(DS_NAME)) - .stateStore(stateStore) - .serverlessClient(emrsClient) - .build(); - - // open session - TestSession testSession = testSession(session, stateStore); - testSession - .open(createSessionRequest()) - .assertSessionState(NOT_STARTED) - .assertAppId("appId") - .assertJobId("jobId"); - emrsClient.startJobRunCalled(1); - - // close session - testSession.close(); - emrsClient.cancelJobRunCalled(1); - } - - @Test - public void openSessionFailedConflict() { - SessionId sessionId = SessionId.newSessionId(DS_NAME); - InteractiveSession session = - InteractiveSession.builder() - .sessionId(sessionId) - .stateStore(stateStore) - .serverlessClient(emrsClient) - .build(); - session.open(createSessionRequest()); - - InteractiveSession duplicateSession = - InteractiveSession.builder() - .sessionId(sessionId) - .stateStore(stateStore) - .serverlessClient(emrsClient) - .build(); - IllegalStateException exception = - assertThrows( - IllegalStateException.class, () -> duplicateSession.open(createSessionRequest())); - assertEquals("session already exist. 
" + sessionId, exception.getMessage()); - } - - @Test - public void closeNotExistSession() { - SessionId sessionId = SessionId.newSessionId(DS_NAME); - InteractiveSession session = - InteractiveSession.builder() - .sessionId(sessionId) - .stateStore(stateStore) - .serverlessClient(emrsClient) - .build(); - session.open(createSessionRequest()); - - client().delete(new DeleteRequest(indexName, sessionId.getSessionId())).actionGet(); - - IllegalStateException exception = assertThrows(IllegalStateException.class, session::close); - assertEquals("session does not exist. " + sessionId, exception.getMessage()); - emrsClient.cancelJobRunCalled(0); - } - - @Test - public void sessionManagerCreateSession() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - - TestSession testSession = testSession(session, stateStore); - testSession.assertSessionState(NOT_STARTED).assertAppId("appId").assertJobId("jobId"); - } - - @Test - public void sessionManagerGetSession() { - SessionManager sessionManager = - new SessionManager(stateStore, emrsClient, sessionSetting(false)); - Session session = sessionManager.createSession(createSessionRequest()); - - Optional managerSession = sessionManager.getSession(session.getSessionId()); - assertTrue(managerSession.isPresent()); - assertEquals(session.getSessionId(), managerSession.get().getSessionId()); - } - - @Test - public void sessionManagerGetSessionNotExist() { - SessionManager sessionManager = - new SessionManager(stateStore, emrsClient, sessionSetting(false)); - - Optional managerSession = - sessionManager.getSession(SessionId.newSessionId("no-exist")); - assertTrue(managerSession.isEmpty()); - } - - @RequiredArgsConstructor - static class TestSession { - private final Session session; - private final StateStore stateStore; - - public static TestSession testSession(Session session, StateStore stateStore) { - return new TestSession(session, stateStore); - } - - public TestSession assertSessionState(SessionState expected) { - assertEquals(expected, session.getSessionModel().getSessionState()); - - Optional sessionStoreState = - getSession(stateStore, DS_NAME).apply(session.getSessionModel().getId()); - assertTrue(sessionStoreState.isPresent()); - assertEquals(expected, sessionStoreState.get().getSessionState()); - - return this; - } - - public TestSession assertAppId(String expected) { - assertEquals(expected, session.getSessionModel().getApplicationId()); - return this; - } - - public TestSession assertJobId(String expected) { - assertEquals(expected, session.getSessionModel().getJobId()); - return this; - } - - public TestSession open(CreateSessionRequest req) { - session.open(req); - return this; - } - - public TestSession close() { - session.close(); - return this; - } - } - - public static CreateSessionRequest createSessionRequest() { - return new CreateSessionRequest( - "jobName", - "appId", - "arn", - SparkSubmitParameters.Builder.builder(), - new HashMap<>(), - "resultIndex", - DS_NAME); - } - - public static class TestEMRServerlessClient implements EMRServerlessClient { - - private int startJobRunCalled = 0; - private int cancelJobRunCalled = 0; - - @Override - public String startJobRun(StartJobRequest startJobRequest) { - startJobRunCalled++; - return "jobId"; - } - - @Override - public GetJobRunResult getJobRunResult(String applicationId, String jobId) { - return null; - } - - @Override - public CancelJobRunResult cancelJobRun(String applicationId, String jobId) { - 
cancelJobRunCalled++; - return null; - } - - public void startJobRunCalled(int expectedTimes) { - assertEquals(expectedTimes, startJobRunCalled); - } - - public void cancelJobRunCalled(int expectedTimes) { - assertEquals(expectedTimes, cancelJobRunCalled); - } - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java b/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java deleted file mode 100644 index 5a0a53009f..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionManagerTest.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.opensearch.sql.common.setting.Settings; -import org.opensearch.sql.spark.client.EMRServerlessClient; -import org.opensearch.sql.spark.execution.statestore.StateStore; - -@ExtendWith(MockitoExtension.class) -public class SessionManagerTest { - @Mock private StateStore stateStore; - @Mock private EMRServerlessClient emrClient; - - @Test - public void sessionEnable() { - Assertions.assertTrue( - new SessionManager(stateStore, emrClient, sessionSetting(true)).isEnabled()); - } - - public static Settings sessionSetting(boolean enabled) { - Map settings = new HashMap<>(); - settings.put(Settings.Key.SPARK_EXECUTION_SESSION_LIMIT, 100); - return settings(settings); - } - - public static Settings settings(Map settings) { - return new Settings() { - @Override - public T getSettingValue(Key key) { - return (T) settings.get(key); - } - - @Override - public List getSettings() { - return (List) settings; - } - }; - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java b/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java deleted file mode 100644 index a987c80d59..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionStateTest.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; - -import org.junit.jupiter.api.Test; - -class SessionStateTest { - @Test - public void invalidSessionType() { - IllegalArgumentException exception = - assertThrows(IllegalArgumentException.class, () -> SessionState.fromString("invalid")); - assertEquals("Invalid session state: invalid", exception.getMessage()); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java b/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java deleted file mode 100644 index a2ab43e709..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/execution/session/SessionTypeTest.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.session; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertThrows; - -import org.junit.jupiter.api.Test; - -class 
SessionTypeTest { - @Test - public void invalidSessionType() { - IllegalArgumentException exception = - assertThrows(IllegalArgumentException.class, () -> SessionType.fromString("invalid")); - assertEquals("Invalid session type: invalid", exception.getMessage()); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java b/spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java deleted file mode 100644 index b7af1123ba..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementStateTest.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import static org.junit.Assert.assertThrows; - -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; - -class StatementStateTest { - @Test - public void invalidStatementState() { - IllegalArgumentException exception = - assertThrows(IllegalArgumentException.class, () -> StatementState.fromString("invalid")); - Assertions.assertEquals("Invalid statement state: invalid", exception.getMessage()); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java b/spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java deleted file mode 100644 index 29020f2496..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/execution/statement/StatementTest.java +++ /dev/null @@ -1,443 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.execution.statement; - -import static org.opensearch.sql.spark.execution.session.InteractiveSessionTest.createSessionRequest; -import static org.opensearch.sql.spark.execution.session.SessionManagerTest.sessionSetting; -import static org.opensearch.sql.spark.execution.statement.StatementState.CANCELLED; -import static org.opensearch.sql.spark.execution.statement.StatementState.RUNNING; -import static org.opensearch.sql.spark.execution.statement.StatementState.WAITING; -import static org.opensearch.sql.spark.execution.statement.StatementTest.TestStatement.testStatement; -import static org.opensearch.sql.spark.execution.statestore.StateStore.DATASOURCE_TO_REQUEST_INDEX; -import static org.opensearch.sql.spark.execution.statestore.StateStore.getStatement; -import static org.opensearch.sql.spark.execution.statestore.StateStore.updateSessionState; -import static org.opensearch.sql.spark.execution.statestore.StateStore.updateStatementState; - -import java.util.Optional; -import lombok.RequiredArgsConstructor; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.opensearch.action.admin.indices.delete.DeleteIndexRequest; -import org.opensearch.action.delete.DeleteRequest; -import org.opensearch.sql.spark.asyncquery.model.AsyncQueryId; -import org.opensearch.sql.spark.execution.session.InteractiveSessionTest; -import org.opensearch.sql.spark.execution.session.Session; -import org.opensearch.sql.spark.execution.session.SessionId; -import org.opensearch.sql.spark.execution.session.SessionManager; -import org.opensearch.sql.spark.execution.session.SessionState; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.rest.model.LangType; -import org.opensearch.test.OpenSearchIntegTestCase; - -public class StatementTest extends OpenSearchIntegTestCase { - - 
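  // Editor's note: this deleted suite exercised the Statement lifecycle end to end —
  // open/cancel, conflict detection on duplicate statement ids, refusal to cancel
  // statements already in a terminal state (success/failed/cancelled), and submission
  // rules against session states (running, not-started, dead, fail, deleted).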
private static final String DS_NAME = "mys3"; - private static final String indexName = DATASOURCE_TO_REQUEST_INDEX.apply(DS_NAME); - - private StateStore stateStore; - private InteractiveSessionTest.TestEMRServerlessClient emrsClient = - new InteractiveSessionTest.TestEMRServerlessClient(); - - @Before - public void setup() { - stateStore = new StateStore(client(), clusterService()); - } - - @After - public void clean() { - if (clusterService().state().routingTable().hasIndex(indexName)) { - client().admin().indices().delete(new DeleteIndexRequest(indexName)).actionGet(); - } - } - - @Test - public void openThenCancelStatement() { - Statement st = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(new StatementId("statementId")) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - - // submit statement - TestStatement testStatement = testStatement(st, stateStore); - testStatement - .open() - .assertSessionState(WAITING) - .assertStatementId(new StatementId("statementId")); - - // close statement - testStatement.cancel().assertSessionState(CANCELLED); - } - - @Test - public void openFailedBecauseConflict() { - Statement st = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(new StatementId("statementId")) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - st.open(); - - // open statement with same statement id - Statement dupSt = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(new StatementId("statementId")) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - IllegalStateException exception = assertThrows(IllegalStateException.class, dupSt::open); - assertEquals("statement already exist. statementId=statementId", exception.getMessage()); - } - - @Test - public void cancelNotExistStatement() { - StatementId stId = new StatementId("statementId"); - Statement st = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(stId) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - st.open(); - - client().delete(new DeleteRequest(indexName, stId.getId())).actionGet(); - - IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); - assertEquals( - String.format("cancel statement failed. no statement found. 
statement: %s.", stId), - exception.getMessage()); - } - - @Test - public void cancelFailedBecauseOfConflict() { - StatementId stId = new StatementId("statementId"); - Statement st = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(stId) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - st.open(); - - StatementModel running = - updateStatementState(stateStore, DS_NAME).apply(st.getStatementModel(), CANCELLED); - - assertEquals(StatementState.CANCELLED, running.getStatementState()); - - // cancel conflict - IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); - assertEquals( - String.format( - "cancel statement failed. current statementState: CANCELLED " + "statement: %s.", stId), - exception.getMessage()); - } - - @Test - public void cancelSuccessStatementFailed() { - StatementId stId = new StatementId("statementId"); - Statement st = createStatement(stId); - - // update to running state - StatementModel model = st.getStatementModel(); - st.setStatementModel( - StatementModel.copyWithState( - st.getStatementModel(), - StatementState.SUCCESS, - model.getSeqNo(), - model.getPrimaryTerm())); - - // cancel conflict - IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); - assertEquals( - String.format("can't cancel statement in success state. statement: %s.", stId), - exception.getMessage()); - } - - @Test - public void cancelFailedStatementFailed() { - StatementId stId = new StatementId("statementId"); - Statement st = createStatement(stId); - - // update to running state - StatementModel model = st.getStatementModel(); - st.setStatementModel( - StatementModel.copyWithState( - st.getStatementModel(), - StatementState.FAILED, - model.getSeqNo(), - model.getPrimaryTerm())); - - // cancel conflict - IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); - assertEquals( - String.format("can't cancel statement in failed state. statement: %s.", stId), - exception.getMessage()); - } - - @Test - public void cancelCancelledStatementFailed() { - StatementId stId = new StatementId("statementId"); - Statement st = createStatement(stId); - - // update to running state - StatementModel model = st.getStatementModel(); - st.setStatementModel( - StatementModel.copyWithState( - st.getStatementModel(), CANCELLED, model.getSeqNo(), model.getPrimaryTerm())); - - // cancel conflict - IllegalStateException exception = assertThrows(IllegalStateException.class, st::cancel); - assertEquals( - String.format("can't cancel statement in cancelled state. 
statement: %s.", stId), - exception.getMessage()); - } - - @Test - public void cancelRunningStatementSuccess() { - Statement st = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(new StatementId("statementId")) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - - // submit statement - TestStatement testStatement = testStatement(st, stateStore); - testStatement - .open() - .assertSessionState(WAITING) - .assertStatementId(new StatementId("statementId")); - - testStatement.run(); - - // close statement - testStatement.cancel().assertSessionState(CANCELLED); - } - - @Test - public void submitStatementInRunningSession() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - - // App change state to running - updateSessionState(stateStore, DS_NAME).apply(session.getSessionModel(), SessionState.RUNNING); - - StatementId statementId = session.submit(queryRequest()); - assertFalse(statementId.getId().isEmpty()); - } - - @Test - public void submitStatementInNotStartedState() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - - StatementId statementId = session.submit(queryRequest()); - assertFalse(statementId.getId().isEmpty()); - } - - @Test - public void failToSubmitStatementInDeadState() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - - updateSessionState(stateStore, DS_NAME).apply(session.getSessionModel(), SessionState.DEAD); - - IllegalStateException exception = - assertThrows(IllegalStateException.class, () -> session.submit(queryRequest())); - assertEquals( - "can't submit statement, session should not be in end state, current session state is:" - + " dead", - exception.getMessage()); - } - - @Test - public void failToSubmitStatementInFailState() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - - updateSessionState(stateStore, DS_NAME).apply(session.getSessionModel(), SessionState.FAIL); - - IllegalStateException exception = - assertThrows(IllegalStateException.class, () -> session.submit(queryRequest())); - assertEquals( - "can't submit statement, session should not be in end state, current session state is:" - + " fail", - exception.getMessage()); - } - - @Test - public void newStatementFieldAssert() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - StatementId statementId = session.submit(queryRequest()); - Optional statement = session.get(statementId); - - assertTrue(statement.isPresent()); - assertEquals(session.getSessionId(), statement.get().getSessionId()); - assertEquals("appId", statement.get().getApplicationId()); - assertEquals("jobId", statement.get().getJobId()); - assertEquals(statementId, statement.get().getStatementId()); - assertEquals(WAITING, statement.get().getStatementState()); - assertEquals(LangType.SQL, statement.get().getLangType()); - assertEquals("select 1", statement.get().getQuery()); - } - - @Test - public void failToSubmitStatementInDeletedSession() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - - // other's 
delete session - client() - .delete(new DeleteRequest(indexName, session.getSessionId().getSessionId())) - .actionGet(); - - IllegalStateException exception = - assertThrows(IllegalStateException.class, () -> session.submit(queryRequest())); - assertEquals("session does not exist. " + session.getSessionId(), exception.getMessage()); - } - - @Test - public void getStatementSuccess() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - // App change state to running - updateSessionState(stateStore, DS_NAME).apply(session.getSessionModel(), SessionState.RUNNING); - StatementId statementId = session.submit(queryRequest()); - - Optional statement = session.get(statementId); - assertTrue(statement.isPresent()); - assertEquals(WAITING, statement.get().getStatementState()); - assertEquals(statementId, statement.get().getStatementId()); - } - - @Test - public void getStatementNotExist() { - Session session = - new SessionManager(stateStore, emrsClient, sessionSetting(false)) - .createSession(createSessionRequest()); - // App change state to running - updateSessionState(stateStore, DS_NAME).apply(session.getSessionModel(), SessionState.RUNNING); - - Optional statement = session.get(StatementId.newStatementId("not-exist-id")); - assertFalse(statement.isPresent()); - } - - @RequiredArgsConstructor - static class TestStatement { - private final Statement st; - private final StateStore stateStore; - - public static TestStatement testStatement(Statement st, StateStore stateStore) { - return new TestStatement(st, stateStore); - } - - public TestStatement assertSessionState(StatementState expected) { - assertEquals(expected, st.getStatementModel().getStatementState()); - - Optional model = - getStatement(stateStore, DS_NAME).apply(st.getStatementId().getId()); - assertTrue(model.isPresent()); - assertEquals(expected, model.get().getStatementState()); - - return this; - } - - public TestStatement assertStatementId(StatementId expected) { - assertEquals(expected, st.getStatementModel().getStatementId()); - - Optional model = - getStatement(stateStore, DS_NAME).apply(st.getStatementId().getId()); - assertTrue(model.isPresent()); - assertEquals(expected, model.get().getStatementId()); - return this; - } - - public TestStatement open() { - st.open(); - return this; - } - - public TestStatement cancel() { - st.cancel(); - return this; - } - - public TestStatement run() { - StatementModel model = - updateStatementState(stateStore, DS_NAME).apply(st.getStatementModel(), RUNNING); - st.setStatementModel(model); - return this; - } - } - - private QueryRequest queryRequest() { - return new QueryRequest(AsyncQueryId.newAsyncQueryId(DS_NAME), LangType.SQL, "select 1"); - } - - private Statement createStatement(StatementId stId) { - Statement st = - Statement.builder() - .sessionId(new SessionId("sessionId")) - .applicationId("appId") - .jobId("jobId") - .statementId(stId) - .langType(LangType.SQL) - .datasourceName(DS_NAME) - .query("query") - .queryId("statementId") - .stateStore(stateStore) - .build(); - st.open(); - return st; - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImplTest.java b/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImplTest.java index 4d809c31dc..b0c8491b0b 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImplTest.java +++ 
b/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexMetadataReaderImplTest.java @@ -25,8 +25,7 @@ import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; +import org.opensearch.sql.spark.dispatcher.model.IndexDetails; @ExtendWith(MockitoExtension.class) public class FlintIndexMetadataReaderImplTest { @@ -45,12 +44,12 @@ void testGetJobIdFromFlintSkippingIndexMetadata() { FlintIndexMetadataReader flintIndexMetadataReader = new FlintIndexMetadataReaderImpl(client); FlintIndexMetadata indexMetadata = flintIndexMetadataReader.getFlintIndexMetadata( - IndexQueryDetails.builder() - .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs")) - .autoRefresh(false) - .indexQueryActionType(IndexQueryActionType.DROP) - .indexType(FlintIndexType.SKIPPING) - .build()); + new IndexDetails( + null, + new FullyQualifiedTableName("mys3.default.http_logs"), + false, + true, + FlintIndexType.SKIPPING)); Assertions.assertEquals("00fdmvv9hp8u0o0q", indexMetadata.getJobId()); } @@ -65,13 +64,12 @@ void testGetJobIdFromFlintCoveringIndexMetadata() { FlintIndexMetadataReader flintIndexMetadataReader = new FlintIndexMetadataReaderImpl(client); FlintIndexMetadata indexMetadata = flintIndexMetadataReader.getFlintIndexMetadata( - IndexQueryDetails.builder() - .indexName("cv1") - .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs")) - .autoRefresh(false) - .indexQueryActionType(IndexQueryActionType.DROP) - .indexType(FlintIndexType.COVERING) - .build()); + new IndexDetails( + "cv1", + new FullyQualifiedTableName("mys3.default.http_logs"), + false, + true, + FlintIndexType.COVERING)); Assertions.assertEquals("00fdmvv9hp8u0o0q", indexMetadata.getJobId()); } @@ -88,17 +86,34 @@ void testGetJobIDWithNPEException() { IllegalArgumentException.class, () -> flintIndexMetadataReader.getFlintIndexMetadata( - IndexQueryDetails.builder() - .indexName("cv1") - .fullyQualifiedTableName( - new FullyQualifiedTableName("mys3.default.http_logs")) - .autoRefresh(false) - .indexQueryActionType(IndexQueryActionType.DROP) - .indexType(FlintIndexType.COVERING) - .build())); + new IndexDetails( + "cv1", + new FullyQualifiedTableName("mys3.default.http_logs"), + false, + true, + FlintIndexType.COVERING))); Assertions.assertEquals("Provided Index doesn't exist", illegalArgumentException.getMessage()); } + @SneakyThrows + @Test + void testGetJobIdFromUnsupportedIndex() { + FlintIndexMetadataReader flintIndexMetadataReader = new FlintIndexMetadataReaderImpl(client); + UnsupportedOperationException unsupportedOperationException = + Assertions.assertThrows( + UnsupportedOperationException.class, + () -> + flintIndexMetadataReader.getFlintIndexMetadata( + new IndexDetails( + "cv1", + new FullyQualifiedTableName("mys3.default.http_logs"), + false, + true, + FlintIndexType.MATERIALIZED_VIEW))); + Assertions.assertEquals( + "Unsupported Index Type : MATERIALIZED_VIEW", unsupportedOperationException.getMessage()); + } + @SneakyThrows public void mockNodeClientIndicesMappings(String indexName, String mappings) { GetMappingsResponse mockResponse = mock(GetMappingsResponse.class); diff --git a/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java 
b/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java deleted file mode 100644 index acd76fa11a..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/flint/FlintIndexStateTest.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint; - -import static org.junit.jupiter.api.Assertions.*; -import static org.opensearch.sql.spark.flint.FlintIndexState.UNKNOWN; - -import org.junit.jupiter.api.Test; - -class FlintIndexStateTest { - @Test - public void unknownState() { - assertEquals(UNKNOWN, FlintIndexState.fromString("noSupported")); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java b/spark/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java deleted file mode 100644 index 6299dee0ca..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/flint/IndexQueryDetailsTest.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -import org.junit.jupiter.api.Test; -import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType; -import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails; - -public class IndexQueryDetailsTest { - @Test - public void skippingIndexName() { - assertEquals( - "flint_mys3_default_http_logs_skipping_index", - IndexQueryDetails.builder() - .indexName("invalid") - .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs")) - .autoRefresh(false) - .indexQueryActionType(IndexQueryActionType.DROP) - .indexType(FlintIndexType.SKIPPING) - .build() - .openSearchIndexName()); - } - - @Test - public void coveringIndexName() { - assertEquals( - "flint_mys3_default_http_logs_idx_status_index", - IndexQueryDetails.builder() - .indexName("idx_status") - .fullyQualifiedTableName(new FullyQualifiedTableName("mys3.default.http_logs")) - .indexType(FlintIndexType.COVERING) - .build() - .openSearchIndexName()); - } - - @Test - public void materializedViewIndexName() { - assertEquals( - "flint_mys3_default_http_logs_metrics", - IndexQueryDetails.builder() - .mvName("mys3.default.http_logs_metrics") - .indexType(FlintIndexType.MATERIALIZED_VIEW) - .build() - .openSearchIndexName()); - } - - @Test - public void materializedViewIndexNameWithBackticks() { - assertEquals( - "flint_mys3_default_http_logs_metrics", - IndexQueryDetails.builder() - .mvName("`mys3`.`default`.`http_logs_metrics`") - .indexType(FlintIndexType.MATERIALIZED_VIEW) - .build() - .openSearchIndexName()); - } - - @Test - public void materializedViewIndexNameWithDots() { - assertEquals( - "flint_mys3_default_http_logs_metrics.1026", - IndexQueryDetails.builder() - .mvName("`mys3`.`default`.`http_logs_metrics.1026`") - .indexType(FlintIndexType.MATERIALIZED_VIEW) - .build() - .openSearchIndexName()); - } - - @Test - public void materializedViewIndexNameWithDotsInCatalogName() { - // FIXME: should not use ctx.getText which is hard to split - assertEquals( - "flint_mys3_1026_default`.`http_logs_metrics", - IndexQueryDetails.builder() - .mvName("`mys3.1026`.`default`.`http_logs_metrics`") - .indexType(FlintIndexType.MATERIALIZED_VIEW) - .build() - .openSearchIndexName()); - } - - @Test - public void materializedViewIndexNameNotFullyQualified() 
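  // Editor's note: the assertions above pin down openSearchIndexName()'s convention:
  // flint_<datasource>_<schema>_<table>_skipping_index for skipping indexes,
  // flint_<datasource>_<schema>_<table>_<indexname>_index for covering indexes, and
  // flint_<mv name with separators normalized to underscores> for materialized views
  // (backticked segments keep internal dots, as the http_logs_metrics.1026 case shows).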
{ - // Normally this should not happen and can add precondition check once confirmed. - assertEquals( - "flint_default_http_logs_metrics", - IndexQueryDetails.builder() - .mvName("default.http_logs_metrics") - .indexType(FlintIndexType.MATERIALIZED_VIEW) - .build() - .openSearchIndexName()); - - assertEquals( - "flint_http_logs_metrics", - IndexQueryDetails.builder() - .mvName("http_logs_metrics") - .indexType(FlintIndexType.MATERIALIZED_VIEW) - .build() - .openSearchIndexName()); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java b/spark/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java deleted file mode 100644 index 5b3c1d74db..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/flint/operation/FlintIndexOpTest.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.flint.operation; - -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.when; -import static org.opensearch.sql.spark.asyncquery.AsyncQueryExecutorServiceSpec.DATASOURCE; - -import java.util.Optional; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.flint.FlintIndexMetadata; -import org.opensearch.sql.spark.flint.FlintIndexState; -import org.opensearch.sql.spark.flint.FlintIndexStateModel; - -@ExtendWith(MockitoExtension.class) -class FlintIndexOpTest { - @Mock private StateStore stateStore; - - @Mock private FlintIndexMetadata flintIndexMetadata; - - @Mock private FlintIndexStateModel model; - - @Test - public void beginFailed() { - when(stateStore.updateState(any(), any(), any(), any())).thenThrow(RuntimeException.class); - when(stateStore.get(any(), any(), any())).thenReturn(Optional.of(model)); - when(model.getIndexState()).thenReturn(FlintIndexState.ACTIVE); - when(flintIndexMetadata.getLatestId()).thenReturn(Optional.of("latestId")); - - FlintIndexOpDelete indexOp = new FlintIndexOpDelete(stateStore, DATASOURCE); - IllegalStateException exception = - assertThrows(IllegalStateException.class, () -> indexOp.apply(flintIndexMetadata)); - Assertions.assertEquals( - "begin failed. target transitioning state: [DELETING]", exception.getMessage()); - } - - @Test - public void commitFailed() { - when(stateStore.updateState(any(), any(), any(), any())) - .thenReturn(model) - .thenThrow(RuntimeException.class); - when(stateStore.get(any(), any(), any())).thenReturn(Optional.of(model)); - when(model.getIndexState()).thenReturn(FlintIndexState.EMPTY); - when(flintIndexMetadata.getLatestId()).thenReturn(Optional.of("latestId")); - - FlintIndexOpDelete indexOp = new FlintIndexOpDelete(stateStore, DATASOURCE); - IllegalStateException exception = - assertThrows(IllegalStateException.class, () -> indexOp.apply(flintIndexMetadata)); - Assertions.assertEquals( - "commit failed. 
target stable state: [DELETED]", exception.getMessage()); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java b/spark/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java deleted file mode 100644 index 558f7f7b3a..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/leasemanager/DefaultLeaseManagerTest.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.leasemanager; - -import static org.junit.jupiter.api.Assertions.assertTrue; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.opensearch.sql.common.setting.Settings; -import org.opensearch.sql.spark.dispatcher.model.JobType; -import org.opensearch.sql.spark.execution.statestore.StateStore; -import org.opensearch.sql.spark.leasemanager.model.LeaseRequest; - -@ExtendWith(MockitoExtension.class) -class DefaultLeaseManagerTest { - @Mock private Settings settings; - - @Mock private StateStore stateStore; - - @Test - public void concurrentSessionRuleOnlyApplyToInteractiveQuery() { - assertTrue( - new DefaultLeaseManager.ConcurrentSessionRule(settings, stateStore) - .test(new LeaseRequest(JobType.BATCH, "mys3"))); - assertTrue( - new DefaultLeaseManager.ConcurrentSessionRule(settings, stateStore) - .test(new LeaseRequest(JobType.STREAMING, "mys3"))); - } - - @Test - public void concurrentRefreshRuleOnlyNotAppliedToInteractiveQuery() { - assertTrue( - new DefaultLeaseManager.ConcurrentRefreshJobRule(settings, stateStore) - .test(new LeaseRequest(JobType.INTERACTIVE, "mys3"))); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/response/AsyncQueryExecutionResponseReaderTest.java b/spark/src/test/java/org/opensearch/sql/spark/response/AsyncQueryExecutionResponseReaderTest.java index bbaf6f0f59..fefc951dd7 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/response/AsyncQueryExecutionResponseReaderTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/response/AsyncQueryExecutionResponseReaderTest.java @@ -10,8 +10,8 @@ import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; -import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; import static org.opensearch.sql.spark.constants.TestConstants.EMR_JOB_ID; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_RESPONSE_BUFFER_INDEX_NAME; import java.util.Map; import org.apache.lucene.search.TotalHits; @@ -79,7 +79,7 @@ public void testInvalidSearchResponse() { () -> jobExecutionResponseReader.getResultFromOpensearchIndex(EMR_JOB_ID, null)); Assertions.assertEquals( "Fetching result from " - + DEFAULT_RESULT_INDEX + + SPARK_RESPONSE_BUFFER_INDEX_NAME + " index failed with status : " + RestStatus.NO_CONTENT, exception.getMessage()); diff --git a/spark/src/test/java/org/opensearch/sql/spark/response/SparkResponseTest.java b/spark/src/test/java/org/opensearch/sql/spark/response/SparkResponseTest.java index bad26a2792..e234454021 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/response/SparkResponseTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/response/SparkResponseTest.java @@ -9,8 +9,8 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; -import static org.opensearch.sql.datasource.model.DataSourceMetadata.DEFAULT_RESULT_INDEX; import static org.opensearch.sql.spark.constants.TestConstants.EMR_CLUSTER_ID; +import static org.opensearch.sql.spark.data.constants.SparkConstants.SPARK_RESPONSE_BUFFER_INDEX_NAME; import java.util.Map; import org.apache.lucene.search.TotalHits; @@ -69,7 +69,7 @@ public void testInvalidSearchResponse() { assertThrows(RuntimeException.class, () -> sparkResponse.getResultFromOpensearchIndex()); Assertions.assertEquals( "Fetching result from " - + DEFAULT_RESULT_INDEX + + SPARK_RESPONSE_BUFFER_INDEX_NAME + " index failed with status : " + RestStatus.NO_CONTENT, exception.getMessage()); diff --git a/spark/src/test/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequestTest.java b/spark/src/test/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequestTest.java deleted file mode 100644 index dd634d6055..0000000000 --- a/spark/src/test/java/org/opensearch/sql/spark/rest/model/CreateAsyncQueryRequestTest.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright OpenSearch Contributors - * SPDX-License-Identifier: Apache-2.0 - */ - -package org.opensearch.sql.spark.rest.model; - -import java.io.IOException; -import org.junit.jupiter.api.Assertions; -import org.junit.jupiter.api.Test; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentParser; - -public class CreateAsyncQueryRequestTest { - - @Test - public void fromXContent() throws IOException { - String request = - "{\n" - + " \"datasource\": \"my_glue\",\n" - + " \"lang\": \"sql\",\n" - + " \"query\": \"select 1\"\n" - + "}"; - CreateAsyncQueryRequest queryRequest = - CreateAsyncQueryRequest.fromXContentParser(xContentParser(request)); - Assertions.assertEquals("my_glue", queryRequest.getDatasource()); - Assertions.assertEquals(LangType.SQL, queryRequest.getLang()); - Assertions.assertEquals("select 1", queryRequest.getQuery()); - } - - @Test - public void fromXContentWithSessionId() throws IOException { - String request = - "{\n" - + " \"datasource\": \"my_glue\",\n" - + " \"lang\": \"sql\",\n" - + " \"query\": \"select 1\",\n" - + " \"sessionId\": \"00fdjevgkf12s00q\"\n" - + "}"; - CreateAsyncQueryRequest queryRequest = - CreateAsyncQueryRequest.fromXContentParser(xContentParser(request)); - Assertions.assertEquals("00fdjevgkf12s00q", queryRequest.getSessionId()); - } - - private XContentParser xContentParser(String request) throws IOException { - return XContentType.JSON - .xContent() - .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, request); - } -} diff --git a/spark/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java b/spark/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java index 36060d3850..8599e4b88e 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/transport/TransportCreateAsyncQueryRequestActionTest.java @@ -11,7 +11,6 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import static org.opensearch.sql.spark.constants.TestConstants.MOCK_SESSION_ID; import java.util.HashSet; import 
org.junit.jupiter.api.Assertions; @@ -62,7 +61,7 @@ public void testDoExecute() { CreateAsyncQueryActionRequest request = new CreateAsyncQueryActionRequest(createAsyncQueryRequest); when(jobExecutorService.createAsyncQuery(createAsyncQueryRequest)) - .thenReturn(new CreateAsyncQueryResponse("123", null)); + .thenReturn(new CreateAsyncQueryResponse("123")); action.doExecute(task, request, actionListener); Mockito.verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); CreateAsyncQueryActionResponse createAsyncQueryActionResponse = @@ -71,24 +70,6 @@ public void testDoExecute() { "{\n" + " \"queryId\": \"123\"\n" + "}", createAsyncQueryActionResponse.getResult()); } - @Test - public void testDoExecuteWithSessionId() { - CreateAsyncQueryRequest createAsyncQueryRequest = - new CreateAsyncQueryRequest( - "source = my_glue.default.alb_logs", "my_glue", LangType.SQL, MOCK_SESSION_ID); - CreateAsyncQueryActionRequest request = - new CreateAsyncQueryActionRequest(createAsyncQueryRequest); - when(jobExecutorService.createAsyncQuery(createAsyncQueryRequest)) - .thenReturn(new CreateAsyncQueryResponse("123", MOCK_SESSION_ID)); - action.doExecute(task, request, actionListener); - Mockito.verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); - CreateAsyncQueryActionResponse createAsyncQueryActionResponse = - createJobActionResponseArgumentCaptor.getValue(); - Assertions.assertEquals( - "{\n" + " \"queryId\": \"123\",\n" + " \"sessionId\": \"s-0123456\"\n" + "}", - createAsyncQueryActionResponse.getResult()); - } - @Test public void testDoExecuteWithException() { CreateAsyncQueryRequest createAsyncQueryRequest = diff --git a/spark/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java b/spark/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java index 34f10b0083..21a213c7c2 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/transport/TransportGetAsyncQueryResultActionTest.java @@ -63,7 +63,7 @@ public void setUp() { public void testDoExecute() { GetAsyncQueryResultActionRequest request = new GetAsyncQueryResultActionRequest("jobId"); AsyncQueryExecutionResponse asyncQueryExecutionResponse = - new AsyncQueryExecutionResponse("IN_PROGRESS", null, null, null, null); + new AsyncQueryExecutionResponse("IN_PROGRESS", null, null, null); when(jobExecutorService.getAsyncQueryResults("jobId")).thenReturn(asyncQueryExecutionResponse); action.doExecute(task, request, actionListener); verify(actionListener).onResponse(createJobActionResponseArgumentCaptor.capture()); @@ -89,7 +89,6 @@ public void testDoExecuteWithSuccessResponse() { Arrays.asList( tupleValue(ImmutableMap.of("name", "John", "age", 20)), tupleValue(ImmutableMap.of("name", "Smith", "age", 30))), - null, null); when(jobExecutorService.getAsyncQueryResults("jobId")).thenReturn(asyncQueryExecutionResponse); action.doExecute(task, request, actionListener); diff --git a/spark/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java b/spark/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java index f5226206ab..af892fa097 100644 --- a/spark/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java +++ b/spark/src/test/java/org/opensearch/sql/spark/utils/SQLQueryUtilsTest.java @@ -6,7 +6,6 @@ package org.opensearch.sql.spark.utils; import static 
-import static org.opensearch.sql.spark.utils.SQLQueryUtilsTest.IndexQuery.mv;
 import static org.opensearch.sql.spark.utils.SQLQueryUtilsTest.IndexQuery.skippingIndex;

 import lombok.Getter;
@@ -15,9 +14,7 @@
 import org.junit.jupiter.api.extension.ExtendWith;
 import org.mockito.junit.jupiter.MockitoExtension;
 import org.opensearch.sql.spark.dispatcher.model.FullyQualifiedTableName;
-import org.opensearch.sql.spark.dispatcher.model.IndexQueryActionType;
-import org.opensearch.sql.spark.dispatcher.model.IndexQueryDetails;
-import org.opensearch.sql.spark.flint.FlintIndexType;
+import org.opensearch.sql.spark.dispatcher.model.IndexDetails;

 @ExtendWith(MockitoExtension.class)
 public class SQLQueryUtilsTest {
@@ -27,13 +24,13 @@ void testExtractionOfTableNameFromSQLQueries() {
     String sqlQuery = "select * from my_glue.default.http_logs";
     FullyQualifiedTableName fullyQualifiedTableName =
         SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertEquals("my_glue", fullyQualifiedTableName.getDatasourceName());
     Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
     Assertions.assertEquals("http_logs", fullyQualifiedTableName.getTableName());

     sqlQuery = "select * from my_glue.db.http_logs";
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
     Assertions.assertEquals("my_glue", fullyQualifiedTableName.getDatasourceName());
     Assertions.assertEquals("db", fullyQualifiedTableName.getSchemaName());
@@ -41,28 +38,28 @@ void testExtractionOfTableNameFromSQLQueries() {

     sqlQuery = "select * from my_glue.http_logs";
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertEquals("my_glue", fullyQualifiedTableName.getSchemaName());
     Assertions.assertNull(fullyQualifiedTableName.getDatasourceName());
     Assertions.assertEquals("http_logs", fullyQualifiedTableName.getTableName());

     sqlQuery = "select * from http_logs";
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertNull(fullyQualifiedTableName.getDatasourceName());
     Assertions.assertNull(fullyQualifiedTableName.getSchemaName());
     Assertions.assertEquals("http_logs", fullyQualifiedTableName.getTableName());

     sqlQuery = "DROP TABLE myS3.default.alb_logs";
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertEquals("myS3", fullyQualifiedTableName.getDatasourceName());
     Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
     Assertions.assertEquals("alb_logs", fullyQualifiedTableName.getTableName());

     sqlQuery = "DESCRIBE TABLE myS3.default.alb_logs";
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertEquals("myS3", fullyQualifiedTableName.getDatasourceName());
     Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
     Assertions.assertEquals("alb_logs", fullyQualifiedTableName.getTableName());
@@ -75,7 +72,7 @@ void testExtractionOfTableNameFromSQLQueries() {
             + "STORED AS file_format\n"
             + "LOCATION { 's3://bucket/folder/' }";
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertEquals("myS3", fullyQualifiedTableName.getDatasourceName());
     Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
     Assertions.assertEquals("alb_logs", fullyQualifiedTableName.getTableName());
@@ -94,7 +91,7 @@ void testErrorScenarios() {

     sqlQuery = "DESCRIBE TABLE FROM myS3.default.alb_logs";
     fullyQualifiedTableName = SQLQueryUtils.extractFullyQualifiedTableName(sqlQuery);
-    Assertions.assertFalse(SQLQueryUtils.isFlintExtensionQuery(sqlQuery));
+    Assertions.assertFalse(SQLQueryUtils.isIndexQuery(sqlQuery));
     Assertions.assertEquals("FROM", fullyQualifiedTableName.getFullyQualifiedName());
     Assertions.assertNull(fullyQualifiedTableName.getSchemaName());
     Assertions.assertEquals("FROM", fullyQualifiedTableName.getTableName());
@@ -102,207 +99,63 @@ void testErrorScenarios() {
   }

   @Test
-  void testExtractionFromFlintSkippingIndexQueries() {
-    String[] createSkippingIndexQueries = {
-      "CREATE SKIPPING INDEX ON myS3.default.alb_logs (l_orderkey VALUE_SET)",
-      "CREATE SKIPPING INDEX IF NOT EXISTS"
-          + " ON myS3.default.alb_logs (l_orderkey VALUE_SET) "
-          + " WITH (auto_refresh = true)",
-      "CREATE SKIPPING INDEX ON myS3.default.alb_logs(l_orderkey VALUE_SET)"
-          + " WITH (auto_refresh = true)",
-      "CREATE SKIPPING INDEX ON myS3.default.alb_logs(l_orderkey VALUE_SET) "
-          + " WHERE elb_status_code = 500 "
-          + " WITH (auto_refresh = true)"
-    };
-
-    for (String query : createSkippingIndexQueries) {
-      Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(query), "Failed query: " + query);
-      IndexQueryDetails indexQueryDetails = SQLQueryUtils.extractIndexDetails(query);
-      FullyQualifiedTableName fullyQualifiedTableName =
-          indexQueryDetails.getFullyQualifiedTableName();
-
-      Assertions.assertNull(indexQueryDetails.getIndexName());
-      Assertions.assertEquals("myS3", fullyQualifiedTableName.getDatasourceName());
-      Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
-      Assertions.assertEquals("alb_logs", fullyQualifiedTableName.getTableName());
-    }
-  }
-
-  @Test
-  void testExtractionFromFlintCoveringIndexQueries() {
-    String[] createCoveredIndexQueries = {
-      "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity)",
-      "CREATE INDEX IF NOT EXISTS elb_and_requestUri "
-          + " ON myS3.default.alb_logs(l_orderkey, l_quantity) "
-          + " WITH (auto_refresh = true)",
-      "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity)"
-          + " WITH (auto_refresh = true)",
-      "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity) "
-          + " WHERE elb_status_code = 500 "
-          + " WITH (auto_refresh = true)"
-    };
-
-    for (String query : createCoveredIndexQueries) {
-      Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(query), "Failed query: " + query);
-      IndexQueryDetails indexQueryDetails = SQLQueryUtils.extractIndexDetails(query);
-      FullyQualifiedTableName fullyQualifiedTableName =
-          indexQueryDetails.getFullyQualifiedTableName();
-
-      Assertions.assertEquals("elb_and_requestUri", indexQueryDetails.getIndexName());
-      Assertions.assertEquals("myS3", fullyQualifiedTableName.getDatasourceName());
-      Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
-      Assertions.assertEquals("alb_logs", fullyQualifiedTableName.getTableName());
-    }
-  }
-
-  @Test
-  void testExtractionFromFlintMVQuery() {
+  void testExtractionFromFlintIndexQueries() {
     String createCoveredIndexQuery =
-        "CREATE MATERIALIZED VIEW mv_1 AS query=select * from my_glue.default.logs WITH"
+        "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, l_quantity) WITH"
             + " (auto_refresh = true)";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(createCoveredIndexQuery));
-    IndexQueryDetails indexQueryDetails =
-        SQLQueryUtils.extractIndexDetails(createCoveredIndexQuery);
-    FullyQualifiedTableName fullyQualifiedTableName =
-        indexQueryDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexQueryDetails.getIndexName());
-    Assertions.assertNull(fullyQualifiedTableName);
-    Assertions.assertEquals("mv_1", indexQueryDetails.getMvName());
-  }
-
-  @Test
-  void testDescIndex() {
-    String descSkippingIndex = "DESC SKIPPING INDEX ON mys3.default.http_logs";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(descSkippingIndex));
-    IndexQueryDetails indexDetails = SQLQueryUtils.extractIndexDetails(descSkippingIndex);
-    FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexDetails.getIndexName());
-    Assertions.assertNotNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.SKIPPING, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.DESCRIBE, indexDetails.getIndexQueryActionType());
-
-    String descCoveringIndex = "DESC INDEX cv1 ON mys3.default.http_logs";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(descCoveringIndex));
-    indexDetails = SQLQueryUtils.extractIndexDetails(descCoveringIndex);
-    fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertEquals("cv1", indexDetails.getIndexName());
-    Assertions.assertNotNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.COVERING, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.DESCRIBE, indexDetails.getIndexQueryActionType());
-
-    String descMv = "DESC MATERIALIZED VIEW mv1";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(descMv));
-    indexDetails = SQLQueryUtils.extractIndexDetails(descMv);
-    fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexDetails.getIndexName());
-    Assertions.assertEquals("mv1", indexDetails.getMvName());
-    Assertions.assertNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.MATERIALIZED_VIEW, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.DESCRIBE, indexDetails.getIndexQueryActionType());
-  }
-
-  @Test
-  void testShowIndex() {
-    String showCoveringIndex = " SHOW INDEX ON myS3.default.http_logs";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(showCoveringIndex));
-    IndexQueryDetails indexDetails = SQLQueryUtils.extractIndexDetails(showCoveringIndex);
+    Assertions.assertTrue(SQLQueryUtils.isIndexQuery(createCoveredIndexQuery));
+    IndexDetails indexDetails = SQLQueryUtils.extractIndexDetails(createCoveredIndexQuery);
     FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexDetails.getIndexName());
-    Assertions.assertNull(indexDetails.getMvName());
-    Assertions.assertNotNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.COVERING, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.SHOW, indexDetails.getIndexQueryActionType());
-
-    String showMV = "SHOW MATERIALIZED VIEW IN my_glue.default";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(showMV));
-    indexDetails = SQLQueryUtils.extractIndexDetails(showMV);
-    fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexDetails.getIndexName());
-    Assertions.assertNull(indexDetails.getMvName());
-    Assertions.assertNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.MATERIALIZED_VIEW, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.SHOW, indexDetails.getIndexQueryActionType());
-  }
-
-  @Test
-  void testRefreshIndex() {
-    String refreshSkippingIndex = "REFRESH SKIPPING INDEX ON mys3.default.http_logs";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(refreshSkippingIndex));
-    IndexQueryDetails indexDetails = SQLQueryUtils.extractIndexDetails(refreshSkippingIndex);
-    FullyQualifiedTableName fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexDetails.getIndexName());
-    Assertions.assertNotNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.SKIPPING, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.REFRESH, indexDetails.getIndexQueryActionType());
-
-    String refreshCoveringIndex = "REFRESH INDEX cv1 ON mys3.default.http_logs";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(refreshCoveringIndex));
-    indexDetails = SQLQueryUtils.extractIndexDetails(refreshCoveringIndex);
-    fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertEquals("cv1", indexDetails.getIndexName());
-    Assertions.assertNotNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.COVERING, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.REFRESH, indexDetails.getIndexQueryActionType());
-
-    String refreshMV = "REFRESH MATERIALIZED VIEW mv1";
-    Assertions.assertTrue(SQLQueryUtils.isFlintExtensionQuery(refreshMV));
-    indexDetails = SQLQueryUtils.extractIndexDetails(refreshMV);
-    fullyQualifiedTableName = indexDetails.getFullyQualifiedTableName();
-    Assertions.assertNull(indexDetails.getIndexName());
-    Assertions.assertEquals("mv1", indexDetails.getMvName());
-    Assertions.assertNull(fullyQualifiedTableName);
-    Assertions.assertEquals(FlintIndexType.MATERIALIZED_VIEW, indexDetails.getIndexType());
-    Assertions.assertEquals(IndexQueryActionType.REFRESH, indexDetails.getIndexQueryActionType());
+    Assertions.assertEquals("elb_and_requestUri", indexDetails.getIndexName());
+    Assertions.assertEquals("myS3", fullyQualifiedTableName.getDatasourceName());
+    Assertions.assertEquals("default", fullyQualifiedTableName.getSchemaName());
+    Assertions.assertEquals("alb_logs", fullyQualifiedTableName.getTableName());
   }

   /** https://github.com/opensearch-project/sql/issues/2206 */
   @Test
   void testAutoRefresh() {
     Assertions.assertFalse(
-        SQLQueryUtils.extractIndexDetails(skippingIndex().getQuery()).isAutoRefresh());
+        SQLQueryUtils.extractIndexDetails(skippingIndex().getQuery()).getAutoRefresh());
     Assertions.assertFalse(
         SQLQueryUtils.extractIndexDetails(
                 skippingIndex().withProperty("auto_refresh", "false").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
     Assertions.assertTrue(
         SQLQueryUtils.extractIndexDetails(
                 skippingIndex().withProperty("auto_refresh", "true").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
     Assertions.assertTrue(
         SQLQueryUtils.extractIndexDetails(
                 skippingIndex().withProperty("\"auto_refresh\"", "true").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
     Assertions.assertTrue(
         SQLQueryUtils.extractIndexDetails(
                 skippingIndex().withProperty("\"auto_refresh\"", "\"true\"").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
     Assertions.assertFalse(
         SQLQueryUtils.extractIndexDetails(
                 skippingIndex().withProperty("auto_refresh", "1").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
     Assertions.assertFalse(
         SQLQueryUtils.extractIndexDetails(skippingIndex().withProperty("interval", "1").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());

-    Assertions.assertFalse(SQLQueryUtils.extractIndexDetails(index().getQuery()).isAutoRefresh());
+    Assertions.assertFalse(SQLQueryUtils.extractIndexDetails(index().getQuery()).getAutoRefresh());
     Assertions.assertFalse(
         SQLQueryUtils.extractIndexDetails(index().withProperty("auto_refresh", "false").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
     Assertions.assertTrue(
         SQLQueryUtils.extractIndexDetails(index().withProperty("auto_refresh", "true").getQuery())
-            .isAutoRefresh());
-
-    Assertions.assertTrue(
-        SQLQueryUtils.extractIndexDetails(mv().withProperty("auto_refresh", "true").getQuery())
-            .isAutoRefresh());
+            .getAutoRefresh());
   }

   @Getter
@@ -323,11 +176,6 @@ public static IndexQuery index() {
           "CREATE INDEX elb_and_requestUri ON myS3.default.alb_logs(l_orderkey, "
               + "l_quantity)");
     }

-    public static IndexQuery mv() {
-      return new IndexQuery(
-          "CREATE MATERIALIZED VIEW mv_1 AS query=select * from my_glue.default.logs");
-    }
-
     public IndexQuery withProperty(String key, String value) {
       query = String.format("%s with (%s = %s)", query, key, value);
       return this;
diff --git a/spark/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json b/spark/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json
deleted file mode 100644
index 54ed5e05e1..0000000000
--- a/spark/src/test/resources/flint-index-mappings/0.1.1/flint_covering_index.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
-  "_meta": {
-    "kind": "covering",
-    "indexedColumns": [
-      {
-        "columnType": "timestamp",
-        "columnName": "time"
-      },
-      {
-        "columnType": "string",
-        "columnName": "client_ip"
-      },
-      {
-        "columnType": "int",
-        "columnName": "client_port"
-      },
-      {
-        "columnType": "string",
-        "columnName": "request_url"
-      }
-    ],
-    "name": "test",
-    "options": {
-      "auto_refresh": "true",
-      "index_settings": "{\"number_of_shards\":1,\"number_of_replicas\":1}"
-    },
-    "source": "mys3.default.http_logs",
-    "version": "0.1.0",
-    "properties": {
-      "env": {
-        "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p",
-        "SERVERLESS_EMR_JOB_ID": "00fe3gu2tgad000q"
-      }
-    },
-    "latestId": "coveringid"
-  }
-}
diff --git a/spark/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json b/spark/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json
deleted file mode 100644
index 1a9c74806a..0000000000
--- a/spark/src/test/resources/flint-index-mappings/0.1.1/flint_mv.json
+++ /dev/null
@@ -1,30 +0,0 @@
-{
-  "_meta": {
-    "kind": "mv",
-    "indexedColumns": [
-      {
-        "columnType": "timestamp",
-        "columnName": "start.time"
-      },
-      {
-        "columnType": "long",
"columnName": "count" - } - ], - "name": "spark_catalog.default.http_logs_metrics_chen", - "options": { - "auto_refresh": "true", - "checkpoint_location": "s3://flint-data-dp-eu-west-1-beta/data/checkpoint/chen-job-1", - "watermark_delay": "30 Minutes" - }, - "source": "SELECT window.start AS `start.time`, COUNT(*) AS count FROM mys3.default.http_logs WHERE status != 200 GROUP BY TUMBLE(`@timestamp`, '6 Hours')", - "version": "0.1.0", - "properties": { - "env": { - "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", - "SERVERLESS_EMR_JOB_ID": "00fe86mkk5q3u00q" - } - }, - "latestId": "mvid" - } -} diff --git a/spark/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json b/spark/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json deleted file mode 100644 index 5e7c9175fd..0000000000 --- a/spark/src/test/resources/flint-index-mappings/0.1.1/flint_skipping_index.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "_meta": { - "kind": "skipping", - "indexedColumns": [ - { - "columnType": "int", - "kind": "VALUE_SET", - "columnName": "status" - } - ], - "name": "flint_mys3_default_http_logs_skipping_index", - "options": {}, - "source": "mys3.default.http_logs", - "version": "0.1.0", - "properties": { - "env": { - "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", - "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" - } - }, - "latestId": "skippingindexid" - } -} diff --git a/spark/src/test/resources/flint-index-mappings/flint_covering_index.json b/spark/src/test/resources/flint-index-mappings/flint_covering_index.json deleted file mode 100644 index f68a1627ab..0000000000 --- a/spark/src/test/resources/flint-index-mappings/flint_covering_index.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "_meta": { - "kind": "covering", - "indexedColumns": [ - { - "columnType": "timestamp", - "columnName": "time" - }, - { - "columnType": "string", - "columnName": "client_ip" - }, - { - "columnType": "int", - "columnName": "client_port" - }, - { - "columnType": "string", - "columnName": "request_url" - } - ], - "name": "test", - "options": { - "auto_refresh": "true", - "index_settings": "{\"number_of_shards\":1,\"number_of_replicas\":1}" - }, - "source": "mys3.default.http_logs", - "version": "0.1.0", - "properties": { - "env": { - "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", - "SERVERLESS_EMR_JOB_ID": "00fe3gu2tgad000q" - } - } - } -} diff --git a/spark/src/test/resources/flint-index-mappings/flint_mv.json b/spark/src/test/resources/flint-index-mappings/flint_mv.json deleted file mode 100644 index 3d130832b8..0000000000 --- a/spark/src/test/resources/flint-index-mappings/flint_mv.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "_meta": { - "kind": "mv", - "indexedColumns": [ - { - "columnType": "timestamp", - "columnName": "start.time" - }, - { - "columnType": "long", - "columnName": "count" - } - ], - "name": "spark_catalog.default.http_logs_metrics_chen", - "options": { - "auto_refresh": "true", - "checkpoint_location": "s3://flint-data-dp-eu-west-1-beta/data/checkpoint/chen-job-1", - "watermark_delay": "30 Minutes" - }, - "source": "SELECT window.start AS `start.time`, COUNT(*) AS count FROM mys3.default.http_logs WHERE status != 200 GROUP BY TUMBLE(`@timestamp`, '6 Hours')", - "version": "0.1.0", - "properties": { - "env": { - "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", - "SERVERLESS_EMR_JOB_ID": "00fe86mkk5q3u00q" - } - } - }, - "properties": { - "count": { - "type": "long" - }, - "start": { - "properties": { - "time": { - "type": "date", - "format": 
"strict_date_optional_time_nanos" - } - } - } - } -} diff --git a/spark/src/test/resources/flint-index-mappings/flint_skipping_index.json b/spark/src/test/resources/flint-index-mappings/flint_skipping_index.json deleted file mode 100644 index e4bf849f20..0000000000 --- a/spark/src/test/resources/flint-index-mappings/flint_skipping_index.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "_meta": { - "kind": "skipping", - "indexedColumns": [ - { - "columnType": "int", - "kind": "VALUE_SET", - "columnName": "status" - } - ], - "name": "flint_mys3_default_http_logs_skipping_index", - "options": {}, - "source": "mys3.default.http_logs", - "version": "0.1.0", - "properties": { - "env": { - "SERVERLESS_EMR_VIRTUAL_CLUSTER_ID": "00fd777k3k3ls20p", - "SERVERLESS_EMR_JOB_ID": "00fdmvv9hp8u0o0q" - } - } - } -} diff --git a/spark/src/test/resources/query_execution_result_mapping.json b/spark/src/test/resources/query_execution_result_mapping.json deleted file mode 100644 index a76ef77383..0000000000 --- a/spark/src/test/resources/query_execution_result_mapping.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "dynamic": "false", - "properties": { - "applicationId": { - "type": "keyword" - }, - "dataSourceName": { - "type": "keyword" - }, - "error": { - "type": "text" - }, - "jobRunId": { - "type": "keyword" - }, - "queryId": { - "type": "keyword" - }, - "queryRunTime": { - "type": "long" - }, - "queryText": { - "type": "text" - }, - "result": { - "type": "object", - "enabled": false - }, - "schema": { - "type": "object", - "enabled": false - }, - "sessionId": { - "type": "keyword" - }, - "status": { - "type": "keyword" - }, - "updateTime": { - "type": "date", - "format": "strict_date_time||epoch_millis" - } - } -} diff --git a/sql/build.gradle b/sql/build.gradle index a9e1787c27..44dc37cf0f 100644 --- a/sql/build.gradle +++ b/sql/build.gradle @@ -46,7 +46,7 @@ dependencies { implementation "org.antlr:antlr4-runtime:4.7.1" implementation group: 'com.google.guava', name: 'guava', version: '32.0.1-jre' - implementation group: 'org.json', name: 'json', version:'20231013' + implementation group: 'org.json', name: 'json', version:'20230227' implementation project(':common') implementation project(':core') api project(':protocol')