diff --git a/.github/benchmark-configs.json b/.github/benchmark-configs.json index 4ada715d21495..27b7228e1203a 100644 --- a/.github/benchmark-configs.json +++ b/.github/benchmark-configs.json @@ -75,7 +75,7 @@ "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -126,7 +126,7 @@ "SINGLE_NODE_CLUSTER": "true", "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"big5_1_shard\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots\",\"snapshot_name\":\"big5_1_shard_ordered\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -176,7 +176,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.enabled:true", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -194,7 +194,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.mode:all", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard\"}", + "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, @@ -212,7 +212,7 @@ "MIN_DISTRIBUTION": "true", "TEST_WORKLOAD": "big5", "ADDITIONAL_CONFIG": "search.concurrent_segment_search.mode:auto", - "WORKLOAD_PARAMS": "{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard\"}", + "WORKLOAD_PARAMS": 
"{\"snapshot_repo_name\":\"benchmark-workloads-repo-300\",\"snapshot_bucket_name\":\"benchmark-workload-snapshots\",\"snapshot_region\":\"us-east-1\",\"snapshot_base_path\":\"workload-snapshots-300\",\"snapshot_name\":\"big5_1_shard_ordered\"}", "CAPTURE_NODE_STAT": "true", "TEST_PROCEDURE": "restore-from-snapshot" }, diff --git a/.github/workflows/delete_backport_branch.yml b/.github/workflows/delete_backport_branch.yml index b60c2225db26a..91626a0bc5092 100644 --- a/.github/workflows/delete_backport_branch.yml +++ b/.github/workflows/delete_backport_branch.yml @@ -11,7 +11,7 @@ jobs: contents: write if: startsWith(github.event.pull_request.head.ref,'backport/') steps: - - name: Delete merged branch + - name: Delete merged branch uses: actions/github-script@v5 with: script: | diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 1b9b30625eb83..1421eeb7f7576 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -8,6 +8,10 @@ on: pull_request_target: types: [opened, synchronize, reopened] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number }} + cancel-in-progress: true + permissions: contents: read # to fetch code (actions/checkout) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ed85b630bbd4..32fdfab619b3e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,10 +5,13 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 2.x] ### Added +- Add support for async deletion in S3BlobContainer ([#15621](https://github.com/opensearch-project/OpenSearch/pull/15621)) - MultiTermQueries in keyword fields now default to `indexed` approach and gated behind cluster setting ([#15637](https://github.com/opensearch-project/OpenSearch/pull/15637)) - [Workload Management] QueryGroup resource cancellation framework changes ([#15651](https://github.com/opensearch-project/OpenSearch/pull/15651)) - Fallback to Remote cluster-state on Term-Version check mismatch - ([#15424](https://github.com/opensearch-project/OpenSearch/pull/15424)) - Implement WithFieldName interface in ValuesSourceAggregationBuilder & FieldSortBuilder ([#15916](https://github.com/opensearch-project/OpenSearch/pull/15916)) +- Add successfulSearchShardIndices in searchRequestContext ([#15967](https://github.com/opensearch-project/OpenSearch/pull/15967)) +- Remove identity-related feature flagged code from the RestController ([#15430](https://github.com/opensearch-project/OpenSearch/pull/15430)) ### Dependencies - Bump `com.azure:azure-identity` from 1.13.0 to 1.13.2 ([#15578](https://github.com/opensearch-project/OpenSearch/pull/15578)) @@ -18,6 +21,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.nimbusds:oauth2-oidc-sdk` from 11.9.1 to 11.19.1 ([#15862](https://github.com/opensearch-project/OpenSearch/pull/15862)) - Bump `com.microsoft.azure:msal4j` from 1.17.0 to 1.17.1 ([#15945](https://github.com/opensearch-project/OpenSearch/pull/15945)) - Bump `ch.qos.logback:logback-core` from 1.5.6 to 1.5.8 ([#15946](https://github.com/opensearch-project/OpenSearch/pull/15946)) +- Update protobuf from 3.25.4 to 3.25.5 ([#16011](https://github.com/opensearch-project/OpenSearch/pull/16011)) ### Changed @@ -30,6 +34,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix wildcard query containing escaped character ([#15737](https://github.com/opensearch-project/OpenSearch/pull/15737)) - Fix case-insensitive query on wildcard 
field ([#15882](https://github.com/opensearch-project/OpenSearch/pull/15882)) - Add validation for the search backpressure cancellation settings ([#15501](https://github.com/opensearch-project/OpenSearch/pull/15501)) +- Fix infinite loop in nested agg ([#15931](https://github.com/opensearch-project/OpenSearch/pull/15931)) ### Security diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 0c6d0536dd1d5..63817289e80c0 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -20,7 +20,7 @@ woodstox = 6.4.0 kotlin = 1.7.10 antlr4 = 4.13.1 guava = 32.1.1-jre -protobuf = 3.25.4 +protobuf = 3.25.5 jakarta_annotation = 1.3.5 google_http_client = 1.44.1 tdigest = 3.3 diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java index ee7b0c2b600e6..1b1290527d4a2 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroIdentityPlugin.java @@ -13,21 +13,29 @@ import org.apache.shiro.SecurityUtils; import org.apache.shiro.mgt.SecurityManager; import org.opensearch.client.Client; +import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; +import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; import org.opensearch.identity.PluginSubject; import org.opensearch.identity.Subject; +import org.opensearch.identity.tokens.AuthToken; import org.opensearch.identity.tokens.TokenManager; +import org.opensearch.plugins.ActionPlugin; import org.opensearch.plugins.IdentityPlugin; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.PluginInfo; import org.opensearch.repositories.RepositoriesService; +import org.opensearch.rest.BytesRestResponse; +import org.opensearch.rest.RestChannel; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; import org.opensearch.script.ScriptService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.watcher.ResourceWatcherService; @@ -35,14 +43,12 @@ import java.util.Collection; import java.util.Collections; import java.util.function.Supplier; +import java.util.function.UnaryOperator; /** * Identity implementation with Shiro - * - * @opensearch.experimental */ -@ExperimentalApi -public final class ShiroIdentityPlugin extends Plugin implements IdentityPlugin { +public final class ShiroIdentityPlugin extends Plugin implements IdentityPlugin, ActionPlugin { private Logger log = LogManager.getLogger(this.getClass()); private final Settings settings; @@ -101,6 +107,38 @@ public TokenManager getTokenManager() { return this.authTokenHandler; } + @Override + public UnaryOperator<RestHandler> getRestHandlerWrapper(ThreadContext threadContext) { + return AuthcRestHandler::new; + } + + class AuthcRestHandler extends RestHandler.Wrapper { + + public AuthcRestHandler(RestHandler original) { + super(original); + } + + @Override + public void handleRequest(RestRequest request, 
RestChannel channel, NodeClient client) throws Exception { + try { + final AuthToken token = ShiroTokenExtractor.extractToken(request); + // If no token was found, continue executing the request + if (token == null) { + // Authentication did not fail, so continue handling the request; authorization is handled at the action level. + super.handleRequest(request, channel, client); + return; + } + ShiroSubject shiroSubject = (ShiroSubject) getCurrentSubject(); + shiroSubject.authenticate(token); + // Caller was authenticated; forward the request to the wrapped handler + super.handleRequest(request, channel, client); + } catch (final Exception e) { + final BytesRestResponse bytesRestResponse = new BytesRestResponse(RestStatus.UNAUTHORIZED, e.getMessage()); + channel.sendResponse(bytesRestResponse); + } + } + } + @Override public PluginSubject getPluginSubject(PluginInfo pluginInfo) { return new ShiroPluginSubject(threadPool); diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSecurityManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSecurityManager.java index 96cf05ac53a1a..0a809dd6c9071 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSecurityManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSecurityManager.java @@ -15,8 +15,6 @@ /** * OpenSearch specific security manager implementation - * - * @opensearch.experimental */ public class ShiroSecurityManager extends DefaultSecurityManager { diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java index 72a168f23c5cd..73ce3f835fc9b 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroSubject.java @@ -17,8 +17,6 @@ /** * Subject backed by Shiro - * - * @opensearch.experimental */ public class ShiroSubject implements UserSubject { private final ShiroTokenManager authTokenHandler; diff --git a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenExtractor.java similarity index 86% rename from server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java rename to plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenExtractor.java index 4bd3ebdded588..86be5ca664daa 100644 --- a/server/src/main/java/org/opensearch/identity/tokens/RestTokenExtractor.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenExtractor.java @@ -5,11 +5,13 @@ * this file be licensed under the Apache-2.0 license or a * compatible open source license. 
*/ -package org.opensearch.identity.tokens; +package org.opensearch.identity.shiro; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.core.common.Strings; +import org.opensearch.identity.tokens.AuthToken; +import org.opensearch.identity.tokens.BasicAuthToken; import org.opensearch.rest.RestRequest; import java.util.Collections; @@ -18,9 +20,9 @@ /** * Extracts tokens from RestRequests used for authentication */ -public class RestTokenExtractor { +public class ShiroTokenExtractor { - private static final Logger logger = LogManager.getLogger(RestTokenExtractor.class); + private static final Logger logger = LogManager.getLogger(ShiroTokenExtractor.class); public final static String AUTH_HEADER_NAME = "Authorization"; diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java index a14215aa7655b..cd54bbf9b3124 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/ShiroTokenManager.java @@ -36,8 +36,6 @@ /** * Extracts Shiro's {@link AuthenticationToken} from different types of auth headers - * - * @opensearch.experimental */ class ShiroTokenManager implements TokenManager { diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java index a2cb78425929e..f8113101deb70 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/BCryptPasswordMatcher.java @@ -16,8 +16,6 @@ /** * Password matcher for BCrypt - * - * @opensearch.experimental */ public class BCryptPasswordMatcher implements CredentialsMatcher { diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java index ef405a5637ae7..1fc9a1f437a42 100644 --- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/OpenSearchRealm.java @@ -25,8 +25,6 @@ /** * Internal Realm is a custom realm using the internal OpenSearch IdP - * - * @opensearch.experimental */ public class OpenSearchRealm extends AuthenticatingRealm { private static final String DEFAULT_REALM_NAME = "internal"; @@ -93,7 +91,7 @@ public OpenSearchRealm build() { public User getInternalUser(final String principalIdentifier) throws UnknownAccountException { final User userRecord = internalUsers.get(principalIdentifier); if (userRecord == null) { - throw new UnknownAccountException(); + throw new UnknownAccountException("Incorrect credentials"); } return userRecord; } @@ -131,7 +129,7 @@ protected AuthenticationInfo doGetAuthenticationInfo(final AuthenticationToken t return sai; } else { // Bad password - throw new IncorrectCredentialsException(); + throw new IncorrectCredentialsException("Incorrect credentials"); } } diff --git a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/User.java b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/User.java index 35b3348a955d7..1d2d0fed800e2 100644 
--- a/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/User.java +++ b/plugins/identity-shiro/src/main/java/org/opensearch/identity/shiro/realm/User.java @@ -12,8 +12,6 @@ /** * A non-volatile and immutable object in the storage. - * - * @opensearch.experimental */ public class User { diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java index bc14410d81de0..a15538e48bd66 100644 --- a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroIdentityPluginTests.java @@ -13,7 +13,7 @@ import org.opensearch.identity.IdentityService; import org.opensearch.plugins.IdentityPlugin; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; import java.util.List; @@ -21,27 +21,27 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.mock; public class ShiroIdentityPluginTests extends OpenSearchTestCase { public void testSingleIdentityPluginSucceeds() { - TestThreadPool threadPool = new TestThreadPool(getTestName()); IdentityPlugin identityPlugin1 = new ShiroIdentityPlugin(Settings.EMPTY); List<IdentityPlugin> pluginList1 = List.of(identityPlugin1); - IdentityService identityService1 = new IdentityService(Settings.EMPTY, threadPool, pluginList1); + IdentityService identityService1 = new IdentityService(Settings.EMPTY, mock(ThreadPool.class), pluginList1); assertThat(identityService1.getTokenManager(), is(instanceOf(ShiroTokenManager.class))); - terminate(threadPool); } public void testMultipleIdentityPluginsFail() { - TestThreadPool threadPool = new TestThreadPool(getTestName()); IdentityPlugin identityPlugin1 = new ShiroIdentityPlugin(Settings.EMPTY); IdentityPlugin identityPlugin2 = new ShiroIdentityPlugin(Settings.EMPTY); IdentityPlugin identityPlugin3 = new ShiroIdentityPlugin(Settings.EMPTY); List<IdentityPlugin> pluginList = List.of(identityPlugin1, identityPlugin2, identityPlugin3); - Exception ex = assertThrows(OpenSearchException.class, () -> new IdentityService(Settings.EMPTY, threadPool, pluginList)); + Exception ex = assertThrows( + OpenSearchException.class, + () -> new IdentityService(Settings.EMPTY, mock(ThreadPool.class), pluginList) + ); assert (ex.getMessage().contains("Multiple identity plugins are not supported,")); - terminate(threadPool); } } diff --git a/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroTokenExtractorTests.java b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroTokenExtractorTests.java new file mode 100644 index 0000000000000..4dc398bacb707 --- /dev/null +++ b/plugins/identity-shiro/src/test/java/org/opensearch/identity/shiro/ShiroTokenExtractorTests.java @@ -0,0 +1,45 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.identity.shiro; + +import org.opensearch.identity.tokens.AuthToken; +import org.opensearch.identity.tokens.BasicAuthToken; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.rest.FakeRestRequest; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.List; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; + +public class ShiroTokenExtractorTests extends OpenSearchTestCase { + + public void testAuthorizationHeaderExtractionWithBasicAuthToken() { + String basicAuthHeader = Base64.getEncoder().encodeToString("foo:bar".getBytes(StandardCharsets.UTF_8)); + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders( + Map.of(ShiroTokenExtractor.AUTH_HEADER_NAME, List.of(BasicAuthToken.TOKEN_IDENTIFIER + " " + basicAuthHeader)) + ).build(); + AuthToken extractedToken = ShiroTokenExtractor.extractToken(fakeRequest); + assertThat(extractedToken, instanceOf(BasicAuthToken.class)); + assertThat(extractedToken.asAuthHeaderValue(), equalTo(basicAuthHeader)); + } + + public void testAuthorizationHeaderExtractionWithUnknownToken() { + String authHeader = "foo"; + RestRequest fakeRequest = new FakeRestRequest.Builder(xContentRegistry()).withHeaders( + Map.of(ShiroTokenExtractor.AUTH_HEADER_NAME, List.of(authHeader)) + ).build(); + AuthToken extractedToken = ShiroTokenExtractor.extractToken(fakeRequest); + assertNull(extractedToken); + } +} diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java index c5438d58e679d..944de326d144c 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -153,6 +153,7 @@ protected Settings nodeSettings(int nodeOrdinal) { // Disable request throttling because some random values in tests might generate too many failures for the S3 client .put(S3ClientSettings.USE_THROTTLE_RETRIES_SETTING.getConcreteSettingForNamespace("test").getKey(), false) .put(S3ClientSettings.PROXY_TYPE_SETTING.getConcreteSettingForNamespace("test").getKey(), ProxySettings.ProxyType.DIRECT) + .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) .put(super.nodeSettings(nodeOrdinal)) .setSecureSettings(secureSettings); diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index 7db9a0d3ba790..f0e40db965646 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -55,6 +55,14 @@ public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.getKey(), false) + .build(); + } + @Override @Before 
@SuppressForbidden(reason = "Need to set system property here for AWS SDK v2") diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java index b489a3cc85037..1a402e8431e25 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobContainer.java @@ -62,6 +62,7 @@ import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; +import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher; import software.amazon.awssdk.utils.CollectionUtils; import org.apache.logging.log4j.LogManager; @@ -90,6 +91,7 @@ import org.opensearch.core.common.Strings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; +import org.opensearch.repositories.s3.async.S3AsyncDeleteHelper; import org.opensearch.repositories.s3.async.SizeBasedBlockingQ; import org.opensearch.repositories.s3.async.UploadRequest; import org.opensearch.repositories.s3.utils.HttpRangeUtils; @@ -109,6 +111,9 @@ import java.util.function.Function; import java.util.stream.Collectors; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + import static org.opensearch.repositories.s3.S3Repository.MAX_FILE_SIZE; import static org.opensearch.repositories.s3.S3Repository.MAX_FILE_SIZE_USING_MULTIPART; import static org.opensearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING_MULTIPART; @@ -875,4 +880,123 @@ CompletableFuture<GetObjectAttributesResponse> getBlobMetadata(S3AsyncClient s3A return SocketAccess.doPrivileged(() -> s3AsyncClient.getObjectAttributes(getObjectAttributesRequest)); } + + @Override + public void deleteAsync(ActionListener<DeleteResult> completionListener) { + try (AmazonAsyncS3Reference asyncClientReference = blobStore.asyncClientReference()) { + S3AsyncClient s3AsyncClient = asyncClientReference.get().client(); + + ListObjectsV2Request listRequest = ListObjectsV2Request.builder().bucket(blobStore.bucket()).prefix(keyPath).build(); + ListObjectsV2Publisher listPublisher = s3AsyncClient.listObjectsV2Paginator(listRequest); + + AtomicLong deletedBlobs = new AtomicLong(); + AtomicLong deletedBytes = new AtomicLong(); + + CompletableFuture<Void> listingFuture = new CompletableFuture<>(); + + listPublisher.subscribe(new Subscriber<>() { + private Subscription subscription; + private final List<String> objectsToDelete = new ArrayList<>(); + private CompletableFuture<Void> deletionChain = CompletableFuture.completedFuture(null); + + @Override + public void onSubscribe(Subscription s) { + this.subscription = s; + subscription.request(1); + } + + @Override + public void onNext(ListObjectsV2Response response) { + response.contents().forEach(s3Object -> { + deletedBlobs.incrementAndGet(); + deletedBytes.addAndGet(s3Object.size()); + objectsToDelete.add(s3Object.key()); + }); + + int bulkDeleteSize = blobStore.getBulkDeletesSize(); + if (objectsToDelete.size() >= bulkDeleteSize) { + int fullBatchesCount = objectsToDelete.size() / bulkDeleteSize; + int itemsToDelete = fullBatchesCount * bulkDeleteSize; + + List<String> batchToDelete = new ArrayList<>(objectsToDelete.subList(0, itemsToDelete)); + objectsToDelete.subList(0, itemsToDelete).clear(); + + deletionChain = S3AsyncDeleteHelper.executeDeleteChain( 
s3AsyncClient, + blobStore, + batchToDelete, + deletionChain, + () -> subscription.request(1) + ); + } else { + subscription.request(1); + } + } + + @Override + public void onError(Throwable t) { + listingFuture.completeExceptionally(new IOException("Failed to list objects for deletion", t)); + } + + @Override + public void onComplete() { + if (!objectsToDelete.isEmpty()) { + deletionChain = S3AsyncDeleteHelper.executeDeleteChain( + s3AsyncClient, + blobStore, + objectsToDelete, + deletionChain, + null + ); + } + deletionChain.whenComplete((v, throwable) -> { + if (throwable != null) { + listingFuture.completeExceptionally(throwable); + } else { + listingFuture.complete(null); + } + }); + } + }); + + listingFuture.whenComplete((v, throwable) -> { + if (throwable != null) { + completionListener.onFailure( + throwable instanceof Exception + ? (Exception) throwable + : new IOException("Unexpected error during async deletion", throwable) + ); + } else { + completionListener.onResponse(new DeleteResult(deletedBlobs.get(), deletedBytes.get())); + } + }); + } catch (Exception e) { + completionListener.onFailure(new IOException("Failed to initiate async deletion", e)); + } + } + + @Override + public void deleteBlobsAsyncIgnoringIfNotExists(List<String> blobNames, ActionListener<Void> completionListener) { + if (blobNames.isEmpty()) { + completionListener.onResponse(null); + return; + } + + try (AmazonAsyncS3Reference asyncClientReference = blobStore.asyncClientReference()) { + S3AsyncClient s3AsyncClient = asyncClientReference.get().client(); + + List<String> keysToDelete = blobNames.stream().map(this::buildKey).collect(Collectors.toList()); + + S3AsyncDeleteHelper.executeDeleteChain(s3AsyncClient, blobStore, keysToDelete, CompletableFuture.completedFuture(null), null) + .whenComplete((v, throwable) -> { + if (throwable != null) { + completionListener.onFailure(new IOException("Failed to delete blobs " + blobNames, throwable)); + } else { + completionListener.onResponse(null); + } + }); + } catch (Exception e) { + completionListener.onFailure(new IOException("Failed to initiate async blob deletion", e)); + } + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java index f688be9216b8f..90bfa11e18481 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3BlobStore.java @@ -63,7 +63,7 @@ import static org.opensearch.repositories.s3.S3Repository.STORAGE_CLASS_SETTING; import static org.opensearch.repositories.s3.S3Repository.UPLOAD_RETRY_ENABLED; -class S3BlobStore implements BlobStore { +public class S3BlobStore implements BlobStore { private static final Logger logger = LogManager.getLogger(S3BlobStore.class); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java index 8d2772d42ebca..9f73c67df3b18 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/StatsMetricPublisher.java @@ -95,6 +95,10 @@ public void publish(MetricCollection metricCollection) { public void close() {} }; + public MetricPublisher getDeleteObjectsMetricPublisher() { + return deleteObjectsMetricPublisher; + } + + public MetricPublisher 
getObjectMetricPublisher = new MetricPublisher() { @Override public void publish(MetricCollection metricCollection) { diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/S3AsyncDeleteHelper.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/S3AsyncDeleteHelper.java new file mode 100644 index 0000000000000..eed95c0e68ef1 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/async/S3AsyncDeleteHelper.java @@ -0,0 +1,95 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.s3.async; + +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.Delete; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.repositories.s3.S3BlobStore; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; + +public class S3AsyncDeleteHelper { + private static final Logger logger = LogManager.getLogger(S3AsyncDeleteHelper.class); + + public static CompletableFuture<Void> executeDeleteChain( + S3AsyncClient s3AsyncClient, + S3BlobStore blobStore, + List<String> objectsToDelete, + CompletableFuture<Void> currentChain, + Runnable afterDeleteAction + ) { + List<List<String>> batches = createDeleteBatches(objectsToDelete, blobStore.getBulkDeletesSize()); + CompletableFuture<Void> newChain = currentChain.thenCompose(v -> executeDeleteBatches(s3AsyncClient, blobStore, batches)); + if (afterDeleteAction != null) { + newChain = newChain.thenRun(afterDeleteAction); + } + return newChain; + } + + static List<List<String>> createDeleteBatches(List<String> keys, int bulkDeleteSize) { + List<List<String>> batches = new ArrayList<>(); + for (int i = 0; i < keys.size(); i += bulkDeleteSize) { + batches.add(keys.subList(i, Math.min(keys.size(), i + bulkDeleteSize))); + } + return batches; + } + + static CompletableFuture<Void> executeDeleteBatches(S3AsyncClient s3AsyncClient, S3BlobStore blobStore, List<List<String>> batches) { + CompletableFuture<Void> allDeletesFuture = CompletableFuture.completedFuture(null); + + for (List<String> batch : batches) { + allDeletesFuture = allDeletesFuture.thenCompose(v -> executeSingleDeleteBatch(s3AsyncClient, blobStore, batch)); + } + + return allDeletesFuture; + } + + static CompletableFuture<Void> executeSingleDeleteBatch(S3AsyncClient s3AsyncClient, S3BlobStore blobStore, List<String> batch) { + DeleteObjectsRequest deleteRequest = bulkDelete(blobStore.bucket(), batch, blobStore); + return s3AsyncClient.deleteObjects(deleteRequest).thenApply(S3AsyncDeleteHelper::processDeleteResponse); + } + + static Void processDeleteResponse(DeleteObjectsResponse deleteObjectsResponse) { + if (!deleteObjectsResponse.errors().isEmpty()) { + logger.warn( + () -> new ParameterizedMessage( + "Failed to delete some blobs {}", + deleteObjectsResponse.errors() + .stream() + .map(s3Error -> "[" + s3Error.key() + "][" + s3Error.code() + "][" + s3Error.message() + "]") + .collect(Collectors.toList()) + ) + ); + } + return null; + } + + static DeleteObjectsRequest bulkDelete(String bucket, 
List<String> blobs, S3BlobStore blobStore) { + return DeleteObjectsRequest.builder() + .bucket(bucket) + .delete( + Delete.builder() + .objects(blobs.stream().map(blob -> ObjectIdentifier.builder().key(blob).build()).collect(Collectors.toList())) + .quiet(true) + .build() + ) + .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().getDeleteObjectsMetricPublisher())) + .build(); + } +} diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java index 654d8a72690c4..2cb11541d924f 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3BlobStoreContainerTests.java @@ -69,6 +69,7 @@ import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable; +import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Publisher; import org.opensearch.action.LatchedActionListener; import org.opensearch.common.blobstore.BlobContainer; @@ -99,22 +100,28 @@ import java.util.Set; import java.util.UUID; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.IntStream; import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; import static org.mockito.Mockito.when; public class S3BlobStoreContainerTests extends OpenSearchTestCase { @@ -1275,6 +1282,504 @@ public void testTransformResponseToInputStreamContainer() throws Exception { assertEquals(inputStream.available(), inputStreamContainer.getInputStream().available()); } + public void testDeleteAsync() throws Exception { + for (int i = 0; i < 100; i++) { + testDeleteAsync(i + 1); + } + } + + private void testDeleteAsync(int bulkDeleteSize) throws InterruptedException { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, 
s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + final List<S3Object> s3Objects = new ArrayList<>(); + int numObjects = randomIntBetween(20, 100); + long totalSize = 0; + for (int i = 0; i < numObjects; i++) { + long size = randomIntBetween(1, 100); + s3Objects.add(S3Object.builder().key(randomAlphaOfLength(10)).size(size).build()); + totalSize += size; + } + + final List<ListObjectsV2Response> responseList = new ArrayList<>(); + int size = 0; + while (size < numObjects) { + int toAdd = randomIntBetween(10, 20); + int endIndex = Math.min(numObjects, size + toAdd); + responseList.add(ListObjectsV2Response.builder().contents(s3Objects.subList(size, endIndex)).build()); + size = endIndex; + } + int expectedDeletedObjectsCall = numObjects / bulkDeleteSize + (numObjects % bulkDeleteSize > 0 ? 1 : 0); + + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + AtomicInteger counter = new AtomicInteger(); + doAnswer(invocation -> { + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + int currentCounter = counter.getAndIncrement(); + if (currentCounter < responseList.size()) { + subscriber.onNext(responseList.get(currentCounter)); + } + if (currentCounter == responseList.size()) { + subscriber.onComplete(); + } + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) + ); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<DeleteResult> deleteResultRef = new AtomicReference<>(); + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + deleteResultRef.set(deleteResult); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + logger.error("exception during deleteAsync", e); + fail("Unexpected failure: " + e.getMessage()); + } + }); + + latch.await(); + + DeleteResult deleteResult = deleteResultRef.get(); + assertEquals(numObjects, deleteResult.blobsDeleted()); + assertEquals(totalSize, deleteResult.bytesDeleted()); + + verify(s3AsyncClient, times(1)).listObjectsV2Paginator(any(ListObjectsV2Request.class)); + verify(s3AsyncClient, times(expectedDeletedObjectsCall)).deleteObjects(any(DeleteObjectsRequest.class)); + } + + public void testDeleteAsyncFailure() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(1000); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + 
when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + // Simulate a failure in listObjectsV2Paginator + RuntimeException simulatedFailure = new RuntimeException("Simulated failure"); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenThrow(simulatedFailure); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected a failure, but got a success response"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertNotNull(exceptionRef.get()); + assertEquals(IOException.class, exceptionRef.get().getClass()); + assertEquals("Failed to initiate async deletion", exceptionRef.get().getMessage()); + assertEquals(simulatedFailure, exceptionRef.get().getCause()); + } + + public void testDeleteAsyncListingError() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(1000); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onError(new RuntimeException("Simulated listing error")); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected a failure, but got a success response"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertNotNull(exceptionRef.get()); + assertEquals(IOException.class, exceptionRef.get().getClass()); + assertEquals("Failed to list objects for deletion", exceptionRef.get().getMessage()); + assertNotNull(exceptionRef.get().getCause()); + assertEquals("Simulated listing error", exceptionRef.get().getCause().getMessage()); + } + + public void testDeleteAsyncDeletionError() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore 
blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(1000); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + final ListObjectsV2Publisher listPublisher = mock(ListObjectsV2Publisher.class); + doAnswer(invocation -> { + Subscriber<? super ListObjectsV2Response> subscriber = invocation.getArgument(0); + subscriber.onSubscribe(new Subscription() { + @Override + public void request(long n) { + subscriber.onNext( + ListObjectsV2Response.builder().contents(S3Object.builder().key("test-key").size(100L).build()).build() + ); + subscriber.onComplete(); + } + + @Override + public void cancel() {} + }); + return null; + }).when(listPublisher).subscribe(ArgumentMatchers.<Subscriber<? super ListObjectsV2Response>>any()); + when(s3AsyncClient.listObjectsV2Paginator(any(ListObjectsV2Request.class))).thenReturn(listPublisher); + + // Simulate a failure in deleteObjects + CompletableFuture<DeleteObjectsResponse> failedFuture = new CompletableFuture<>(); + failedFuture.completeExceptionally(new RuntimeException("Simulated delete error")); + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + blobContainer.deleteAsync(new ActionListener<>() { + @Override + public void onResponse(DeleteResult deleteResult) { + fail("Expected a failure, but got a success response"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertNotNull(exceptionRef.get()); + assertEquals(CompletionException.class, exceptionRef.get().getClass()); + assertEquals("java.lang.RuntimeException: Simulated delete error", exceptionRef.get().getMessage()); + assertNotNull(exceptionRef.get().getCause()); + assertEquals("Simulated delete error", exceptionRef.get().getCause().getMessage()); + } + + public void testDeleteBlobsAsyncIgnoringIfNotExists() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + int bulkDeleteSize = randomIntBetween(1, 10); + when(blobStore.getBulkDeletesSize()).thenReturn(bulkDeleteSize); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + final List<String> blobNames = new ArrayList<>(); + int size = randomIntBetween(10, 100); + for (int i = 0; i 
< size; i++) { + blobNames.add(randomAlphaOfLength(10)); + } + int expectedDeleteCalls = size / bulkDeleteSize + (size % bulkDeleteSize > 0 ? 1 : 0); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) + ); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + blobContainer.deleteBlobsAsyncIgnoringIfNotExists(blobNames, new ActionListener<Void>() { + @Override + public void onResponse(Void aVoid) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertNull(exceptionRef.get()); + + ArgumentCaptor<DeleteObjectsRequest> deleteRequestCaptor = ArgumentCaptor.forClass(DeleteObjectsRequest.class); + verify(s3AsyncClient, times(expectedDeleteCalls)).deleteObjects(deleteRequestCaptor.capture()); + + DeleteObjectsRequest capturedRequest = deleteRequestCaptor.getAllValues().stream().findAny().get(); + assertEquals(bucketName, capturedRequest.bucket()); + int totalBlobsDeleted = deleteRequestCaptor.getAllValues() + .stream() + .map(r -> r.delete().objects().size()) + .reduce(Integer::sum) + .get(); + assertEquals(blobNames.size(), totalBlobsDeleted); + List<String> deletedKeys = deleteRequestCaptor.getAllValues() + .stream() + .flatMap(r -> r.delete().objects().stream()) + .map(ObjectIdentifier::key) + .collect(Collectors.toList()); + assertTrue(deletedKeys.containsAll(blobNames)); + } + + public void testDeleteBlobsAsyncIgnoringIfNotExistsFailure() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(1000); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + // Simulate a failure in deleteObjects + CompletableFuture<DeleteObjectsResponse> failedFuture = new CompletableFuture<>(); + failedFuture.completeExceptionally(new RuntimeException("Simulated delete failure")); + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(failedFuture); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + List<String> blobNames = Arrays.asList("blob1", "blob2", "blob3"); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + blobContainer.deleteBlobsAsyncIgnoringIfNotExists(blobNames, new ActionListener<Void>() { + @Override + public void onResponse(Void aVoid) { + fail("Expected a failure, but got a success response"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertNotNull(exceptionRef.get()); + assertEquals(IOException.class, exceptionRef.get().getClass()); + assertEquals("Failed to delete blobs " + blobNames, 
exceptionRef.get().getMessage()); + assertNotNull(exceptionRef.get().getCause()); + assertEquals("java.lang.RuntimeException: Simulated delete failure", exceptionRef.get().getCause().getMessage()); + } + + public void testDeleteBlobsAsyncIgnoringIfNotExistsWithEmptyList() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(1000); + + final S3AsyncClient s3AsyncClient = mock(S3AsyncClient.class); + final AmazonAsyncS3Reference asyncClientReference = mock(AmazonAsyncS3Reference.class); + when(blobStore.asyncClientReference()).thenReturn(asyncClientReference); + AmazonAsyncS3WithCredentials amazonAsyncS3WithCredentials = AmazonAsyncS3WithCredentials.create( + s3AsyncClient, + s3AsyncClient, + s3AsyncClient, + null + ); + when(asyncClientReference.get()).thenReturn(amazonAsyncS3WithCredentials); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + List<String> emptyBlobNames = Collections.emptyList(); + + CountDownLatch latch = new CountDownLatch(1); + AtomicBoolean onResponseCalled = new AtomicBoolean(false); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + + blobContainer.deleteBlobsAsyncIgnoringIfNotExists(emptyBlobNames, new ActionListener<Void>() { + @Override + public void onResponse(Void aVoid) { + onResponseCalled.set(true); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertTrue("onResponse should have been called", onResponseCalled.get()); + assertNull("No exception should have been thrown", exceptionRef.get()); + + // Verify that no interactions with S3AsyncClient occurred + verifyNoInteractions(s3AsyncClient); + } + + public void testDeleteBlobsAsyncIgnoringIfNotExistsInitializationFailure() throws Exception { + final String bucketName = randomAlphaOfLengthBetween(1, 10); + final BlobPath blobPath = new BlobPath(); + + final S3BlobStore blobStore = mock(S3BlobStore.class); + when(blobStore.bucket()).thenReturn(bucketName); + when(blobStore.getStatsMetricPublisher()).thenReturn(new StatsMetricPublisher()); + when(blobStore.getBulkDeletesSize()).thenReturn(1000); + + // Simulate a failure when getting the asyncClientReference + RuntimeException simulatedFailure = new RuntimeException("Simulated initialization failure"); + when(blobStore.asyncClientReference()).thenThrow(simulatedFailure); + + final S3BlobContainer blobContainer = new S3BlobContainer(blobPath, blobStore); + + List<String> blobNames = Arrays.asList("blob1", "blob2", "blob3"); + + CountDownLatch latch = new CountDownLatch(1); + AtomicReference<Exception> exceptionRef = new AtomicReference<>(); + + blobContainer.deleteBlobsAsyncIgnoringIfNotExists(blobNames, new ActionListener<Void>() { + @Override + public void onResponse(Void aVoid) { + fail("Expected a failure, but got a success response"); + } + + @Override + public void onFailure(Exception e) { + exceptionRef.set(e); + latch.countDown(); + } + }); + + latch.await(); + + assertNotNull("An exception should have been thrown", exceptionRef.get()); + assertTrue("Exception should be an IOException", exceptionRef.get() instanceof IOException); + assertEquals("Failed to initiate async blob deletion", exceptionRef.get().getMessage()); + 
assertEquals(simulatedFailure, exceptionRef.get().getCause()); + } + private void mockObjectResponse(S3AsyncClient s3AsyncClient, String bucketName, String blobName, int objectSize) { final InputStream inputStream = new ByteArrayInputStream(randomByteArrayOfLength(objectSize)); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/S3AsyncDeleteHelperTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/S3AsyncDeleteHelperTests.java new file mode 100644 index 0000000000000..d7f924e05cc70 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/async/S3AsyncDeleteHelperTests.java @@ -0,0 +1,236 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.s3.async; + +import software.amazon.awssdk.metrics.MetricPublisher; +import software.amazon.awssdk.services.s3.S3AsyncClient; +import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest; +import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse; +import software.amazon.awssdk.services.s3.model.ObjectIdentifier; +import software.amazon.awssdk.services.s3.model.S3Error; + +import org.opensearch.repositories.s3.S3BlobStore; +import org.opensearch.repositories.s3.StatsMetricPublisher; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; + +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class S3AsyncDeleteHelperTests extends OpenSearchTestCase { + + @Mock + private S3AsyncClient s3AsyncClient; + + @Mock + private S3BlobStore blobStore; + + @Override + public void setUp() throws Exception { + super.setUp(); + MockitoAnnotations.openMocks(this); + } + + public void testExecuteDeleteChain() { + List<String> objectsToDelete = Arrays.asList("key1", "key2", "key3"); + CompletableFuture<Void> currentChain = CompletableFuture.completedFuture(null); + + // Mock the deleteObjects method of S3AsyncClient + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) + ); + + // Mock the getBulkDeletesSize method of S3BlobStore + when(blobStore.getBulkDeletesSize()).thenReturn(2); + + // Mock the getStatsMetricPublisher method of S3BlobStore to return a non-null value + StatsMetricPublisher mockMetricPublisher = mock(StatsMetricPublisher.class); + MetricPublisher mockDeleteObjectsMetricPublisher = mock(MetricPublisher.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(mockMetricPublisher); + when(mockMetricPublisher.getDeleteObjectsMetricPublisher()).thenReturn(mockDeleteObjectsMetricPublisher); + + CompletableFuture<Void> newChain = S3AsyncDeleteHelper.executeDeleteChain( + s3AsyncClient, + blobStore, + objectsToDelete, + currentChain, + null + ); + + // Verify that the newChain is completed without any exceptions + assertNotNull(newChain); + assertTrue(newChain.isDone()); + assertFalse(newChain.isCompletedExceptionally()); + + // Verify that the deleteObjects 
+ + public void testCreateDeleteBatches() { + List<String> keys = Arrays.asList("key1", "key2", "key3", "key4", "key5", "key6"); + int bulkDeleteSize = 3; + + List<List<String>> batches = S3AsyncDeleteHelper.createDeleteBatches(keys, bulkDeleteSize); + + assertEquals(2, batches.size()); + assertEquals(Arrays.asList("key1", "key2", "key3"), batches.get(0)); + assertEquals(Arrays.asList("key4", "key5", "key6"), batches.get(1)); + } + + public void testExecuteSingleDeleteBatch() throws Exception { + List<String> batch = Arrays.asList("key1", "key2"); + DeleteObjectsResponse expectedResponse = DeleteObjectsResponse.builder().build(); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(CompletableFuture.completedFuture(expectedResponse)); + + // Mock the getStatsMetricPublisher method of S3BlobStore to return a non-null value + StatsMetricPublisher mockMetricPublisher = mock(StatsMetricPublisher.class); + MetricPublisher mockDeleteObjectsMetricPublisher = mock(MetricPublisher.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(mockMetricPublisher); + when(mockMetricPublisher.getDeleteObjectsMetricPublisher()).thenReturn(mockDeleteObjectsMetricPublisher); + + CompletableFuture<Void> future = S3AsyncDeleteHelper.executeSingleDeleteBatch(s3AsyncClient, blobStore, batch); + + assertNotNull(future); + assertTrue(future.isDone()); + assertFalse(future.isCompletedExceptionally()); + future.join(); // Wait for the CompletableFuture to complete + + // Verify that the deleteObjects method of S3AsyncClient was called with the expected request + ArgumentCaptor<DeleteObjectsRequest> requestCaptor = ArgumentCaptor.forClass(DeleteObjectsRequest.class); + verify(s3AsyncClient).deleteObjects(requestCaptor.capture()); + + DeleteObjectsRequest capturedRequest = requestCaptor.getValue(); + assertEquals(blobStore.bucket(), capturedRequest.bucket()); + assertEquals(batch.size(), capturedRequest.delete().objects().size()); + assertTrue(capturedRequest.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList()).containsAll(batch)); + } + + public void testProcessDeleteResponse() { + DeleteObjectsResponse response = DeleteObjectsResponse.builder() + .errors( + Arrays.asList( + S3Error.builder().key("key1").code("Code1").message("Message1").build(), + S3Error.builder().key("key2").code("Code2").message("Message2").build() + ) + ) + .build(); + + // Call the processDeleteResponse method; the test passes as long as no exception is thrown + S3AsyncDeleteHelper.processDeleteResponse(response); + } + + public void testBulkDelete() { + List<String> blobs = Arrays.asList("key1", "key2", "key3"); + String bucketName = "my-bucket"; + + // Mock the getStatsMetricPublisher method of S3BlobStore to return a non-null value + StatsMetricPublisher mockMetricPublisher = mock(StatsMetricPublisher.class); + MetricPublisher mockDeleteObjectsMetricPublisher = mock(MetricPublisher.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(mockMetricPublisher); + 
when(mockMetricPublisher.getDeleteObjectsMetricPublisher()).thenReturn(mockDeleteObjectsMetricPublisher); + + DeleteObjectsRequest request = S3AsyncDeleteHelper.bulkDelete(bucketName, blobs, blobStore); + + assertEquals(bucketName, request.bucket()); + assertEquals(blobs.size(), request.delete().objects().size()); + assertTrue(request.delete().objects().stream().map(ObjectIdentifier::key).collect(Collectors.toList()).containsAll(blobs)); + assertNotNull(request.overrideConfiguration()); + assertTrue(request.overrideConfiguration().get().metricPublishers().contains(mockDeleteObjectsMetricPublisher)); + } + + public void testExecuteDeleteBatches() { + List<List<String>> batches = Arrays.asList(Arrays.asList("key1", "key2"), Arrays.asList("key3", "key4")); + DeleteObjectsResponse expectedResponse = DeleteObjectsResponse.builder().build(); + + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn(CompletableFuture.completedFuture(expectedResponse)); + + // Mock the getStatsMetricPublisher method of S3BlobStore to return a non-null value + StatsMetricPublisher mockMetricPublisher = mock(StatsMetricPublisher.class); + MetricPublisher mockDeleteObjectsMetricPublisher = mock(MetricPublisher.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(mockMetricPublisher); + when(mockMetricPublisher.getDeleteObjectsMetricPublisher()).thenReturn(mockDeleteObjectsMetricPublisher); + + CompletableFuture<Void> future = S3AsyncDeleteHelper.executeDeleteBatches(s3AsyncClient, blobStore, batches); + + assertNotNull(future); + assertTrue(future.isDone()); + assertFalse(future.isCompletedExceptionally()); + future.join(); // Wait for the CompletableFuture to complete + + // Verify that the deleteObjects method of S3AsyncClient was called with the expected requests + ArgumentCaptor<DeleteObjectsRequest> requestCaptor = ArgumentCaptor.forClass(DeleteObjectsRequest.class); + verify(s3AsyncClient, times(2)).deleteObjects(requestCaptor.capture()); + + List<DeleteObjectsRequest> capturedRequests = requestCaptor.getAllValues(); + assertEquals(2, capturedRequests.size()); + for (DeleteObjectsRequest request : capturedRequests) { + assertNotNull(request.overrideConfiguration()); + assertTrue(request.overrideConfiguration().get().metricPublishers().contains(mockDeleteObjectsMetricPublisher)); + } + } + + public void testExecuteDeleteChainWithAfterDeleteAction() { + List<String> objectsToDelete = Arrays.asList("key1", "key2", "key3"); + CompletableFuture<Void> currentChain = CompletableFuture.completedFuture(null); + Runnable afterDeleteAction = mock(Runnable.class); + + // Mock the deleteObjects method of S3AsyncClient + when(s3AsyncClient.deleteObjects(any(DeleteObjectsRequest.class))).thenReturn( + CompletableFuture.completedFuture(DeleteObjectsResponse.builder().build()) + ); + + // Mock the getBulkDeletesSize method of S3BlobStore + when(blobStore.getBulkDeletesSize()).thenReturn(2); + + // Mock the getStatsMetricPublisher method of S3BlobStore to return a non-null value + StatsMetricPublisher mockMetricPublisher = mock(StatsMetricPublisher.class); + MetricPublisher mockDeleteObjectsMetricPublisher = mock(MetricPublisher.class); + when(blobStore.getStatsMetricPublisher()).thenReturn(mockMetricPublisher); + when(mockMetricPublisher.getDeleteObjectsMetricPublisher()).thenReturn(mockDeleteObjectsMetricPublisher); + + CompletableFuture<Void> newChain = S3AsyncDeleteHelper.executeDeleteChain( + s3AsyncClient, + blobStore, + objectsToDelete, + currentChain, + afterDeleteAction + ); + + // Verify that the newChain is completed without any exceptions + assertNotNull(newChain);
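// Editorial aside, not part of this change: the assertions below lean on CompletableFuture
// composition semantics; composing already-completed stages yields an already-completed
// future, which is why the returned chain can be inspected synchronously with mocked responses:
CompletableFuture<Void> sketchHead = CompletableFuture.completedFuture(null);
assertTrue(sketchHead.thenCompose(ignored -> CompletableFuture.<Void>completedFuture(null)).isDone());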
+ assertTrue(newChain.isDone()); + assertFalse(newChain.isCompletedExceptionally()); + + // Verify that the afterDeleteAction was called + verify(afterDeleteAction).run(); + } + +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml index 0f4a45e4591a3..ce78709f6ed5e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.put_settings/10_basic.yml @@ -103,8 +103,8 @@ --- "Test setting search backpressure cancellation settings": - skip: - version: "- 2.99.99" - reason: "Fixed in 3.0.0" + version: "- 2.17.99" + reason: "Fixed in 2.18.0" - do: cluster.put_settings: @@ -181,8 +181,8 @@ --- "Test setting invalid search backpressure cancellation_rate and cancellation_ratio": - skip: - version: "- 2.99.99" - reason: "Fixed in 3.0.0" + version: "- 2.17.99" + reason: "Fixed in 2.18.0" - do: catch: /search_backpressure.search_task.cancellation_rate must be > 0/ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/410_nested_aggs.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/410_nested_aggs.yml new file mode 100644 index 0000000000000..c115dd4751f8f --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/410_nested_aggs.yml @@ -0,0 +1,111 @@ +--- +# The test setup includes: +# - Create nested mapping for test_nested_agg_index index +# - Index two example documents +# - nested agg + +setup: + - do: + indices.create: + index: test_nested_agg_index + body: + mappings: + properties: + a: + type: nested + properties: + b1: + type: keyword + b2: + type: nested + properties: + c: + type: nested + properties: + d: + type: keyword + + - do: + bulk: + refresh: true + body: | + {"index": {"_index": "test_nested_agg_index", "_id": "0"}} + {"a": { "b1": "b11", "b2": { "c": { "d": "d1" } }}} + {"index": {"_index": "test_nested_agg_index", "_id": "1"}} + {"a": { "b1": "b12", "b2": { "c": { "d": "d2" } }}} + +--- +# Delete Index when connection is teardown +teardown: + - do: + indices.delete: + index: test_nested_agg_index + +--- +"Supported queries": + - skip: + version: " - 2.17.99" + reason: "fixed in 2.18.0" + + # Verify Document Count + - do: + search: + body: { + query: { + match_all: { } + } + } + + - length: { hits.hits: 2 } + + # Verify nested aggregation + - do: + search: + body: { + aggs: { + nested_agg: { + nested: { + path: "a" + }, + aggs: { + a_b1: { + terms: { + field: "a.b1" + }, + aggs: { + "c": { + nested: { + path: "a.b2.c" + }, + aggs: { + "d": { + terms: { + field: "a.b2.c.d" + } + } + } + } + } + } + } + } + } + } + + - length: { hits.hits: 2 } + - match: { aggregations.nested_agg.doc_count: 2 } + - length: { aggregations.nested_agg.a_b1.buckets: 2 } + + - match: { aggregations.nested_agg.a_b1.buckets.0.key: "b11" } + - match: { aggregations.nested_agg.a_b1.buckets.0.doc_count: 1 } + - match: { aggregations.nested_agg.a_b1.buckets.0.c.doc_count: 1 } + - length: { aggregations.nested_agg.a_b1.buckets.0.c.d.buckets: "1" } + - match: { aggregations.nested_agg.a_b1.buckets.0.c.d.buckets.0.key: "d1" } + - match: { aggregations.nested_agg.a_b1.buckets.0.c.d.buckets.0.doc_count: 1 } + + - match: { aggregations.nested_agg.a_b1.buckets.1.key: "b12" } + - match: { aggregations.nested_agg.a_b1.buckets.1.doc_count: 1 } + - match: { 
aggregations.nested_agg.a_b1.buckets.1.c.doc_count: 1 } + - length: { aggregations.nested_agg.a_b1.buckets.1.c.d.buckets: "1" } + - match: { aggregations.nested_agg.a_b1.buckets.1.c.d.buckets.0.key: "d2" } + - match: { aggregations.nested_agg.a_b1.buckets.1.c.d.buckets.0.doc_count: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml index d728070adb188..c885e3fbc2446 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/370_bitmap_filtering.yml @@ -1,7 +1,7 @@ --- setup: - skip: - version: " - 2.99.99" + version: " - 2.16.99" reason: The bitmap filtering feature is available in 2.17 and later. - do: indices.create: diff --git a/server/licenses/protobuf-java-3.25.4.jar.sha1 b/server/licenses/protobuf-java-3.25.4.jar.sha1 deleted file mode 100644 index 21b6fdfd24dc8..0000000000000 --- a/server/licenses/protobuf-java-3.25.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43fcb86e4a411516c7fc681450f1516de0b862a2 \ No newline at end of file diff --git a/server/licenses/protobuf-java-3.25.5.jar.sha1 b/server/licenses/protobuf-java-3.25.5.jar.sha1 new file mode 100644 index 0000000000000..72b42c9efc85a --- /dev/null +++ b/server/licenses/protobuf-java-3.25.5.jar.sha1 @@ -0,0 +1 @@ +5ae5c9ec39930ae9b5a61b32b93288818ec05ec1 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java index d45b4e3deb798..875f11203281a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/mocks/MockFsAsyncBlobContainer.java @@ -1,3 +1,4 @@ + /* * SPDX-License-Identifier: Apache-2.0 * @@ -12,6 +13,7 @@ import org.opensearch.common.StreamContext; import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.DeleteResult; import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; import org.opensearch.common.blobstore.stream.read.ReadContext; @@ -146,4 +148,14 @@ public boolean remoteIntegrityCheckSupported() { private boolean isSegmentFile(String filename) { return !filename.endsWith(".tlog") && !filename.endsWith(".ckp"); } + + @Override + public void deleteAsync(ActionListener completionListener) { + throw new UnsupportedOperationException("deleteAsync"); + } + + @Override + public void deleteBlobsAsyncIgnoringIfNotExists(List blobNames, ActionListener completionListener) { + throw new UnsupportedOperationException("deleteBlobsAsyncIgnoringIfNotExists"); + } } diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index fbf90b97d1e8f..3fe0f1dc7cb83 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -579,7 +579,7 @@ public ActionModule( actionPlugins.stream().flatMap(p -> p.indicesAliasesRequestValidators().stream()).collect(Collectors.toList()) ); - restController = new RestController(headers, restWrapper, nodeClient, 
circuitBreakerService, usageService, identityService); + restController = new RestController(headers, restWrapper, nodeClient, circuitBreakerService, usageService); } public Map> getActions() {
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index aab7ea54f87c2..d694721feda38 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -60,7 +60,7 @@ public class NodesReloadSecureSettingsRequest extends BaseNodesRequest result.getSearchShardTarget().getIndex()).collect(Collectors.toSet()) + ); onPhaseEnd(searchRequestContext); onRequestEnd(searchRequestContext); listener.onResponse(buildSearchResponse(internalSearchResponse, failures, scrollId, searchContextId));
diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java index 111d9c64550b3..376cf71448d5c 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequestContext.java @@ -20,6 +20,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Set; import java.util.concurrent.LinkedBlockingQueue; import java.util.function.Supplier; @@ -36,6 +37,7 @@ public class SearchRequestContext { private final Map phaseTookMap; private TotalHits totalHits; private final EnumMap shardStats; + private Set<String> successfulSearchShardIndices; private final SearchRequest searchRequest; private final LinkedBlockingQueue phaseResourceUsage; @@ -141,6 +143,18 @@ public List getPhaseResourceUsage() { public SearchRequest getRequest() { return searchRequest; } + + void setSuccessfulSearchShardIndices(Set<String> successfulSearchShardIndices) { + this.successfulSearchShardIndices = successfulSearchShardIndices; + } + + /** + * @return A {@link Set} of {@link String} representing the names of the indices that were + * successfully queried at the shard level. + */ + public Set<String> getSuccessfulSearchShardIndices() { + return successfulSearchShardIndices; + } } enum ShardStatsFieldNames {
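A minimal sketch of consuming the new accessor (the helper name is illustrative, and the field stays null until the fetch phase calls setSuccessfulSearchShardIndices):

static Set<String> successfulIndicesOrEmpty(SearchRequestContext searchRequestContext) {
    Set<String> indices = searchRequestContext.getSuccessfulSearchShardIndices();
    // null until setSuccessfulSearchShardIndices has run for this request
    return indices == null ? java.util.Collections.emptySet() : indices;
}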
diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java index 97f304d776f5c..b769cdc2fe7ab 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamBlobContainer.java @@ -14,6 +14,7 @@ import org.opensearch.core.action.ActionListener; import java.io.IOException; +import java.util.List; /** * An extension of {@link BlobContainer} that adds {@link AsyncMultiStreamBlobContainer#asyncBlobUpload} to allow @@ -48,4 +49,8 @@ public interface AsyncMultiStreamBlobContainer extends BlobContainer { * by underlying blobContainer. In this case, caller doesn't need to ensure integrity of data. */ boolean remoteIntegrityCheckSupported(); + + void deleteAsync(ActionListener<DeleteResult> completionListener); + + void deleteBlobsAsyncIgnoringIfNotExists(List<String> blobNames, ActionListener<Void> completionListener); }
diff --git a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java index 82bc7a0baed50..286c01f9dca44 100644 --- a/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java +++ b/server/src/main/java/org/opensearch/common/blobstore/AsyncMultiStreamEncryptedBlobContainer.java @@ -171,4 +171,14 @@ private InputStreamContainer decryptInputStreamContainer(InputStreamContainer in return new InputStreamContainer(decryptedStream, adjustedLength, adjustedPos); } } + + @Override + public void deleteAsync(ActionListener<DeleteResult> completionListener) { + blobContainer.deleteAsync(completionListener); + } + + @Override + public void deleteBlobsAsyncIgnoringIfNotExists(List<String> blobNames, ActionListener<Void> completionListener) { + blobContainer.deleteBlobsAsyncIgnoringIfNotExists(blobNames, completionListener); + } }
diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 09832e2b41b6d..ecdd23530c648 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -779,7 +779,10 @@ public void apply(Settings value, Settings current, Settings previous) { RemoteStoreSettings.CLUSTER_REMOTE_STORE_PINNED_TIMESTAMP_ENABLED, RemoteStoreSettings.CLUSTER_REMOTE_STORE_SEGMENTS_PATH_PREFIX, RemoteStoreSettings.CLUSTER_REMOTE_STORE_TRANSLOG_PATH_PREFIX, + + // Snapshot related Settings BlobStoreRepository.SNAPSHOT_SHARD_PATH_PREFIX_SETTING, + BlobStoreRepository.SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, SearchService.CLUSTER_ALLOW_DERIVED_FIELD_SETTING,
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java index a1d638616f2aa..5e5814528fcd2 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/BaseStarTreeBuilder.java @@ -34,6 +34,7 @@ import org.opensearch.index.compositeindex.datacube.startree.node.InMemoryTreeNode; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType; import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.FieldMapper; import org.opensearch.index.mapper.FieldValueConverter; @@ -193,7 +194,9 @@ public List getMetricReaders(SegmentWriteState stat metricFieldInfo = getFieldInfo(metric.getField(), DocValuesType.SORTED_NUMERIC); } metricReader = new SequentialDocValuesIterator( - fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) + new SortedNumericStarTreeValuesIterator( + fieldProducerMap.get(metricFieldInfo.name).getSortedNumeric(metricFieldInfo) + ) ); } metricReaders.add(metricReader); @@ -228,7 +231,7 @@
public void build( dimensionFieldInfo = getFieldInfo(dimension, DocValuesType.SORTED_NUMERIC); } dimensionReaders[i] = new SequentialDocValuesIterator( - fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo) + new SortedNumericStarTreeValuesIterator(fieldProducerMap.get(dimensionFieldInfo.name).getSortedNumeric(dimensionFieldInfo)) ); } Iterator starTreeDocumentIterator = sortAndAggregateSegmentDocuments(dimensionReaders, metricReaders); @@ -287,7 +290,7 @@ void appendDocumentsToStarTree(Iterator starTreeDocumentIterat } } - private void serializeStarTree(int numSegmentStarTreeDocument, int numStarTreeDocs) throws IOException { + private void serializeStarTree(int numSegmentStarTreeDocuments, int numStarTreeDocs) throws IOException { // serialize the star tree data long dataFilePointer = dataOut.getFilePointer(); StarTreeWriter starTreeWriter = new StarTreeWriter(); @@ -299,7 +302,7 @@ private void serializeStarTree(int numSegmentStarTreeDocument, int numStarTreeDo starTreeField, metricAggregatorInfos, numStarTreeNodes, - numSegmentStarTreeDocument, + numSegmentStarTreeDocuments, numStarTreeDocs, dataFilePointer, totalStarTreeDataLength @@ -400,22 +403,20 @@ protected StarTreeDocument getStarTreeDocument( ) throws IOException { Long[] dims = new Long[numDimensions]; int i = 0; - for (SequentialDocValuesIterator dimensionDocValueIterator : dimensionReaders) { - dimensionDocValueIterator.nextDoc(currentDocId); - Long val = dimensionDocValueIterator.value(currentDocId); + for (SequentialDocValuesIterator dimensionValueIterator : dimensionReaders) { + dimensionValueIterator.nextEntry(currentDocId); + Long val = dimensionValueIterator.value(currentDocId); dims[i] = val; i++; } i = 0; Object[] metrics = new Object[metricReaders.size()]; - for (SequentialDocValuesIterator metricDocValuesIterator : metricReaders) { - metricDocValuesIterator.nextDoc(currentDocId); + for (SequentialDocValuesIterator metricValuesIterator : metricReaders) { + metricValuesIterator.nextEntry(currentDocId); // As part of merge, we traverse the star tree doc values // The type of data stored in metric fields is different from the // actual indexing field they're based on - metrics[i] = metricAggregatorInfos.get(i) - .getValueAggregators() - .toAggregatedValueType(metricDocValuesIterator.value(currentDocId)); + metrics[i] = metricAggregatorInfos.get(i).getValueAggregators().toAggregatedValueType(metricValuesIterator.value(currentDocId)); i++; } return new StarTreeDocument(dims, metrics); @@ -502,7 +503,7 @@ Long[] getStarTreeDimensionsFromSegment(int currentDocId, SequentialDocValuesIte for (int i = 0; i < numDimensions; i++) { if (dimensionReaders[i] != null) { try { - dimensionReaders[i].nextDoc(currentDocId); + dimensionReaders[i].nextEntry(currentDocId); } catch (IOException e) { logger.error("unable to iterate to next doc", e); throw new RuntimeException("unable to iterate to next doc", e); @@ -530,7 +531,7 @@ private Object[] getStarTreeMetricsFromSegment(int currentDocId, List mergeStarTrees(List starTreeValuesSub .size()]; for (int i = 0; i < dimensionsSplitOrder.size(); i++) { String dimension = dimensionsSplitOrder.get(i).getField(); - dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionDocIdSetIterator(dimension)); + dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionValuesIterator(dimension)); } List metricReaders = new ArrayList<>(); // get doc id set iterators for metrics @@ -164,7 +164,7 @@ Iterator mergeStarTrees(List 
starTreeValuesSub metric.getField(), metricStat.getTypeName() ); - metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricDocIdSetIterator(metricFullName))); + metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricValuesIterator(metricFullName))); } } int currentDocId = 0; diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java index 1a5c906ad413b..13c6d03c4dc3d 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/OnHeapStarTreeBuilder.java @@ -138,7 +138,7 @@ StarTreeDocument[] getSegmentsStarTreeDocuments(List starTreeVal for (int i = 0; i < dimensionsSplitOrder.size(); i++) { String dimension = dimensionsSplitOrder.get(i).getField(); - dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionDocIdSetIterator(dimension)); + dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionValuesIterator(dimension)); } List metricReaders = new ArrayList<>(); @@ -150,7 +150,7 @@ StarTreeDocument[] getSegmentsStarTreeDocuments(List starTreeVal metric.getField(), metricStat.getTypeName() ); - metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricDocIdSetIterator(metricFullName))); + metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricValuesIterator(metricFullName))); } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java index 779ed77b0540a..15ed153249243 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/builder/StarTreeDocsFileManager.java @@ -21,7 +21,6 @@ import java.io.Closeable; import java.io.IOException; -import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -55,11 +54,9 @@ public class StarTreeDocsFileManager extends AbstractDocumentsFileManager implem private RandomAccessInput starTreeDocsFileRandomInput; private IndexOutput starTreeDocsFileOutput; private final Map fileToEndDocIdMap; - private final List starTreeDocumentOffsets = new ArrayList<>(); private int currentFileStartDocId; private int numReadableStarTreeDocuments; private int starTreeFileCount = -1; - private int currBytes = 0; private final int fileCountMergeThreshold; private int numStarTreeDocs = 0; @@ -98,7 +95,11 @@ IndexOutput createStarTreeDocumentsFileOutput() throws IOException { public void writeStarTreeDocument(StarTreeDocument starTreeDocument, boolean isAggregatedDoc) throws IOException { assert isAggregatedDoc == true; int numBytes = writeStarTreeDocument(starTreeDocument, starTreeDocsFileOutput, true); - addStarTreeDocumentOffset(numBytes); + if (docSizeInBytes == -1) { + docSizeInBytes = numBytes; + } else { + assert docSizeInBytes == numBytes; + } numStarTreeDocs++; } @@ -106,7 +107,14 @@ public void writeStarTreeDocument(StarTreeDocument starTreeDocument, boolean isA public StarTreeDocument readStarTreeDocument(int docId, boolean isAggregatedDoc) throws IOException 
{ assert isAggregatedDoc == true; ensureDocumentReadable(docId); - return readStarTreeDocument(starTreeDocsFileRandomInput, starTreeDocumentOffsets.get(docId), true); + return readStarTreeDocument(starTreeDocsFileRandomInput, getOffset(docId), true); + } + + /** + * Returns offset for the docId based on the current file start id + */ + private long getOffset(int docId) { + return (long) (docId - currentFileStartDocId) * docSizeInBytes; } @Override @@ -119,19 +127,10 @@ public Long getDimensionValue(int docId, int dimensionId) throws IOException { public Long[] readDimensions(int docId) throws IOException { ensureDocumentReadable(docId); Long[] dims = new Long[starTreeField.getDimensionsOrder().size()]; - readDimensions(dims, starTreeDocsFileRandomInput, starTreeDocumentOffsets.get(docId)); + readDimensions(dims, starTreeDocsFileRandomInput, getOffset(docId)); return dims; } - private void addStarTreeDocumentOffset(int bytes) { - starTreeDocumentOffsets.add(currBytes); - currBytes += bytes; - if (docSizeInBytes == -1) { - docSizeInBytes = bytes; - } - assert docSizeInBytes == bytes; - } - /** * Load the correct StarTreeDocuments file based on the docId */ @@ -199,7 +198,6 @@ private void loadStarTreeDocumentFile(int docId) throws IOException { * If the operation is only for reading existing documents, a new file is not created. */ private void closeAndMaybeCreateNewFile(boolean shouldCreateFileForAppend, int numStarTreeDocs) throws IOException { - currBytes = 0; if (starTreeDocsFileOutput != null) { fileToEndDocIdMap.put(starTreeDocsFileOutput.getName(), numStarTreeDocs); IOUtils.close(starTreeDocsFileOutput); @@ -232,7 +230,6 @@ private void mergeFiles(int numStarTreeDocs) throws IOException { deleteOldFiles(); fileToEndDocIdMap.clear(); fileToEndDocIdMap.put(mergedOutput.getName(), numStarTreeDocs); - resetStarTreeDocumentOffsets(); } } @@ -259,17 +256,6 @@ private void deleteOldFiles() throws IOException { } } - /** - * Reset the star tree document offsets based on the merged file - */ - private void resetStarTreeDocumentOffsets() { - int curr = 0; - for (int i = 0; i < starTreeDocumentOffsets.size(); i++) { - starTreeDocumentOffsets.set(i, curr); - curr += docSizeInBytes; - } - } - @Override public void close() { try { @@ -288,7 +274,6 @@ public void close() { tmpDirectory.deleteFile(file); } catch (IOException ignored) {} // similar to IOUtils.deleteFilesIgnoringExceptions } - starTreeDocumentOffsets.clear(); fileToEndDocIdMap.clear(); } } diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java index a34bbbe9ee738..255ad343cde32 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/index/StarTreeValues.java @@ -12,7 +12,6 @@ import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.SegmentReadState; import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.store.IndexInput; import org.opensearch.common.annotation.ExperimentalApi; import org.opensearch.index.compositeindex.CompositeIndexMetadata; @@ -25,6 +24,8 @@ import org.opensearch.index.compositeindex.datacube.startree.fileformats.meta.StarTreeMetadata; import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeFactory; import 
org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNode; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; import java.io.IOException; import java.util.ArrayList; @@ -59,14 +60,14 @@ public class StarTreeValues implements CompositeIndexValues { private final StarTreeNode root; /** - * A map containing suppliers for DocIdSetIterators for dimensions. + * A map containing suppliers for StarTreeValues iterators for dimensions. */ - private final Map> dimensionDocValuesIteratorMap; + private final Map> dimensionValuesIteratorMap; /** - * A map containing suppliers for DocIdSetIterators for metrics. + * A map containing suppliers for StarTreeValues iterators for metrics. */ - private final Map> metricDocValuesIteratorMap; + private final Map> metricValuesIteratorMap; /** * A map containing attributes associated with the star tree values. @@ -84,22 +85,22 @@ public class StarTreeValues implements CompositeIndexValues { * * @param starTreeField The StarTreeField object representing the star tree field configuration. * @param root The root node of the star tree. - * @param dimensionDocValuesIteratorMap A map containing suppliers for DocIdSetIterators for dimensions. - * @param metricDocValuesIteratorMap A map containing suppliers for DocIdSetIterators for metrics. + * @param dimensionValuesIteratorMap A map containing suppliers for StarTreeValues iterators for dimensions. + * @param metricValuesIteratorMap A map containing suppliers for StarTreeValues iterators for metrics. * @param attributes A map containing attributes associated with the star tree values. */ public StarTreeValues( StarTreeField starTreeField, StarTreeNode root, - Map> dimensionDocValuesIteratorMap, - Map> metricDocValuesIteratorMap, + Map> dimensionValuesIteratorMap, + Map> metricValuesIteratorMap, Map attributes, StarTreeMetadata compositeIndexMetadata ) { this.starTreeField = starTreeField; this.root = root; - this.dimensionDocValuesIteratorMap = dimensionDocValuesIteratorMap; - this.metricDocValuesIteratorMap = metricDocValuesIteratorMap; + this.dimensionValuesIteratorMap = dimensionValuesIteratorMap; + this.metricValuesIteratorMap = metricValuesIteratorMap; this.attributes = attributes; this.starTreeMetadata = compositeIndexMetadata; } @@ -146,12 +147,12 @@ public StarTreeValues( this.root = StarTreeFactory.createStarTree(compositeIndexDataIn, starTreeMetadata); // get doc id set iterators for metrics and dimensions - dimensionDocValuesIteratorMap = new LinkedHashMap<>(); - metricDocValuesIteratorMap = new LinkedHashMap<>(); + dimensionValuesIteratorMap = new LinkedHashMap<>(); + metricValuesIteratorMap = new LinkedHashMap<>(); // get doc id set iterators for dimensions for (String dimension : starTreeMetadata.getDimensionFields()) { - dimensionDocValuesIteratorMap.put(dimension, () -> { + dimensionValuesIteratorMap.put(dimension, () -> { try { SortedNumericDocValues dimensionSortedNumericDocValues = null; if (readState != null) { @@ -162,9 +163,9 @@ public StarTreeValues( dimensionSortedNumericDocValues = compositeDocValuesProducer.getSortedNumeric(dimensionfieldInfo); } } - return getSortedNumericDocValues(dimensionSortedNumericDocValues); + return new SortedNumericStarTreeValuesIterator(getSortedNumericDocValues(dimensionSortedNumericDocValues)); } catch (IOException e) { - throw new RuntimeException("Error loading dimension DocIdSetIterator", 
e); + throw new RuntimeException("Error loading dimension StarTreeValuesIterator", e); } }); } @@ -177,7 +178,7 @@ public StarTreeValues( metric.getField(), metricStat.getTypeName() ); - metricDocValuesIteratorMap.put(metricFullName, () -> { + metricValuesIteratorMap.put(metricFullName, () -> { try { SortedNumericDocValues metricSortedNumericDocValues = null; if (readState != null) { @@ -186,7 +187,7 @@ public StarTreeValues( metricSortedNumericDocValues = compositeDocValuesProducer.getSortedNumeric(metricFieldInfo); } } - return getSortedNumericDocValues(metricSortedNumericDocValues); + return new SortedNumericStarTreeValuesIterator(getSortedNumericDocValues(metricSortedNumericDocValues)); } catch (IOException e) { throw new RuntimeException("Error loading metric DocIdSetIterator", e); } @@ -239,30 +240,30 @@ public Map getAttributes() { } /** - * Returns the DocIdSetIterator for the specified dimension. + * Returns the StarTreeValues iterator for the specified dimension. * * @param dimension The name of the dimension. - * @return The DocIdSetIterator for the specified dimension. + * @return The StarTreeValuesIterator for the specified dimension. */ - public DocIdSetIterator getDimensionDocIdSetIterator(String dimension) { + public StarTreeValuesIterator getDimensionValuesIterator(String dimension) { - if (dimensionDocValuesIteratorMap.containsKey(dimension)) { - return dimensionDocValuesIteratorMap.get(dimension).get(); + if (dimensionValuesIteratorMap.containsKey(dimension)) { + return dimensionValuesIteratorMap.get(dimension).get(); } throw new IllegalArgumentException("dimension [" + dimension + "] does not exist in the segment."); } /** - * Returns the DocIdSetIterator for the specified fully qualified metric name. + * Returns the StarTreeValues iterator for the specified fully qualified metric name. * * @param fullyQualifiedMetricName The fully qualified name of the metric. - * @return The DocIdSetIterator for the specified fully qualified metric name. + * @return The StarTreeValuesIterator for the specified fully qualified metric name. 
*/ - public DocIdSetIterator getMetricDocIdSetIterator(String fullyQualifiedMetricName) { + public StarTreeValuesIterator getMetricValuesIterator(String fullyQualifiedMetricName) { - if (metricDocValuesIteratorMap.containsKey(fullyQualifiedMetricName)) { - return metricDocValuesIteratorMap.get(fullyQualifiedMetricName).get(); + if (metricValuesIteratorMap.containsKey(fullyQualifiedMetricName)) { + return metricValuesIteratorMap.get(fullyQualifiedMetricName).get(); } throw new IllegalArgumentException("metric [" + fullyQualifiedMetricName + "] does not exist in the segment.");
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java index 061841d3e140a..9029a451ca4d9 100644 --- a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIterator.java @@ -9,15 +9,19 @@ package org.opensearch.index.compositeindex.datacube.startree.utils; -import org.apache.lucene.index.SortedNumericDocValues; -import org.apache.lucene.search.DocIdSetIterator; import org.opensearch.common.annotation.ExperimentalApi; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator; +import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator; import java.io.IOException; /** - * Coordinates the reading of documents across multiple DocIdSetIterators. - * It encapsulates a single DocIdSetIterator and maintains the latest document ID and its associated value. + * Coordinates the reading of documents across multiple StarTreeValuesIterators. + * It encapsulates a single StarTreeValuesIterator and maintains the latest document ID and its associated value. + * + * During a merge, this reads the entries of the star tree values; during a flush, it goes through + * the actual segment documents. + * * @opensearch.experimental */ @ExperimentalApi @@ -26,76 +30,56 @@ public class SequentialDocValuesIterator { /** * The doc id set iterator associated for each field. */ - private final DocIdSetIterator docIdSetIterator; + private final StarTreeValuesIterator starTreeValuesIterator; /** - * The value associated with the latest document. + * The id of the latest record/entry. */ - private Long docValue; + private int entryId = -1; - /** - * The id of the latest document. - */ - private int docId = -1; - - /** - * Constructs a new SequentialDocValuesIterator instance with the given DocIdSetIterator. - * - * @param docIdSetIterator the DocIdSetIterator to be associated with this instance - */ - public SequentialDocValuesIterator(DocIdSetIterator docIdSetIterator) { - this.docIdSetIterator = docIdSetIterator; - } - - /** - * Returns the id of the latest document. - * - * @return the id of the latest document - */ - public int getDocId() { - return docId; + public SequentialDocValuesIterator(StarTreeValuesIterator starTreeValuesIterator) { + this.starTreeValuesIterator = starTreeValuesIterator; } /** - * Sets the id of the latest document.
+ * Returns the ID of the star tree record/entry or the segment document id * - * @param docId the ID of the latest document + * @return the ID of the star tree record/entry or the segment document id */ - private void setDocId(int docId) { - this.docId = docId; + public int getEntryId() { + return entryId; } /** - * Returns the DocIdSetIterator associated with this instance. + * Sets the id of the latest entry. * - * @return the DocIdSetIterator associated with this instance + * @param entryId the ID of the star tree record/entry or the segment document id */ - public DocIdSetIterator getDocIdSetIterator() { - return docIdSetIterator; + private void setEntryId(int entryId) { + this.entryId = entryId; } - public int nextDoc(int currentDocId) throws IOException { + public int nextEntry(int currentEntryId) throws IOException { // if doc id stored is less than or equal to the requested doc id , return the stored doc id - if (docId >= currentDocId) { - return docId; + if (entryId >= currentEntryId) { + return entryId; } - setDocId(this.docIdSetIterator.nextDoc()); - return docId; + setEntryId(this.starTreeValuesIterator.nextEntry()); + return entryId; } - public Long value(int currentDocId) throws IOException { - if (this.getDocIdSetIterator() instanceof SortedNumericDocValues) { - SortedNumericDocValues sortedNumericDocValues = (SortedNumericDocValues) this.getDocIdSetIterator(); - if (currentDocId < 0) { - throw new IllegalStateException("invalid doc id to fetch the next value"); + public Long value(int currentEntryId) throws IOException { + if (starTreeValuesIterator instanceof SortedNumericStarTreeValuesIterator) { + if (currentEntryId < 0) { + throw new IllegalStateException("invalid entry id to fetch the next value"); } - if (currentDocId == DocIdSetIterator.NO_MORE_DOCS) { - throw new IllegalStateException("DocValuesIterator is already exhausted"); + if (currentEntryId == StarTreeValuesIterator.NO_MORE_ENTRIES) { + throw new IllegalStateException("StarTreeValuesIterator is already exhausted"); } - if (docId == DocIdSetIterator.NO_MORE_DOCS || docId != currentDocId) { + if (entryId == StarTreeValuesIterator.NO_MORE_ENTRIES || entryId != currentEntryId) { return null; } - return sortedNumericDocValues.nextValue(); + return ((SortedNumericStarTreeValuesIterator) starTreeValuesIterator).nextValue(); } else { throw new IllegalStateException("Unsupported Iterator requested for SequentialDocValuesIterator"); diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java new file mode 100644 index 0000000000000..27afdf1479b4e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/SortedNumericStarTreeValuesIterator.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.compositeindex.datacube.startree.utils.iterator; + +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Wrapper iterator class for StarTree index to traverse through SortedNumericDocValues + * + * @opensearch.experimental + */ +@ExperimentalApi +public class SortedNumericStarTreeValuesIterator extends StarTreeValuesIterator { + + public SortedNumericStarTreeValuesIterator(DocIdSetIterator docIdSetIterator) { + super(docIdSetIterator); + } + + public long nextValue() throws IOException { + return ((SortedNumericDocValues) docIdSetIterator).nextValue(); + } +}
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java new file mode 100644 index 0000000000000..32866f3e50092 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/StarTreeValuesIterator.java @@ -0,0 +1,48 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.compositeindex.datacube.startree.utils.iterator; + +import org.apache.lucene.search.DocIdSetIterator; +import org.opensearch.common.annotation.ExperimentalApi; + +import java.io.IOException; + +/** + * Wrapper iterator class for the StarTree index, used in place of DocIdSetIterator to read and traverse the doc values formats. + * This is needed because star tree entries are distinct from segment documents and, in the worst case, the number of star tree + * entries can exceed the number of segment documents. + * + * @opensearch.experimental + */ +@ExperimentalApi +public abstract class StarTreeValuesIterator { + + public static final int NO_MORE_ENTRIES = Integer.MAX_VALUE; + protected final DocIdSetIterator docIdSetIterator; + + public StarTreeValuesIterator(DocIdSetIterator docIdSetIterator) { + this.docIdSetIterator = docIdSetIterator; + } + + public int entryId() { + return docIdSetIterator.docID(); + } + + public int nextEntry() throws IOException { + return docIdSetIterator.nextDoc(); + } + + public int advance(int target) throws IOException { + return docIdSetIterator.advance(target); + } + + public long cost() { + return docIdSetIterator.cost(); + } +}
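A minimal sketch of the access pattern the star-tree builders in this change follow with these iterators (names are illustrative): every field reader is advanced to the same target entry id before its value is read.

static void readEntry(int entryId, SequentialDocValuesIterator[] readers) throws IOException {
    for (SequentialDocValuesIterator reader : readers) {
        reader.nextEntry(entryId);          // no-op if the reader already sits at or beyond entryId
        Long value = reader.value(entryId); // null when this entry has no value for the field
    }
}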
diff --git a/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/package-info.java b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/package-info.java new file mode 100644 index 0000000000000..3c6444a4a5cac --- /dev/null +++ b/server/src/main/java/org/opensearch/index/compositeindex/datacube/startree/utils/iterator/package-info.java @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * This package contains classes for the StarTreeValues iterators + * + * @opensearch.experimental + */ +package org.opensearch.index.compositeindex.datacube.startree.utils.iterator;
diff --git a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java index dd984373fc9df..b93c82d7a5c7c 100644 --- a/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/ObjectMapper.java @@ -171,6 +171,19 @@ public void setIncludeInParent(boolean value) { public void setIncludeInRoot(boolean value) { includeInRoot = new Explicit<>(value, true); } + + public static boolean isParent(ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper, MapperService mapperService) { + if (parentObjectMapper == null || childObjectMapper == null) { + return false; + } + + ObjectMapper parent = childObjectMapper.getParentObjectMapper(mapperService); + while (parent != null && parent != parentObjectMapper) { + childObjectMapper = parent; + parent = childObjectMapper.getParentObjectMapper(mapperService); + } + return parentObjectMapper == parent; + } } /**
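For reference, a stand-alone rendition of the relocated parent walk over a simplified parent function (hypothetical helper; the MapperService lookups are elided). Unlike the private copy removed from NestedAggregator further below, which never advanced the child mapper inside its loop, this version steps child-to-parent until it reaches the candidate ancestor or the root:

static boolean isAncestorSketch(String parentPath, String childPath, java.util.function.UnaryOperator<String> parentOf) {
    if (parentPath == null || childPath == null) {
        return false;
    }
    String parent = parentOf.apply(childPath);
    while (parent != null && !parent.equals(parentPath)) {
        childPath = parent;               // advance the child pointer, then look one level up
        parent = parentOf.apply(childPath);
    }
    return parentPath.equals(parent);
}
// e.g. with parentOf = p -> p.contains(".") ? p.substring(0, p.lastIndexOf('.')) : null
// isAncestorSketch("a", "a.b2.c", parentOf) == true; isAncestorSketch("a.b2.c", "a.b2", parentOf) == false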
diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index b954560c1bc94..0292cecc36a81 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -50,6 +50,7 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.RepositoryCleanupInProgress; @@ -69,6 +70,7 @@ import org.opensearch.common.Randomness; import org.opensearch.common.SetOnce; import org.opensearch.common.UUIDs; +import org.opensearch.common.blobstore.AsyncMultiStreamBlobContainer; import org.opensearch.common.blobstore.BlobContainer; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; @@ -180,6 +182,7 @@ import java.util.Set; import java.util.concurrent.BlockingQueue; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; @@ -353,6 +356,16 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Setting.Property.Final ); + /** + * Controls whether snapshot blob deletions use the asynchronous deletion flow when the + * repository's blob container supports it ({@code cluster.snapshot.async-deletion.enable}). + */ + public static final Setting<Boolean> SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING = Setting.boolSetting( + "cluster.snapshot.async-deletion.enable", + true, + Setting.Property.NodeScope, + Setting.Property.Dynamic + ); + protected volatile boolean supportURLRepo; private volatile int maxShardBlobDeleteBatch; @@ -446,6 +459,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp private final String snapshotShardPathPrefix; + private volatile boolean enableAsyncDeletion; + /** * Flag that is set to {@code true} if this instance is started with {@link #metadata} that has a higher value for * {@link RepositoryMetadata#pendingGeneration()} than for {@link RepositoryMetadata#generation()} indicating a full cluster restart @@ -498,6 +513,8 @@ protected BlobStoreRepository( this.recoverySettings = recoverySettings; this.remoteStoreSettings = new RemoteStoreSettings(clusterService.getSettings(), clusterService.getClusterSettings()); this.snapshotShardPathPrefix = SNAPSHOT_SHARD_PATH_PREFIX_SETTING.get(clusterService.getSettings()); + this.enableAsyncDeletion = SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SNAPSHOT_ASYNC_DELETION_ENABLE_SETTING, this::setEnableAsyncDeletion); } @Override @@ -2082,7 +2099,7 @@ private void executeOneStaleIndexDelete( } // Finally, we delete the [base_path]/indexId folder - deleteResult = deleteResult.add(indexEntry.getValue().delete()); // Deleting the index folder + deleteResult = deleteResult.add(deleteContainer(indexEntry.getValue())); // Deleting the index folder logger.debug("[{}] Cleaned up stale index [{}]", metadata.name(), indexSnId); return deleteResult; } catch (IOException e) { @@ -2115,6 +2132,21 @@ private void executeOneStaleIndexDelete( })); } + private DeleteResult deleteContainer(BlobContainer container) throws IOException { + long startTime = System.nanoTime(); + DeleteResult deleteResult; + if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { + // Use deleteAsync and wait for the result + PlainActionFuture<DeleteResult> future = new PlainActionFuture<>(); + ((AsyncMultiStreamBlobContainer) container).deleteAsync(future); + deleteResult = future.actionGet(); + } else { + deleteResult = container.delete(); + } + logger.debug(new ParameterizedMessage("[{}] Deleted {} in {}ns", metadata.name(), container.path(), System.nanoTime() - startTime)); + return deleteResult; + } + /** * Cleans up the remote store directory if needed. *

This method cleans up segments in the remote store directory for deleted indices. @@ -2318,7 +2350,7 @@ void releaseRemoteStoreLocksAndCleanup( * @return A DeleteResult object representing the result of the deletion operation. * @throws IOException If an I/O error occurs during the deletion process. */ - private DeleteResult deleteShardData(ShardInfo shardInfo) throws IOException { + private DeleteResult deleteShardData(ShardInfo shardInfo) throws IOException, ExecutionException, InterruptedException { // If the provided ShardInfo is null, return a zero DeleteResult if (shardInfo == null) { return DeleteResult.ZERO; @@ -2330,7 +2362,7 @@ private DeleteResult deleteShardData(ShardInfo shardInfo) throws IOException { // Iterate over the shards and delete each shard's data for (int i = 0; i < shardInfo.getShardCount(); i++) { // Call the delete method on the shardContainer and accumulate the result - deleteResult = deleteResult.add(shardContainer(shardInfo.getIndexId(), i).delete()); + deleteResult = deleteResult.add(deleteContainer(shardContainer(shardInfo.getIndexId(), i))); } // Return the accumulated DeleteResult @@ -2714,7 +2746,23 @@ public IndexMetadata getSnapshotIndexMetaData(RepositoryData repositoryData, Sna private void deleteFromContainer(BlobContainer container, List blobs) throws IOException { logger.trace(() -> new ParameterizedMessage("[{}] Deleting {} from [{}]", metadata.name(), blobs, container.path())); - container.deleteBlobsIgnoringIfNotExists(blobs); + long startTime = System.nanoTime(); + if (enableAsyncDeletion && container instanceof AsyncMultiStreamBlobContainer) { + PlainActionFuture future = new PlainActionFuture<>(); + ((AsyncMultiStreamBlobContainer) container).deleteBlobsAsyncIgnoringIfNotExists(blobs, future); + future.actionGet(); + } else { + container.deleteBlobsIgnoringIfNotExists(blobs); + } + logger.debug( + () -> new ParameterizedMessage( + "[{}] Deletion {} from [{}] took {}ns", + metadata.name(), + blobs, + container.path(), + System.nanoTime() - startTime + ) + ); } private BlobPath indicesPath() { @@ -4565,4 +4613,8 @@ public String toString() { return name; } } + + public void setEnableAsyncDeletion(boolean enableAsyncDeletion) { + this.enableAsyncDeletion = enableAsyncDeletion; + } } diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index 0d6f965c7033f..4f87c01258396 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -41,7 +41,6 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.path.PathTrie; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.Streams; import org.opensearch.common.xcontent.XContentType; @@ -56,11 +55,6 @@ import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.http.HttpChunk; import org.opensearch.http.HttpServerTransport; -import org.opensearch.identity.IdentityService; -import org.opensearch.identity.Subject; -import org.opensearch.identity.UserSubject; -import org.opensearch.identity.tokens.AuthToken; -import org.opensearch.identity.tokens.RestTokenExtractor; import org.opensearch.usage.UsageService; import java.io.ByteArrayOutputStream; @@ -125,25 +119,23 @@ public class RestController implements HttpServerTransport.Dispatcher { /** Rest 
headers that are copied to internal requests made during a rest request. */ private final Set headersToCopy; private final UsageService usageService; - private final IdentityService identityService; public RestController( Set headersToCopy, UnaryOperator handlerWrapper, NodeClient client, CircuitBreakerService circuitBreakerService, - UsageService usageService, - IdentityService identityService + UsageService usageService ) { this.headersToCopy = headersToCopy; this.usageService = usageService; if (handlerWrapper == null) { handlerWrapper = h -> h; // passthrough if no wrapper set } + this.handlerWrapper = handlerWrapper; this.client = client; this.circuitBreakerService = circuitBreakerService; - this.identityService = identityService; registerHandlerNoWrap( RestRequest.Method.GET, "/favicon.ico", @@ -472,11 +464,6 @@ private void tryAllHandlers(final RestRequest request, final RestChannel channel return; } } else { - if (FeatureFlags.isEnabled(FeatureFlags.IDENTITY)) { - if (!handleAuthenticateUser(request, channel)) { - return; - } - } dispatchRequest(request, channel, handler); return; } @@ -587,43 +574,6 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel } } - /** - * Attempts to extract auth token and login. - * - * @return false if there was an error and the request should not continue being dispatched - * */ - private boolean handleAuthenticateUser(final RestRequest request, final RestChannel channel) { - try { - final AuthToken token = RestTokenExtractor.extractToken(request); - // If no token was found, continue executing the request - if (token == null) { - // Authentication did not fail so return true. Authorization is handled at the action level. - return true; - } - final Subject currentSubject = identityService.getCurrentSubject(); - if (currentSubject instanceof UserSubject) { - ((UserSubject) currentSubject).authenticate(token); - logger.debug("Logged in as user " + currentSubject); - } - } catch (final Exception e) { - try { - final BytesRestResponse bytesRestResponse = BytesRestResponse.createSimpleErrorResponse( - channel, - RestStatus.UNAUTHORIZED, - e.getMessage() - ); - channel.sendResponse(bytesRestResponse); - } catch (final Exception ex) { - final BytesRestResponse bytesRestResponse = new BytesRestResponse(RestStatus.UNAUTHORIZED, ex.getMessage()); - channel.sendResponse(bytesRestResponse); - } - return false; - } - - // Authentication did not fail so return true. Authorization is handled at the action level. - return true; - } - /** * Get the valid set of HTTP methods for a REST request. 
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java
index 150efa878f866..db8979d611b4f 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/nested/NestedAggregator.java
@@ -46,7 +46,6 @@
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.lucene.search.Queries;
 import org.opensearch.core.ParseField;
-import org.opensearch.index.mapper.MapperService;
 import org.opensearch.index.mapper.ObjectMapper;
 import org.opensearch.search.aggregations.Aggregator;
 import org.opensearch.search.aggregations.AggregatorFactories;
@@ -63,6 +62,8 @@
 import java.util.List;
 import java.util.Map;
 
+import static org.opensearch.index.mapper.ObjectMapper.Nested.isParent;
+
 /**
  * Aggregate all docs that match a nested path
  *
@@ -98,17 +99,6 @@ public class NestedAggregator extends BucketsAggregator implements SingleBucketA
         this.collectsFromSingleBucket = cardinality.map(estimate -> estimate < 2);
     }
 
-    private boolean isParent(ObjectMapper parentObjectMapper, ObjectMapper childObjectMapper, MapperService mapperService) {
-        if (parentObjectMapper == null) {
-            return false;
-        }
-        ObjectMapper parent;
-        do {
-            parent = childObjectMapper.getParentObjectMapper(mapperService);
-        } while (parent != null && parent != parentObjectMapper);
-        return parentObjectMapper == parent;
-    }
-
     @Override
     public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
         IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
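The private helper removed above is replaced by the statically imported `ObjectMapper.Nested.isParent`. A sketch of a caller under that assumption; the three-argument signature is inferred from the new import and the `ObjectMapperTests` additions later in this patch, and the wrapper class here is hypothetical.

```java
import static org.opensearch.index.mapper.ObjectMapper.Nested.isParent;

import org.opensearch.index.mapper.MapperService;
import org.opensearch.index.mapper.ObjectMapper;

// Sketch: ancestry check between two nested object mappers.
final class NestedAncestry {
    static boolean isAncestor(ObjectMapper parent, ObjectMapper child, MapperService mapperService) {
        // True when `parent` is reachable by repeatedly following the
        // child's parent object mapper chain; false otherwise.
        return isParent(parent, child, mapperService);
    }
}
```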
diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java
index 5ae1bdce48cd5..bf1d52b49cb1f 100644
--- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java
+++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java
@@ -153,8 +153,7 @@ public List<Setting<?>> getExtensionSettings() {
             null,
             new NodeClient(Settings.EMPTY, threadPool),
             new NoneCircuitBreakerService(),
-            new UsageService(),
-            new IdentityService(Settings.EMPTY, threadPool, List.of())
+            new UsageService()
         );
         when(actionModule.getDynamicActionRegistry()).thenReturn(mock(DynamicActionRegistry.class));
         when(actionModule.getRestController()).thenReturn(restController);
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java
index b7395b993f67b..7cae1cd25ee93 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/StarTreeTestUtils.java
@@ -54,7 +54,7 @@ public static StarTreeDocument[] getSegmentsStarTreeDocuments(
 
         for (int i = 0; i < dimensionsSplitOrder.size(); i++) {
             String dimension = dimensionsSplitOrder.get(i).getField();
-            dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionDocIdSetIterator(dimension));
+            dimensionReaders[i] = new SequentialDocValuesIterator(starTreeValues.getDimensionValuesIterator(dimension));
         }
 
         List<SequentialDocValuesIterator> metricReaders = new ArrayList<>();
@@ -69,7 +69,7 @@ public static StarTreeDocument[] getSegmentsStarTreeDocuments(
                     metric.getField(),
                     metricStat.getTypeName()
                 );
-                metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricDocIdSetIterator(metricFullName)));
+                metricReaders.add(new SequentialDocValuesIterator(starTreeValues.getMetricValuesIterator(metricFullName)));
             }
         }
 
@@ -92,7 +92,7 @@ public static StarTreeDocument getStarTreeDocument(
         Long[] dims = new Long[dimensionReaders.length];
         int i = 0;
         for (SequentialDocValuesIterator dimensionDocValueIterator : dimensionReaders) {
-            dimensionDocValueIterator.nextDoc(currentDocId);
+            dimensionDocValueIterator.nextEntry(currentDocId);
             Long val = dimensionDocValueIterator.value(currentDocId);
             dims[i] = val;
             i++;
@@ -100,7 +100,7 @@ public static StarTreeDocument getStarTreeDocument(
         i = 0;
         Object[] metrics = new Object[metricReaders.size()];
         for (SequentialDocValuesIterator metricDocValuesIterator : metricReaders) {
-            metricDocValuesIterator.nextDoc(currentDocId);
+            metricDocValuesIterator.nextEntry(currentDocId);
             metrics[i] = toAggregatorValueType(metricDocValuesIterator.value(currentDocId), fieldValueConverters.get(i));
             i++;
         }
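The `nextDoc`-to-`nextEntry` rename throughout these utilities reflects that the iterator now walks abstract entries rather than Lucene doc IDs. A minimal sketch of the read pattern the tests exercise, assuming the wrapper and accessor names from this diff (the helper class is illustrative):

```java
import java.io.IOException;

import org.apache.lucene.index.SortedNumericDocValues;
import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator;
import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator;

// Sketch: advance to an entry, then read the value associated with it.
final class EntryRead {
    static Long readValueAt(SortedNumericDocValues docValues, int entryId) throws IOException {
        SequentialDocValuesIterator it = new SequentialDocValuesIterator(
            new SortedNumericStarTreeValuesIterator(docValues)
        );
        it.nextEntry(entryId);    // formerly nextDoc(...)
        return it.value(entryId); // null when the entry has no value
    }
}
```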
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java
index 65adc43ea8bea..b77200f173e71 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/builder/AbstractStarTreeBuilderTests.java
@@ -53,6 +53,8 @@
 import org.opensearch.index.compositeindex.datacube.startree.node.StarTreeNodeType;
 import org.opensearch.index.compositeindex.datacube.startree.utils.SequentialDocValuesIterator;
 import org.opensearch.index.compositeindex.datacube.startree.utils.StarTreeUtils;
+import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator;
+import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator;
 import org.opensearch.index.mapper.ContentPath;
 import org.opensearch.index.mapper.DocumentMapper;
 import org.opensearch.index.mapper.FieldValueConverter;
@@ -395,7 +397,9 @@ SequentialDocValuesIterator[] getDimensionIterators(StarTreeDocument[] starTreeD
                 docsWithField.add(i);
             }
         }
-            sequentialDocValuesIterators[j] = new SequentialDocValuesIterator(getSortedNumericMock(dimList, docsWithField));
+            sequentialDocValuesIterators[j] = new SequentialDocValuesIterator(
+                new SortedNumericStarTreeValuesIterator(getSortedNumericMock(dimList, docsWithField))
+            );
         }
         return sequentialDocValuesIterators;
     }
@@ -412,7 +416,9 @@ List<SequentialDocValuesIterator> getMetricIterators(StarTreeDocument[] starTree
                 docsWithField.add(i);
             }
         }
-        sequentialDocValuesIterators.add(new SequentialDocValuesIterator(getSortedNumericMock(metricslist, docsWithField)));
+        sequentialDocValuesIterators.add(
+            new SequentialDocValuesIterator(new SortedNumericStarTreeValuesIterator(getSortedNumericMock(metricslist, docsWithField)))
+        );
         }
         return sequentialDocValuesIterators;
     }
@@ -1985,10 +1991,16 @@ public void testFlushFlow() throws IOException {
         List<Integer> metricsWithField = List.of(0, 1, 2, 3, 4, 5);
 
         compositeField = getStarTreeFieldWithMultipleMetrics();
-        SortedNumericDocValues d1sndv = getSortedNumericMock(dimList, docsWithField);
-        SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2);
-        SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
-        SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList, metricsWithField);
+        SortedNumericStarTreeValuesIterator d1sndv = new SortedNumericStarTreeValuesIterator(getSortedNumericMock(dimList, docsWithField));
+        SortedNumericStarTreeValuesIterator d2sndv = new SortedNumericStarTreeValuesIterator(
+            getSortedNumericMock(dimList2, docsWithField2)
+        );
+        SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator(
+            getSortedNumericMock(metricsList, metricsWithField)
+        );
+        SortedNumericStarTreeValuesIterator m2sndv = new SortedNumericStarTreeValuesIterator(
+            getSortedNumericMock(metricsList, metricsWithField)
+        );
 
         writeState = getWriteState(6, writeState.segmentInfo.getId());
         builder = getStarTreeBuilder(metaOut, dataOut, compositeField, writeState, mapperService);
@@ -2081,10 +2093,16 @@ public void testFlushFlowDimsReverse() throws IOException {
         List<Integer> metricsWithField = List.of(0, 1, 2, 3, 4, 5);
 
         compositeField = getStarTreeFieldWithMultipleMetrics();
-        SortedNumericDocValues d1sndv = getSortedNumericMock(dimList, docsWithField);
-        SortedNumericDocValues d2sndv = getSortedNumericMock(dimList2, docsWithField2);
-        SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
-        SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList, metricsWithField);
+        SortedNumericStarTreeValuesIterator d1sndv = new SortedNumericStarTreeValuesIterator(getSortedNumericMock(dimList, docsWithField));
+        SortedNumericStarTreeValuesIterator d2sndv = new SortedNumericStarTreeValuesIterator(
+            getSortedNumericMock(dimList2, docsWithField2)
+        );
+        SortedNumericStarTreeValuesIterator m1sndv = new SortedNumericStarTreeValuesIterator(
+            getSortedNumericMock(metricsList, metricsWithField)
+        );
+        SortedNumericStarTreeValuesIterator m2sndv = new SortedNumericStarTreeValuesIterator(
+            getSortedNumericMock(metricsList, metricsWithField)
+        );
 
         writeState = getWriteState(6, writeState.segmentInfo.getId());
         this.docValuesConsumer = LuceneDocValuesConsumerFactory.getDocValuesConsumerForCompositeCodec(
@@ -2508,9 +2526,14 @@ private StarTreeValues getStarTreeValues(
         SortedNumericDocValues d1sndv = dimList;
         SortedNumericDocValues d2sndv = dimList2;
         SortedNumericDocValues m1sndv = metricsList;
-        Map<String, Supplier<DocIdSetIterator>> dimDocIdSetIterators = Map.of("field1", () -> d1sndv, "field3", () -> d2sndv);
+        Map<String, Supplier<StarTreeValuesIterator>> dimDocIdSetIterators = Map.of(
+            "field1",
+            () -> new SortedNumericStarTreeValuesIterator(d1sndv),
+            "field3",
+            () -> new SortedNumericStarTreeValuesIterator(d2sndv)
+        );
 
-        Map<String, Supplier<DocIdSetIterator>> metricDocIdSetIterators = new LinkedHashMap<>();
+        Map<String, Supplier<StarTreeValuesIterator>> metricDocIdSetIterators = new LinkedHashMap<>();
         for (Metric metric : sf.getMetrics()) {
             for (MetricStat metricStat : metric.getMetrics()) {
                 String metricFullName = fullyQualifiedFieldNameForStarTreeMetricsDocValues(
@@ -2518,7 +2541,7 @@ private StarTreeValues getStarTreeValues(
                     metric.getField(),
                     metricStat.getTypeName()
                 );
-                metricDocIdSetIterators.put(metricFullName, () -> m1sndv);
+                metricDocIdSetIterators.put(metricFullName, () -> new SortedNumericStarTreeValuesIterator(m1sndv));
             }
         }
 
@@ -3648,18 +3671,18 @@ private StarTreeValues getStarTreeValues(
         SortedNumericDocValues d4sndv = getSortedNumericMock(dimList4, docsWithField4);
         SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
         SortedNumericDocValues m2sndv = getSortedNumericMock(metricsList1, metricsWithField1);
-        Map<String, Supplier<DocIdSetIterator>> dimDocIdSetIterators = Map.of(
+        Map<String, Supplier<StarTreeValuesIterator>> dimDocIdSetIterators = Map.of(
             "field1",
-            () -> d1sndv,
+            () -> new SortedNumericStarTreeValuesIterator(d1sndv),
             "field3",
-            () -> d2sndv,
+            () -> new SortedNumericStarTreeValuesIterator(d2sndv),
             "field5",
-            () -> d3sndv,
+            () -> new SortedNumericStarTreeValuesIterator(d3sndv),
             "field8",
-            () -> d4sndv
+            () -> new SortedNumericStarTreeValuesIterator(d4sndv)
         );
 
-        Map<String, Supplier<DocIdSetIterator>> metricDocIdSetIterators = new LinkedHashMap<>();
+        Map<String, Supplier<StarTreeValuesIterator>> metricDocIdSetIterators = new LinkedHashMap<>();
 
         metricDocIdSetIterators.put(
             fullyQualifiedFieldNameForStarTreeMetricsDocValues(
@@ -3667,7 +3690,7 @@ private StarTreeValues getStarTreeValues(
                 "field2",
                 sf.getMetrics().get(0).getMetrics().get(0).getTypeName()
             ),
-            () -> m1sndv
+            () -> new SortedNumericStarTreeValuesIterator(m1sndv)
         );
         metricDocIdSetIterators.put(
             fullyQualifiedFieldNameForStarTreeMetricsDocValues(
@@ -3675,7 +3698,7 @@ private StarTreeValues getStarTreeValues(
                 "_doc_count",
                 sf.getMetrics().get(1).getMetrics().get(0).getTypeName()
             ),
-            () -> m2sndv
+            () -> new SortedNumericStarTreeValuesIterator(m2sndv)
        );
         // metricDocIdSetIterators.put("field2", () -> m1sndv);
         // metricDocIdSetIterators.put("_doc_count", () -> m2sndv);
@@ -4093,24 +4116,24 @@ public void testMergeFlow() throws IOException {
         SortedNumericDocValues m1sndv = getSortedNumericMock(metricsList, metricsWithField);
         SortedNumericDocValues valucountsndv = getSortedNumericMock(metricsListValueCount, metricsWithFieldValueCount);
         SortedNumericDocValues m2sndv = DocValues.emptySortedNumeric();
-        Map<String, Supplier<DocIdSetIterator>> dimDocIdSetIterators = Map.of(
+        Map<String, Supplier<StarTreeValuesIterator>> dimDocIdSetIterators = Map.of(
             "field1",
-            () -> d1sndv,
+            () -> new SortedNumericStarTreeValuesIterator(d1sndv),
             "field3",
-            () -> d2sndv,
+            () -> new SortedNumericStarTreeValuesIterator(d2sndv),
             "field5",
-            () -> d3sndv,
+            () -> new SortedNumericStarTreeValuesIterator(d3sndv),
             "field8",
-            () -> d4sndv
+            () -> new SortedNumericStarTreeValuesIterator(d4sndv)
         );
 
-        Map<String, Supplier<DocIdSetIterator>> metricDocIdSetIterators = Map.of(
+        Map<String, Supplier<StarTreeValuesIterator>> metricDocIdSetIterators = Map.of(
             "sf_field2_sum_metric",
-            () -> m1sndv,
+            () -> new SortedNumericStarTreeValuesIterator(m1sndv),
             "sf_field2_value_count_metric",
-            () -> valucountsndv,
+            () -> new SortedNumericStarTreeValuesIterator(valucountsndv),
             "sf__doc_count_doc_count_metric",
-            () -> m2sndv
+            () -> new SortedNumericStarTreeValuesIterator(m2sndv)
         );
 
         StarTreeValues starTreeValues = new StarTreeValues(
@@ -4129,24 +4152,24 @@ public void testMergeFlow() throws IOException {
         SortedNumericDocValues f2m1sndv = getSortedNumericMock(metricsList, metricsWithField);
         SortedNumericDocValues f2valucountsndv = getSortedNumericMock(metricsListValueCount, metricsWithFieldValueCount);
         SortedNumericDocValues f2m2sndv = DocValues.emptySortedNumeric();
-        Map<String, Supplier<DocIdSetIterator>> f2dimDocIdSetIterators = Map.of(
+        Map<String, Supplier<StarTreeValuesIterator>> f2dimDocIdSetIterators = Map.of(
             "field1",
-            () -> f2d1sndv,
+            () -> new SortedNumericStarTreeValuesIterator(f2d1sndv),
             "field3",
-            () -> f2d2sndv,
+            () -> new SortedNumericStarTreeValuesIterator(f2d2sndv),
             "field5",
-            () -> f2d3sndv,
+            () -> new SortedNumericStarTreeValuesIterator(f2d3sndv),
             "field8",
-            () -> f2d4sndv
+            () -> new SortedNumericStarTreeValuesIterator(f2d4sndv)
         );
 
-        Map<String, Supplier<DocIdSetIterator>> f2metricDocIdSetIterators = Map.of(
+        Map<String, Supplier<StarTreeValuesIterator>> f2metricDocIdSetIterators = Map.of(
             "sf_field2_sum_metric",
-            () -> f2m1sndv,
+            () -> new SortedNumericStarTreeValuesIterator(f2m1sndv),
             "sf_field2_value_count_metric",
-            () -> f2valucountsndv,
+            () -> new SortedNumericStarTreeValuesIterator(f2valucountsndv),
             "sf__doc_count_doc_count_metric",
-            () -> f2m2sndv
+            () -> new SortedNumericStarTreeValuesIterator(f2m2sndv)
         );
 
         StarTreeValues starTreeValues2 = new StarTreeValues(
             compositeField,
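Across these test hunks the recurring change is that raw `SortedNumericDocValues` are no longer handed to `StarTreeValues` directly; each one is wrapped in a `SortedNumericStarTreeValuesIterator` supplier first. A condensed sketch of building those maps, with field names and the supplier-map shape taken from the tests above (the helper class itself is hypothetical):

```java
import java.util.Map;
import java.util.function.Supplier;

import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.SortedNumericDocValues;
import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator;
import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.StarTreeValuesIterator;

final class IteratorMaps {
    static Map<String, Supplier<StarTreeValuesIterator>> dimensionIterators(
        SortedNumericDocValues d1,
        SortedNumericDocValues d2
    ) {
        // Each supplier yields a fresh wrapper over the underlying doc values.
        return Map.of(
            "field1", () -> new SortedNumericStarTreeValuesIterator(d1),
            "field3", () -> new SortedNumericStarTreeValuesIterator(d2)
        );
    }

    static Map<String, Supplier<StarTreeValuesIterator>> emptyMetricIterator(String metricFullName) {
        // DocValues.emptySortedNumeric() stands in for a metric with no values,
        // mirroring the m2sndv/f2m2sndv usage in testMergeFlow above.
        return Map.of(metricFullName, () -> new SortedNumericStarTreeValuesIterator(DocValues.emptySortedNumeric()));
    }
}
```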
diff --git a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java
index f56f7d9906ae1..78d63800abd16 100644
--- a/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java
+++ b/server/src/test/java/org/opensearch/index/compositeindex/datacube/startree/utils/SequentialDocValuesIteratorTests.java
@@ -9,15 +9,13 @@
 package org.opensearch.index.compositeindex.datacube.startree.utils;
 
 import org.apache.lucene.codecs.DocValuesProducer;
-import org.apache.lucene.index.BinaryDocValues;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.SortedNumericDocValues;
 import org.apache.lucene.index.VectorEncoding;
 import org.apache.lucene.index.VectorSimilarityFunction;
-import org.apache.lucene.search.DocIdSetIterator;
-import org.apache.lucene.util.BytesRef;
+import org.opensearch.index.compositeindex.datacube.startree.utils.iterator.SortedNumericStarTreeValuesIterator;
 import org.opensearch.test.OpenSearchTestCase;
 import org.junit.BeforeClass;
 
@@ -59,50 +57,32 @@ public void testCreateIterator_SortedNumeric() throws IOException {
         DocValuesProducer producer = Mockito.mock(DocValuesProducer.class);
         SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class);
         when(producer.getSortedNumeric(mockFieldInfo)).thenReturn(iterator);
-        SequentialDocValuesIterator result = new SequentialDocValuesIterator(producer.getSortedNumeric(mockFieldInfo));
-        assertEquals(iterator.getClass(), result.getDocIdSetIterator().getClass());
-    }
-
-    public void testCreateIterator_UnsupportedType() throws IOException {
-        DocValuesProducer producer = Mockito.mock(DocValuesProducer.class);
-        BinaryDocValues iterator = Mockito.mock(BinaryDocValues.class);
-        when(producer.getBinary(mockFieldInfo)).thenReturn(iterator);
-        SequentialDocValuesIterator result = new SequentialDocValuesIterator(producer.getBinary(mockFieldInfo));
-        assertEquals(iterator.getClass(), result.getDocIdSetIterator().getClass());
-        when(iterator.nextDoc()).thenReturn(0);
-        when(iterator.binaryValue()).thenReturn(new BytesRef("123"));
+        SequentialDocValuesIterator result = new SequentialDocValuesIterator(
+            new SortedNumericStarTreeValuesIterator(producer.getSortedNumeric(mockFieldInfo))
+        );
 
-        IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
-            result.nextDoc(0);
-            result.value(0);
-        });
-        assertEquals("Unsupported Iterator requested for SequentialDocValuesIterator", exception.getMessage());
     }
 
     public void testGetNextValue_SortedNumeric() throws IOException {
         SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class);
         when(iterator.nextDoc()).thenReturn(0);
         when(iterator.nextValue()).thenReturn(123L);
-        SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator);
-        sequentialDocValuesIterator.nextDoc(0);
+        SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(
+            new SortedNumericStarTreeValuesIterator(iterator)
+        );
+        sequentialDocValuesIterator.nextEntry(0);
 
         long result = sequentialDocValuesIterator.value(0);
         assertEquals(123L, result);
     }
 
-    public void testGetNextValue_UnsupportedIterator() {
-        DocIdSetIterator iterator = Mockito.mock(DocIdSetIterator.class);
-        SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator);
-
-        IllegalStateException exception = expectThrows(IllegalStateException.class, () -> { sequentialDocValuesIterator.value(0); });
-        assertEquals("Unsupported Iterator requested for SequentialDocValuesIterator", exception.getMessage());
-    }
-
-    public void testNextDoc() throws IOException {
+    public void testNextEntry() throws IOException {
         SortedNumericDocValues iterator = Mockito.mock(SortedNumericDocValues.class);
-        SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(iterator);
+        SequentialDocValuesIterator sequentialDocValuesIterator = new SequentialDocValuesIterator(
+            new SortedNumericStarTreeValuesIterator(iterator)
+        );
         when(iterator.nextDoc()).thenReturn(5);
-        int result = sequentialDocValuesIterator.nextDoc(5);
+        int result = sequentialDocValuesIterator.nextEntry(5);
         assertEquals(5, result);
     }
 
@@ -110,8 +90,12 @@ public void test_multipleCoordinatedDocumentReader() throws IOException {
         SortedNumericDocValues iterator1 = Mockito.mock(SortedNumericDocValues.class);
         SortedNumericDocValues iterator2 = Mockito.mock(SortedNumericDocValues.class);
 
-        SequentialDocValuesIterator sequentialDocValuesIterator1 = new SequentialDocValuesIterator(iterator1);
-        SequentialDocValuesIterator sequentialDocValuesIterator2 = new SequentialDocValuesIterator(iterator2);
+        SequentialDocValuesIterator sequentialDocValuesIterator1 = new SequentialDocValuesIterator(
+            new SortedNumericStarTreeValuesIterator(iterator1)
+        );
+        SequentialDocValuesIterator sequentialDocValuesIterator2 = new SequentialDocValuesIterator(
+            new SortedNumericStarTreeValuesIterator(iterator2)
+        );
 
         when(iterator1.nextDoc()).thenReturn(0);
         when(iterator2.nextDoc()).thenReturn(1);
@@ -119,13 +103,13 @@ public void test_multipleCoordinatedDocumentReader() throws IOException {
         when(iterator1.nextValue()).thenReturn(9L);
         when(iterator2.nextValue()).thenReturn(9L);
 
-        sequentialDocValuesIterator1.nextDoc(0);
-        sequentialDocValuesIterator2.nextDoc(0);
-        assertEquals(0, sequentialDocValuesIterator1.getDocId());
+        sequentialDocValuesIterator1.nextEntry(0);
+        sequentialDocValuesIterator2.nextEntry(0);
+        assertEquals(0, sequentialDocValuesIterator1.getEntryId());
         assertEquals(9L, (long) sequentialDocValuesIterator1.value(0));
         assertNull(sequentialDocValuesIterator2.value(0));
-        assertNotEquals(0, sequentialDocValuesIterator2.getDocId());
-        assertEquals(1, sequentialDocValuesIterator2.getDocId());
+        assertNotEquals(0, sequentialDocValuesIterator2.getEntryId());
+        assertEquals(1, sequentialDocValuesIterator2.getEntryId());
         assertEquals(9L, (long) sequentialDocValuesIterator2.value(1));
     }
 }
diff --git a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java
index cb06bf23d9cbe..b415e1e657f7f 100644
--- a/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/ObjectMapperTests.java
@@ -51,8 +51,12 @@
 
 import java.io.IOException;
 import java.util.Collection;
+import java.util.Collections;
+
+import org.mockito.Mockito;
 
 import static org.opensearch.common.util.FeatureFlags.STAR_TREE_INDEX;
+import static org.opensearch.index.mapper.ObjectMapper.Nested.isParent;
 import static org.hamcrest.Matchers.containsString;
 
 public class ObjectMapperTests extends OpenSearchSingleNodeTestCase {
@@ -568,6 +572,49 @@ public void testCompositeFields() throws Exception {
         FeatureFlags.initializeFeatureFlags(Settings.EMPTY);
     }
 
+    public void testNestedIsParent() throws Exception {
+        String mapping = XContentFactory.jsonBuilder()
+            .startObject()
+            .startObject("properties")
+            .startObject("a")
+            .field("type", "nested")
+            .startObject("properties")
+            .field("b1", Collections.singletonMap("type", "keyword"))
+            .startObject("b2")
+            .field("type", "nested")
+            .startObject("properties")
+            .startObject("c")
+            .field("type", "nested")
+            .startObject("properties")
+            .field("d", Collections.singletonMap("type", "keyword"))
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .endObject()
+            .toString();
+
+        DocumentMapper documentMapper = createIndex("test").mapperService()
+            .documentMapperParser()
+            .parse("_doc", new CompressedXContent(mapping));
+
+        MapperService mapperService = Mockito.mock(MapperService.class);
+        Mockito.when(mapperService.getObjectMapper(("a"))).thenReturn(documentMapper.objectMappers().get("a"));
+        Mockito.when(mapperService.getObjectMapper(("a.b2"))).thenReturn(documentMapper.objectMappers().get("a.b2"));
+        Mockito.when(mapperService.getObjectMapper(("a.b2.c"))).thenReturn(documentMapper.objectMappers().get("a.b2.c"));
+
+        assertTrue(isParent(documentMapper.objectMappers().get("a"), documentMapper.objectMappers().get("a.b2.c"), mapperService));
+        assertTrue(isParent(documentMapper.objectMappers().get("a"), documentMapper.objectMappers().get("a.b2"), mapperService));
+        assertTrue(isParent(documentMapper.objectMappers().get("a.b2"), documentMapper.objectMappers().get("a.b2.c"), mapperService));
+
+        assertFalse(isParent(documentMapper.objectMappers().get("a.b2.c"), documentMapper.objectMappers().get("a"), mapperService));
+        assertFalse(isParent(documentMapper.objectMappers().get("a.b2"), documentMapper.objectMappers().get("a"), mapperService));
+        assertFalse(isParent(documentMapper.objectMappers().get("a.b2.c"), documentMapper.objectMappers().get("a.b2"), mapperService));
+    }
+
     @Override
     protected Collection<Class<? extends Plugin>> getPlugins() {
         return pluginList(InternalSettingsPlugin.class);
supported"); + } + + @Override + public void deleteAsync(ActionListener completionListener) { + throw new RuntimeException("deleteAsync not supported"); + } } } diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index ef9257d746573..f7f1b02847854 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -55,13 +55,11 @@ import org.opensearch.http.HttpResponse; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpStats; -import org.opensearch.identity.IdentityService; import org.opensearch.indices.breaker.HierarchyCircuitBreakerService; import org.opensearch.rest.action.admin.indices.RestCreateIndexAction; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.client.NoOpNodeClient; import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.threadpool.ThreadPool; import org.opensearch.usage.UsageService; import org.junit.After; import org.junit.Before; @@ -97,7 +95,6 @@ public class RestControllerTests extends OpenSearchTestCase { private RestController restController; private HierarchyCircuitBreakerService circuitBreakerService; private UsageService usageService; - private IdentityService identityService; private NodeClient client; @Before @@ -115,11 +112,9 @@ public void setup() { // we can do this here only because we know that we don't adjust breaker settings dynamically in the test inFlightRequestsBreaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); - identityService = new IdentityService(Settings.EMPTY, mock(ThreadPool.class), List.of()); - HttpServerTransport httpServerTransport = new TestHttpServerTransport(); client = new NoOpNodeClient(this.getTestName()); - restController = new RestController(Collections.emptySet(), null, client, circuitBreakerService, usageService, identityService); + restController = new RestController(Collections.emptySet(), null, client, circuitBreakerService, usageService); restController.registerHandler( RestRequest.Method.GET, "/", @@ -140,7 +135,7 @@ public void teardown() throws IOException { } public void testDefaultRestControllerGetAllHandlersContainsFavicon() { - final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService); Iterator handlers = restController.getAllHandlers(); assertTrue(handlers.hasNext()); MethodHandlers faviconHandler = handlers.next(); @@ -150,7 +145,7 @@ public void testDefaultRestControllerGetAllHandlersContainsFavicon() { } public void testRestControllerGetAllHandlers() { - final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService); + final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService); restController.registerHandler(RestRequest.Method.PATCH, "/foo", mock(RestHandler.class)); restController.registerHandler(RestRequest.Method.GET, "/foo", mock(RestHandler.class)); @@ -175,7 +170,7 @@ public void testApplyRelevantHeaders() throws Exception { Set headers = new HashSet<>( Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", true)) ); - final RestController restController = new RestController(headers, null, null, 
diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java
index ef9257d746573..f7f1b02847854 100644
--- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java
+++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java
@@ -55,13 +55,11 @@
 import org.opensearch.http.HttpResponse;
 import org.opensearch.http.HttpServerTransport;
 import org.opensearch.http.HttpStats;
-import org.opensearch.identity.IdentityService;
 import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.opensearch.rest.action.admin.indices.RestCreateIndexAction;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.client.NoOpNodeClient;
 import org.opensearch.test.rest.FakeRestRequest;
-import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.usage.UsageService;
 import org.junit.After;
 import org.junit.Before;
@@ -97,7 +95,6 @@ public class RestControllerTests extends OpenSearchTestCase {
     private RestController restController;
     private HierarchyCircuitBreakerService circuitBreakerService;
     private UsageService usageService;
-    private IdentityService identityService;
     private NodeClient client;
 
     @Before
@@ -115,11 +112,9 @@ public void setup() {
         // we can do this here only because we know that we don't adjust breaker settings dynamically in the test
         inFlightRequestsBreaker = circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS);
 
-        identityService = new IdentityService(Settings.EMPTY, mock(ThreadPool.class), List.of());
-
         HttpServerTransport httpServerTransport = new TestHttpServerTransport();
         client = new NoOpNodeClient(this.getTestName());
-        restController = new RestController(Collections.emptySet(), null, client, circuitBreakerService, usageService, identityService);
+        restController = new RestController(Collections.emptySet(), null, client, circuitBreakerService, usageService);
         restController.registerHandler(
             RestRequest.Method.GET,
             "/",
@@ -140,7 +135,7 @@ public void teardown() throws IOException {
     }
 
     public void testDefaultRestControllerGetAllHandlersContainsFavicon() {
-        final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService);
+        final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService);
         Iterator<MethodHandlers> handlers = restController.getAllHandlers();
         assertTrue(handlers.hasNext());
         MethodHandlers faviconHandler = handlers.next();
@@ -150,7 +145,7 @@ public void testDefaultRestControllerGetAllHandlersContainsFavicon() {
     }
 
     public void testRestControllerGetAllHandlers() {
-        final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService);
+        final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService);
 
         restController.registerHandler(RestRequest.Method.PATCH, "/foo", mock(RestHandler.class));
         restController.registerHandler(RestRequest.Method.GET, "/foo", mock(RestHandler.class));
@@ -175,7 +170,7 @@ public void testApplyRelevantHeaders() throws Exception {
         Set<RestHeaderDefinition> headers = new HashSet<>(
             Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", true))
         );
-        final RestController restController = new RestController(headers, null, null, circuitBreakerService, usageService, identityService);
+        final RestController restController = new RestController(headers, null, null, circuitBreakerService, usageService);
         Map<String, List<String>> restHeaders = new HashMap<>();
         restHeaders.put("header.1", Collections.singletonList("true"));
         restHeaders.put("header.2", Collections.singletonList("true"));
@@ -211,7 +206,7 @@ public void testRequestWithDisallowedMultiValuedHeader() {
         Set<RestHeaderDefinition> headers = new HashSet<>(
             Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", false))
         );
-        final RestController restController = new RestController(headers, null, null, circuitBreakerService, usageService, identityService);
+        final RestController restController = new RestController(headers, null, null, circuitBreakerService, usageService);
         Map<String, List<String>> restHeaders = new HashMap<>();
         restHeaders.put("header.1", Collections.singletonList("boo"));
         restHeaders.put("header.2", Arrays.asList("foo", "bar"));
@@ -226,14 +221,7 @@ public void testRequestWithDisallowedMultiValuedHeaderButSameValues() {
         Set<RestHeaderDefinition> headers = new HashSet<>(
             Arrays.asList(new RestHeaderDefinition("header.1", true), new RestHeaderDefinition("header.2", false))
         );
-        final RestController restController = new RestController(
-            headers,
-            null,
-            client,
-            circuitBreakerService,
-            usageService,
-            identityService
-        );
+        final RestController restController = new RestController(headers, null, client, circuitBreakerService, usageService);
         Map<String, List<String>> restHeaders = new HashMap<>();
         restHeaders.put("header.1", Collections.singletonList("boo"));
         restHeaders.put("header.2", Arrays.asList("foo", "foo"));
@@ -294,7 +282,7 @@ public void testRegisterWithDeprecatedHandler() {
     }
 
     public void testRegisterSecondMethodWithDifferentNamedWildcard() {
-        final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService, identityService);
+        final RestController restController = new RestController(null, null, null, circuitBreakerService, usageService);
 
         RestRequest.Method firstMethod = randomFrom(RestRequest.Method.values());
         RestRequest.Method secondMethod = randomFrom(
@@ -322,7 +310,7 @@ public void testRestHandlerWrapper() throws Exception {
         final RestController restController = new RestController(Collections.emptySet(), h -> {
             assertSame(handler, h);
             return (RestRequest request, RestChannel channel, NodeClient client) -> wrapperCalled.set(true);
-        }, client, circuitBreakerService, usageService, identityService);
+        }, client, circuitBreakerService, usageService);
         restController.registerHandler(RestRequest.Method.GET, "/wrapped", handler);
         RestRequest request = testRestRequest("/wrapped", "{}", MediaTypeRegistry.JSON);
         AssertingChannel channel = new AssertingChannel(request, true, RestStatus.BAD_REQUEST);
@@ -385,7 +373,7 @@ public void testDispatchRequiresContentTypeForRequestsWithContent() {
         String content = randomAlphaOfLength((int) Math.round(BREAKER_LIMIT.getBytes() / inFlightRequestsBreaker.getOverhead()));
         RestRequest request = testRestRequest("/", content, null);
         AssertingChannel channel = new AssertingChannel(request, true, RestStatus.NOT_ACCEPTABLE);
-        restController = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService, identityService);
+        restController = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService);
         restController.registerHandler(
             RestRequest.Method.GET,
             "/",
diff --git a/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java b/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java
index 983121a4f481d..b8602cdc20e6a 100644
--- a/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java
+++ b/server/src/test/java/org/opensearch/rest/RestHttpResponseHeadersTests.java
@@ -39,12 +39,10 @@
 import org.opensearch.core.common.bytes.BytesReference;
 import org.opensearch.core.indices.breaker.CircuitBreakerService;
 import org.opensearch.core.rest.RestStatus;
-import org.opensearch.identity.IdentityService;
 import org.opensearch.indices.breaker.HierarchyCircuitBreakerService;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.rest.FakeRestChannel;
 import org.opensearch.test.rest.FakeRestRequest;
-import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.usage.UsageService;
 
 import java.util.ArrayList;
@@ -56,7 +54,6 @@
 import static org.hamcrest.CoreMatchers.notNullValue;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.is;
-import static org.mockito.Mockito.mock;
 
 public class RestHttpResponseHeadersTests extends OpenSearchTestCase {
 
@@ -106,17 +103,8 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception {
             new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
         );
 
-        final Settings settings = Settings.EMPTY;
         UsageService usageService = new UsageService();
-        final IdentityService identityService = new IdentityService(settings, mock(ThreadPool.class), List.of());
-        RestController restController = new RestController(
-            Collections.emptySet(),
-            null,
-            null,
-            circuitBreakerService,
-            usageService,
-            identityService
-        );
+        RestController restController = new RestController(Collections.emptySet(), null, null, circuitBreakerService, usageService);
 
         // A basic RestHandler handles requests to the endpoint
         RestHandler restHandler = new RestHandler() {
diff --git a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java
index c3cf33f4e9034..6aa1d10d71e50 100644
--- a/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java
+++ b/server/src/test/java/org/opensearch/rest/action/admin/indices/RestValidateQueryActionTests.java
@@ -44,7 +44,6 @@
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
 import org.opensearch.core.xcontent.MediaTypeRegistry;
-import org.opensearch.identity.IdentityService;
 import org.opensearch.rest.RestController;
 import org.opensearch.rest.RestRequest;
 import org.opensearch.search.AbstractSearchTestCase;
@@ -61,7 +60,6 @@
 
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.List;
 import java.util.Map;
 
 import static java.util.Collections.emptyMap;
@@ -75,15 +73,7 @@ public class RestValidateQueryActionTests extends AbstractSearchTestCase {
     private static NodeClient client = new NodeClient(Settings.EMPTY, threadPool);
 
     private static UsageService usageService = new UsageService();
-    private static IdentityService identityService = new IdentityService(Settings.EMPTY, threadPool, List.of());
-    private static RestController controller = new RestController(
-        emptySet(),
-        null,
-        client,
-        new NoneCircuitBreakerService(),
-        usageService,
-        identityService
-    );
+    private static RestController controller = new RestController(emptySet(), null, client, new NoneCircuitBreakerService(), usageService);
     private static RestValidateQueryAction action = new RestValidateQueryAction();
 
     /**
diff --git a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java
index 95a8267734a07..72798f7691eb1 100644
--- a/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java
+++ b/server/src/test/java/org/opensearch/snapshots/BlobStoreFormatTests.java
@@ -39,6 +39,7 @@
 import org.opensearch.common.blobstore.BlobMetadata;
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStore;
+import org.opensearch.common.blobstore.DeleteResult;
 import org.opensearch.common.blobstore.fs.FsBlobContainer;
 import org.opensearch.common.blobstore.fs.FsBlobStore;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
@@ -63,6 +64,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.file.Path;
+import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 
@@ -335,5 +337,15 @@ public boolean remoteIntegrityCheckSupported() {
         public BlobContainer getDelegate() {
             return delegate;
         }
+
+        @Override
+        public void deleteAsync(ActionListener<DeleteResult> completionListener) {
+            throw new RuntimeException("deleteAsync not supported");
+        }
+
+        @Override
+        public void deleteBlobsAsyncIgnoringIfNotExists(List<String> blobNames, ActionListener<Void> completionListener) {
+            throw new RuntimeException("deleteBlobsAsyncIgnoringIfNotExists not supported");
+        }
     }
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java
index c7a0fe35b0237..fec1699c9ef64 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/RestActionTestCase.java
@@ -40,25 +40,20 @@
 import org.opensearch.core.action.ActionListener;
 import org.opensearch.core.action.ActionResponse;
 import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
-import org.opensearch.identity.IdentityService;
 import org.opensearch.rest.RestController;
 import org.opensearch.rest.RestRequest;
 import org.opensearch.tasks.Task;
 import org.opensearch.tasks.TaskListener;
 import org.opensearch.test.OpenSearchTestCase;
 import org.opensearch.test.client.NoOpNodeClient;
-import org.opensearch.threadpool.ThreadPool;
 import org.opensearch.usage.UsageService;
 import org.junit.After;
 import org.junit.Before;
 
 import java.util.Collections;
-import java.util.List;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;
 
-import static org.mockito.Mockito.mock;
-
 /**
  * A common base class for Rest*ActionTests. Provides access to a {@link RestController}
  * that can be used to register individual REST actions, and test request handling.
@@ -70,15 +65,7 @@ public abstract class RestActionTestCase extends OpenSearchTestCase {
     @Before
    public void setUpController() {
         verifyingClient = new VerifyingClient(this.getTestName());
-        final IdentityService identityService = new IdentityService(Settings.EMPTY, mock(ThreadPool.class), List.of());
-        controller = new RestController(
-            Collections.emptySet(),
-            null,
-            verifyingClient,
-            new NoneCircuitBreakerService(),
-            new UsageService(),
-            identityService
-        );
+        controller = new RestController(Collections.emptySet(), null, verifyingClient, new NoneCircuitBreakerService(), new UsageService());
    }
 
     @After