From 04eb8171b8e24222282d430e0b6514822778a77c Mon Sep 17 00:00:00 2001 From: Nishchay Malhotra <114057571+nishchay21@users.noreply.github.com> Date: Thu, 20 Oct 2022 01:04:10 +0530 Subject: [PATCH] Adding feature to exclude indexes starting with dot from shard validator (#4695) * Adding feature to exclude indexes starting with dot from shard validator Signed-off-by: Nishchay Malhotra --- CHANGELOG.md | 1 + .../cluster/shards/ClusterShardLimitIT.java | 306 +++++++++++++++++- .../common/settings/ClusterSettings.java | 1 + .../indices/ShardLimitValidator.java | 71 +++- .../MetadataCreateIndexServiceTests.java | 10 +- .../MetadataIndexTemplateServiceTests.java | 4 +- .../indices/ShardLimitValidatorTests.java | 229 ++++++++++++- 7 files changed, 601 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0fa8716327ea..aae68b7f715d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -163,6 +163,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. 
- Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) +- Added feature to ignore indexes starting with dot during shard limit validation.([#4695](https://github.com/opensearch-project/OpenSearch/pull/4695)) ### Changed ### Deprecated ### Removed diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index a92849a077376..a88d42c07f8d6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -43,15 +43,28 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.common.Priority; +import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.core.internal.io.IOUtils; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.snapshots.SnapshotInfo; import org.opensearch.snapshots.SnapshotState; +import org.opensearch.snapshots.mockstore.MockRepository; +import org.opensearch.test.InternalSettingsPlugin; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.MockHttpTransport; +import org.opensearch.test.NodeConfigurationSource; import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.transport.nio.MockNioTransportPlugin; +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.function.Function; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; @@ -63,12 +76,18 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) public class ClusterShardLimitIT extends OpenSearchIntegTestCase { private static final String shardsPerNodeKey = ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(); + private static final String ignoreDotIndexKey = ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(); public void testSettingClusterMaxShards() { int shardsPerNode = between(1, 500_000); setShardsPerNode(shardsPerNode); } + public void testSettingIgnoreDotIndexes() { + boolean ignoreDotIndexes = randomBoolean(); + setIgnoreDotIndex(ignoreDotIndexes); + } + public void testMinimumPerNode() { int negativeShardsPerNode = between(-50_000, 0); try { @@ -100,7 +119,6 @@ public void testIndexCreationOverLimit() { ShardCounts counts = ShardCounts.forDataNodeCount(dataNodes); setShardsPerNode(counts.getShardsPerNode()); - // Create an index that will bring us up to the limit createIndex( "test", @@ -127,6 +145,164 @@ public void testIndexCreationOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("should-fail")); } + /** + * The test checks if the index starting with the dot can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would succeed. + */ + public void testIndexCreationOverLimitForDotIndexesSucceeds() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. 
It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, dataNodes * dataNodes) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + createIndex( + ".test-index", + Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0).build() + ); + + clusterState = client().admin().cluster().prepareState().get().getState(); + assertTrue(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the dot should not be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to false. If the cluster.ignore_Dot_indexes is set to false index creation of + * indexes starting with dot would fail as well. + */ + public void testIndexCreationOverLimitForDotIndexesFail() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + + /* + Create an index that will bring us up to the limit. 
It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".test-index")); + } + + /** + * The test checks if the index starting with the .ds- can be created if the node has + * number of shards equivalent to the cluster.max_shards_per_node and the cluster.ignore_Dot_indexes + * setting is set to true. If the cluster.ignore_Dot_indexes is set to true index creation of + * indexes starting with dot would only succeed and dataStream indexes would still have validation applied. 
+ */ + public void testIndexCreationOverLimitForDataStreamIndexes() { + int dataNodes = client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); + int maxAllowedShards = dataNodes * dataNodes; + + // Setting the cluster.max_shards_per_node setting according to the data node count. + setShardsPerNode(dataNodes); + setIgnoreDotIndex(true); + + /* + Create an index that will bring us up to the limit. It would create index with primary equal to the + dataNodes * dataNodes so that cluster.max_shards_per_node setting is reached. + */ + createIndex( + "test", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, maxAllowedShards) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + + // Getting total active shards in the cluster. + int currentActiveShards = client().admin().cluster().prepareHealth().get().getActiveShards(); + + // Getting cluster.max_shards_per_node setting + ClusterState clusterState = client().admin().cluster().prepareState().get().getState(); + String maxShardsPerNode = clusterState.getMetadata() + .settings() + .get(ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey()); + + // Checking if the total shards created are equivalent to dataNodes * cluster.max_shards_per_node + assertEquals(dataNodes * Integer.parseInt(maxShardsPerNode), currentActiveShards); + + int extraShardCount = 1; + try { + createIndex( + ".ds-test-index", + Settings.builder() + .put(indexSettings()) + .put(SETTING_NUMBER_OF_SHARDS, extraShardCount) + .put(SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ); + } catch (IllegalArgumentException e) { + verifyException(maxAllowedShards, currentActiveShards, extraShardCount, e); + } + clusterState = client().admin().cluster().prepareState().get().getState(); + assertFalse(clusterState.getMetadata().hasIndex(".ds-test-index")); + } + public void testIndexCreationOverLimitFromTemplate() { int dataNodes = 
client().admin().cluster().prepareState().get().getState().getNodes().getDataNodes().size(); @@ -414,6 +590,100 @@ public void testOpenIndexOverLimit() { assertFalse(clusterState.getMetadata().hasIndex("snapshot-index")); } + public void testIgnoreDotSettingOnMultipleNodes() throws IOException, InterruptedException { + int maxAllowedShardsPerNode = 10, indexPrimaryShards = 11, indexReplicaShards = 1; + + InternalTestCluster cluster = new InternalTestCluster( + randomLong(), + createTempDir(), + true, + true, + 0, + 0, + "cluster", + new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(ClusterShardLimitIT.this.nodeSettings(nodeOrdinal)) + .put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }, + 0, + "cluster-", + Arrays.asList( + TestSeedPlugin.class, + MockHttpTransport.TestPlugin.class, + MockTransportService.TestPlugin.class, + MockNioTransportPlugin.class, + InternalSettingsPlugin.class, + MockRepository.Plugin.class + ), + Function.identity() + ); + cluster.beforeTest(random()); + + // Starting 3 ClusterManagerOnlyNode nodes + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", true).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startClusterManagerOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Starting 2 data nodes + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + cluster.startDataOnlyNode(Settings.builder().put("cluster.ignore_dot_indexes", false).build()); + + // Setting max shards per node to be 10 + cluster.client() + .admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(shardsPerNodeKey, maxAllowedShardsPerNode)) + .get(); + + 
// Creating an index starting with dot having shards greater than the desired node limit + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + + // As active ClusterManagerNode setting takes precedence killing the active one. + // This would be the first one where cluster.ignore_dot_indexes is true because the above calls are blocking. + cluster.stopCurrentClusterManagerNode(); + + // Waiting for all shards to get assigned + cluster.client().admin().cluster().prepareHealth().setWaitForGreenStatus().get(); + + // Creating an index starting with dot having shards greater than the desired node limit + try { + cluster.client() + .admin() + .indices() + .prepareCreate(".test-index1") + .setSettings( + Settings.builder().put(SETTING_NUMBER_OF_SHARDS, indexPrimaryShards).put(SETTING_NUMBER_OF_REPLICAS, indexReplicaShards) + ) + .get(); + } catch (IllegalArgumentException e) { + ClusterHealthResponse clusterHealth = cluster.client().admin().cluster().prepareHealth().get(); + int currentActiveShards = clusterHealth.getActiveShards(); + int dataNodeCount = clusterHealth.getNumberOfDataNodes(); + int extraShardCount = indexPrimaryShards * (1 + indexReplicaShards); + verifyException(maxAllowedShardsPerNode * dataNodeCount, currentActiveShards, extraShardCount, e); + } + + IOUtils.close(cluster); + } + private int ensureMultipleDataNodes(int dataNodes) { if (dataNodes == 1) { internalCluster().startNode(dataNode()); @@ -457,6 +727,29 @@ private void setShardsPerNode(int shardsPerNode) { } } + private void setIgnoreDotIndex(boolean ignoreDotIndex) { + try { + ClusterUpdateSettingsResponse response; + if (frequently()) { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + 
assertEquals(ignoreDotIndex, response.getPersistentSettings().getAsBoolean(ignoreDotIndexKey, true)); + } else { + response = client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().put(ignoreDotIndexKey, ignoreDotIndex).build()) + .get(); + assertEquals(ignoreDotIndex, response.getTransientSettings().getAsBoolean(ignoreDotIndexKey, true)); + } + } catch (IllegalArgumentException ex) { + fail(ex.getMessage()); + } + } + private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentException e) { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); @@ -471,4 +764,15 @@ private void verifyException(int dataNodes, ShardCounts counts, IllegalArgumentE assertEquals(expectedError, e.getMessage()); } + private void verifyException(int maxShards, int currentShards, int extraShards, IllegalArgumentException e) { + String expectedError = "Validation Failed: 1: this action would add [" + + extraShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;"; + assertEquals(expectedError, e.getMessage()); + } + } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 320cb5457b21c..12a3a883af347 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -255,6 +255,7 @@ public void apply(Settings value, Settings current, Settings previous) { Metadata.SETTING_READ_ONLY_SETTING, Metadata.SETTING_READ_ONLY_ALLOW_DELETE_SETTING, ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE, + ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES, RecoverySettings.INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, 
RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, diff --git a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java index a5ec6cbecaf55..e803e387448bc 100644 --- a/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java +++ b/server/src/main/java/org/opensearch/indices/ShardLimitValidator.java @@ -33,6 +33,7 @@ package org.opensearch.indices; import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.ValidationException; @@ -64,12 +65,23 @@ public class ShardLimitValidator { Setting.Property.Dynamic, Setting.Property.NodeScope ); + + public static final Setting SETTING_CLUSTER_IGNORE_DOT_INDEXES = Setting.boolSetting( + "cluster.ignore_dot_indexes", + false, + Setting.Property.Dynamic, + Setting.Property.NodeScope + ); + protected final AtomicInteger shardLimitPerNode = new AtomicInteger(); private final SystemIndices systemIndices; + private volatile boolean ignoreDotIndexes; public ShardLimitValidator(final Settings settings, ClusterService clusterService, SystemIndices systemIndices) { this.shardLimitPerNode.set(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.get(settings)); + this.ignoreDotIndexes = SETTING_CLUSTER_IGNORE_DOT_INDEXES.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_MAX_SHARDS_PER_NODE, this::setShardLimitPerNode); + clusterService.getClusterSettings().addSettingsUpdateConsumer(SETTING_CLUSTER_IGNORE_DOT_INDEXES, this::setIgnoreDotIndexes); this.systemIndices = systemIndices; } @@ -85,8 +97,15 @@ public int getShardLimitPerNode() { return shardLimitPerNode.get(); } + private void setIgnoreDotIndexes(boolean newValue) { + this.ignoreDotIndexes = newValue; + } + /** * 
Checks whether an index can be created without going over the cluster shard limit. + * Validate shard limit only for non system indices as it is not hard limit anyways. + * Further also validates if the cluster.ignore_dot_indexes is set to true. + * If so then it does not validate any index which starts with '.' except data-stream index. * * @param indexName the name of the index being created * @param settings the settings of the index to be created @@ -94,8 +113,12 @@ public int getShardLimitPerNode() { * @throws ValidationException if creating this index would put the cluster over the cluster shard limit */ public void validateShardLimit(final String indexName, final Settings settings, final ClusterState state) { - // Validate shard limit only for non system indices as it is not hard limit anyways - if (systemIndices.validateSystemIndex(indexName)) { + /* + Validate shard limit only for non system indices as it is not hard limit anyways. + Further also validates if the cluster.ignore_dot_indexes is set to true. + If so then it does not validate any index which starts with '.'. + */ + if (shouldIndexBeIgnored(indexName)) { return; } @@ -113,7 +136,9 @@ public void validateShardLimit(final String indexName, final Settings settings, /** * Validates whether a list of indices can be opened without going over the cluster shard limit. Only counts indices which are - * currently closed and will be opened, ignores indices which are already open. + * currently closed and will be opened, ignores indices which are already open. Adding to this it validates the + * shard limit only for non system indices and if the cluster.ignore_dot_indexes property is set to true + * then the indexes starting with '.' are ignored except the data-stream indexes. * * @param currentState The current cluster state. * @param indicesToOpen The indices which are to be opened. 
@@ -121,8 +146,13 @@ public void validateShardLimit(final String indexName, final Settings settings, */ public void validateShardLimit(ClusterState currentState, Index[] indicesToOpen) { int shardsToOpen = Arrays.stream(indicesToOpen) - // Validate shard limit only for non system indices as it is not hard limit anyways - .filter(index -> !systemIndices.validateSystemIndex(index.getName())) + /* + Validate shard limit only for non system indices as it is not hard limit anyways. + Further also validates if the cluster.ignore_dot_indexes is set to true. + If so then it does not validate any index which starts with '.' + however data-stream indexes are still validated. + */ + .filter(index -> !shouldIndexBeIgnored(index.getName())) .filter(index -> currentState.metadata().index(index).getState().equals(IndexMetadata.State.CLOSE)) .mapToInt(index -> getTotalShardCount(currentState, index)) .sum(); @@ -140,6 +170,37 @@ private static int getTotalShardCount(ClusterState state, Index index) { return indexMetadata.getNumberOfShards() * (1 + indexMetadata.getNumberOfReplicas()); } + /** + * Returns true if the index should be ignored during validation. + * Index is ignored if it is a system index or if cluster.ignore_dot_indexes is set to true + * then indexes which are starting with dot and are not data stream index are ignored. + * + * @param indexName The index which needs to be validated. + */ + private boolean shouldIndexBeIgnored(String indexName) { + if (this.ignoreDotIndexes) { + return validateDotIndex(indexName) && !isDataStreamIndex(indexName); + } else return systemIndices.validateSystemIndex(indexName); + } + + /** + * Returns true if the index name starts with '.' else false. + * + * @param indexName The index which needs to be validated. + */ + private boolean validateDotIndex(String indexName) { + return indexName.charAt(0) == '.'; + } + + /** + * Returns true if the index is dataStreamIndex false otherwise. 
+ * + * @param indexName The index which needs to be validated. + */ + private boolean isDataStreamIndex(String indexName) { + return indexName.startsWith(DataStream.BACKING_INDEX_PREFIX); + } + /** * Checks to see if an operation can be performed without taking the cluster over the cluster-wide shard limit. * Returns an error message if appropriate, or an empty {@link Optional} otherwise. diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index ac03ab49bbbbd..5348fd0a778f7 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -592,7 +592,7 @@ public void testValidateIndexName() throws Exception { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), null, null, threadPool, @@ -674,7 +674,7 @@ public void testValidateDotIndex() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), null, null, threadPool, @@ -1090,7 +1090,7 @@ public void testvalidateIndexSettings() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), new Environment(Settings.builder().put("path.home", "dummy").build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, threadPool, @@ -1234,7 +1234,7 @@ public void testIndexLifecycleNameSetting() { null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000), clusterService), + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), new 
Environment(Settings.builder().put("path.home", "dummy").build(), null), new IndexScopedSettings(ilnSetting, Collections.emptySet()), threadPool, @@ -1311,7 +1311,7 @@ private static Map convertMappings(ImmutableOpenMap< } private ShardLimitValidator randomShardLimitService() { - return createTestShardLimitService(randomIntBetween(10, 10000)); + return createTestShardLimitService(randomIntBetween(10, 10000), false); } private void withTemporaryClusterService(BiConsumer consumer) { diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index 887d8469bd01c..aa1c500030bd6 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -2100,7 +2100,7 @@ private static List putTemplate(NamedXContentRegistry xContentRegistr null, null, null, - createTestShardLimitService(randomIntBetween(1, 1000)), + createTestShardLimitService(randomIntBetween(1, 1000), false), new Environment(builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, null, @@ -2163,7 +2163,7 @@ private MetadataIndexTemplateService getMetadataIndexTemplateService() { indicesService, null, null, - createTestShardLimitService(randomIntBetween(1, 1000)), + createTestShardLimitService(randomIntBetween(1, 1000), false), new Environment(builder().put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()).build(), null), IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, null, diff --git a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java index 60c467e3864e9..f19689565dd92 100644 --- 
a/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java +++ b/server/src/test/java/org/opensearch/indices/ShardLimitValidatorTests.java @@ -59,6 +59,7 @@ import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addClosedIndex; import static org.opensearch.cluster.metadata.MetadataIndexStateServiceTests.addOpenedIndex; import static org.opensearch.cluster.shards.ShardCounts.forDataNodeCount; +import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_IGNORE_DOT_INDEXES; import static org.opensearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -113,7 +114,7 @@ public void testUnderShardLimit() { * even though it exceeds the cluster max shard limit */ public void testSystemIndexCreationSucceeds() { - final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); final Settings settings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) .put(SETTING_NUMBER_OF_SHARDS, 1) @@ -128,7 +129,7 @@ public void testSystemIndexCreationSucceeds() { * fails when it exceeds the cluster max shard limit */ public void testNonSystemIndexCreationFails() { - final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1); + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); final Settings settings = Settings.builder() .put(SETTING_VERSION_CREATED, Version.CURRENT) .put(SETTING_NUMBER_OF_SHARDS, 1) @@ -151,6 +152,92 @@ public void testNonSystemIndexCreationFails() { ); } + /** + * This test validates that index starting with dot creation Succeeds + * when the setting cluster.ignore_dot_indexes is set to true. 
+ */ + public void testDotIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".test-index", settings, state); + } + + /** + * This test validates that index starting with dot creation fails + * when the setting cluster.ignore_dot_indexes is set to false. + */ + public void testDotIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, false); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(".test-index", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that dataStream index creation fails + * when the cluster.ignore_dot_indexes is set to true, and we reach the max shard per node limit. 
+ */ + public void testDataStreamIndexCreationFails() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(1, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + final ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(".ds-test-index", settings, state) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + 2 + + "] total shards, but this cluster currently has [" + + 1 + + "]/[" + + 1 + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that dataStream index creation succeeds + * when the cluster.ignore_dot_indexes is set to true, and we don't reach the max shard per node limit. + */ + public void testDataStreamIndexCreationSucceeds() { + final ShardLimitValidator shardLimitValidator = createTestShardLimitService(4, true); + final Settings settings = Settings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_NUMBER_OF_REPLICAS, 1) + .build(); + final ClusterState state = createClusterForShardLimitTest(1, 1, 0); + shardLimitValidator.validateShardLimit(".ds-test-index", settings, state); + } + /** * This test validates that non-system index opening * fails when it exceeds the cluster max shard limit @@ -174,7 +261,7 @@ public void testNonSystemIndexOpeningFails() { int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); int maxShards = counts.getShardsPerNode() * nodesInCluster; - ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + ShardLimitValidator shardLimitValidator = 
createTestShardLimitService(counts.getShardsPerNode(), false); ValidationException exception = expectThrows( ValidationException.class, () -> shardLimitValidator.validateShardLimit(state, indices) @@ -214,10 +301,124 @@ public void testSystemIndexOpeningSucceeds() { .toArray(new Index[2]); // Shard limit validation succeeds without any issues as system index is being opened - ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode()); + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); + shardLimitValidator.validateShardLimit(state, indices); + } + + /** + * This test validates that index having '.' in the first character + * opening of such indexes succeeds even when it exceeds the cluster max shard limit if the + * cluster.ignore_dot_indexes setting is set to true. + */ + public void testDotIndexOpeningSucceeds() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".test-index", // Adding closed index starting with dot to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + // Shard limit validation succeeds without any issues + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), true); shardLimitValidator.validateShardLimit(state, indices); } + /** + * This test validates that index having '.' in the first character + * opening fails when it exceeds the cluster max shard limit if the + * cluster.ignore_dot_indexes is set to false. 
+ */ + public void testDotIndexOpeningFails() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".test-index", // Adding closed index starting with dot to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), false); + ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(state, indices) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;", + exception.getMessage() + ); + } + + /** + * This test validates that index starting with '.ds-' + * opening fails when it exceeds the cluster max shard limit if the + * cluster.ignore_dot_indexes is set to true. 
+ */ + public void testDataStreamIndexOpeningFails() { + int nodesInCluster = randomIntBetween(2, 90); + ShardCounts counts = forDataNodeCount(nodesInCluster); + ClusterState state = createClusterForShardLimitTest( + nodesInCluster, + randomAlphaOfLengthBetween(5, 15), + counts.getFirstIndexShards(), + counts.getFirstIndexReplicas(), + ".ds-test-index", // Adding closed data stream index to cluster state + counts.getFailingIndexShards(), + counts.getFailingIndexReplicas() + ); + + Index[] indices = Arrays.stream(state.metadata().indices().values().toArray(IndexMetadata.class)) + .map(IndexMetadata::getIndex) + .collect(Collectors.toList()) + .toArray(new Index[2]); + + int totalShards = counts.getFailingIndexShards() * (1 + counts.getFailingIndexReplicas()); + int currentShards = counts.getFirstIndexShards() * (1 + counts.getFirstIndexReplicas()); + int maxShards = counts.getShardsPerNode() * nodesInCluster; + ShardLimitValidator shardLimitValidator = createTestShardLimitService(counts.getShardsPerNode(), true); + ValidationException exception = expectThrows( + ValidationException.class, + () -> shardLimitValidator.validateShardLimit(state, indices) + ); + assertEquals( + "Validation Failed: 1: this action would add [" + + totalShards + + "] total shards, but this cluster currently has [" + + currentShards + + "]/[" + + maxShards + + "] maximum shards open;", + exception.getMessage() + ); + } + public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int shardsInIndex, int replicas) { ImmutableOpenMap.Builder dataNodes = ImmutableOpenMap.builder(); for (int i = 0; i < nodesInCluster; i++) { @@ -292,12 +493,16 @@ public static ClusterState createClusterForShardLimitTest( * Creates a {@link ShardLimitValidator} for testing with the given setting and a mocked cluster service. 
* * @param maxShardsPerNode the value to use for the max shards per node setting + * @param ignoreDotIndexes validates if index starting with dot should be ignored or not * @return a test instance */ - public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode) { + public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, boolean ignoreDotIndexes) { // Use a mocked clusterService - for unit tests we won't be updating the setting anyway. ClusterService clusterService = mock(ClusterService.class); - Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), ignoreDotIndexes) + .build(); when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(limitOnlySettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) ); @@ -309,11 +514,19 @@ public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNo * Creates a {@link ShardLimitValidator} for testing with the given setting and a given cluster service. 
* * @param maxShardsPerNode the value to use for the max shards per node setting + * @param ignoreDotIndexes validates if index starting with dot should be ignored or not * @param clusterService the cluster service to use * @return a test instance */ - public static ShardLimitValidator createTestShardLimitService(int maxShardsPerNode, ClusterService clusterService) { - Settings limitOnlySettings = Settings.builder().put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode).build(); + public static ShardLimitValidator createTestShardLimitService( + int maxShardsPerNode, + boolean ignoreDotIndexes, + ClusterService clusterService + ) { + Settings limitOnlySettings = Settings.builder() + .put(SETTING_CLUSTER_MAX_SHARDS_PER_NODE.getKey(), maxShardsPerNode) + .put(SETTING_CLUSTER_IGNORE_DOT_INDEXES.getKey(), ignoreDotIndexes) + .build(); return new ShardLimitValidator(limitOnlySettings, clusterService, new SystemIndices(emptyMap())); }