Skip to content

Commit

Permalink
[Refactor] Refactors remaining ImmutableOpenMap usage to j.u.Map (#7309)
Browse files Browse the repository at this point in the history
Refactors all remaining usage of the HPPC-backed ImmutableOpenMap to
j.u.Map. This is the last usage of the class, so ImmutableOpenMap is
completely removed in favor of plain Java maps.

Signed-off-by: Nicholas Walter Knize <[email protected]>
  • Loading branch information
nknize authored Apr 26, 2023
1 parent 77f6d13 commit d984f50
Show file tree
Hide file tree
Showing 64 changed files with 327 additions and 1,094 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,6 @@
package org.opensearch.client;

import org.opensearch.common.bytes.BytesReference;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.xcontent.LoggingDeprecationHandler;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.ToXContent;
Expand All @@ -43,10 +42,6 @@
import org.opensearch.test.OpenSearchTestCase;

import java.io.IOException;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;

/**
* Base class for HLRC response parsing tests.
Expand Down Expand Up @@ -103,17 +98,4 @@ public final void testFromXContent() throws IOException {
protected ToXContent.Params getParams() {
return ToXContent.EMPTY_PARAMS;
}

protected static <T> void assertMapEquals(ImmutableOpenMap<String, T> expected, Map<String, T> actual) {
Set<String> expectedKeys = new HashSet<>();
Iterator<String> keysIt = expected.keysIt();
while (keysIt.hasNext()) {
expectedKeys.add(keysIt.next());
}

assertEquals(expectedKeys, actual.keySet());
for (String key : expectedKeys) {
assertEquals(expected.get(key), actual.get(key));
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
import org.opensearch.client.GetAliasesResponseTests;
import org.opensearch.cluster.metadata.AliasMetadata;
import org.opensearch.cluster.metadata.MappingMetadata;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.IndexScopedSettings;
import org.opensearch.common.settings.Settings;
import org.opensearch.core.xcontent.XContentParser;
Expand All @@ -64,9 +63,9 @@ protected org.opensearch.action.admin.indices.get.GetIndexResponse createServerT
String[] indices = generateRandomStringArray(5, 5, false, false);
final Map<String, MappingMetadata> mappings = new HashMap<>();
final Map<String, List<AliasMetadata>> aliases = new HashMap<>();
ImmutableOpenMap.Builder<String, Settings> settings = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, Settings> defaultSettings = ImmutableOpenMap.builder();
ImmutableOpenMap.Builder<String, String> dataStreams = ImmutableOpenMap.builder();
final Map<String, Settings> settings = new HashMap<>();
final Map<String, Settings> defaultSettings = new HashMap<>();
final Map<String, String> dataStreams = new HashMap<>();
IndexScopedSettings indexScopedSettings = IndexScopedSettings.DEFAULT_SCOPED_SETTINGS;
boolean includeDefaults = randomBoolean();
for (String index : indices) {
Expand Down Expand Up @@ -96,9 +95,9 @@ protected org.opensearch.action.admin.indices.get.GetIndexResponse createServerT
indices,
mappings,
aliases,
settings.build(),
defaultSettings.build(),
dataStreams.build()
settings,
defaultSettings,
dataStreams
);
}

Expand All @@ -114,8 +113,8 @@ protected void assertInstances(
) {
assertArrayEquals(serverTestInstance.getIndices(), clientInstance.getIndices());
assertEquals(serverTestInstance.getMappings(), clientInstance.getMappings());
assertMapEquals(serverTestInstance.getSettings(), clientInstance.getSettings());
assertMapEquals(serverTestInstance.defaultSettings(), clientInstance.getDefaultSettings());
assertEquals(serverTestInstance.getSettings(), clientInstance.getSettings());
assertEquals(serverTestInstance.defaultSettings(), clientInstance.getDefaultSettings());
assertEquals(serverTestInstance.getAliases(), clientInstance.getAliases());
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@
import org.opensearch.cluster.routing.UnassignedInfo;
import org.opensearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.opensearch.common.Priority;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.ByteSizeValue;
import org.opensearch.common.unit.TimeValue;
Expand Down Expand Up @@ -113,15 +112,9 @@ public void testCreateShrinkIndexToN() {
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON)
.get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
Expand Down Expand Up @@ -212,15 +205,9 @@ public void testShrinkIndexPrimaryTerm() throws Exception {
internalCluster().ensureAtLeastNumDataNodes(2);
prepareCreate("source").setSettings(Settings.builder().put(indexSettings()).put("number_of_shards", numberOfShards)).get();

final ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertThat(dataNodes.size(), greaterThanOrEqualTo(2));
final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
final DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
final String mergeNode = discoveryNodes[0].getName();
// This needs more than the default timeout if a large number of shards were created.
ensureGreen(TimeValue.timeValueSeconds(120));
Expand Down Expand Up @@ -298,15 +285,9 @@ public void testCreateShrinkIndex() {
for (int i = 0; i < docs; i++) {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
Expand Down Expand Up @@ -426,15 +407,9 @@ public void testCreateShrinkIndexFails() throws Exception {
for (int i = 0; i < 20; i++) {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
String spareNode = discoveryNodes[0].getName();
String mergeNode = discoveryNodes[1].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
Expand Down Expand Up @@ -534,15 +509,9 @@ public void testCreateShrinkWithIndexSort() throws Exception {
.setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON)
.get();
}
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
String mergeNode = discoveryNodes[0].getName();
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
Expand Down Expand Up @@ -614,14 +583,8 @@ public void testShrinkCommitsMergeOnIdle() throws Exception {
client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
client().admin().indices().prepareFlush("source").get();
ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getDataNodes();
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
final Map<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(new DiscoveryNode[0]);
// ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node
// if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
import org.opensearch.action.support.IndicesOptions;
import org.opensearch.cluster.metadata.AliasMetadata;
import org.opensearch.cluster.metadata.MappingMetadata;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexNotFoundException;
import org.opensearch.test.OpenSearchIntegTestCase;
Expand Down Expand Up @@ -254,7 +253,7 @@ private GetIndexResponse runWithRandomFeatureMethod(GetIndexRequestBuilder reque
}

private void assertSettings(GetIndexResponse response, String indexName) {
ImmutableOpenMap<String, Settings> settings = response.settings();
final Map<String, Settings> settings = response.settings();
assertThat(settings, notNullValue());
assertThat(settings.size(), equalTo(1));
Settings indexSettings = settings.get(indexName);
Expand All @@ -263,7 +262,7 @@ private void assertSettings(GetIndexResponse response, String indexName) {
}

private void assertNonEmptySettings(GetIndexResponse response, String indexName) {
ImmutableOpenMap<String, Settings> settings = response.settings();
final Map<String, Settings> settings = response.settings();
assertThat(settings, notNullValue());
assertThat(settings.size(), equalTo(1));
Settings indexSettings = settings.get(indexName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,6 @@
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;
import org.opensearch.common.collect.ImmutableOpenIntMap;
import org.opensearch.common.collect.ImmutableOpenMap;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.Index;
import org.opensearch.index.IndexService;
Expand Down Expand Up @@ -151,8 +150,7 @@ public void testIndices() throws Exception {
.indices()
.shardStores(Requests.indicesShardStoresRequest().shardStatuses("all"))
.get();
ImmutableOpenMap<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response
.getStoreStatuses();
Map<String, ImmutableOpenIntMap<List<IndicesShardStoresResponse.StoreStatus>>> shardStatuses = response.getStoreStatuses();
assertThat(shardStatuses.containsKey(index1), equalTo(true));
assertThat(shardStatuses.containsKey(index2), equalTo(true));
assertThat(shardStatuses.get(index1).size(), equalTo(2));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,9 +172,9 @@ public void testClusterStateDiffSerialization() throws Exception {
assertThat(clusterStateFromDiffs.nodes().getNodes(), equalTo(clusterState.nodes().getNodes()));
assertThat(clusterStateFromDiffs.nodes().getLocalNodeId(), equalTo(previousClusterStateFromDiffs.nodes().getLocalNodeId()));
assertThat(clusterStateFromDiffs.nodes().getNodes(), equalTo(clusterState.nodes().getNodes()));
for (ObjectCursor<String> node : clusterStateFromDiffs.nodes().getNodes().keys()) {
DiscoveryNode node1 = clusterState.nodes().get(node.value);
DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value);
for (final String node : clusterStateFromDiffs.nodes().getNodes().keySet()) {
DiscoveryNode node1 = clusterState.nodes().get(node);
DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node);
assertThat(node1.getVersion(), equalTo(node2.getVersion()));
assertThat(node1.getAddress(), equalTo(node2.getAddress()));
assertThat(node1.getAttributes(), equalTo(node2.getAttributes()));
Expand Down Expand Up @@ -256,7 +256,7 @@ private ClusterState.Builder randomNodes(ClusterState clusterState) {
DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
List<String> nodeIds = randomSubsetOf(
randomInt(clusterState.nodes().getNodes().size() - 1),
clusterState.nodes().getNodes().keys().toArray(String.class)
clusterState.nodes().getNodes().keySet().toArray(new String[0])
);
for (String nodeId : nodeIds) {
if (nodeId.startsWith("node-")) {
Expand Down Expand Up @@ -291,15 +291,15 @@ private ClusterState.Builder randomRoutingTable(ClusterState clusterState) {
builder.add(
randomChangeToIndexRoutingTable(
clusterState.routingTable().indicesRouting().get(index),
clusterState.nodes().getNodes().keys().toArray(String.class)
clusterState.nodes().getNodes().keySet().toArray(new String[0])
)
);
}
}
}
int additionalIndexCount = randomIntBetween(1, 20);
for (int i = 0; i < additionalIndexCount; i++) {
builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().getNodes().keys().toArray(String.class)));
builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().getNodes().keySet().toArray(new String[0])));
}
return ClusterState.builder(clusterState).routingTable(builder.build());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -507,7 +507,7 @@ private void assertNodesRemovedAfterZoneDecommission(boolean originalClusterMana
assertEquals(clusterState.nodes().getDataNodes().size(), 8);
assertEquals(clusterState.nodes().getClusterManagerNodes().size(), 2);

Iterator<DiscoveryNode> discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt();
Iterator<DiscoveryNode> discoveryNodeIterator = clusterState.nodes().getNodes().values().iterator();
while (discoveryNodeIterator.hasNext()) {
// assert no node has decommissioned attribute
DiscoveryNode node = discoveryNodeIterator.next();
Expand Down Expand Up @@ -717,7 +717,7 @@ public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionEx

logger.info("--> Got cluster state with 4 nodes.");
// assert status on nodes that are part of cluster currently
Iterator<DiscoveryNode> discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt();
Iterator<DiscoveryNode> discoveryNodeIterator = clusterState.nodes().getNodes().values().iterator();
DiscoveryNode clusterManagerNodeAfterDecommission = null;
while (discoveryNodeIterator.hasNext()) {
// assert no node has decommissioned attribute
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -437,7 +437,16 @@ public Settings onNodeStopped(String nodeName) {
}

private String getCoordinatingOnlyNode() {
return client().admin().cluster().prepareState().get().getState().nodes().getCoordinatingOnlyNodes().iterator().next().value
return client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getCoordinatingOnlyNodes()
.values()
.iterator()
.next()
.getName();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -778,7 +778,16 @@ private Tuple<String, String> getPrimaryReplicaNodeNames(String indexName) {
}

private String getCoordinatingOnlyNode() {
return client().admin().cluster().prepareState().get().getState().nodes().getCoordinatingOnlyNodes().iterator().next().value
return client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getCoordinatingOnlyNodes()
.values()
.iterator()
.next()
.getName();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -920,7 +920,16 @@ private Tuple<String, String> getPrimaryReplicaNodeNames(String indexName) {
}

private String getCoordinatingOnlyNode() {
return client().admin().cluster().prepareState().get().getState().nodes().getCoordinatingOnlyNodes().iterator().next().value
return client().admin()
.cluster()
.prepareState()
.get()
.getState()
.nodes()
.getCoordinatingOnlyNodes()
.values()
.iterator()
.next()
.getName();
}

Expand Down
Loading

0 comments on commit d984f50

Please sign in to comment.