sourceTuple = restRequest.contentOrSourceParam();
PutSearchPipelineRequest request = new PutSearchPipelineRequest(restRequest.param("id"), sourceTuple.v2(), sourceTuple.v1());
request.clusterManagerNodeTimeout(restRequest.paramAsTime("cluster_manager_timeout", request.clusterManagerNodeTimeout()));
request.timeout(restRequest.paramAsTime("timeout", request.timeout()));
diff --git a/server/src/main/java/org/opensearch/script/StoredScriptSource.java b/server/src/main/java/org/opensearch/script/StoredScriptSource.java
index 1d6f2b8e4ccea..9d3af9e4c9caf 100644
--- a/server/src/main/java/org/opensearch/script/StoredScriptSource.java
+++ b/server/src/main/java/org/opensearch/script/StoredScriptSource.java
@@ -44,6 +44,7 @@
import org.opensearch.core.common.io.stream.Writeable;
import org.opensearch.common.logging.DeprecationLogger;
import org.opensearch.common.xcontent.LoggingDeprecationHandler;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.ObjectParser;
import org.opensearch.core.xcontent.ObjectParser.ValueType;
@@ -197,71 +198,72 @@ private StoredScriptSource build(boolean ignoreEmpty) {
/**
* This will parse XContent into a {@link StoredScriptSource}. The following formats can be parsed:
- *
+ *
* The simple script format with no compiler options or user-defined params:
- *
+ *
* Example:
* {@code
* {"script": "return Math.log(doc.popularity) * 100;"}
* }
- *
+ *
* The above format requires the lang to be specified using the deprecated stored script namespace
* (as a url parameter during a put request). See {@link ScriptMetadata} for more information about
* the stored script namespaces.
- *
+ *
* The complex script format using the new stored script namespace
* where lang and source are required but options is optional:
- *
+ *
* {@code
* {
- * "script" : {
- * "lang" : "",
- * "source" : "",
- * "options" : {
- * "option0" : "",
- * "option1" : "",
- * ...
- * }
- * }
+ * "script" : {
+ * "lang" : "",
+ * "source" : "",
+ * "options" : {
+ * "option0" : "",
+ * "option1" : "",
+ * ...
* }
* }
- *
+ * }
+ * }
+ *
* Example:
* {@code
* {
- * "script": {
- * "lang" : "painless",
- * "source" : "return Math.log(doc.popularity) * params.multiplier"
- * }
+ * "script": {
+ * "lang" : "painless",
+ * "source" : "return Math.log(doc.popularity) * params.multiplier"
* }
* }
- *
+ * }
+ *
* The use of "source" may also be substituted with "code" for backcompat with 5.3 to 5.5 format. For example:
- *
+ *
* {@code
* {
- * "script" : {
- * "lang" : "",
- * "code" : "",
- * "options" : {
- * "option0" : "",
- * "option1" : "",
- * ...
- * }
- * }
+ * "script" : {
+ * "lang" : "",
+ * "code" : "",
+ * "options" : {
+ * "option0" : "",
+ * "option1" : "",
+ * ...
* }
* }
- *
+ * }
+ * }
+ *
* Note that the "source" parameter can also handle template parsing including from
* a complex JSON object.
*
- * @param content The content from the request to be parsed as described above.
- * @return The parsed {@link StoredScriptSource}.
+ * @param content The content from the request to be parsed as described above.
+ * @param mediaType The media type of the request
+ * @return The parsed {@link StoredScriptSource}.
*/
- public static StoredScriptSource parse(BytesReference content, XContentType xContentType) {
+ public static StoredScriptSource parse(BytesReference content, MediaType mediaType) {
try (
InputStream stream = content.streamInput();
- XContentParser parser = xContentType.xContent()
+ XContentParser parser = mediaType.xContent()
.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)
) {
Token token = parser.nextToken();
diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
index b82764c38f747..97a6093ea39d1 100644
--- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
+++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
@@ -52,6 +52,7 @@
import org.opensearch.common.xcontent.XContentHelper;
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.common.xcontent.support.XContentMapValues;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.fieldvisitor.CustomFieldsVisitor;
import org.opensearch.index.fieldvisitor.FieldsVisitor;
@@ -377,7 +378,7 @@ private HitContext prepareNestedHitContext(
String rootId;
Map rootSourceAsMap = null;
- XContentType rootSourceContentType = null;
+ MediaType rootSourceContentType = null;
int nestedDocId = nestedTopDocId - subReaderContext.docBase;
diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java
index 1d4a424550c12..1341fc0fdabb3 100644
--- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java
+++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java
@@ -75,8 +75,8 @@ public Map source() {
return source;
}
- public XContentType sourceContentType() {
- return XContentType.fromMediaType(sourceContentType);
+ public MediaType sourceContentType() {
+ return sourceContentType;
}
public int docId() {
@@ -155,7 +155,7 @@ public void setSource(BytesReference source) {
this.sourceAsBytes = source;
}
- public void setSourceContentType(XContentType sourceContentType) {
+ public void setSourceContentType(MediaType sourceContentType) {
this.sourceContentType = sourceContentType;
}
diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java
index 512e400c54784..b4f6549c83390 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java
@@ -8,6 +8,7 @@
package org.opensearch.search.pipeline;
+import org.opensearch.Version;
import org.opensearch.cluster.AbstractDiffable;
import org.opensearch.cluster.Diff;
import org.opensearch.common.Strings;
@@ -56,7 +57,7 @@ private static class Builder {
private String id;
private BytesReference config;
- private XContentType xContentType;
+ private MediaType mediaType;
void setId(String id) {
this.id = id;
@@ -67,11 +68,11 @@ void setConfig(BytesReference config, MediaType mediaType) {
throw new IllegalArgumentException("PipelineConfiguration does not support media type [" + mediaType.getClass() + "]");
}
this.config = config;
- this.xContentType = XContentType.fromMediaType(mediaType);
+ this.mediaType = mediaType;
}
PipelineConfiguration build() {
- return new PipelineConfiguration(id, config, xContentType);
+ return new PipelineConfiguration(id, config, mediaType);
}
}
@@ -80,16 +81,12 @@ PipelineConfiguration build() {
// and the way the map of maps config is read requires a deep copy (it removes instead of gets entries to check for unused options)
// also the get pipeline api just directly returns this to the caller
private final BytesReference config;
- private final XContentType xContentType;
+ private final MediaType mediaType;
- public PipelineConfiguration(String id, BytesReference config, XContentType xContentType) {
+ public PipelineConfiguration(String id, BytesReference config, MediaType mediaType) {
this.id = Objects.requireNonNull(id);
this.config = Objects.requireNonNull(config);
- this.xContentType = Objects.requireNonNull(xContentType);
- }
-
- public PipelineConfiguration(String id, BytesReference config, MediaType mediaType) {
- this(id, config, XContentType.fromMediaType(mediaType));
+ this.mediaType = Objects.requireNonNull(mediaType);
}
public String getId() {
@@ -97,12 +94,12 @@ public String getId() {
}
public Map getConfigAsMap() {
- return XContentHelper.convertToMap(config, true, xContentType).v2();
+ return XContentHelper.convertToMap(config, true, mediaType).v2();
}
// pkg-private for tests
- XContentType getXContentType() {
- return xContentType;
+ MediaType getMediaType() {
+ return mediaType;
}
// pkg-private for tests
@@ -120,7 +117,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
public static PipelineConfiguration readFrom(StreamInput in) throws IOException {
- return new PipelineConfiguration(in.readString(), in.readBytesReference(), in.readEnum(XContentType.class));
+ return new PipelineConfiguration(
+ in.readString(),
+ in.readBytesReference(),
+ in.getVersion().onOrAfter(Version.V_3_0_0) ? in.readMediaType() : in.readEnum(XContentType.class)
+ );
}
public static Diff readDiffFrom(StreamInput in) throws IOException {
@@ -136,7 +137,11 @@ public String toString() {
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
out.writeBytesReference(config);
- out.writeEnum(xContentType);
+ if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+ mediaType.writeTo(out);
+ } else {
+ out.writeEnum((XContentType) mediaType);
+ }
}
@Override
diff --git a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
index 7384b494d71fc..1066d836e5183 100644
--- a/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
+++ b/server/src/main/java/org/opensearch/search/pipeline/SearchPipelineService.java
@@ -259,7 +259,7 @@ static ClusterState innerPut(PutSearchPipelineRequest request, ClusterState curr
} else {
pipelines = new HashMap<>();
}
- pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getXContentType()));
+ pipelines.put(request.getId(), new PipelineConfiguration(request.getId(), request.getSource(), request.getMediaType()));
ClusterState.Builder newState = ClusterState.builder(currentState);
newState.metadata(
Metadata.builder(currentState.getMetadata())
@@ -273,7 +273,7 @@ void validatePipeline(Map searchPipelineInfos
if (searchPipelineInfos.isEmpty()) {
throw new IllegalStateException("Search pipeline info is empty");
}
- Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getXContentType()).v2();
+ Map pipelineConfig = XContentHelper.convertToMap(request.getSource(), false, request.getMediaType()).v2();
Pipeline pipeline = PipelineWithMetrics.create(
request.getId(),
pipelineConfig,
diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java
index c2163ae1d490a..b8d7d130e846b 100644
--- a/server/src/main/java/org/opensearch/transport/TransportService.java
+++ b/server/src/main/java/org/opensearch/transport/TransportService.java
@@ -59,8 +59,10 @@
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.common.util.io.IOUtils;
import org.opensearch.common.lease.Releasable;
+import org.opensearch.common.xcontent.XContentType;
import org.opensearch.core.common.Strings;
import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
+import org.opensearch.core.xcontent.MediaTypeParserRegistry;
import org.opensearch.node.NodeClosedException;
import org.opensearch.node.ReportingService;
import org.opensearch.tasks.Task;
@@ -169,7 +171,10 @@ public void close() {}
* over the {@link StreamOutput} and {@link StreamInput} wire
*/
Streamables.registerStreamables();
+ /** Registers OpenSearch server specific exceptions (exceptions outside of core library) */
OpenSearchServerException.registerExceptions();
+ // set the default media type to JSON (fallback if a media type is not specified)
+ MediaTypeParserRegistry.setDefaultMediaType(XContentType.JSON);
}
/** does nothing. easy way to ensure class is loaded so the above static block is called to register the streamables */
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
index 6ba7961ccecf3..e92096f139d17 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java
@@ -244,7 +244,7 @@ private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOExcep
FakeRestRequest.Builder requestBuilder = new FakeRestRequest.Builder(xContentRegistry());
requestBuilder.withParams(params);
if (hasBody) {
- requestBuilder.withContent(BytesReference.bytes(builder), XContentType.fromMediaType(builder.contentType()));
+ requestBuilder.withContent(BytesReference.bytes(builder), builder.contentType());
}
return requestBuilder.build();
}
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
index 2eb6fa504baf2..cfdd776e60832 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestTests.java
@@ -56,13 +56,13 @@ public void testSerialization() throws IOException {
new StoredScriptSource("foo", "bar", Collections.emptyMap())
);
- assertEquals(XContentType.JSON, storedScriptRequest.xContentType());
+ assertEquals(XContentType.JSON, storedScriptRequest.mediaType());
try (BytesStreamOutput output = new BytesStreamOutput()) {
storedScriptRequest.writeTo(output);
try (StreamInput in = output.bytes().streamInput()) {
PutStoredScriptRequest serialized = new PutStoredScriptRequest(in);
- assertEquals(XContentType.JSON, serialized.xContentType());
+ assertEquals(XContentType.JSON, serialized.mediaType());
assertEquals(storedScriptRequest.id(), serialized.id());
assertEquals(storedScriptRequest.context(), serialized.context());
}
diff --git a/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java b/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java
index a86257fd741c0..336ec67546dc5 100644
--- a/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/ingest/PutPipelineRequestTests.java
@@ -49,14 +49,14 @@ public class PutPipelineRequestTests extends OpenSearchTestCase {
public void testSerializationWithXContent() throws IOException {
PutPipelineRequest request = new PutPipelineRequest("1", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), XContentType.JSON);
- assertEquals(XContentType.JSON, request.getXContentType());
+ assertEquals(XContentType.JSON, request.getMediaType());
BytesStreamOutput output = new BytesStreamOutput();
request.writeTo(output);
StreamInput in = StreamInput.wrap(output.bytes().toBytesRef().bytes);
PutPipelineRequest serialized = new PutPipelineRequest(in);
- assertEquals(XContentType.JSON, serialized.getXContentType());
+ assertEquals(XContentType.JSON, serialized.getMediaType());
assertEquals("{}", serialized.getSource().utf8ToString());
}
diff --git a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java
index 53607f432bbd5..8c53d7edebca8 100644
--- a/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java
+++ b/server/src/test/java/org/opensearch/common/xcontent/XContentTypeTests.java
@@ -44,81 +44,80 @@ public class XContentTypeTests extends OpenSearchTestCase {
public void testFromJson() throws Exception {
String mediaType = "application/json";
XContentType expectedXContentType = XContentType.JSON;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
}
public void testFromNdJson() throws Exception {
String mediaType = "application/x-ndjson";
XContentType expectedXContentType = XContentType.JSON;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
}
public void testFromJsonUppercase() throws Exception {
String mediaType = "application/json".toUpperCase(Locale.ROOT);
XContentType expectedXContentType = XContentType.JSON;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
}
public void testFromYaml() throws Exception {
String mediaType = "application/yaml";
XContentType expectedXContentType = XContentType.YAML;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + "; charset=UTF-8"), equalTo(expectedXContentType));
}
public void testFromSmile() throws Exception {
String mediaType = "application/smile";
XContentType expectedXContentType = XContentType.SMILE;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
}
public void testFromCbor() throws Exception {
String mediaType = "application/cbor";
XContentType expectedXContentType = XContentType.CBOR;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
}
public void testFromWildcard() throws Exception {
String mediaType = "application/*";
XContentType expectedXContentType = XContentType.JSON;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
}
public void testFromWildcardUppercase() throws Exception {
String mediaType = "APPLICATION/*";
XContentType expectedXContentType = XContentType.JSON;
- assertThat(XContentType.fromMediaType(mediaType), equalTo(expectedXContentType));
- assertThat(XContentType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType), equalTo(expectedXContentType));
+ assertThat(MediaType.fromMediaType(mediaType + ";"), equalTo(expectedXContentType));
}
public void testFromRubbish() throws Exception {
- assertThat(XContentType.fromMediaType((String) null), nullValue());
- assertThat(XContentType.fromMediaType((MediaType) null), nullValue());
- assertThat(XContentType.fromMediaType(""), nullValue());
- assertThat(XContentType.fromMediaType("text/plain"), nullValue());
- assertThat(XContentType.fromMediaType("gobbly;goop"), nullValue());
+ assertThat(MediaType.fromMediaType(null), nullValue());
+ assertThat(MediaType.fromMediaType(""), nullValue());
+ assertThat(MediaType.fromMediaType("text/plain"), nullValue());
+ assertThat(MediaType.fromMediaType("gobbly;goop"), nullValue());
}
public void testVersionedMediaType() throws Exception {
- assertThat(XContentType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON));
- assertThat(XContentType.fromMediaType("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML));
- assertThat(XContentType.fromMediaType("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR));
- assertThat(XContentType.fromMediaType("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE));
+ assertThat(MediaType.fromMediaType("application/vnd.opensearch+json;compatible-with=7"), equalTo(XContentType.JSON));
+ assertThat(MediaType.fromMediaType("application/vnd.opensearch+yaml;compatible-with=7"), equalTo(XContentType.YAML));
+ assertThat(MediaType.fromMediaType("application/vnd.opensearch+cbor;compatible-with=7"), equalTo(XContentType.CBOR));
+ assertThat(MediaType.fromMediaType("application/vnd.opensearch+smile;compatible-with=7"), equalTo(XContentType.SMILE));
- assertThat(XContentType.fromMediaType("application/vnd.opensearch+json ;compatible-with=7"), equalTo(XContentType.JSON));
+ assertThat(MediaType.fromMediaType("application/vnd.opensearch+json ;compatible-with=7"), equalTo(XContentType.JSON));
String mthv = "application/vnd.opensearch+json ;compatible-with=7;charset=utf-8";
- assertThat(XContentType.fromMediaType(mthv), equalTo(XContentType.JSON));
- assertThat(XContentType.fromMediaType(mthv.toUpperCase(Locale.ROOT)), equalTo(XContentType.JSON));
+ assertThat(MediaType.fromMediaType(mthv), equalTo(XContentType.JSON));
+ assertThat(MediaType.fromMediaType(mthv.toUpperCase(Locale.ROOT)), equalTo(XContentType.JSON));
}
}
diff --git a/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java
index 365d3738f4022..0e04e57a290b6 100644
--- a/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java
+++ b/server/src/test/java/org/opensearch/index/mapper/DynamicMappingTests.java
@@ -367,9 +367,7 @@ private void doTestDefaultFloatingPointMappings(DocumentMapper mapper, XContentB
.field("quux", "3.2") // float detected through numeric detection
.endObject()
);
- ParsedDocument parsedDocument = mapper.parse(
- new SourceToParse("index", "id", source, XContentType.fromMediaType(builder.contentType()))
- );
+ ParsedDocument parsedDocument = mapper.parse(new SourceToParse("index", "id", source, builder.contentType()));
Mapping update = parsedDocument.dynamicMappingsUpdate();
assertNotNull(update);
assertThat(((FieldMapper) update.root().getMapper("foo")).fieldType().typeName(), equalTo("float"));
diff --git a/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java b/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java
index 51ebd1b1f7ad5..aed08c2d1875a 100644
--- a/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java
+++ b/server/src/test/java/org/opensearch/ingest/PipelineConfigurationTests.java
@@ -58,13 +58,13 @@ public void testSerialization() throws IOException {
new BytesArray("{}".getBytes(StandardCharsets.UTF_8)),
XContentType.JSON
);
- assertEquals(XContentType.JSON, configuration.getXContentType());
+ assertEquals(XContentType.JSON, configuration.getMediaType());
BytesStreamOutput out = new BytesStreamOutput();
configuration.writeTo(out);
StreamInput in = StreamInput.wrap(out.bytes().toBytesRef().bytes);
PipelineConfiguration serialized = PipelineConfiguration.readFrom(in);
- assertEquals(XContentType.JSON, serialized.getXContentType());
+ assertEquals(XContentType.JSON, serialized.getMediaType());
assertEquals("{}", serialized.getConfig().utf8ToString());
}
@@ -83,8 +83,8 @@ public void testParser() throws IOException {
XContentParser xContentParser = xContentType.xContent()
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, bytes.streamInput());
PipelineConfiguration parsed = parser.parse(xContentParser, null);
- assertEquals(xContentType, parsed.getXContentType());
- assertEquals("{}", XContentHelper.convertToJson(parsed.getConfig(), false, parsed.getXContentType()));
+ assertEquals(xContentType, parsed.getMediaType());
+ assertEquals("{}", XContentHelper.convertToJson(parsed.getConfig(), false, parsed.getMediaType()));
assertEquals("1", parsed.getId());
}
diff --git a/server/src/test/java/org/opensearch/rest/RestRequestTests.java b/server/src/test/java/org/opensearch/rest/RestRequestTests.java
index 15066362555ef..97350824dd1e4 100644
--- a/server/src/test/java/org/opensearch/rest/RestRequestTests.java
+++ b/server/src/test/java/org/opensearch/rest/RestRequestTests.java
@@ -203,12 +203,12 @@ public void testContentTypeParsing() {
Map> map = new HashMap<>();
map.put("Content-Type", Collections.singletonList(xContentType.mediaType()));
RestRequest restRequest = contentRestRequest("", Collections.emptyMap(), map);
- assertEquals(xContentType, restRequest.getXContentType());
+ assertEquals(xContentType, restRequest.getMediaType());
map = new HashMap<>();
map.put("Content-Type", Collections.singletonList(xContentType.mediaTypeWithoutParameters()));
restRequest = contentRestRequest("", Collections.emptyMap(), map);
- assertEquals(xContentType, restRequest.getXContentType());
+ assertEquals(xContentType, restRequest.getMediaType());
}
}
@@ -221,7 +221,7 @@ public void testPlainTextSupport() {
Collections.singletonList(randomFrom("text/plain", "text/plain; charset=utf-8", "text/plain;charset=utf-8"))
)
);
- assertNull(restRequest.getXContentType());
+ assertNull(restRequest.getMediaType());
}
public void testMalformedContentTypeHeader() {
@@ -237,7 +237,7 @@ public void testMalformedContentTypeHeader() {
public void testNoContentTypeHeader() {
RestRequest contentRestRequest = contentRestRequest("", Collections.emptyMap(), Collections.emptyMap());
- assertNull(contentRestRequest.getXContentType());
+ assertNull(contentRestRequest.getMediaType());
}
public void testMultipleContentTypeHeaders() {
diff --git a/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java b/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java
index bcab5dc7b776a..83e7a3712a9ad 100644
--- a/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java
+++ b/server/src/test/java/org/opensearch/script/ScriptMetadataTests.java
@@ -36,6 +36,7 @@
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.common.io.stream.Writeable;
import org.opensearch.core.xcontent.DeprecationHandler;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
@@ -130,13 +131,13 @@ public void testGetScript() throws Exception {
.endObject()
.endObject()
.endObject();
- XContentType xContentType = XContentType.fromMediaType(sourceBuilder.contentType());
- builder.storeScript("source_template", StoredScriptSource.parse(BytesReference.bytes(sourceBuilder), xContentType));
+ MediaType mediaType = sourceBuilder.contentType();
+ builder.storeScript("source_template", StoredScriptSource.parse(BytesReference.bytes(sourceBuilder), mediaType));
sourceBuilder = XContentFactory.jsonBuilder();
- xContentType = XContentType.fromMediaType(sourceBuilder.contentType());
+ mediaType = sourceBuilder.contentType();
sourceBuilder.startObject().startObject("script").field("lang", "_lang").field("source", "_source").endObject().endObject();
- builder.storeScript("script", StoredScriptSource.parse(BytesReference.bytes(sourceBuilder), xContentType));
+ builder.storeScript("script", StoredScriptSource.parse(BytesReference.bytes(sourceBuilder), mediaType));
ScriptMetadata scriptMetadata = builder.build();
assertEquals("_source", scriptMetadata.getStoredScript("script").getSource());
@@ -303,7 +304,7 @@ private ScriptMetadata randomScriptMetadata(XContentType sourceContentType, int
.endObject();
builder.storeScript(
randomAlphaOfLength(i + 1),
- StoredScriptSource.parse(BytesReference.bytes(sourceBuilder), XContentType.fromMediaType(sourceBuilder.contentType()))
+ StoredScriptSource.parse(BytesReference.bytes(sourceBuilder), sourceBuilder.contentType())
);
}
return builder.build();
diff --git a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java
index e81f0df9a5ac4..a328e6778dfaf 100644
--- a/test/framework/src/main/java/org/opensearch/test/RandomObjects.java
+++ b/test/framework/src/main/java/org/opensearch/test/RandomObjects.java
@@ -98,7 +98,7 @@ public static Tuple, List> randomStoredFieldValues(Random r
List originalValues = randomStoredFieldValues(random, numValues);
List expectedParsedValues = new ArrayList<>(numValues);
for (Object originalValue : originalValues) {
- expectedParsedValues.add(getExpectedParsedValue(XContentType.fromMediaType(mediaType), originalValue));
+ expectedParsedValues.add(getExpectedParsedValue(mediaType, originalValue));
}
return Tuple.tuple(originalValues, expectedParsedValues);
}
@@ -154,15 +154,15 @@ private static List randomStoredFieldValues(Random random, int numValues
* Generates values based on what can get printed out. Stored fields values are retrieved from lucene and converted via
* {@link org.opensearch.index.mapper.MappedFieldType#valueForDisplay(Object)} to either strings, numbers or booleans.
*/
- public static Object getExpectedParsedValue(XContentType xContentType, Object value) {
+ public static Object getExpectedParsedValue(MediaType mediaType, Object value) {
if (value instanceof BytesArray) {
- if (xContentType == XContentType.JSON) {
+ if (mediaType == XContentType.JSON) {
// JSON writes base64 format
return Base64.getEncoder().encodeToString(((BytesArray) value).toBytesRef().bytes);
}
}
if (value instanceof Float) {
- if (xContentType == XContentType.CBOR || xContentType == XContentType.SMILE) {
+ if (mediaType == XContentType.CBOR || mediaType == XContentType.SMILE) {
// with binary content types we pass back the object as is
return value;
}
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java b/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java
index 375768ed12cc4..2bbaf8c80dfa4 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/FakeRestRequest.java
@@ -35,8 +35,8 @@
import org.opensearch.action.ActionListener;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.NamedXContentRegistry;
-import org.opensearch.common.xcontent.XContentType;
import org.opensearch.http.HttpChannel;
import org.opensearch.http.HttpRequest;
import org.opensearch.http.HttpResponse;
@@ -231,10 +231,10 @@ public Builder withParams(Map params) {
return this;
}
- public Builder withContent(BytesReference content, XContentType xContentType) {
+ public Builder withContent(BytesReference content, MediaType mediaType) {
this.content = content;
- if (xContentType != null) {
- headers.put("Content-Type", Collections.singletonList(xContentType.mediaType()));
+ if (mediaType != null) {
+ headers.put("Content-Type", Collections.singletonList(mediaType.mediaType()));
}
return this;
}
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
index 512369fdfe8a1..6d3c2eaa972e6 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java
@@ -66,6 +66,7 @@
import org.opensearch.common.unit.TimeValue;
import org.opensearch.common.util.concurrent.ThreadContext;
import org.opensearch.core.xcontent.DeprecationHandler;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentHelper;
@@ -143,10 +144,10 @@ public abstract class OpenSearchRestTestCase extends OpenSearchTestCase {
* Convert the entity from a {@link Response} into a map of maps.
*/
public static Map entityAsMap(Response response) throws IOException {
- XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType());
+ MediaType mediaType = MediaType.fromMediaType(response.getEntity().getContentType());
// EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation
try (
- XContentParser parser = xContentType.xContent()
+ XContentParser parser = mediaType.xContent()
.createParser(
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
@@ -161,10 +162,10 @@ public static Map entityAsMap(Response response) throws IOExcept
* Convert the entity from a {@link Response} into a list of maps.
*/
public static List entityAsList(Response response) throws IOException {
- XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType());
+ MediaType mediaType = MediaType.fromMediaType(response.getEntity().getContentType());
// EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation
try (
- XContentParser parser = xContentType.xContent()
+ XContentParser parser = mediaType.xContent()
.createParser(
NamedXContentRegistry.EMPTY,
DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
@@ -1074,7 +1075,7 @@ protected static Map getAsMap(final String endpoint) throws IOEx
}
protected static Map responseAsMap(Response response) throws IOException {
- XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType());
+ MediaType entityContentType = MediaType.fromMediaType(response.getEntity().getContentType());
Map responseEntity = XContentHelper.convertToMap(
entityContentType.xContent(),
response.getEntity().getContent(),
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java
index cb460f17cbec0..635dc49ff5166 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java
@@ -41,6 +41,7 @@
import org.apache.lucene.util.BytesRef;
import org.opensearch.Version;
import org.opensearch.client.NodeSelector;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
@@ -138,20 +139,20 @@ private HttpEntity createEntity(List> bodies, Map bytesRefList = new ArrayList<>(bodies.size());
int totalBytesLength = 0;
for (Map body : bodies) {
- BytesRef bytesRef = bodyAsBytesRef(body, xContentType);
+ BytesRef bytesRef = bodyAsBytesRef(body, mediaType);
bytesRefList.add(bytesRef);
totalBytesLength += bytesRef.length - bytesRef.offset + 1;
}
@@ -161,20 +162,20 @@ private HttpEntity createEntity(List> bodies, Map headers, XContentType[] supportedContentTypes) {
- XContentType xContentType = null;
+ private MediaType getContentType(Map headers, XContentType[] supportedContentTypes) {
+ MediaType mediaType = null;
String contentType = headers.get("Content-Type");
if (contentType != null) {
- xContentType = XContentType.fromMediaType(contentType);
+ mediaType = MediaType.fromMediaType(contentType);
}
- if (xContentType != null) {
- return xContentType;
+ if (mediaType != null) {
+ return mediaType;
}
if (randomizeContentType) {
return RandomizedTest.randomFrom(supportedContentTypes);
@@ -182,9 +183,9 @@ private XContentType getContentType(Map headers, XContentType[]
return XContentType.JSON;
}
- private BytesRef bodyAsBytesRef(Map bodyAsMap, XContentType xContentType) throws IOException {
+ private BytesRef bodyAsBytesRef(Map bodyAsMap, MediaType mediaType) throws IOException {
Map finalBodyAsMap = stash.replaceStashedValues(bodyAsMap);
- try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType)) {
+ try (XContentBuilder builder = XContentFactory.contentBuilder(mediaType)) {
return BytesReference.bytes(builder.map(finalBodyAsMap)).toBytesRef();
}
}
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java
index 5c6190ab860b2..0f14461eb0f86 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java
@@ -38,6 +38,7 @@
import org.opensearch.common.Strings;
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.common.xcontent.LoggingDeprecationHandler;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
@@ -58,7 +59,7 @@ public class ClientYamlTestResponse {
private final Response response;
private final byte[] body;
- private final XContentType bodyContentType;
+ private final MediaType bodyContentType;
private ObjectPath parsedResponse;
private String bodyAsString;
@@ -66,7 +67,7 @@ public ClientYamlTestResponse(Response response) throws IOException {
this.response = response;
if (response.getEntity() != null) {
String contentType = response.getHeader("Content-Type");
- this.bodyContentType = XContentType.fromMediaType(contentType);
+ this.bodyContentType = MediaType.fromMediaType(contentType);
try {
byte[] bytes = EntityUtils.toByteArray(response.getEntity());
// skip parsing if we got text back (e.g. if we called _cat apis)
diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java
index 8b62f0d0a9783..6c1510f8ca60a 100644
--- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java
+++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java
@@ -36,11 +36,11 @@
import org.opensearch.core.common.bytes.BytesArray;
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.core.xcontent.DeprecationHandler;
+import org.opensearch.core.xcontent.MediaType;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.XContent;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.core.xcontent.XContentParser;
-import org.opensearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
@@ -57,8 +57,8 @@ public class ObjectPath {
public static ObjectPath createFromResponse(Response response) throws IOException {
byte[] bytes = EntityUtils.toByteArray(response.getEntity());
String contentType = response.getHeader("Content-Type");
- XContentType xContentType = XContentType.fromMediaType(contentType);
- return ObjectPath.createFromXContent(xContentType.xContent(), new BytesArray(bytes));
+ MediaType mediaType = MediaType.fromMediaType(contentType);
+ return ObjectPath.createFromXContent(mediaType.xContent(), new BytesArray(bytes));
}
public static ObjectPath createFromXContent(XContent xContent, BytesReference input) throws IOException {
From 5d78de61f33bd813913563076d3dfcf8ddaf1498 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Mon, 24 Jul 2023 15:52:32 -0500
Subject: [PATCH 07/71] Retry only specified tests (#8825)
This list was compiled by finding every test failure that succeeded on a
retry in the past month (approx. June 20-July 20, 2023). The
[flaky-test-finder.rb][1] script was used to collect this list.
```
ruby flaky-test-finder.rb -s 18000 -e 20681 | sed 's/[0-9]*[[:space:]]\(.*\)\..*/includeClasses\.add(\"\1\")/' | sort | uniq
```
[1]: https://gist.github.com/andrross/ee07a8a05beb63f1173bcb98523918b9
Signed-off-by: Andrew Ross
---
TESTING.md | 5 ++++
build.gradle | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 81 insertions(+)
diff --git a/TESTING.md b/TESTING.md
index 93b4615da6f0b..a76ee07e1faac 100644
--- a/TESTING.md
+++ b/TESTING.md
@@ -13,6 +13,7 @@ OpenSearch uses [jUnit](https://junit.org/junit5/) for testing, it also uses ran
- [Test groups](#test-groups)
- [Load balancing and caches](#load-balancing-and-caches)
- [Test compatibility](#test-compatibility)
+ - [Retries](#retries)
- [Miscellaneous](#miscellaneous)
- [Running verification tasks](#running-verification-tasks)
- [Testing the REST layer](#testing-the-rest-layer)
@@ -160,6 +161,10 @@ It is possible to provide a version that allows to adapt the tests' behaviour to
./gradlew test -Dtests.compatibility=1.0.0
+## Retries
+
+The goal of tests is to be completely deterministic such that any test failure can be easily and reliably reproduced. However, the reality is that many OpenSearch integration tests have non-deterministic behavior which results in rare test failures that cannot be easily reproduced even using the same random test seed. To mitigate the pain of frequent non-reproducible test failures, limited retries have been introduced using the Gradle [test-retry](https://plugins.gradle.org/plugin/org.gradle.test-retry) plugin. The known flaky tests are explicitly listed in the test-retry configuration of the build.gradle file. This is intended as a temporary mitigation for existing flakiness, and as such new tests should not be added to the retry list. Any new addition to the retry list must provide a thorough rationale as to why adding retries is the right thing to do as opposed to fixing the underlying flakiness. Existing flaky tests are tracked in GitHub with the [Flaky Random Test Failure](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aopen+is%3Aissue+label%3A%22flaky-test%22) label.
+
## Miscellaneous
Run all tests without stopping on errors (inspect log files).
diff --git a/build.gradle b/build.gradle
index 88c485dae9350..c7b6987b1103f 100644
--- a/build.gradle
+++ b/build.gradle
@@ -470,6 +470,82 @@ subprojects {
maxFailures = 10
}
failOnPassedAfterRetry = false
+ classRetry {
+ includeClasses.add("org.opensearch.action.admin.cluster.node.tasks.ResourceAwareTasksTests")
+ includeClasses.add("org.opensearch.action.admin.cluster.tasks.PendingTasksBlocksIT")
+ includeClasses.add("org.opensearch.action.admin.indices.create.CreateIndexIT")
+ includeClasses.add("org.opensearch.action.admin.indices.create.ShrinkIndexIT")
+ includeClasses.add("org.opensearch.aliases.IndexAliasesIT")
+ includeClasses.add("org.opensearch.backwards.MixedClusterClientYamlTestSuiteIT")
+ includeClasses.add("org.opensearch.blocks.SimpleBlocksIT")
+ includeClasses.add("org.opensearch.client.PitIT")
+ includeClasses.add("org.opensearch.client.ReindexIT")
+ includeClasses.add("org.opensearch.cluster.ClusterHealthIT")
+ includeClasses.add("org.opensearch.cluster.allocation.AwarenessAllocationIT")
+ includeClasses.add("org.opensearch.cluster.allocation.ClusterRerouteIT")
+ includeClasses.add("org.opensearch.cluster.coordination.AwarenessAttributeDecommissionIT")
+ includeClasses.add("org.opensearch.cluster.metadata.IndexGraveyardTests")
+ includeClasses.add("org.opensearch.cluster.routing.MovePrimaryFirstTests")
+ includeClasses.add("org.opensearch.cluster.routing.allocation.decider.DiskThresholdDeciderIT")
+ includeClasses.add("org.opensearch.cluster.service.MasterServiceTests")
+ includeClasses.add("org.opensearch.common.util.concurrent.QueueResizableOpenSearchThreadPoolExecutorTests")
+ includeClasses.add("org.opensearch.gateway.RecoveryFromGatewayIT")
+ includeClasses.add("org.opensearch.gateway.ReplicaShardAllocatorIT")
+ includeClasses.add("org.opensearch.http.SearchRestCancellationIT")
+ includeClasses.add("org.opensearch.http.netty4.Netty4HttpServerTransportTests")
+ includeClasses.add("org.opensearch.index.IndexServiceTests")
+ includeClasses.add("org.opensearch.index.IndexSettingsTests")
+ includeClasses.add("org.opensearch.index.SegmentReplicationPressureIT")
+ includeClasses.add("org.opensearch.index.ShardIndexingPressureIT")
+ includeClasses.add("org.opensearch.index.ShardIndexingPressureSettingsIT")
+ includeClasses.add("org.opensearch.index.reindex.BulkByScrollResponseTests")
+ includeClasses.add("org.opensearch.index.reindex.DeleteByQueryBasicTests")
+ includeClasses.add("org.opensearch.index.reindex.UpdateByQueryBasicTests")
+ includeClasses.add("org.opensearch.index.shard.IndexShardIT")
+ includeClasses.add("org.opensearch.index.shard.RemoteStoreRefreshListenerTests")
+ includeClasses.add("org.opensearch.index.translog.RemoteFSTranslogTests")
+ includeClasses.add("org.opensearch.indices.DateMathIndexExpressionsIntegrationIT")
+ includeClasses.add("org.opensearch.indices.replication.RemoteStoreReplicationSourceTests")
+ includeClasses.add("org.opensearch.indices.replication.SegmentReplicationAllocationIT")
+ includeClasses.add("org.opensearch.indices.replication.SegmentReplicationIT")
+ includeClasses.add("org.opensearch.indices.replication.SegmentReplicationRelocationIT")
+ includeClasses.add("org.opensearch.indices.replication.SegmentReplicationTargetServiceTests")
+ includeClasses.add("org.opensearch.indices.state.CloseWhileRelocatingShardsIT")
+ includeClasses.add("org.opensearch.monitor.fs.FsHealthServiceTests")
+ includeClasses.add("org.opensearch.recovery.ReplicationCollectionTests")
+ includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexClusterDefaultDocRep")
+ includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexIT")
+ includeClasses.add("org.opensearch.remotestore.CreateRemoteIndexTranslogDisabledIT")
+ includeClasses.add("org.opensearch.remotestore.RemoteIndexPrimaryRelocationIT")
+ includeClasses.add("org.opensearch.remotestore.RemoteStoreBackpressureIT")
+ includeClasses.add("org.opensearch.remotestore.RemoteStoreIT")
+ includeClasses.add("org.opensearch.remotestore.RemoteStoreRefreshListenerIT")
+ includeClasses.add("org.opensearch.remotestore.RemoteStoreStatsIT")
+ includeClasses.add("org.opensearch.remotestore.SegmentReplicationRemoteStoreIT")
+ includeClasses.add("org.opensearch.remotestore.SegmentReplicationUsingRemoteStoreIT")
+ includeClasses.add("org.opensearch.remotestore.multipart.RemoteStoreMultipartIT")
+ includeClasses.add("org.opensearch.repositories.azure.AzureBlobContainerRetriesTests")
+ includeClasses.add("org.opensearch.repositories.azure.AzureBlobStoreRepositoryTests")
+ includeClasses.add("org.opensearch.repositories.gcs.GoogleCloudStorageBlobContainerRetriesTests")
+ includeClasses.add("org.opensearch.repositories.gcs.GoogleCloudStorageBlobStoreRepositoryTests")
+ includeClasses.add("org.opensearch.repositories.s3.S3BlobStoreRepositoryTests")
+ includeClasses.add("org.opensearch.search.ConcurrentSegmentSearchTimeoutIT")
+ includeClasses.add("org.opensearch.search.SearchTimeoutIT")
+ includeClasses.add("org.opensearch.search.SearchWeightedRoutingIT")
+ includeClasses.add("org.opensearch.search.aggregations.bucket.DoubleTermsIT")
+ includeClasses.add("org.opensearch.search.aggregations.bucket.terms.StringTermsIT")
+ includeClasses.add("org.opensearch.search.aggregations.metrics.CardinalityIT")
+ includeClasses.add("org.opensearch.search.backpressure.SearchBackpressureIT")
+ includeClasses.add("org.opensearch.search.basic.SearchWithRandomIOExceptionsIT")
+ includeClasses.add("org.opensearch.search.pit.DeletePitMultiNodeIT")
+ includeClasses.add("org.opensearch.smoketest.SmokeTestMultiNodeClientYamlTestSuiteIT")
+ includeClasses.add("org.opensearch.snapshots.CloneSnapshotIT")
+ includeClasses.add("org.opensearch.snapshots.DedicatedClusterSnapshotRestoreIT")
+ includeClasses.add("org.opensearch.snapshots.RestoreSnapshotIT")
+ includeClasses.add("org.opensearch.snapshots.SnapshotStatusApisIT")
+ includeClasses.add("org.opensearch.test.rest.ClientYamlTestSuiteIT")
+ includeClasses.add("org.opensearch.upgrade.DetectEsInstallationTaskTests")
+ }
}
}
}
From 077e12c0cf32e42500949c9e39c6fba83feca76d Mon Sep 17 00:00:00 2001
From: Kunal Kotwani
Date: Mon, 24 Jul 2023 14:50:28 -0700
Subject: [PATCH 08/71] Fix versions for 2.x on main (#8846)
Signed-off-by: Kunal Kotwani
---
CHANGELOG.md | 2 +-
README.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3552c66c83229..4bc645ed1bd6a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -103,4 +103,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
### Security
[Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD
-[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.8...2.x
\ No newline at end of file
+[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x
diff --git a/README.md b/README.md
index 55ff067e4d779..b5fc45509b002 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@
[![Security Vulnerabilities](https://img.shields.io/github/issues/opensearch-project/OpenSearch/security%20vulnerability?labelColor=red)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"security%20vulnerability")
[![Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/issues)
[![Open Pull Requests](https://img.shields.io/github/issues-pr/opensearch-project/OpenSearch)](https://github.com/opensearch-project/OpenSearch/pulls)
-[![2.8 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.8.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.8.0")
+[![2.10 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v2.10.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v2.10.0")
[![3.0 Open Issues](https://img.shields.io/github/issues/opensearch-project/OpenSearch/v3.0.0)](https://github.com/opensearch-project/OpenSearch/issues?q=is%3Aissue+is%3Aopen+label%3A"v3.0.0")
[![GHA gradle check](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/gradle-check.yml)
[![GHA validate pull request](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml/badge.svg)](https://github.com/opensearch-project/OpenSearch/actions/workflows/wrapper.yml)
From a3baa68b7b014520d257efbb0d4b13f66e134d12 Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Mon, 24 Jul 2023 16:44:54 -0700
Subject: [PATCH 09/71] Fix flaky test
SegmentReplicationRemoteStoreIT.testPressureServiceStats (#8827)
* Fix ReplicationTracker to not include unavailable former primary shards when computing replication stats.
Signed-off-by: Marc Handalian
* Fix relocation IT relying on stats to determine if segrep has occurred. The API should still show a result for the replica even if it has not sync'd.
Signed-off-by: Marc Handalian
---------
Signed-off-by: Marc Handalian
---
.../replication/SegmentReplicationIT.java | 81 +++++++++++--------
.../SegmentReplicationRelocationIT.java | 22 ++++-
.../index/seqno/ReplicationTracker.java | 72 +++++++++++------
.../checkpoint/ReplicationCheckpoint.java | 4 +
.../index/seqno/ReplicationTrackerTests.java | 54 +++++++------
5 files changed, 150 insertions(+), 83 deletions(-)
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index d345578c7de6e..2a7e8e58b2d03 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -875,69 +875,84 @@ public void testPressureServiceStats() throws Exception {
waitForDocs(initialDocCount, indexer);
refresh(INDEX_NAME);
+ // get shard references.
+ final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME);
+ final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME);
+ logger.info("Replica aid {}", replicaShard.routingEntry().allocationId());
+ logger.info("former primary aid {}", primaryShard.routingEntry().allocationId());
+
+ // fetch pressure stats from the Primary's Node.
SegmentReplicationPressureService pressureService = internalCluster().getInstance(
SegmentReplicationPressureService.class,
primaryNode
);
- final Map shardStats = pressureService.nodeStats().getShardStats();
- assertEquals(1, shardStats.size());
- final IndexShard primaryShard = getIndexShard(primaryNode, INDEX_NAME);
- IndexShard replica = getIndexShard(replicaNode, INDEX_NAME);
- SegmentReplicationPerGroupStats groupStats = shardStats.get(primaryShard.shardId());
- Set replicaStats = groupStats.getReplicaStats();
- assertEquals(1, replicaStats.size());
-
- // assert replica node returns nothing.
+ // Fetch pressure stats from the Replica's Node we will assert replica node returns nothing until it is promoted.
SegmentReplicationPressureService replicaNode_service = internalCluster().getInstance(
SegmentReplicationPressureService.class,
replicaNode
);
+
+ final Map shardStats = pressureService.nodeStats().getShardStats();
+ assertEquals("We should have stats returned for the replication group", 1, shardStats.size());
+
+ SegmentReplicationPerGroupStats groupStats = shardStats.get(primaryShard.shardId());
+ Set replicaStats = groupStats.getReplicaStats();
+ assertAllocationIdsInReplicaShardStats(Set.of(replicaShard.routingEntry().allocationId().getId()), replicaStats);
+
assertTrue(replicaNode_service.nodeStats().getShardStats().isEmpty());
- // drop the primary, this won't hand off SR state.
+ // drop the primary, this won't hand off pressure stats between old/new primary.
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
ensureYellowAndNoInitializingShards(INDEX_NAME);
- replicaNode_service = internalCluster().getInstance(SegmentReplicationPressureService.class, replicaNode);
- replica = getIndexShard(replicaNode, INDEX_NAME);
- assertTrue("replica should be promoted as a primary", replica.routingEntry().primary());
- assertEquals(1, replicaNode_service.nodeStats().getShardStats().size());
- // we don't have a replica assigned yet, so this should be 0.
- assertEquals(0, replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats().size());
+
+ assertTrue("replica should be promoted as a primary", replicaShard.routingEntry().primary());
+ assertEquals(
+ "We should have stats returned for the replication group",
+ 1,
+ replicaNode_service.nodeStats().getShardStats().size()
+ );
+ // after the primary is dropped and replica is promoted we won't have a replica assigned yet, so stats per replica should return
+ // empty.
+ replicaStats = replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats();
+ assertTrue(replicaStats.isEmpty());
// start another replica.
String replicaNode_2 = internalCluster().startDataOnlyNode();
ensureGreen(INDEX_NAME);
- String docId = String.valueOf(initialDocCount + 1);
- client().prepareIndex(INDEX_NAME).setId(docId).setSource("foo", "bar").get();
- refresh(INDEX_NAME);
- waitForSearchableDocs(initialDocCount + 1, replicaNode_2);
+ final IndexShard secondReplicaShard = getIndexShard(replicaNode_2, INDEX_NAME);
+ final String second_replica_aid = secondReplicaShard.routingEntry().allocationId().getId();
+ waitForSearchableDocs(initialDocCount, replicaNode_2);
- replicaNode_service = internalCluster().getInstance(SegmentReplicationPressureService.class, replicaNode);
- replica = getIndexShard(replicaNode_2, INDEX_NAME);
- assertEquals(1, replicaNode_service.nodeStats().getShardStats().size());
- replicaStats = replicaNode_service.nodeStats().getShardStats().get(primaryShard.shardId()).getReplicaStats();
- assertEquals(1, replicaStats.size());
+ assertEquals(
+ "We should have stats returned for the replication group",
+ 1,
+ replicaNode_service.nodeStats().getShardStats().size()
+ );
+ replicaStats = replicaNode_service.nodeStats().getShardStats().get(replicaShard.shardId()).getReplicaStats();
+ assertAllocationIdsInReplicaShardStats(Set.of(second_replica_aid), replicaStats);
+ final SegmentReplicationShardStats replica_entry = replicaStats.stream().findFirst().get();
+ assertEquals(replica_entry.getCheckpointsBehindCount(), 0);
// test a checkpoint without any new segments
flush(INDEX_NAME);
assertBusy(() -> {
- final SegmentReplicationPressureService service = internalCluster().getInstance(
- SegmentReplicationPressureService.class,
- replicaNode
- );
- assertEquals(1, service.nodeStats().getShardStats().size());
- final Set shardStatsSet = service.nodeStats()
+ assertEquals(1, replicaNode_service.nodeStats().getShardStats().size());
+ final Set shardStatsSet = replicaNode_service.nodeStats()
.getShardStats()
- .get(primaryShard.shardId())
+ .get(replicaShard.shardId())
.getReplicaStats();
- assertEquals(1, shardStatsSet.size());
+ assertAllocationIdsInReplicaShardStats(Set.of(second_replica_aid), shardStatsSet);
final SegmentReplicationShardStats stats = shardStatsSet.stream().findFirst().get();
assertEquals(0, stats.getCheckpointsBehindCount());
});
}
}
+ private void assertAllocationIdsInReplicaShardStats(Set expected, Set replicaStats) {
+ assertEquals(expected, replicaStats.stream().map(SegmentReplicationShardStats::getAllocationId).collect(Collectors.toSet()));
+ }
+
/**
* Tests a scroll query on the replica
* @throws Exception when issue is encountered
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
index 7426ca2f13f84..7cf7e5148dd4a 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
@@ -22,6 +22,8 @@
import org.opensearch.common.Priority;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.unit.TimeValue;
+import org.opensearch.index.SegmentReplicationShardStats;
+import org.opensearch.index.shard.IndexShard;
import org.opensearch.indices.IndicesService;
import org.opensearch.test.OpenSearchIntegTestCase;
import org.opensearch.test.transport.MockTransportService;
@@ -29,8 +31,10 @@
import java.util.ArrayList;
import java.util.List;
+import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -528,13 +532,27 @@ public void testFlushAfterRelocation() throws Exception {
client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
}
- // Verify segment replication event never happened on replica shard
+ final IndexShard replicaShard = getIndexShard(replicaNode, INDEX_NAME);
+
+ // Verify segment replication event never happened on replica shard other than recovery.
+ assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setPreference("_only_local").setSize(0).get(), 0);
+ assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setPreference("_only_local").setSize(0).get(), 0);
+
SegmentReplicationStatsResponse segmentReplicationStatsResponse = client().admin()
.indices()
.prepareSegmentReplicationStats(INDEX_NAME)
.execute()
.actionGet();
- assertTrue(segmentReplicationStatsResponse.getReplicationStats().get(INDEX_NAME).get(0).getReplicaStats().isEmpty());
+ final Set replicaStats = segmentReplicationStatsResponse.getReplicationStats()
+ .get(INDEX_NAME)
+ .get(0)
+ .getReplicaStats();
+ assertEquals(
+ Set.of(replicaShard.routingEntry().allocationId().getId()),
+ replicaStats.stream().map(SegmentReplicationShardStats::getAllocationId).collect(Collectors.toSet())
+ );
+ // the primary still has not refreshed to update its checkpoint, so our replica is not yet behind.
+ assertEquals(0, replicaStats.stream().findFirst().get().getCheckpointsBehindCount());
// Relocate primary to new primary. When new primary starts it does perform a flush.
logger.info("--> relocate the shard from primary to newPrimary");
diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
index 4d6cffa58510f..19fe9ee97cd2f 100644
--- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
+++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java
@@ -247,7 +247,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L
private final Consumer onReplicationGroupUpdated;
- private volatile ReplicationCheckpoint lastPublishedReplicationCheckpoint;
+ private volatile ReplicationCheckpoint latestReplicationCheckpoint;
/**
* Get all retention leases tracked on this shard.
@@ -1054,6 +1054,7 @@ public ReplicationTracker(
this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings());
this.safeCommitInfoSupplier = safeCommitInfoSupplier;
this.onReplicationGroupUpdated = onReplicationGroupUpdated;
+ this.latestReplicationCheckpoint = indexSettings.isSegRepEnabled() ? ReplicationCheckpoint.empty(shardId) : null;
assert Version.V_EMPTY.equals(indexSettings.getIndexVersionCreated()) == false;
assert invariant();
}
@@ -1212,26 +1213,42 @@ public synchronized void updateVisibleCheckpointForShard(final String allocation
*/
public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint checkpoint) {
assert indexSettings.isSegRepEnabled();
- assert handoffInProgress == false;
- if (checkpoint.equals(lastPublishedReplicationCheckpoint) == false) {
- this.lastPublishedReplicationCheckpoint = checkpoint;
- for (Map.Entry<String, CheckpointState> entry : checkpoints.entrySet()) {
- if (entry.getKey().equals(this.shardAllocationId) == false) {
- final CheckpointState cps = entry.getValue();
- if (cps.inSync) {
- cps.checkpointTimers.computeIfAbsent(checkpoint, ignored -> {
- final ReplicationTimer replicationTimer = new ReplicationTimer();
- replicationTimer.start();
- return replicationTimer;
- });
- logger.trace(
- () -> new ParameterizedMessage(
- "updated last published checkpoint to {} - timers [{}]",
- checkpoint,
- cps.checkpointTimers.keySet()
- )
- );
- }
+ if (checkpoint.equals(latestReplicationCheckpoint) == false) {
+ this.latestReplicationCheckpoint = checkpoint;
+ }
+ if (primaryMode) {
+ startReplicationLagTimers();
+ }
+ }
+
+ public ReplicationCheckpoint getLatestReplicationCheckpoint() {
+ return this.latestReplicationCheckpoint;
+ }
+
+ private void startReplicationLagTimers() {
+ for (Map.Entry<String, CheckpointState> entry : checkpoints.entrySet()) {
+ final String allocationId = entry.getKey();
+ if (allocationId.equals(this.shardAllocationId) == false) {
+ final CheckpointState cps = entry.getValue();
+ // if the shard is in checkpoints but is unavailable or out of sync we will not track its replication state.
+ // it is possible for a shard to be in-sync but not yet removed from the checkpoints collection after a failover event.
+ if (cps.inSync
+ && replicationGroup.getUnavailableInSyncShards().contains(allocationId) == false
+ && latestReplicationCheckpoint.isAheadOf(cps.visibleReplicationCheckpoint)) {
+ cps.checkpointTimers.computeIfAbsent(latestReplicationCheckpoint, ignored -> {
+ final ReplicationTimer replicationTimer = new ReplicationTimer();
+ replicationTimer.start();
+ return replicationTimer;
+ });
+ logger.trace(
+ () -> new ParameterizedMessage(
+ "updated last published checkpoint for {} at visible cp {} to {} - timers [{}]",
+ allocationId,
+ cps.visibleReplicationCheckpoint,
+ latestReplicationCheckpoint,
+ cps.checkpointTimers.keySet()
+ )
+ );
}
}
}
@@ -1244,12 +1261,17 @@ public synchronized void setLatestReplicationCheckpoint(ReplicationCheckpoint ch
*/
public synchronized Set<SegmentReplicationShardStats> getSegmentReplicationStats() {
assert indexSettings.isSegRepEnabled();
- final ReplicationCheckpoint lastPublishedCheckpoint = this.lastPublishedReplicationCheckpoint;
- if (primaryMode && lastPublishedCheckpoint != null) {
+ if (primaryMode) {
return this.checkpoints.entrySet()
.stream()
- .filter(entry -> entry.getKey().equals(this.shardAllocationId) == false && entry.getValue().inSync)
- .map(entry -> buildShardStats(lastPublishedCheckpoint.getLength(), entry.getKey(), entry.getValue()))
+ // filter out this shard's allocation id, any shards that are out of sync or unavailable (shard marked in-sync but has not
+ // been assigned to a node).
+ .filter(
+ entry -> entry.getKey().equals(this.shardAllocationId) == false
+ && entry.getValue().inSync
+ && replicationGroup.getUnavailableInSyncShards().contains(entry.getKey()) == false
+ )
+ .map(entry -> buildShardStats(latestReplicationCheckpoint.getLength(), entry.getKey(), entry.getValue()))
.collect(Collectors.toUnmodifiableSet());
}
return Collections.emptySet();
diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java
index da01023ace47c..7549f3450e7f2 100644
--- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java
+++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java
@@ -33,6 +33,10 @@ public class ReplicationCheckpoint implements Writeable, Comparable tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync));
- // get insync ids, filter out the primary.
- final Set<String> inSyncAllocationIds = tracker.getReplicationGroup()
- .getInSyncAllocationIds()
- .stream()
- .filter(id -> tracker.shardAllocationId.equals(id) == false)
- .collect(Collectors.toSet());
+
+ initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED));
final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint(
tracker.shardId(),
@@ -1831,8 +1827,10 @@ public void testSegmentReplicationCheckpointTracking() {
tracker.setLatestReplicationCheckpoint(secondCheckpoint);
tracker.setLatestReplicationCheckpoint(thirdCheckpoint);
+ final Set<String> expectedIds = ids(initializingIds);
+
Set<SegmentReplicationShardStats> groupStats = tracker.getSegmentReplicationStats();
- assertEquals(inSyncAllocationIds.size(), groupStats.size());
+ assertEquals(expectedIds.size(), groupStats.size());
for (SegmentReplicationShardStats shardStat : groupStats) {
assertEquals(3, shardStat.getCheckpointsBehindCount());
assertEquals(100L, shardStat.getBytesBehindCount());
@@ -1840,7 +1838,7 @@ public void testSegmentReplicationCheckpointTracking() {
// simulate replicas moved up to date.
final Map<String, ReplicationTracker.CheckpointState> checkpoints = tracker.checkpoints;
- for (String id : inSyncAllocationIds) {
+ for (String id : expectedIds) {
final ReplicationTracker.CheckpointState checkpointState = checkpoints.get(id);
assertEquals(3, checkpointState.checkpointTimers.size());
tracker.updateVisibleCheckpointForShard(id, initialCheckpoint);
@@ -1848,13 +1846,13 @@ public void testSegmentReplicationCheckpointTracking() {
}
groupStats = tracker.getSegmentReplicationStats();
- assertEquals(inSyncAllocationIds.size(), groupStats.size());
+ assertEquals(expectedIds.size(), groupStats.size());
for (SegmentReplicationShardStats shardStat : groupStats) {
assertEquals(2, shardStat.getCheckpointsBehindCount());
assertEquals(99L, shardStat.getBytesBehindCount());
}
- for (String id : inSyncAllocationIds) {
+ for (String id : expectedIds) {
final ReplicationTracker.CheckpointState checkpointState = checkpoints.get(id);
assertEquals(2, checkpointState.checkpointTimers.size());
tracker.updateVisibleCheckpointForShard(id, thirdCheckpoint);
@@ -1862,7 +1860,7 @@ public void testSegmentReplicationCheckpointTracking() {
}
groupStats = tracker.getSegmentReplicationStats();
- assertEquals(inSyncAllocationIds.size(), groupStats.size());
+ assertEquals(expectedIds.size(), groupStats.size());
for (SegmentReplicationShardStats shardStat : groupStats) {
assertEquals(0, shardStat.getCheckpointsBehindCount());
assertEquals(0L, shardStat.getBytesBehindCount());
@@ -1883,19 +1881,24 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() {
AllocationId primaryId = activeAllocationIds.iterator().next();
IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId);
final ReplicationTracker tracker = newTracker(primaryId, settings);
-
tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable);
tracker.activatePrimaryMode(NO_OPS_PERFORMED);
- assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds)));
- assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable));
- assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync));
- // get insync ids, filter out the primary.
- final Set<String> inSyncAllocationIds = tracker.getReplicationGroup()
- .getInSyncAllocationIds()
- .stream()
- .filter(id -> tracker.shardAllocationId.equals(id) == false)
- .collect(Collectors.toSet());
+ initializingIds.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED));
+
+ assertEquals(tracker.getReplicationGroup().getRoutingTable(), routingTable);
+ assertEquals(
+ "All active & initializing ids are now marked in-sync",
+ Sets.union(ids(activeAllocationIds), ids(initializingIds)),
+ tracker.getReplicationGroup().getInSyncAllocationIds()
+ );
+
+ assertEquals(
+ "Active ids are in-sync but still unavailable",
+ tracker.getReplicationGroup().getUnavailableInSyncShards(),
+ Sets.difference(ids(activeAllocationIds), Set.of(primaryId.getId()))
+ );
+ assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync));
final ReplicationCheckpoint initialCheckpoint = new ReplicationCheckpoint(
tracker.shardId(),
@@ -1907,15 +1910,20 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() {
);
tracker.setLatestReplicationCheckpoint(initialCheckpoint);
+ // we expect that the only returned ids from getSegmentReplicationStats will be the initializing ids we marked with
+ // markAsTrackingAndInSyncQuietly.
+ // This is because the ids marked active initially are still unavailable (don't have an associated routing entry).
+ final Set<String> expectedIds = ids(initializingIds);
Set<SegmentReplicationShardStats> groupStats = tracker.getSegmentReplicationStats();
- assertEquals(inSyncAllocationIds.size(), groupStats.size());
+ final Set<String> actualIds = groupStats.stream().map(SegmentReplicationShardStats::getAllocationId).collect(Collectors.toSet());
+ assertEquals(expectedIds, actualIds);
for (SegmentReplicationShardStats shardStat : groupStats) {
assertEquals(1, shardStat.getCheckpointsBehindCount());
}
// simulate replicas moved up to date.
final Map<String, ReplicationTracker.CheckpointState> checkpoints = tracker.checkpoints;
- for (String id : inSyncAllocationIds) {
+ for (String id : expectedIds) {
final ReplicationTracker.CheckpointState checkpointState = checkpoints.get(id);
assertEquals(1, checkpointState.checkpointTimers.size());
tracker.updateVisibleCheckpointForShard(id, initialCheckpoint);
From 8de5e1ce511e7ed6fa13c823062529ef333657a3 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Tue, 25 Jul 2023 12:39:18 -0400
Subject: [PATCH 10/71] OpenJDK Update (July 2023 Patch releases) (#8868)
Signed-off-by: Andriy Redko
---
CHANGELOG.md | 1 +
.../java/org/opensearch/gradle/test/DistroTestPlugin.java | 4 ++--
buildSrc/version.properties | 2 +-
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4bc645ed1bd6a..68893592ea8d4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -86,6 +86,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `org.gradle.test-retry` from 1.5.3 to 1.5.4 ([#8842](https://github.com/opensearch-project/OpenSearch/pull/8842))
- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838))
- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840))
+- OpenJDK Update (July 2023 Patch releases) ([#8868](https://github.com/opensearch-project/OpenSearch/pull/8868))
### Changed
- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303))
diff --git a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
index 7e0dc72cb9108..d32172758cfce 100644
--- a/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
+++ b/buildSrc/src/main/java/org/opensearch/gradle/test/DistroTestPlugin.java
@@ -75,9 +75,9 @@
import java.util.stream.Stream;
public class DistroTestPlugin implements Plugin<Project> {
- private static final String SYSTEM_JDK_VERSION = "11.0.19+7";
+ private static final String SYSTEM_JDK_VERSION = "11.0.20+8";
private static final String SYSTEM_JDK_VENDOR = "adoptium";
- private static final String GRADLE_JDK_VERSION = "17.0.7+7";
+ private static final String GRADLE_JDK_VERSION = "17.0.8+7";
private static final String GRADLE_JDK_VENDOR = "adoptium";
// all distributions used by distro tests. this is temporary until tests are per distribution
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 246f6e3444224..69aa4c9667c9c 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -2,7 +2,7 @@ opensearch = 3.0.0
lucene = 9.7.0
bundled_jdk_vendor = adoptium
-bundled_jdk = 20.0.1+9
+bundled_jdk = 20.0.2+9
# See please https://github.com/adoptium/temurin-build/issues/3371
bundled_jdk_linux_ppc64le = 20+36
From f6078707b9ae3be2ecbf51e45ac21112cf76245d Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Tue, 25 Jul 2023 13:43:46 -0400
Subject: [PATCH 11/71] Update Apache Lucene to 9.8.0-snapshot-4373c3b (#8668)
Signed-off-by: Andriy Redko
---
buildSrc/version.properties | 2 +-
libs/core/licenses/lucene-core-9.7.0.jar.sha1 | 1 -
.../lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../core/src/main/java/org/opensearch/Version.java | 4 ++--
.../licenses/lucene-expressions-9.7.0.jar.sha1 | 1 -
...ene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../licenses/lucene-analysis-icu-9.7.0.jar.sha1 | 1 -
...ne-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../lucene-analysis-kuromoji-9.7.0.jar.sha1 | 1 -
...alysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../licenses/lucene-analysis-nori-9.7.0.jar.sha1 | 1 -
...e-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../lucene-analysis-phonetic-9.7.0.jar.sha1 | 1 -
...alysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../lucene-analysis-smartcn-9.7.0.jar.sha1 | 1 -
...nalysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../lucene-analysis-stempel-9.7.0.jar.sha1 | 1 -
...nalysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../lucene-analysis-morfologik-9.7.0.jar.sha1 | 1 -
...ysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../licenses/lucene-analysis-common-9.7.0.jar.sha1 | 1 -
...analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../licenses/lucene-backward-codecs-9.7.0.jar.sha1 | 1 -
...backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-core-9.7.0.jar.sha1 | 1 -
.../lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-grouping-9.7.0.jar.sha1 | 1 -
...lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-highlighter-9.7.0.jar.sha1 | 1 -
...ene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-join-9.7.0.jar.sha1 | 1 -
.../lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-memory-9.7.0.jar.sha1 | 1 -
.../lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-misc-9.7.0.jar.sha1 | 1 -
.../lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-queries-9.7.0.jar.sha1 | 1 -
.../lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-queryparser-9.7.0.jar.sha1 | 1 -
...ene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-sandbox-9.7.0.jar.sha1 | 1 -
.../lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../licenses/lucene-spatial-extras-9.7.0.jar.sha1 | 1 -
...-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-spatial3d-9.7.0.jar.sha1 | 1 -
...ucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
server/licenses/lucene-suggest-9.7.0.jar.sha1 | 1 -
.../lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 | 1 +
.../search/functionscore/ExplainableScriptIT.java | 14 ++++++++++++--
.../suggest/completion/CompletionSuggester.java | 1 +
50 files changed, 39 insertions(+), 28 deletions(-)
delete mode 100644 libs/core/licenses/lucene-core-9.7.0.jar.sha1
create mode 100644 libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1
create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1
create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1
create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1
create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1
create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1
create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1
create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1
create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-analysis-common-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-backward-codecs-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-core-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-grouping-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-highlighter-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-join-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-memory-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-misc-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-queries-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-queryparser-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-sandbox-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-spatial-extras-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-spatial3d-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1
delete mode 100644 server/licenses/lucene-suggest-9.7.0.jar.sha1
create mode 100644 server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1
diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 69aa4c9667c9c..e4a9293c59b8f 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -1,5 +1,5 @@
opensearch = 3.0.0
-lucene = 9.7.0
+lucene = 9.8.0-snapshot-4373c3b
bundled_jdk_vendor = adoptium
bundled_jdk = 20.0.2+9
diff --git a/libs/core/licenses/lucene-core-9.7.0.jar.sha1 b/libs/core/licenses/lucene-core-9.7.0.jar.sha1
deleted file mode 100644
index 2b0f77275c0ab..0000000000000
--- a/libs/core/licenses/lucene-core-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ad391210ffd806931334be9670a35af00c56f959
\ No newline at end of file
diff --git a/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..dc363f2776429
--- /dev/null
+++ b/libs/core/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+30c3afcf058532d3d2b8820375043000e7f34a9b
\ No newline at end of file
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index f85da63bdbb1f..9329f221922ea 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -55,7 +55,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
* The logic for ID is: XXYYZZAA, where XX is major version, YY is minor version, ZZ is revision, and AA is alpha/beta/rc indicator AA
* values below 25 are for alpha builder (since 5.0), and above 25 and below 50 are beta builds, and below 99 are RC builds, with 99
* indicating a release the (internal) format of the id is there so we can easily do after/before checks on the id
- *
+ *
* IMPORTANT: Unreleased vs. Released Versions
*
* All listed versions MUST be released versions, except the last major, the last minor and the last revison. ONLY those are required
@@ -91,7 +91,7 @@ public class Version implements Comparable, ToXContentFragment {
public static final Version V_2_8_1 = new Version(2080199, org.apache.lucene.util.Version.LUCENE_9_6_0);
public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0);
public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0);
- public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_7_0);
+ public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0);
public static final Version CURRENT = V_3_0_0;
public static Version fromId(int id) {
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1
deleted file mode 100644
index ecf696b4b3b83..0000000000000
--- a/modules/lang-expression/licenses/lucene-expressions-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-297e1cfade4ef71466cc9d4f361d81807c8dc4c8
\ No newline at end of file
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..6eaa40708e4ae
--- /dev/null
+++ b/modules/lang-expression/licenses/lucene-expressions-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+9f8a34fc3d450343ab05ccb5af318a836a6a5fb3
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1
deleted file mode 100644
index 0ed030926ab93..0000000000000
--- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-94293b169fb8572f440a5a4a523320ecf9778ffe
\ No newline at end of file
diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..8a3332c950b6d
--- /dev/null
+++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+fde64e3b23bc9a0849b9897febfe9f13c5113143
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1
deleted file mode 100644
index ddd67276606a5..0000000000000
--- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-2df800a38b64867b8dcd61fc2cd986114e4a80cb
\ No newline at end of file
diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..33c2afacf2395
--- /dev/null
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+b01a791705fa01fce48dd02ea79fa8045de8dd5e
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1
deleted file mode 100644
index 0cd68af98e724..0000000000000
--- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a01e8153f34d72e8c8c0180c1dea5b10f677dd3a
\ No newline at end of file
diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..1e7986dafa11e
--- /dev/null
+++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+43d19320b1b9cd18638b1602fa87d5f21ee043bc
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1
deleted file mode 100644
index c7b4d2dc6da75..0000000000000
--- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b7d47d54683b0b1e09b271c32d1b7d3eb1990f49
\ No newline at end of file
diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..14880d9c2d243
--- /dev/null
+++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+9244dc232f175010b480d4d88e13945c17a0b28b
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1
deleted file mode 100644
index 8df7245044171..0000000000000
--- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5e68b9816e6cff8ee15f5b350cf2ffa54f9828b7
\ No newline at end of file
diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..edc4de3fffe28
--- /dev/null
+++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+3101a4f79820c1ca3dfb8f49b74c5fb5b32940e1
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1
deleted file mode 100644
index 974e4202f5ffb..0000000000000
--- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d23b1f05b471e05d0d6068b3ece7c8c65672eae7
\ No newline at end of file
diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..54c310277b09b
--- /dev/null
+++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+f12b2a22cd5ebcd84f40a40e78fdd4e268b3b26d
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1
deleted file mode 100644
index dce408a7d40ef..0000000000000
--- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-dfb4313f3c68d337310522840d7144c1605d084a
\ No newline at end of file
diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..358db9ea3f0f5
--- /dev/null
+++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+7dbf5cc3dff93cc1ffe45d79b129859590d001dd
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.7.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.7.0.jar.sha1
deleted file mode 100644
index 45d8f459573b1..0000000000000
--- a/server/licenses/lucene-analysis-common-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-27ba6caaa4587a982cd451f7217b5a982bcfc44a
\ No newline at end of file
diff --git a/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..e7c7dc2bbc046
--- /dev/null
+++ b/server/licenses/lucene-analysis-common-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+1446b7641743a1082b566179d1bf2960f5a0724b
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1
deleted file mode 100644
index 3981ea4fa226e..0000000000000
--- a/server/licenses/lucene-backward-codecs-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6389463bfbfcf902c8d31d12e9513a6818ac9d5e
\ No newline at end of file
diff --git a/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..d0f64519cd6ff
--- /dev/null
+++ b/server/licenses/lucene-backward-codecs-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+127032ea137d2501b24f0e35e5f9a2e1c7864633
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.7.0.jar.sha1 b/server/licenses/lucene-core-9.7.0.jar.sha1
deleted file mode 100644
index 2b0f77275c0ab..0000000000000
--- a/server/licenses/lucene-core-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ad391210ffd806931334be9670a35af00c56f959
\ No newline at end of file
diff --git a/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..dc363f2776429
--- /dev/null
+++ b/server/licenses/lucene-core-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+30c3afcf058532d3d2b8820375043000e7f34a9b
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.7.0.jar.sha1 b/server/licenses/lucene-grouping-9.7.0.jar.sha1
deleted file mode 100644
index 90acbf6dcee8d..0000000000000
--- a/server/licenses/lucene-grouping-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-8e6f0c229f4861be641047c33b05067176e4279c
\ No newline at end of file
diff --git a/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..294beba43f62a
--- /dev/null
+++ b/server/licenses/lucene-grouping-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+e6f742efe0ef3b383468fe38f88ab2dd69ed3d2c
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.7.0.jar.sha1 b/server/licenses/lucene-highlighter-9.7.0.jar.sha1
deleted file mode 100644
index bfcca0bc6cb5b..0000000000000
--- a/server/licenses/lucene-highlighter-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-facb7c7ee0f75ed457a2d98f10d6430e25a53691
\ No newline at end of file
diff --git a/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..c2a2ef5b13946
--- /dev/null
+++ b/server/licenses/lucene-highlighter-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+3162856444777130dee2c4cabe1bf6d18710ff63
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.7.0.jar.sha1 b/server/licenses/lucene-join-9.7.0.jar.sha1
deleted file mode 100644
index 0dab3a7ddc41a..0000000000000
--- a/server/licenses/lucene-join-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d041bdc0947a14223cf68357407ee18b21027587
\ No newline at end of file
diff --git a/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..7c6adaaba9cf1
--- /dev/null
+++ b/server/licenses/lucene-join-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+5fe8383516eca7300f978ce38042e327b0a57877
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.7.0.jar.sha1 b/server/licenses/lucene-memory-9.7.0.jar.sha1
deleted file mode 100644
index 357a9c4b2ea26..0000000000000
--- a/server/licenses/lucene-memory-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-0fade51ee353e15ddbbc45262aafe6f99ed020f1
\ No newline at end of file
diff --git a/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..586702c968a77
--- /dev/null
+++ b/server/licenses/lucene-memory-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+b3e77970485be6d2dd59b999bbaa65a2cb993744
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.7.0.jar.sha1 b/server/licenses/lucene-misc-9.7.0.jar.sha1
deleted file mode 100644
index da5e1921626b2..0000000000000
--- a/server/licenses/lucene-misc-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7fcf451e2376526c3a027958812866cc5b0ff13f
\ No newline at end of file
diff --git a/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..493598eefff5e
--- /dev/null
+++ b/server/licenses/lucene-misc-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+86d667ea2f7fb2142d2acacf801dcea47d014a5e
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.7.0.jar.sha1 b/server/licenses/lucene-queries-9.7.0.jar.sha1
deleted file mode 100644
index fa82e95a7e19f..0000000000000
--- a/server/licenses/lucene-queries-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-126989d4622419aa06fcbf3a342e859cab8c8799
\ No newline at end of file
diff --git a/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..1bf937f10d795
--- /dev/null
+++ b/server/licenses/lucene-queries-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+930d004de698f374da8ac5530fd80e241edeba45
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.7.0.jar.sha1 b/server/licenses/lucene-queryparser-9.7.0.jar.sha1
deleted file mode 100644
index 438db0aea66e1..0000000000000
--- a/server/licenses/lucene-queryparser-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6e77bde908ff698354e4a2149e6dd4658b56d7b0
\ No newline at end of file
diff --git a/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..74458bc93f90b
--- /dev/null
+++ b/server/licenses/lucene-queryparser-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+f62882823d5aa9ed4cf0081a8c18f35e21992080
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.7.0.jar.sha1 b/server/licenses/lucene-sandbox-9.7.0.jar.sha1
deleted file mode 100644
index 38b0b1cccbc29..0000000000000
--- a/server/licenses/lucene-sandbox-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-9f3e8e1947f2f1c5784132444af51a060ff0b4bf
\ No newline at end of file
diff --git a/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..3231d0e067940
--- /dev/null
+++ b/server/licenses/lucene-sandbox-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+f1ec1527e283b423b7ff5e12cd8d889e7247199d
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1
deleted file mode 100644
index 48679df469fd1..0000000000000
--- a/server/licenses/lucene-spatial-extras-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-01b0bc7a407d8c35a70a1adf7966bb3e7caae928
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..dd47faf91f206
--- /dev/null
+++ b/server/licenses/lucene-spatial-extras-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+de787c052879893e47d21fa161c93413665d55d7
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.7.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.7.0.jar.sha1
deleted file mode 100644
index 55d4d217fa6b9..0000000000000
--- a/server/licenses/lucene-spatial3d-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-7c6b1b6e0a70c9cd177371e648648c2f896742a2
\ No newline at end of file
diff --git a/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..2b378438bfb14
--- /dev/null
+++ b/server/licenses/lucene-spatial3d-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+7e541ed960a571f5d9a0ecff5c26fd5ca857581e
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.7.0.jar.sha1 b/server/licenses/lucene-suggest-9.7.0.jar.sha1
deleted file mode 100644
index d4d7e6cd6bed9..0000000000000
--- a/server/licenses/lucene-suggest-9.7.0.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-5c37fd9a5d71dc87fe1cd4c18ff295ec8cfac170
\ No newline at end of file
diff --git a/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1 b/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1
new file mode 100644
index 0000000000000..1e3ed6561e3ef
--- /dev/null
+++ b/server/licenses/lucene-suggest-9.8.0-snapshot-4373c3b.jar.sha1
@@ -0,0 +1 @@
+4b222ef09a5f20896d031a8322f2e69304c16384
\ No newline at end of file
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java
index f67b913a75871..3651a7354e5de 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/functionscore/ExplainableScriptIT.java
@@ -179,8 +179,18 @@ public void testExplainScript() throws InterruptedException, IOException, Execut
for (SearchHit hit : hits.getHits()) {
assertThat(hit.getId(), equalTo(Integer.toString(idCounter)));
assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter)));
- assertThat(hit.getExplanation().toString(), containsString("1 = n"));
- assertThat(hit.getExplanation().toString(), containsString("1 = N"));
+
+ // Since Apache Lucene 9.8, the scores are not computed because script (see please ExplainableScriptPlugin)
+ // says "needs_score() == false"
+ // 19.0 = min of:
+ // 19.0 = This script returned 19.0
+ // 0.0 = _score:
+ // 0.0 = weight(text:text in 0) [PerFieldSimilarity], result of:
+ // 0.0 = score(freq=1.0), with freq of:
+ // 1.0 = freq, occurrences of term within document
+ // 3.4028235E38 = maxBoost
+
+ assertThat(hit.getExplanation().toString(), containsString("1.0 = freq, occurrences of term within document"));
assertThat(hit.getExplanation().getDetails().length, equalTo(2));
idCounter--;
}
diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java
index c7498635d2553..175503cb94e3d 100644
--- a/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java
+++ b/server/src/main/java/org/opensearch/search/suggest/completion/CompletionSuggester.java
@@ -116,6 +116,7 @@ private static void suggest(IndexSearcher searcher, CompletionQuery query, TopSu
}
}
}
+ collector.finish();
}
@Override
From b9b5e5c676150f1148c2eb9306e3ce1c8a57c87c Mon Sep 17 00:00:00 2001
From: Jay Deng
Date: Tue, 25 Jul 2023 11:02:32 -0700
Subject: [PATCH 12/71] Create separate SourceLookup instance per segment slice
in SignificantTextAggregatorFactory (#8807)
* Remove flakey assertion in SearchTimeoutIT
Signed-off-by: Jay Deng
* Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory
Signed-off-by: Jay Deng
---------
Signed-off-by: Jay Deng
---
CHANGELOG.md | 1 +
.../java/org/opensearch/search/SearchTimeoutIT.java | 1 -
.../bucket/terms/SignificantTextAggregatorFactory.java | 6 +++---
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 68893592ea8d4..a4c1d8a693df0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -94,6 +94,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Change InternalSignificantTerms to sum shard-level superset counts only in final reduce ([#8735](https://github.com/opensearch-project/OpenSearch/pull/8735))
- Exclude 'benchmarks' from codecov report ([#8805](https://github.com/opensearch-project/OpenSearch/pull/8805))
- [Refactor] MediaTypeParser to MediaTypeParserRegistry ([#8636](https://github.com/opensearch-project/OpenSearch/pull/8636))
+- Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807))
### Deprecated
diff --git a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
index c7392b260319a..aa8ef3f29c989 100644
--- a/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/search/SearchTimeoutIT.java
@@ -79,7 +79,6 @@ public void testSimpleTimeout() throws Exception {
.get();
assertTrue(searchResponse.isTimedOut());
assertEquals(0, searchResponse.getFailedShards());
- assertTrue(numDocs > searchResponse.getHits().getTotalHits().value);
}
public void testSimpleDoesNotTimeout() throws Exception {
diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java
index e5cc3f9dbaabd..7f5804c8b9561 100644
--- a/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java
+++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/terms/SignificantTextAggregatorFactory.java
@@ -148,7 +148,6 @@ protected Aggregator createInternal(
: includeExclude.convertToStringFilter(DocValueFormat.RAW, maxRegexLength);
MapStringTermsAggregator.CollectorSource collectorSource = new SignificantTextCollectorSource(
- queryShardContext.lookup().source(),
queryShardContext.bigArrays(),
fieldType,
sourceFieldNames,
@@ -186,13 +185,14 @@ private static class SignificantTextCollectorSource implements MapStringTermsAgg
private ObjectArray dupSequenceSpotters;
SignificantTextCollectorSource(
- SourceLookup sourceLookup,
BigArrays bigArrays,
MappedFieldType fieldType,
String[] sourceFieldNames,
boolean filterDuplicateText
) {
- this.sourceLookup = sourceLookup;
+ // Create a new SourceLookup instance per aggregator instead of use the shared one from SearchLookup. This is fine because it
+ // will only be accessed by this Aggregator instance and not anywhere else.
+ this.sourceLookup = new SourceLookup();
this.bigArrays = bigArrays;
this.fieldType = fieldType;
this.sourceFieldNames = sourceFieldNames;
From 6786608a87d3e7fa306382a38e5702ae830f60e4 Mon Sep 17 00:00:00 2001
From: Tianli Feng
Date: Tue, 25 Jul 2023 11:32:32 -0700
Subject: [PATCH 13/71] Bump version of Hadoop dependencies to 3.3.5 (#6995)
Signed-off-by: Tianli Feng
Signed-off-by: Andriy Redko
---
CHANGELOG.md | 1 +
distribution/src/config/jvm.options | 3 ++
.../SecuredForkJoinWorkerThreadFactory.java | 46 +++++++++++++++++++
plugins/repository-hdfs/build.gradle | 6 ++-
.../licenses/hadoop-client-api-3.3.4.jar.sha1 | 1 -
.../licenses/hadoop-client-api-3.3.6.jar.sha1 | 1 +
.../hadoop-client-runtime-3.3.4.jar.sha1 | 1 -
.../hadoop-client-runtime-3.3.6.jar.sha1 | 1 +
.../licenses/hadoop-hdfs-3.3.4.jar.sha1 | 1 -
.../licenses/hadoop-hdfs-3.3.6.jar.sha1 | 1 +
.../hdfs/HdfsClientThreadLeakFilter.java | 5 +-
11 files changed, 62 insertions(+), 5 deletions(-)
create mode 100644 libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java
delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/hadoop-client-api-3.3.6.jar.sha1
delete mode 100644 plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.6.jar.sha1
delete mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1
create mode 100644 plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.6.jar.sha1
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a4c1d8a693df0..e661846a8fc72 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -87,6 +87,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Bump `com.netflix.nebula.ospackage-base` from 11.3.0 to 11.4.0 ([#8838](https://github.com/opensearch-project/OpenSearch/pull/8838))
- Bump `com.google.http-client:google-http-client-gson` from 1.43.2 to 1.43.3 ([#8840](https://github.com/opensearch-project/OpenSearch/pull/8840))
- OpenJDK Update (July 2023 Patch releases) ([#8868](https://github.com/opensearch-project/OpenSearch/pull/8868)
+- Bump `hadoop` libraries from 3.3.4 to 3.3.6 ([#6995](https://github.com/opensearch-project/OpenSearch/pull/6995))
### Changed
- Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303))
diff --git a/distribution/src/config/jvm.options b/distribution/src/config/jvm.options
index e15afc0f677c3..952110c6c0289 100644
--- a/distribution/src/config/jvm.options
+++ b/distribution/src/config/jvm.options
@@ -82,3 +82,6 @@ ${error.file}
# JDK 20+ Incubating Vector Module for SIMD optimizations;
# disabling may reduce performance on vector optimized lucene
20:--add-modules=jdk.incubator.vector
+
+# HDFS ForkJoinPool.common() support by SecurityManager
+-Djava.util.concurrent.ForkJoinPool.common.threadFactory=org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory
diff --git a/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java
new file mode 100644
index 0000000000000..fe239fea8129e
--- /dev/null
+++ b/libs/secure-sm/src/main/java/org/opensearch/secure_sm/SecuredForkJoinWorkerThreadFactory.java
@@ -0,0 +1,46 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.secure_sm;
+
+import java.security.AccessControlContext;
+import java.security.AccessController;
+import java.security.Permission;
+import java.security.Permissions;
+import java.security.PrivilegedAction;
+import java.security.ProtectionDomain;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory;
+import java.util.concurrent.ForkJoinWorkerThread;
+
+public class SecuredForkJoinWorkerThreadFactory implements ForkJoinWorkerThreadFactory {
+ static AccessControlContext contextWithPermissions(Permission... perms) {
+ Permissions permissions = new Permissions();
+ for (Permission perm : perms)
+ permissions.add(perm);
+ return new AccessControlContext(new ProtectionDomain[] { new ProtectionDomain(null, permissions) });
+ }
+
+ // ACC for access to the factory
+ private static final AccessControlContext ACC = contextWithPermissions(
+ new RuntimePermission("getClassLoader"),
+ new RuntimePermission("setContextClassLoader"),
+ new RuntimePermission("modifyThreadGroup"),
+ new RuntimePermission("modifyThread")
+ );
+
+ public final ForkJoinWorkerThread newThread(ForkJoinPool pool) {
+ return AccessController.doPrivileged(new PrivilegedAction<>() {
+ public ForkJoinWorkerThread run() {
+ return new ForkJoinWorkerThread(pool) {
+
+ };
+ }
+ }, ACC);
+ }
+}
diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle
index 0b13e18ba3f4d..6626bfccc6662 100644
--- a/plugins/repository-hdfs/build.gradle
+++ b/plugins/repository-hdfs/build.gradle
@@ -48,7 +48,7 @@ opensearchplugin {
}
versions << [
- 'hadoop3': '3.3.4'
+ 'hadoop3': '3.3.6'
]
testFixtures.useFixture ":test:fixtures:krb5kdc-fixture", "hdfs"
@@ -440,3 +440,7 @@ thirdPartyAudit {
'org.apache.avro.reflect.FieldAccessUnsafe$UnsafeShortField',
)
}
+
+tasks.withType(JavaForkOptions) {
+ systemProperty "java.util.concurrent.ForkJoinPool.common.threadFactory", "org.opensearch.secure_sm.SecuredForkJoinWorkerThreadFactory"
+}
diff --git a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1
deleted file mode 100644
index dd79b8a10cebc..0000000000000
--- a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-6339a8f7279310c8b1f7ef314b592d8c71ca72ef
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.6.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.6.jar.sha1
new file mode 100644
index 0000000000000..d99793bc56522
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/hadoop-client-api-3.3.6.jar.sha1
@@ -0,0 +1 @@
+12ac6f103a0ff29fce17a078c7c64d25320b6165
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1
deleted file mode 100644
index 32d58d1dc501a..0000000000000
--- a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-21f7a9a2da446f1e5b3e5af16ebf956d3ee43ee0
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.6.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.6.jar.sha1
new file mode 100644
index 0000000000000..ea22d763b7bfa
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/hadoop-client-runtime-3.3.6.jar.sha1
@@ -0,0 +1 @@
+81065531e63fccbe85fb04a3274709593fb00d3c
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1
deleted file mode 100644
index 532d25a44531f..0000000000000
--- a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.4.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-036ef2f86dc44410d2bb5d54ce40435d2484d9a5
\ No newline at end of file
diff --git a/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.6.jar.sha1 b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.6.jar.sha1
new file mode 100644
index 0000000000000..fe60968056eb7
--- /dev/null
+++ b/plugins/repository-hdfs/licenses/hadoop-hdfs-3.3.6.jar.sha1
@@ -0,0 +1 @@
+ba40aca60f39599d5b1f1d32b35295bfde1f3c8b
\ No newline at end of file
diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java
index b9b0e9e87dd0c..2758bd020e979 100644
--- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java
+++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsClientThreadLeakFilter.java
@@ -43,6 +43,9 @@
* to ignore the offending thread until a version of Hadoop is released that addresses the incorrect
* interrupt handling.
*
+ * In Hadoop 3.3.6, the org.apache.hadoop.fs.statistics.impl.EvaluatingStatisticsMap uses ForkJoinPool
+ * to perform statistics calculation, leaving dangling workers.
+ *
* @see https://issues.apache.org/jira/browse/HADOOP-12829
* @see "org.apache.hadoop.fs.FileSystem.Statistics.StatisticsDataReferenceCleaner"
* @see "org.apache.hadoop.fs.FileSystem.Statistics"
@@ -53,6 +56,6 @@ public final class HdfsClientThreadLeakFilter implements ThreadFilter {
@Override
public boolean reject(Thread t) {
- return t.getName().equals(OFFENDING_THREAD_NAME);
+ return t.getName().equals(OFFENDING_THREAD_NAME) || t.getName().startsWith("ForkJoinPool.commonPool-");
}
}
From 96ce9de3a2ae45a1909cacea1aba93841c68a5c0 Mon Sep 17 00:00:00 2001
From: ccook-ibm <101672260+ccook-ibm@users.noreply.github.com>
Date: Tue, 25 Jul 2023 15:22:36 -0700
Subject: [PATCH 14/71] Extend publishXToMavenLocal explicit dependencies to
all repo tasks (#8823)
* Extend publishXToMavenLocal explicit dependencies to all repo tasks
Signed-off-by: Christopher Cook
* Simplify expression
Signed-off-by: Christopher Cook
Signed-off-by: ccook-ibm
---------
Signed-off-by: Christopher Cook
Signed-off-by: ccook-ibm
---
modules/lang-painless/build.gradle | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/modules/lang-painless/build.gradle b/modules/lang-painless/build.gradle
index 3b3f2a7f9ca38..d7af8621c478a 100644
--- a/modules/lang-painless/build.gradle
+++ b/modules/lang-painless/build.gradle
@@ -84,8 +84,7 @@ shadowJar {
tasks.validateNebulaPom.dependsOn tasks.generatePomFileForShadowPublication
tasks.validateShadowPom.dependsOn tasks.generatePomFileForNebulaPublication
-tasks.publishNebulaPublicationToMavenLocal.dependsOn tasks.generatePomFileForShadowPublication
-tasks.publishShadowPublicationToMavenLocal.dependsOn tasks.generatePomFileForNebulaPublication
+tasks.withType(AbstractPublishToMaven)*.dependsOn "generatePomFileForShadowPublication", "generatePomFileForNebulaPublication"
tasks.named("dependencyLicenses").configure {
mapping from: /asm-.*/, to: 'asm'
From 1b5e64a4d91cf47d10584440f16aac7fa92f7318 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Tue, 25 Jul 2023 21:50:53 -0700
Subject: [PATCH 15/71] Skip testPitCreatedOnReplica IT with remote store
(#8877)
* Skip testPitCreatedOnReplica IT with remote store
Signed-off-by: Suraj Singh
* Address review comment
Signed-off-by: Suraj Singh
---------
Signed-off-by: Suraj Singh
---
.../opensearch/indices/replication/SegmentReplicationIT.java | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
index 2a7e8e58b2d03..3ab1a2a8564c5 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java
@@ -1177,6 +1177,10 @@ public void testScrollWithOngoingSegmentReplication() throws Exception {
}
public void testPitCreatedOnReplica() throws Exception {
+ assumeFalse(
+ "Skipping the test as it is flaky with remote store. Tracking issue https://github.com/opensearch-project/OpenSearch/issues/8850",
+ segmentReplicationWithRemoteEnabled()
+ );
final String primary = internalCluster().startDataOnlyNode();
createIndex(INDEX_NAME);
ensureYellowAndNoInitializingShards(INDEX_NAME);
From e1a41255de203bc57c1e29469a9bb5d50fe0d84b Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Wed, 26 Jul 2023 01:19:10 -0700
Subject: [PATCH 16/71] Fix testReplicaHasDiffFilesThanPrimary. (#8863)
This test is failing in two ways.
First it fails when copying segments from the remote store and there is a cksum mismatch. In this case
it is not guaranteed the directory implementation will replace the existing file when copying from the store. This change ensures the mismatched file is cleaned up but only if the shard is not serving reads. In that case we fail the shard so it is re-recovered rather than deleting the segment underneath it.
This test also fails with a divide by 0 in RemoteStoreRefreshListener.
Signed-off-by: Marc Handalian
---
.../main/java/org/opensearch/index/shard/IndexShard.java | 8 ++++++++
.../index/shard/RemoteStoreRefreshListener.java | 5 ++---
2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 0d8a54147d65e..8b6d083379fe1 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -4808,6 +4808,14 @@ private boolean localDirectoryContains(Directory localDirectory, String file, lo
return true;
} else {
logger.warn("Checksum mismatch between local and remote segment file: {}, will override local file", file);
+ // If there is a checksum mismatch and we are not serving reads it is safe to go ahead and delete the file now.
+ // Outside of engine resets this method will be invoked during recovery so this is safe.
+ if (isReadAllowed() == false) {
+ localDirectory.deleteFile(file);
+ } else {
+ // segment conflict with remote store while the shard is serving reads.
+ failShard("Local copy of segment " + file + " has a different checksum than the version in remote store", null);
+ }
}
} catch (NoSuchFileException | FileNotFoundException e) {
logger.debug("File {} does not exist in local FS, downloading from remote store", file);
diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
index 3ea8278038ac5..2385b906a7ae5 100644
--- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
+++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java
@@ -457,11 +457,10 @@ private void updateLocalSizeMapAndTracker(Collection segmentFiles) {
private void updateFinalStatusInSegmentTracker(boolean uploadStatus, long bytesBeforeUpload, long startTimeInNS) {
if (uploadStatus) {
long bytesUploaded = segmentTracker.getUploadBytesSucceeded() - bytesBeforeUpload;
- long timeTakenInMS = (System.nanoTime() - startTimeInNS) / 1_000_000L;
-
+ long timeTakenInMS = TimeValue.nsecToMSec(System.nanoTime() - startTimeInNS);
segmentTracker.incrementTotalUploadsSucceeded();
segmentTracker.addUploadBytes(bytesUploaded);
- segmentTracker.addUploadBytesPerSec((bytesUploaded * 1_000L) / timeTakenInMS);
+ segmentTracker.addUploadBytesPerSec((bytesUploaded * 1_000L) / Math.max(1, timeTakenInMS));
segmentTracker.addUploadTimeMs(timeTakenInMS);
} else {
segmentTracker.incrementTotalUploadsFailed();
From 4fd6877eef7c9cb0b48268a7505506aeb33278a2 Mon Sep 17 00:00:00 2001
From: luyuncheng
Date: Thu, 27 Jul 2023 00:30:33 +0800
Subject: [PATCH 17/71] Replace the deprecated IndexReader APIs with new
storedFields() & termVectors() (#7792)
* 1. Remove calling deprecated document api
Signed-off-by: luyuncheng
* 1. Remove calling deprecated document api
2. Fixed some calling and Tests
Signed-off-by: luyuncheng
* 1. Remove calling deprecated document api
2. Fixed some calling and Tests
3. Spotless java
Signed-off-by: luyuncheng
* add changelog
Signed-off-by: luyuncheng
* add changelog
Signed-off-by: luyuncheng
* merge main into branch
Signed-off-by: luyuncheng
* merge main into branch
update CHANGELOG.md
Signed-off-by: luyuncheng
---------
Signed-off-by: luyuncheng
---
CHANGELOG.md | 1 +
.../common/lucene/search/XMoreLikeThis.java | 8 +++-
.../gateway/PersistedClusterStateService.java | 6 +--
.../index/engine/InternalEngine.java | 4 +-
.../index/engine/LuceneChangesSnapshot.java | 2 +-
.../index/engine/TranslogLeafReader.java | 41 +++++++++++--------
.../opensearch/index/get/ShardGetService.java | 2 +-
.../index/shard/ShardSplittingQuery.java | 2 +-
.../index/termvectors/TermVectorsService.java | 7 +++-
.../opensearch/search/fetch/FetchPhase.java | 2 +-
.../subphase/highlight/HighlightUtils.java | 2 +-
.../search/lookup/LeafFieldsLookup.java | 2 +-
.../search/lookup/SourceLookup.java | 2 +-
.../opensearch/common/lucene/LuceneTests.java | 7 +++-
.../lucene/index/FreqTermsEnumTests.java | 4 +-
.../index/engine/InternalEngineTests.java | 7 +++-
.../RecoverySourcePruneMergePolicyTests.java | 10 +++--
.../query/MoreLikeThisQueryBuilderTests.java | 2 +-
.../index/shard/RefreshListenersTests.java | 6 +--
.../index/shard/ShardSplittingQueryTests.java | 2 +-
.../indices/IndicesRequestCacheTests.java | 2 +-
.../lucene/queries/BlendedTermQueryTests.java | 6 +--
.../search/lookup/LeafFieldsLookupTests.java | 15 +++----
.../slice/DocValuesSliceQueryTests.java | 2 +-
.../search/slice/TermsSliceQueryTests.java | 2 +-
.../index/engine/EngineTestCase.java | 7 +++-
26 files changed, 92 insertions(+), 61 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e661846a8fc72..a84f5138bd112 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -46,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719))
+- Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792))
### Deprecated
diff --git a/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java b/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java
index d7ffa2df943b7..49148890abd55 100644
--- a/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java
+++ b/server/src/main/java/org/opensearch/common/lucene/search/XMoreLikeThis.java
@@ -56,7 +56,9 @@
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermVectors;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.BooleanClause;
@@ -808,8 +810,10 @@ public String describeParams() {
*/
private PriorityQueue retrieveTerms(int docNum) throws IOException {
Map termFreqMap = new HashMap<>();
+ final TermVectors termVectors = ir.termVectors();
+ final StoredFields storedFields = ir.storedFields();
for (String fieldName : fieldNames) {
- final Fields vectors = ir.getTermVectors(docNum);
+ final Fields vectors = termVectors.get(docNum);
final Terms vector;
if (vectors != null) {
vector = vectors.terms(fieldName);
@@ -819,7 +823,7 @@ private PriorityQueue retrieveTerms(int docNum) throws IOException {
// field does not store term vector info
if (vector == null) {
- Document d = ir.document(docNum);
+ Document d = storedFields.document(docNum);
IndexableField fields[] = d.getFields(fieldName);
for (IndexableField field : fields) {
final String stringValue = field.stringValue();
diff --git a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java
index 8940b0ed25ed4..caddf92150cff 100644
--- a/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java
+++ b/server/src/main/java/org/opensearch/gateway/PersistedClusterStateService.java
@@ -45,6 +45,7 @@
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.SerialMergeScheduler;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
@@ -507,12 +508,11 @@ private static void consumeFromType(IndexSearcher indexSearcher, String type, Ch
final Bits liveDocs = leafReaderContext.reader().getLiveDocs();
final IntPredicate isLiveDoc = liveDocs == null ? i -> true : liveDocs::get;
final DocIdSetIterator docIdSetIterator = scorer.iterator();
+ final StoredFields storedFields = leafReaderContext.reader().storedFields();
while (docIdSetIterator.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
if (isLiveDoc.test(docIdSetIterator.docID())) {
logger.trace("processing doc {}", docIdSetIterator.docID());
- bytesRefConsumer.accept(
- leafReaderContext.reader().document(docIdSetIterator.docID()).getBinaryValue(DATA_FIELD_NAME)
- );
+ bytesRefConsumer.accept(storedFields.document(docIdSetIterator.docID()).getBinaryValue(DATA_FIELD_NAME));
}
}
}
diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
index 77e2f5cbef7f9..6f8b6d449695e 100644
--- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java
@@ -49,6 +49,7 @@
import org.apache.lucene.index.ShuffleForcedMergePolicy;
import org.apache.lucene.index.SoftDeletesRetentionMergePolicy;
import org.apache.lucene.index.StandardDirectoryReader;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
@@ -2889,6 +2890,7 @@ private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryRead
final CombinedDocValues dv = new CombinedDocValues(leaf.reader());
final IdOnlyFieldVisitor idFieldVisitor = new IdOnlyFieldVisitor();
final DocIdSetIterator iterator = scorer.iterator();
+ final StoredFields storedFields = leaf.reader().storedFields();
int docId;
while ((docId = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
final long primaryTerm = dv.docPrimaryTerm(docId);
@@ -2896,7 +2898,7 @@ private void restoreVersionMapAndCheckpointTracker(DirectoryReader directoryRead
localCheckpointTracker.markSeqNoAsProcessed(seqNo);
localCheckpointTracker.markSeqNoAsPersisted(seqNo);
idFieldVisitor.reset();
- leaf.reader().document(docId, idFieldVisitor);
+ storedFields.document(docId, idFieldVisitor);
if (idFieldVisitor.getId() == null) {
assert dv.isTombstone(docId);
continue;
diff --git a/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java
index abde2aff6e9e6..23fe59456887e 100644
--- a/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java
+++ b/server/src/main/java/org/opensearch/index/engine/LuceneChangesSnapshot.java
@@ -289,7 +289,7 @@ private Translog.Operation readDocAsOp(int docIndex) throws IOException {
? SourceFieldMapper.RECOVERY_SOURCE_NAME
: SourceFieldMapper.NAME;
final FieldsVisitor fields = new FieldsVisitor(true, sourceField);
- leaf.reader().document(segmentDocID, fields);
+ leaf.reader().storedFields().document(segmentDocID, fields);
final Translog.Operation op;
final boolean isTombstone = parallelArray.isTombStone[docIndex];
diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java
index 5efafb562df74..417cdd5a8f030 100644
--- a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java
+++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java
@@ -221,28 +221,33 @@ public int maxDoc() {
@Override
public void document(int docID, StoredFieldVisitor visitor) throws IOException {
- if (docID != 0) {
- throw new IllegalArgumentException("no such doc ID " + docID);
- }
- if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) {
- assert operation.source().toBytesRef().offset == 0;
- assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length;
- visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes);
- }
- if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) {
- visitor.stringField(FAKE_ROUTING_FIELD, operation.routing());
- }
- if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) {
- BytesRef bytesRef = Uid.encodeId(operation.id());
- final byte[] id = new byte[bytesRef.length];
- System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
- visitor.binaryField(FAKE_ID_FIELD, id);
- }
+ storedFields().document(docID, visitor);
}
@Override
public StoredFields storedFields() throws IOException {
- throw new UnsupportedOperationException();
+ return new StoredFields() {
+ @Override
+ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+ if (docID != 0) {
+ throw new IllegalArgumentException("no such doc ID " + docID);
+ }
+ if (visitor.needsField(FAKE_SOURCE_FIELD) == StoredFieldVisitor.Status.YES) {
+ assert operation.source().toBytesRef().offset == 0;
+ assert operation.source().toBytesRef().length == operation.source().toBytesRef().bytes.length;
+ visitor.binaryField(FAKE_SOURCE_FIELD, operation.source().toBytesRef().bytes);
+ }
+ if (operation.routing() != null && visitor.needsField(FAKE_ROUTING_FIELD) == StoredFieldVisitor.Status.YES) {
+ visitor.stringField(FAKE_ROUTING_FIELD, operation.routing());
+ }
+ if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) {
+ BytesRef bytesRef = Uid.encodeId(operation.id());
+ final byte[] id = new byte[bytesRef.length];
+ System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
+ visitor.binaryField(FAKE_ID_FIELD, id);
+ }
+ }
+ };
}
@Override
diff --git a/server/src/main/java/org/opensearch/index/get/ShardGetService.java b/server/src/main/java/org/opensearch/index/get/ShardGetService.java
index a815074119fb1..b3715e097322d 100644
--- a/server/src/main/java/org/opensearch/index/get/ShardGetService.java
+++ b/server/src/main/java/org/opensearch/index/get/ShardGetService.java
@@ -276,7 +276,7 @@ private GetResult innerGetLoadFromStoredFields(
);
if (fieldVisitor != null) {
try {
- docIdAndVersion.reader.document(docIdAndVersion.docId, fieldVisitor);
+ docIdAndVersion.reader.storedFields().document(docIdAndVersion.docId, fieldVisitor);
} catch (IOException e) {
throw new OpenSearchException("Failed to get id [" + id + "]", e);
}
diff --git a/server/src/main/java/org/opensearch/index/shard/ShardSplittingQuery.java b/server/src/main/java/org/opensearch/index/shard/ShardSplittingQuery.java
index 6d559a6cc0673..219ead931797a 100644
--- a/server/src/main/java/org/opensearch/index/shard/ShardSplittingQuery.java
+++ b/server/src/main/java/org/opensearch/index/shard/ShardSplittingQuery.java
@@ -286,7 +286,7 @@ public Status needsField(FieldInfo fieldInfo) throws IOException {
boolean matches(int doc) throws IOException {
routing = id = null;
leftToVisit = 2;
- leafReader.document(doc, this);
+ leafReader.storedFields().document(doc, this);
assert id != null : "docID must not be null - we might have hit a nested document";
int targetShardId = OperationRouting.generateShardId(indexMetadata, id, routing);
return targetShardId != shardId;
diff --git a/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java
index 0e9d7c2f14113..4d515fadb5a13 100644
--- a/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java
+++ b/server/src/main/java/org/opensearch/index/termvectors/TermVectorsService.java
@@ -39,6 +39,7 @@
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.MultiTerms;
import org.apache.lucene.index.Term;
+import org.apache.lucene.index.TermVectors;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.memory.MemoryIndex;
import org.opensearch.OpenSearchException;
@@ -127,7 +128,8 @@ static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequ
/* or from an existing document */
else if (docIdAndVersion != null) {
// fields with stored term vectors
- termVectorsByField = docIdAndVersion.reader.getTermVectors(docIdAndVersion.docId);
+ TermVectors termVectors = docIdAndVersion.reader.termVectors();
+ termVectorsByField = termVectors.get(docIdAndVersion.docId);
Set selectedFields = request.selectedFields();
// generate tvs for fields where analyzer is overridden
if (selectedFields == null && request.perFieldAnalyzer() != null) {
@@ -322,7 +324,8 @@ private static Fields generateTermVectors(
}
}
/* and read vectors from it */
- return index.createSearcher().getIndexReader().getTermVectors(0);
+ TermVectors termVectors = index.createSearcher().getIndexReader().termVectors();
+ return termVectors.get(0);
}
private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVectorsRequest request) throws IOException {
diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
index 97a6093ea39d1..67c6eeae02271 100644
--- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
+++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
@@ -160,7 +160,7 @@ public void execute(SearchContext context) {
SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) currentReaderContext.reader();
fieldReader = lf.getSequentialStoredFieldsReader()::document;
} else {
- fieldReader = currentReaderContext.reader()::document;
+ fieldReader = currentReaderContext.reader().storedFields()::document;
}
for (FetchSubPhaseProcessor processor : processors) {
processor.setNextReader(currentReaderContext);
diff --git a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java
index 7a358b7e4b252..2238554a12149 100644
--- a/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java
+++ b/server/src/main/java/org/opensearch/search/fetch/subphase/highlight/HighlightUtils.java
@@ -72,7 +72,7 @@ public static List loadFieldValues(
) throws IOException {
if (forceSource == false && fieldType.isStored()) {
CustomFieldsVisitor fieldVisitor = new CustomFieldsVisitor(singleton(fieldType.name()), false);
- hitContext.reader().document(hitContext.docId(), fieldVisitor);
+ hitContext.reader().storedFields().document(hitContext.docId(), fieldVisitor);
List textsToHighlight = fieldVisitor.fields().get(fieldType.name());
return textsToHighlight != null ? textsToHighlight : Collections.emptyList();
}
diff --git a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java
index 007ee49b3e697..47bb8754a5803 100644
--- a/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java
+++ b/server/src/main/java/org/opensearch/search/lookup/LeafFieldsLookup.java
@@ -153,7 +153,7 @@ private FieldLookup loadFieldData(String name) {
List values = new ArrayList<>(2);
SingleFieldsVisitor visitor = new SingleFieldsVisitor(data.fieldType(), values);
try {
- reader.document(docId, visitor);
+ reader.storedFields().document(docId, visitor);
} catch (IOException e) {
throw new OpenSearchParseException("failed to load field [{}]", e, name);
}
diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java
index 1341fc0fdabb3..00fdca4e143ee 100644
--- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java
+++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java
@@ -140,7 +140,7 @@ public void setSegmentAndDocument(LeafReaderContext context, int docId) {
SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) context.reader();
fieldReader = lf.getSequentialStoredFieldsReader()::document;
} else {
- fieldReader = context.reader()::document;
+ fieldReader = context.reader().storedFields()::document;
}
} catch (IOException e) {
throw new UncheckedIOException(e);
diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java
index ef0bedac72541..f7be2c4876e6f 100644
--- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java
+++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java
@@ -35,6 +35,7 @@
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.index.StandardDirectoryReader;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.tests.analysis.MockAnalyzer;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.document.Document;
@@ -565,12 +566,13 @@ public void testWrapAllDocsLive() throws Exception {
}
try (DirectoryReader unwrapped = DirectoryReader.open(writer)) {
DirectoryReader reader = Lucene.wrapAllDocsLive(unwrapped);
+ StoredFields storedFields = reader.storedFields();
assertThat(reader.numDocs(), equalTo(liveDocs.size()));
IndexSearcher searcher = new IndexSearcher(reader);
Set actualDocs = new HashSet<>();
TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE);
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
- actualDocs.add(reader.document(scoreDoc.doc).get("id"));
+ actualDocs.add(storedFields.document(scoreDoc.doc).get("id"));
}
assertThat(actualDocs, equalTo(liveDocs));
}
@@ -609,13 +611,14 @@ public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception {
}
try (DirectoryReader unwrapped = DirectoryReader.open(writer)) {
DirectoryReader reader = Lucene.wrapAllDocsLive(unwrapped);
+ StoredFields storedFields = reader.storedFields();
assertThat(reader.maxDoc(), equalTo(numDocs + abortedDocs));
assertThat(reader.numDocs(), equalTo(liveDocs.size()));
IndexSearcher searcher = new IndexSearcher(reader);
List actualDocs = new ArrayList<>();
TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE);
for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
- actualDocs.add(reader.document(scoreDoc.doc).get("id"));
+ actualDocs.add(storedFields.document(scoreDoc.doc).get("id"));
}
assertThat(actualDocs, equalTo(liveDocs));
}
diff --git a/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java b/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java
index 60f39208d008a..9423d3c17c98a 100644
--- a/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java
+++ b/server/src/test/java/org/opensearch/common/lucene/index/FreqTermsEnumTests.java
@@ -43,6 +43,7 @@
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermInSetQuery;
import org.apache.lucene.search.Query;
@@ -149,9 +150,10 @@ public void setUp() throws Exception {
// now go over each doc, build the relevant references and filter
reader = DirectoryReader.open(iw);
+ StoredFields storedFields = reader.storedFields();
List filterTerms = new ArrayList<>();
for (int docId = 0; docId < reader.maxDoc(); docId++) {
- Document doc = reader.document(docId);
+ Document doc = storedFields.document(docId);
addFreqs(doc, referenceAll);
if (!deletedIds.contains(doc.getField("id").stringValue())) {
addFreqs(doc, referenceNotDeleted);
diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
index 612bebf74bc70..c2ac8b0e1d3b3 100644
--- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java
@@ -2541,6 +2541,7 @@ class OpAndVersion {
final Term uidTerm = newUid(doc);
engine.index(indexForDoc(doc));
final BiFunction searcherFactory = engine::acquireSearcher;
+
for (int i = 0; i < thread.length; i++) {
thread[i] = new Thread(() -> {
startGun.countDown();
@@ -2549,10 +2550,12 @@ class OpAndVersion {
} catch (InterruptedException e) {
throw new AssertionError(e);
}
+
for (int op = 0; op < opsPerThread; op++) {
try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), uidTerm), searcherFactory)) {
+
FieldsVisitor visitor = new FieldsVisitor(true);
- get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor);
+ get.docIdAndVersion().reader.storedFields().document(get.docIdAndVersion().docId, visitor);
List values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString()));
String removed = op % 3 == 0 && values.size() > 0 ? values.remove(0) : null;
String added = "v_" + idGenerator.incrementAndGet();
@@ -2608,7 +2611,7 @@ class OpAndVersion {
try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.id(), uidTerm), searcherFactory)) {
FieldsVisitor visitor = new FieldsVisitor(true);
- get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor);
+ get.docIdAndVersion().reader.storedFields().document(get.docIdAndVersion().docId, visitor);
List values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString()));
assertThat(currentValues, equalTo(new HashSet<>(values)));
}
diff --git a/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java b/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java
index a8279ed908779..fed521e2d5ed9 100644
--- a/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java
+++ b/server/src/test/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicyTests.java
@@ -49,6 +49,7 @@
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.ShuffleForcedMergePolicy;
import org.apache.lucene.index.StandardDirectoryReader;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.MatchAllDocsQuery;
@@ -89,8 +90,9 @@ public void testPruneAll() throws IOException {
writer.forceMerge(1);
writer.commit();
try (DirectoryReader reader = DirectoryReader.open(writer)) {
+ StoredFields storedFields = reader.storedFields();
for (int i = 0; i < reader.maxDoc(); i++) {
- Document document = reader.document(i);
+ Document document = storedFields.document(i);
assertEquals(1, document.getFields().size());
assertEquals("source", document.getFields().get(0).name());
}
@@ -157,11 +159,12 @@ public void testPruneSome() throws IOException {
writer.forceMerge(1);
writer.commit();
try (DirectoryReader reader = DirectoryReader.open(writer)) {
+ StoredFields storedFields = reader.storedFields();
assertEquals(1, reader.leaves().size());
NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source");
assertNotNull(extra_source);
for (int i = 0; i < reader.maxDoc(); i++) {
- Document document = reader.document(i);
+ Document document = storedFields.document(i);
Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet());
assertTrue(collect.contains("source"));
assertTrue(collect.contains("even"));
@@ -197,11 +200,12 @@ public void testPruneNone() throws IOException {
writer.forceMerge(1);
writer.commit();
try (DirectoryReader reader = DirectoryReader.open(writer)) {
+ StoredFields storedFields = reader.storedFields();
assertEquals(1, reader.leaves().size());
NumericDocValues extra_source = reader.leaves().get(0).reader().getNumericDocValues("extra_source");
assertNotNull(extra_source);
for (int i = 0; i < reader.maxDoc(); i++) {
- Document document = reader.document(i);
+ Document document = storedFields.document(i);
Set collect = document.getFields().stream().map(IndexableField::name).collect(Collectors.toSet());
assertTrue(collect.contains("source"));
assertTrue(collect.contains("extra_source"));
diff --git a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java
index e6ec132a10474..2d950f0994976 100644
--- a/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java
+++ b/server/src/test/java/org/opensearch/index/query/MoreLikeThisQueryBuilderTests.java
@@ -268,7 +268,7 @@ private static Fields generateFields(String[] fieldNames, String text) throws IO
for (String fieldName : fieldNames) {
index.addField(fieldName, text, new WhitespaceAnalyzer());
}
- return index.createSearcher().getIndexReader().getTermVectors(0);
+ return index.createSearcher().getIndexReader().termVectors().get(0);
}
@Override
diff --git a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java
index f812312004b21..1be6c07539cb7 100644
--- a/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/RefreshListenersTests.java
@@ -36,6 +36,7 @@
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.store.Directory;
@@ -369,9 +370,8 @@ public void testLotsOfThreads() throws Exception {
try (Engine.GetResult getResult = engine.get(get, engine::acquireSearcher)) {
assertTrue("document not found", getResult.exists());
assertEquals(iteration, getResult.version());
- org.apache.lucene.document.Document document = getResult.docIdAndVersion().reader.document(
- getResult.docIdAndVersion().docId
- );
+ StoredFields storedFields = getResult.docIdAndVersion().reader.storedFields();
+ org.apache.lucene.document.Document document = storedFields.document(getResult.docIdAndVersion().docId);
assertThat(document.getValues("test"), arrayContaining(testFieldValue));
}
} catch (Exception t) {
diff --git a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java
index 04dcea210640c..940d9a4ead5f9 100644
--- a/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/ShardSplittingQueryTests.java
@@ -326,7 +326,7 @@ void assertSplit(Directory dir, IndexMetadata metadata, int targetShardId, boole
}
assertEquals(shard_id.docID(), doc);
long shardID = shard_id.nextValue();
- BytesRef id = reader.document(doc).getBinaryValue("_id");
+ BytesRef id = reader.storedFields().document(doc).getBinaryValue("_id");
String actualId = Uid.decodeId(id.bytes, id.offset, id.length);
assertNotEquals(ctx.reader() + " docID: " + doc + " actualID: " + actualId, shardID, targetShardId);
}
diff --git a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
index 3508020cb23d7..a06688150a38a 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesRequestCacheTests.java
@@ -353,7 +353,7 @@ public BytesReference get() {
IndexSearcher searcher = new IndexSearcher(reader);
TopDocs topDocs = searcher.search(new TermQuery(new Term("id", Integer.toString(id))), 1);
assertEquals(1, topDocs.totalHits.value);
- Document document = reader.document(topDocs.scoreDocs[0].doc);
+ Document document = reader.storedFields().document(topDocs.scoreDocs[0].doc);
out.writeString(document.get("value"));
loadedFromCache = false;
return out.bytes();
diff --git a/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java b/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java
index bf9aac344e3df..6844742759883 100644
--- a/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java
+++ b/server/src/test/java/org/opensearch/lucene/queries/BlendedTermQueryTests.java
@@ -112,7 +112,7 @@ public void testDismaxQuery() throws IOException {
query.add(BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "generator"), 0.1f), BooleanClause.Occur.SHOULD);
TopDocs search = searcher.search(query.build(), 10);
ScoreDoc[] scoreDocs = search.scoreDocs;
- assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+ assertEquals(Integer.toString(0), reader.storedFields().document(scoreDocs[0].doc).getField("id").stringValue());
}
{
BooleanQuery.Builder query = new BooleanQuery.Builder();
@@ -134,7 +134,7 @@ public void testDismaxQuery() throws IOException {
query.add(gen, BooleanClause.Occur.SHOULD);
TopDocs search = searcher.search(query.build(), 4);
ScoreDoc[] scoreDocs = search.scoreDocs;
- assertEquals(Integer.toString(1), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+ assertEquals(Integer.toString(1), reader.storedFields().document(scoreDocs[0].doc).getField("id").stringValue());
}
{
@@ -269,7 +269,7 @@ public void testMinTTF() throws IOException {
Query query = BlendedTermQuery.dismaxBlendedQuery(toTerms(fields, "foo"), 0.1f);
TopDocs search = searcher.search(query, 10);
ScoreDoc[] scoreDocs = search.scoreDocs;
- assertEquals(Integer.toString(0), reader.document(scoreDocs[0].doc).getField("id").stringValue());
+ assertEquals(Integer.toString(0), reader.storedFields().document(scoreDocs[0].doc).getField("id").stringValue());
}
reader.close();
w.close();
diff --git a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java
index 0155e288a96fd..85aacfbd63ee2 100644
--- a/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java
+++ b/server/src/test/java/org/opensearch/search/lookup/LeafFieldsLookupTests.java
@@ -36,6 +36,7 @@
import org.apache.lucene.index.IndexOptions;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.VectorEncoding;
import org.apache.lucene.index.VectorSimilarityFunction;
import org.opensearch.index.mapper.MappedFieldType;
@@ -43,11 +44,11 @@
import org.opensearch.test.OpenSearchTestCase;
import org.junit.Before;
+import java.io.IOException;
import java.util.Collections;
import java.util.List;
import static org.mockito.Mockito.any;
-import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -88,12 +89,12 @@ public void setUp() throws Exception {
);
LeafReader leafReader = mock(LeafReader.class);
- doAnswer(invocation -> {
- Object[] args = invocation.getArguments();
- StoredFieldVisitor visitor = (StoredFieldVisitor) args[1];
- visitor.doubleField(mockFieldInfo, 2.718);
- return null;
- }).when(leafReader).document(anyInt(), any(StoredFieldVisitor.class));
+ doAnswer(invocation -> new StoredFields() {
+ @Override
+ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
+ visitor.doubleField(mockFieldInfo, 2.718);
+ }
+ }).when(leafReader).storedFields();
fieldsLookup = new LeafFieldsLookup(mapperService, leafReader);
}
diff --git a/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java b/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java
index 86dc27d73d21f..4d2a441a180a6 100644
--- a/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java
+++ b/server/src/test/java/org/opensearch/search/slice/DocValuesSliceQueryTests.java
@@ -110,7 +110,7 @@ public void setScorer(Scorable scorer) throws IOException {}
@Override
public void collect(int doc) throws IOException {
- Document d = context.reader().document(doc, Collections.singleton("uuid"));
+ Document d = context.reader().storedFields().document(doc, Collections.singleton("uuid"));
String uuid = d.get("uuid");
assertThat(keys.contains(uuid), equalTo(true));
keys.remove(uuid);
diff --git a/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java b/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java
index 84a65b3c8a7cb..8efa4eeef80dd 100644
--- a/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java
+++ b/server/src/test/java/org/opensearch/search/slice/TermsSliceQueryTests.java
@@ -120,7 +120,7 @@ public void setScorer(Scorable scorer) throws IOException {}
@Override
public void collect(int doc) throws IOException {
- Document d = context.reader().document(doc, Collections.singleton("uuid"));
+ Document d = context.reader().storedFields().document(doc, Collections.singleton("uuid"));
String uuid = d.get("uuid");
assertThat(keys.contains(uuid), equalTo(true));
keys.remove(uuid);
diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
index 9c6f36c87896c..1ac92bbb479c3 100644
--- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
+++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java
@@ -49,6 +49,7 @@
import org.apache.lucene.index.LiveIndexWriterConfig;
import org.apache.lucene.index.MergePolicy;
import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.StoredFields;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
@@ -1302,6 +1303,7 @@ public static List getDocIds(Engine engine, boolean refresh
NumericDocValues primaryTermDocValues = reader.getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
NumericDocValues versionDocValues = reader.getNumericDocValues(VersionFieldMapper.NAME);
Bits liveDocs = reader.getLiveDocs();
+ StoredFields storedFields = reader.storedFields();
for (int i = 0; i < reader.maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
if (primaryTermDocValues.advanceExact(i) == false) {
@@ -1309,7 +1311,7 @@ public static List getDocIds(Engine engine, boolean refresh
continue;
}
final long primaryTerm = primaryTermDocValues.longValue();
- Document doc = reader.document(i, Sets.newHashSet(IdFieldMapper.NAME, SourceFieldMapper.NAME));
+ Document doc = storedFields.document(i, Sets.newHashSet(IdFieldMapper.NAME, SourceFieldMapper.NAME));
BytesRef binaryID = doc.getBinaryValue(IdFieldMapper.NAME);
String id = Uid.decodeId(Arrays.copyOfRange(binaryID.bytes, binaryID.offset, binaryID.offset + binaryID.length));
final BytesRef source = doc.getBinaryValue(SourceFieldMapper.NAME);
@@ -1463,6 +1465,7 @@ public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings
for (LeafReaderContext leaf : wrappedReader.leaves()) {
NumericDocValues primaryTermDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME);
NumericDocValues seqNoDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
+ final StoredFields storedFields = leaf.reader().storedFields();
int docId;
while ((docId = seqNoDocValues.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
assertTrue(seqNoDocValues.advanceExact(docId));
@@ -1471,7 +1474,7 @@ public static void assertAtMostOneLuceneDocumentPerSequenceNumber(IndexSettings
if (primaryTermDocValues.advanceExact(docId)) {
if (seqNos.add(seqNo) == false) {
final IdOnlyFieldVisitor idFieldVisitor = new IdOnlyFieldVisitor();
- leaf.reader().document(docId, idFieldVisitor);
+ storedFields.document(docId, idFieldVisitor);
throw new AssertionError("found multiple documents for seq=" + seqNo + " id=" + idFieldVisitor.getId());
}
}
From 4319f2b571c82d7ec5608ffdc31ca4105961d064 Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Wed, 26 Jul 2023 10:23:15 -0700
Subject: [PATCH 18/71] Remove unnecessary refresh listeners from
NRTReplicationReaderManager. (#8859)
* Remove unnecessary refresh listeners from NRTReplicationReaderManager.
This change removes RefreshListeners used by InternalEngine to provide waitFor functionality.
These listeners were previously registered onto NRT replicas only to be force released on the next refresh cycle without actually refreshing the reader.
This change also removes the unnecessary blocking refresh from NRTReaderManager because we no longer have conflicting refresh invocations from scheduledRefresh.
Signed-off-by: Marc Handalian
* Reduce the amount of docs ingested with testPrimaryRelocation and testPrimaryRelocationWithSegRepFailure.
These tests were ingesting 100-1k docs and randomly selecting a refresh policy. With the IMMEDIATE refresh policy a blocking refresh is performed that increases the time required for the primary to block operations for relocation. On my machine this change reduces the test time with max docs from 1m to 5-6s.
Signed-off-by: Marc Handalian
---------
Signed-off-by: Marc Handalian
---
.../SegmentReplicationRelocationIT.java | 4 ++--
.../index/engine/NRTReplicationEngine.java | 21 ++++++-------------
.../engine/NRTReplicationReaderManager.java | 2 +-
.../opensearch/index/shard/IndexShard.java | 3 ++-
.../SegmentReplicationIndexShardTests.java | 6 ++++++
5 files changed, 17 insertions(+), 19 deletions(-)
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
index 7cf7e5148dd4a..3024eeb798b48 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationRelocationIT.java
@@ -60,7 +60,7 @@ public void testPrimaryRelocation() throws Exception {
createIndex(1);
final String replica = internalCluster().startNode();
ensureGreen(INDEX_NAME);
- final int initialDocCount = scaledRandomIntBetween(100, 1000);
+ final int initialDocCount = scaledRandomIntBetween(10, 100);
final WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
final List> pendingIndexResponses = new ArrayList<>();
for (int i = 0; i < initialDocCount; i++) {
@@ -137,7 +137,7 @@ public void testPrimaryRelocationWithSegRepFailure() throws Exception {
createIndex(1);
final String replica = internalCluster().startNode();
ensureGreen(INDEX_NAME);
- final int initialDocCount = scaledRandomIntBetween(100, 1000);
+ final int initialDocCount = scaledRandomIntBetween(10, 100);
final WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
final List> pendingIndexResponses = new ArrayList<>();
for (int i = 0; i < initialDocCount; i++) {
diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
index a9f7a2e70884c..b55508b7facd3 100644
--- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
+++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java
@@ -77,9 +77,10 @@ public NRTReplicationEngine(EngineConfig engineConfig) {
this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats"));
this.readerManager = readerManager;
this.readerManager.addListener(completionStatsCache);
- for (ReferenceManager.RefreshListener listener : engineConfig.getExternalRefreshListener()) {
- this.readerManager.addListener(listener);
- }
+ // NRT Replicas do not have a concept of Internal vs External reader managers.
+ // We also do not want to wire up refresh listeners for waitFor & pending refresh location.
+ // which are the current external listeners set from IndexShard.
+ // Only wire up the internal listeners.
for (ReferenceManager.RefreshListener listener : engineConfig.getInternalRefreshListener()) {
this.readerManager.addListener(listener);
}
@@ -322,22 +323,12 @@ public List segments(boolean verbose) {
@Override
public void refresh(String source) throws EngineException {
- maybeRefresh(source);
+ // Refresh on this engine should only ever happen in the reader after new segments arrive.
}
@Override
public boolean maybeRefresh(String source) throws EngineException {
- ensureOpen();
- try {
- return readerManager.maybeRefresh();
- } catch (IOException e) {
- try {
- failEngine("refresh failed source[" + source + "]", e);
- } catch (Exception inner) {
- e.addSuppressed(inner);
- }
- throw new RefreshFailedEngineException(shardId, e);
- }
+ return false;
}
@Override
diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java
index 35409437f605a..7b4c93c7235fe 100644
--- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java
+++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java
@@ -103,7 +103,7 @@ public void updateSegments(SegmentInfos infos) throws IOException {
// is always increased.
infos.updateGeneration(currentInfos);
currentInfos = infos;
- maybeRefreshBlocking();
+ maybeRefresh();
}
public SegmentInfos getSegmentInfos() {
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
index 8b6d083379fe1..e43b9773cc1e0 100644
--- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java
@@ -4423,7 +4423,8 @@ public void addRefreshListener(Translog.Location location, Consumer lis
readAllowed = isReadAllowed();
}
}
- if (readAllowed) {
+ // NRT Replicas will not accept refresh listeners.
+ if (readAllowed && isSegmentReplicationAllowed() == false) {
refreshListeners.addOrNotify(location, listener);
} else {
// we're not yet ready fo ready for reads, just ignore refresh cycles
diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
index d988e34ef18dc..0c68512f93ea6 100644
--- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
@@ -120,6 +120,12 @@ public void testReplicationCheckpointNotNullForSegRep() throws IOException {
closeShards(indexShard);
}
+ public void testNRTReplicasDoNotAcceptRefreshListeners() throws IOException {
+ final IndexShard indexShard = newStartedShard(false, settings, new NRTReplicationEngineFactory());
+ indexShard.addRefreshListener(mock(Translog.Location.class), Assert::assertFalse);
+ closeShards(indexShard);
+ }
+
public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception {
try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) {
shards.startAll();
From 99f28cb6f6bd73a6d832c6bbb88e5b9c2ccdb139 Mon Sep 17 00:00:00 2001
From: Varun Jain
Date: Wed, 26 Jul 2023 11:17:22 -0700
Subject: [PATCH 19/71] Updated Version.java path from server to libs in
version.yml (#8883)
* Updating Version.java from server/ to buildSrc/
Signed-off-by: Varun Jain
* Adding Changelog
Signed-off-by: Varun Jain
* Path update
Signed-off-by: Varun Jain
* Changelog Update
Signed-off-by: Varun Jain
* Removing Changelog from commit
Signed-off-by: Varun Jain
---------
Signed-off-by: Varun Jain
---
.github/workflows/version.yml | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index fdf42a9a2731e..764a365e7411c 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -54,8 +54,8 @@ jobs:
echo " - \"$CURRENT_VERSION\"" >> .ci/bwcVersions
sed -i "s/opensearch = $CURRENT_VERSION/opensearch = $NEXT_VERSION/g" buildSrc/version.properties
echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
- sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" server/src/main/java/org/opensearch/Version.java
- sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" server/src/main/java/org/opensearch/Version.java
+ sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
+ sed -i "s/CURRENT = $CURRENT_VERSION_UNDERSCORE;/CURRENT = $NEXT_VERSION_UNDERSCORE;/g" libs/core/src/main/java/org/opensearch/Version.java
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
@@ -82,7 +82,7 @@ jobs:
echo Adding bwc version $NEXT_VERSION after $CURRENT_VERSION
sed -i "s/- \"$CURRENT_VERSION\"/\0\n - \"$NEXT_VERSION\"/g" .ci/bwcVersions
echo Adding $NEXT_VERSION_UNDERSCORE after $CURRENT_VERSION_UNDERSCORE
- sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" server/src/main/java/org/opensearch/Version.java
+ sed -i "s/public static final Version $CURRENT_VERSION_UNDERSCORE = new Version(\([[:digit:]]\+\)\(.*\));/\0\n public static final Version $NEXT_VERSION_UNDERSCORE = new Version($NEXT_VERSION_ID\2);/g" libs/core/src/main/java/org/opensearch/Version.java
- name: Create Pull Request
uses: peter-evans/create-pull-request@v3
From c25c175c94efa4d90a1d98e3c7eac3e4aad6e41f Mon Sep 17 00:00:00 2001
From: Sachin Kale
Date: Thu, 27 Jul 2023 08:34:04 +0530
Subject: [PATCH 20/71] [Remote Store] Add support to restore only unassigned
shards of an index (#8792)
* Add support to restore only unassigned shards of an index
---------
Signed-off-by: Sachin Kale
Signed-off-by: Sachin Kale
Co-authored-by: Sachin Kale
---
CHANGELOG.md | 1 +
.../RemoteStoreBaseIntegTestCase.java | 10 +-
.../remotestore/RemoteStoreForceMergeIT.java | 12 +-
.../opensearch/remotestore/RemoteStoreIT.java | 140 ++++++++++++------
.../snapshots/RestoreSnapshotIT.java | 14 +-
.../restore/RestoreRemoteStoreRequest.java | 33 ++++-
.../cluster/routing/IndexRoutingTable.java | 12 +-
.../cluster/routing/RoutingTable.java | 8 +-
.../cluster/RestRestoreRemoteStoreAction.java | 1 +
.../opensearch/snapshots/RestoreService.java | 44 ++++--
.../RestoreRemoteStoreRequestTests.java | 2 +
.../cluster/routing/RoutingTableTests.java | 34 ++++-
.../SegmentReplicationIndexShardTests.java | 3 -
13 files changed, 236 insertions(+), 78 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a84f5138bd112..12dae4fca545e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -46,6 +46,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Change http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773))
- Improve summary error message for invalid setting updates ([#4792](https://github.com/opensearch-project/OpenSearch/pull/4792))
- Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719))
+- [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792))
- Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792))
### Deprecated
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
index c5d023bdd7a64..2887fbc56106c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java
@@ -26,6 +26,7 @@
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
+import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
@@ -37,6 +38,13 @@ public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase {
protected static final int REPLICA_COUNT = 1;
protected Path absolutePath;
protected Path absolutePath2;
+ private final List documentKeys = List.of(
+ randomAlphaOfLength(5),
+ randomAlphaOfLength(5),
+ randomAlphaOfLength(5),
+ randomAlphaOfLength(5),
+ randomAlphaOfLength(5)
+ );
@Override
protected boolean addMockInternalEngine() {
@@ -59,7 +67,7 @@ public Settings indexSettings() {
IndexResponse indexSingleDoc(String indexName) {
return client().prepareIndex(indexName)
.setId(UUIDs.randomBase64UUID())
- .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5))
+ .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5))
.get();
}
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java
index b4456f887cbaa..4d5648c74ba5c 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreForceMergeIT.java
@@ -104,9 +104,17 @@ private void testRestoreWithMergeFlow(int numberOfIterations, boolean invokeFlus
Map indexStats = indexData(numberOfIterations, invokeFlush, flushAfterMerge, deletedDocs);
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME)));
- assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
- client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture());
+ boolean restoreAllShards = randomBoolean();
+ if (restoreAllShards) {
+ assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
+ }
+ client().admin()
+ .cluster()
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(restoreAllShards),
+ PlainActionFuture.newFuture()
+ );
ensureGreen(INDEX_NAME);
if (deletedDocs == -1) {
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
index 42bd4b5173fa3..693c4113f8f3b 100644
--- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java
@@ -11,6 +11,7 @@
import org.hamcrest.MatcherAssert;
import org.junit.Before;
import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest;
+import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreResponse;
import org.opensearch.action.admin.indices.delete.DeleteIndexRequest;
import org.opensearch.action.admin.indices.recovery.RecoveryResponse;
import org.opensearch.action.index.IndexResponse;
@@ -18,7 +19,6 @@
import org.opensearch.cluster.health.ClusterHealthStatus;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.cluster.routing.RecoverySource;
-import org.opensearch.common.UUIDs;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.shard.RemoteStoreRefreshListener;
import org.opensearch.indices.recovery.RecoveryState;
@@ -34,15 +34,16 @@
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import static org.hamcrest.Matchers.comparesEqualTo;
-import static org.hamcrest.Matchers.oneOf;
import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.oneOf;
+import static org.hamcrest.Matchers.comparesEqualTo;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount;
-@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0)
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 0)
public class RemoteStoreIT extends RemoteStoreBaseIntegTestCase {
private static final String INDEX_NAME = "remote-store-test-idx-1";
@@ -68,13 +69,6 @@ public Settings indexSettings() {
return remoteStoreIndexSettings(0);
}
- private IndexResponse indexSingleDoc() {
- return client().prepareIndex(INDEX_NAME)
- .setId(UUIDs.randomBase64UUID())
- .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5))
- .get();
- }
-
private Map indexData(int numberOfIterations, boolean invokeFlush, String index) {
long totalOperations = 0;
long refreshedOrFlushedOperations = 0;
@@ -93,7 +87,7 @@ private Map indexData(int numberOfIterations, boolean invokeFlush,
refreshedOrFlushedOperations = totalOperations;
int numberOfOperations = randomIntBetween(20, 50);
for (int j = 0; j < numberOfOperations; j++) {
- IndexResponse response = INDEX_NAME.equals(index) ? indexSingleDoc() : indexSingleDoc(index);
+ IndexResponse response = indexSingleDoc(index);
maxSeqNo = response.getSeqNo();
shardId = response.getShardId().id();
indexingStats.put(MAX_SEQ_NO_TOTAL + "-shard-" + shardId, maxSeqNo);
@@ -109,12 +103,14 @@ private Map indexData(int numberOfIterations, boolean invokeFlush,
}
private void verifyRestoredData(Map indexStats, boolean checkTotal, String indexName) {
+ // This is required to get updated number from already active shards which were not restored
+ refresh(indexName);
String statsGranularity = checkTotal ? TOTAL_OPERATIONS : REFRESHED_OR_FLUSHED_OPERATIONS;
String maxSeqNoGranularity = checkTotal ? MAX_SEQ_NO_TOTAL : MAX_SEQ_NO_REFRESHED_OR_FLUSHED;
ensureYellowAndNoInitializingShards(indexName);
ensureGreen(indexName);
assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(statsGranularity));
- IndexResponse response = INDEX_NAME.equals(indexName) ? indexSingleDoc() : indexSingleDoc(indexName);
+ IndexResponse response = indexSingleDoc(indexName);
assertEquals(indexStats.get(maxSeqNoGranularity + "-shard-" + response.getShardId().id()) + 1, response.getSeqNo());
refresh(indexName);
assertHitCount(client().prepareSearch(indexName).setSize(0).get(), indexStats.get(statsGranularity) + 1);
@@ -130,6 +126,28 @@ private void prepareCluster(int numClusterManagerNodes, int numDataOnlyNodes, St
}
}
+ private void restore(String... indices) {
+ boolean restoreAllShards = randomBoolean();
+ if (restoreAllShards) {
+ assertAcked(client().admin().indices().prepareClose(indices));
+ }
+ client().admin()
+ .cluster()
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices(indices).restoreAllShards(restoreAllShards),
+ PlainActionFuture.newFuture()
+ );
+ }
+
+ private void restoreAndVerify(int shardCount, int replicaCount, Map indexStats) {
+ restore(INDEX_NAME);
+ ensureGreen(INDEX_NAME);
+ // This is required to get updated number from already active shards which were not restored
+ assertEquals(shardCount * (1 + replicaCount), getNumShards(INDEX_NAME).totalNumShards);
+ assertEquals(replicaCount, getNumShards(INDEX_NAME).numReplicas);
+ verifyRestoredData(indexStats, true, INDEX_NAME);
+ }
+
/**
* Helper function to test restoring an index with no replication from remote store. Only primary node is dropped.
* @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data.
@@ -144,23 +162,16 @@ private void testRestoreFlow(int numberOfIterations, boolean invokeFlush, int sh
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName(INDEX_NAME)));
ensureRed(INDEX_NAME);
- assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
- client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture());
-
- ensureGreen(INDEX_NAME);
- assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
- verifyRestoredData(indexStats, true, INDEX_NAME);
+ restoreAndVerify(shardCount, 0, indexStats);
}
/**
* Helper function to test restoring an index having replicas from remote store when all the nodes housing the primary/replica drop.
- * @param remoteTranslog If true, Remote Translog Store is also enabled in addition to Remote Segment Store.
* @param numberOfIterations Number of times a refresh/flush should be invoked, followed by indexing some data.
* @param invokeFlush If true, a flush is invoked. Otherwise, a refresh is invoked.
* @throws IOException IO Exception.
*/
- private void testRestoreFlowBothPrimaryReplicasDown(boolean remoteTranslog, int numberOfIterations, boolean invokeFlush, int shardCount)
- throws IOException {
+ private void testRestoreFlowBothPrimaryReplicasDown(int numberOfIterations, boolean invokeFlush, int shardCount) throws IOException {
prepareCluster(1, 2, INDEX_NAME, 1, shardCount);
Map indexStats = indexData(numberOfIterations, invokeFlush, INDEX_NAME);
assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
@@ -170,14 +181,7 @@ private void testRestoreFlowBothPrimaryReplicasDown(boolean remoteTranslog, int
ensureRed(INDEX_NAME);
internalCluster().startDataOnlyNodes(2);
- assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
- client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME), PlainActionFuture.newFuture());
-
- ensureGreen(INDEX_NAME);
-
- assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
- assertEquals(0, getNumShards(INDEX_NAME).numReplicas);
- verifyRestoredData(indexStats, true, INDEX_NAME);
+ restoreAndVerify(shardCount, 1, indexStats);
}
/**
@@ -212,10 +216,16 @@ private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invo
ensureRed(indices);
internalCluster().startDataOnlyNodes(3);
- assertAcked(client().admin().indices().prepareClose(indices));
+ boolean restoreAllShards = randomBoolean();
+ if (restoreAllShards) {
+ assertAcked(client().admin().indices().prepareClose(indices));
+ }
client().admin()
.cluster()
- .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAMES_WILDCARD.split(",")), PlainActionFuture.newFuture());
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices(INDEX_NAMES_WILDCARD.split(",")).restoreAllShards(restoreAllShards),
+ PlainActionFuture.newFuture()
+ );
ensureGreen(indices);
for (String index : indices) {
assertEquals(shardCount, getNumShards(index).totalNumShards);
@@ -223,6 +233,37 @@ private void testRestoreFlowMultipleIndices(int numberOfIterations, boolean invo
}
}
+ public void testRestoreFlowAllShardsNoRedIndex() throws InterruptedException {
+ int shardCount = randomIntBetween(1, 5);
+ prepareCluster(0, 3, INDEX_NAME, 0, shardCount);
+ indexData(randomIntBetween(2, 5), true, INDEX_NAME);
+ assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
+
+ PlainActionFuture future = PlainActionFuture.newFuture();
+ client().admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(true), future);
+ try {
+ future.get();
+ } catch (ExecutionException e) {
+ // If the request goes to co-ordinator, e.getCause() can be RemoteTransportException
+ assertTrue(e.getCause() instanceof IllegalStateException || e.getCause().getCause() instanceof IllegalStateException);
+ }
+ }
+
+ public void testRestoreFlowNoRedIndex() {
+ int shardCount = randomIntBetween(1, 5);
+ prepareCluster(0, 3, INDEX_NAME, 0, shardCount);
+ Map indexStats = indexData(randomIntBetween(2, 5), true, INDEX_NAME);
+ assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
+
+ client().admin()
+ .cluster()
+ .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(false), PlainActionFuture.newFuture());
+
+ ensureGreen(INDEX_NAME);
+ assertEquals(shardCount, getNumShards(INDEX_NAME).totalNumShards);
+ verifyRestoredData(indexStats, true, INDEX_NAME);
+ }
+
/**
* Simulates all data restored using Remote Translog Store.
* @throws IOException IO Exception.
@@ -265,7 +306,7 @@ public void testRemoteTranslogRestoreWithCommittedData() throws IOException {
// @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/6188")
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479")
public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws IOException {
- testRestoreFlowBothPrimaryReplicasDown(true, 1, true, randomIntBetween(1, 5));
+ testRestoreFlowBothPrimaryReplicasDown(1, true, randomIntBetween(1, 5));
}
/**
@@ -274,7 +315,7 @@ public void testRTSRestoreWithNoDataPostCommitPrimaryReplicaDown() throws IOExce
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479")
public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws IOException {
- testRestoreFlowBothPrimaryReplicasDown(true, 1, false, randomIntBetween(1, 5));
+ testRestoreFlowBothPrimaryReplicasDown(1, false, randomIntBetween(1, 5));
}
/**
@@ -284,7 +325,7 @@ public void testRTSRestoreWithNoDataPostRefreshPrimaryReplicaDown() throws IOExc
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479")
public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws IOException {
- testRestoreFlowBothPrimaryReplicasDown(true, randomIntBetween(2, 5), false, randomIntBetween(1, 5));
+ testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), false, randomIntBetween(1, 5));
}
/**
@@ -294,7 +335,7 @@ public void testRTSRestoreWithRefreshedDataPrimaryReplicaDown() throws IOExcepti
*/
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8479")
public void testRTSRestoreWithCommittedDataPrimaryReplicaDown() throws IOException {
- testRestoreFlowBothPrimaryReplicasDown(true, randomIntBetween(2, 5), true, randomIntBetween(1, 5));
+ testRestoreFlowBothPrimaryReplicasDown(randomIntBetween(2, 5), true, randomIntBetween(1, 5));
}
/**
@@ -341,10 +382,7 @@ public void testRTSRestoreWithCommittedDataDefaultAllIndices() throws IOExceptio
ensureRed(indices);
internalCluster().startDataOnlyNodes(3);
- assertAcked(client().admin().indices().prepareClose(indices));
- client().admin()
- .cluster()
- .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(new String[] {}), PlainActionFuture.newFuture());
+ restore(indices);
ensureGreen(indices);
for (String index : indices) {
@@ -381,10 +419,16 @@ public void testRTSRestoreWithCommittedDataNotAllRedRemoteIndices() throws IOExc
ensureRed(indices);
internalCluster().startDataOnlyNodes(3);
- assertAcked(client().admin().indices().prepareClose(indices[0], indices[1]));
+ boolean restoreAllShards = randomBoolean();
+ if (restoreAllShards) {
+ assertAcked(client().admin().indices().prepareClose(indices[0], indices[1]));
+ }
client().admin()
.cluster()
- .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indices[0], indices[1]), PlainActionFuture.newFuture());
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices(indices[0], indices[1]).restoreAllShards(restoreAllShards),
+ PlainActionFuture.newFuture()
+ );
ensureGreen(indices[0], indices[1]);
assertEquals(shardCount, getNumShards(indices[0]).totalNumShards);
verifyRestoredData(indicesStats.get(indices[0]), true, indices[0]);
@@ -427,10 +471,16 @@ public void testRTSRestoreWithCommittedDataExcludeIndicesPatterns() throws IOExc
ensureRed(indices);
internalCluster().startDataOnlyNodes(3);
- assertAcked(client().admin().indices().prepareClose(indices[0], indices[1]));
+ boolean restoreAllShards = randomBoolean();
+ if (restoreAllShards) {
+ assertAcked(client().admin().indices().prepareClose(indices[0], indices[1]));
+ }
client().admin()
.cluster()
- .restoreRemoteStore(new RestoreRemoteStoreRequest().indices("*", "-remote-store-test-index-*"), PlainActionFuture.newFuture());
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices("*", "-remote-store-test-index-*").restoreAllShards(restoreAllShards),
+ PlainActionFuture.newFuture()
+ );
ensureGreen(indices[0], indices[1]);
assertEquals(shardCount, getNumShards(indices[0]).totalNumShards);
verifyRestoredData(indicesStats.get(indices[0]), true, indices[0]);
@@ -490,7 +540,7 @@ private void testPeerRecovery(int numberOfIterations, boolean invokeFlush) throw
assertEquals(0, recoverySource.get().getIndex().recoveredFileCount());
}
- IndexResponse response = indexSingleDoc();
+ IndexResponse response = indexSingleDoc(INDEX_NAME);
assertEquals(indexStats.get(MAX_SEQ_NO_TOTAL) + 1, response.getSeqNo());
refresh(INDEX_NAME);
assertBusy(
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java
index dbd96a7fd109f..30a836b41e29e 100644
--- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java
@@ -276,7 +276,10 @@ public void testRestoreOperationsShallowCopyEnabled() throws IOException, Execut
assertAcked(client.admin().indices().prepareClose(restoredIndexName1));
client.admin()
.cluster()
- .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture());
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true),
+ PlainActionFuture.newFuture()
+ );
ensureYellowAndNoInitializingShards(restoredIndexName1);
ensureGreen(restoredIndexName1);
assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1);
@@ -434,7 +437,9 @@ public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException {
// Re-initialize client to make sure we are not using client from stopped node.
client = client(clusterManagerNode);
assertAcked(client.admin().indices().prepareClose(indexName1));
- client.admin().cluster().restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1), PlainActionFuture.newFuture());
+ client.admin()
+ .cluster()
+ .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(true), PlainActionFuture.newFuture());
ensureYellowAndNoInitializingShards(indexName1);
ensureGreen(indexName1);
assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1);
@@ -515,7 +520,10 @@ public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException
assertAcked(client.admin().indices().prepareClose(restoredIndexName1));
client.admin()
.cluster()
- .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(restoredIndexName1), PlainActionFuture.newFuture());
+ .restoreRemoteStore(
+ new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true),
+ PlainActionFuture.newFuture()
+ );
ensureYellowAndNoInitializingShards(restoredIndexName1);
ensureGreen(restoredIndexName1);
// indexing some new docs and validating
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java
index 703b9575a88ad..eb1935158c231 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequest.java
@@ -35,7 +35,8 @@
public class RestoreRemoteStoreRequest extends ClusterManagerNodeRequest implements ToXContentObject {
private String[] indices = Strings.EMPTY_ARRAY;
- private Boolean waitForCompletion;
+ private Boolean waitForCompletion = false;
+ private Boolean restoreAllShards = false;
public RestoreRemoteStoreRequest() {}
@@ -43,6 +44,7 @@ public RestoreRemoteStoreRequest(StreamInput in) throws IOException {
super(in);
indices = in.readStringArray();
waitForCompletion = in.readOptionalBoolean();
+ restoreAllShards = in.readOptionalBoolean();
}
@Override
@@ -50,6 +52,7 @@ public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeStringArray(indices);
out.writeOptionalBoolean(waitForCompletion);
+ out.writeOptionalBoolean(restoreAllShards);
}
@Override
@@ -118,6 +121,27 @@ public boolean waitForCompletion() {
return waitForCompletion;
}
+ /**
+ * Set the value for restoreAllShards, denoting whether to restore all shards or only unassigned shards
+ *
+ * @param restoreAllShards If true, the operation will restore all the shards of the given indices.
+ * If false, the operation will restore only the unassigned shards of the given indices.
+ * @return this request
+ */
+ public RestoreRemoteStoreRequest restoreAllShards(boolean restoreAllShards) {
+ this.restoreAllShards = restoreAllShards;
+ return this;
+ }
+
+ /**
+ * Returns restoreAllShards setting
+ *
+ * @return true if the operation will restore all the shards of the given indices
+ */
+ public boolean restoreAllShards() {
+ return restoreAllShards;
+ }
+
/**
* Parses restore definition
*
@@ -167,12 +191,14 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RestoreRemoteStoreRequest that = (RestoreRemoteStoreRequest) o;
- return waitForCompletion == that.waitForCompletion && Arrays.equals(indices, that.indices);
+ return waitForCompletion == that.waitForCompletion
+ && restoreAllShards == that.restoreAllShards
+ && Arrays.equals(indices, that.indices);
}
@Override
public int hashCode() {
- int result = Objects.hash(waitForCompletion);
+ int result = Objects.hash(waitForCompletion, restoreAllShards);
result = 31 * result + Arrays.hashCode(indices);
return result;
}
@@ -181,4 +207,5 @@ public int hashCode() {
public String toString() {
return org.opensearch.common.Strings.toString(XContentType.JSON, this);
}
+
}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java
index af348c1c98f2d..781ca5bb2255a 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/IndexRoutingTable.java
@@ -450,7 +450,11 @@ public Builder initializeAsRestore(IndexMetadata indexMetadata, SnapshotRecovery
/**
* Initializes an existing index, to be restored from remote store
*/
- public Builder initializeAsRemoteStoreRestore(IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource) {
+ public Builder initializeAsRemoteStoreRestore(
+ IndexMetadata indexMetadata,
+ RemoteStoreRecoverySource recoverySource,
+ Map activeInitializingShards
+ ) {
final UnassignedInfo unassignedInfo = new UnassignedInfo(
UnassignedInfo.Reason.EXISTING_INDEX_RESTORED,
"restore_source[remote_store]"
@@ -462,7 +466,11 @@ public Builder initializeAsRemoteStoreRestore(IndexMetadata indexMetadata, Remot
for (int shardNumber = 0; shardNumber < indexMetadata.getNumberOfShards(); shardNumber++) {
ShardId shardId = new ShardId(index, shardNumber);
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
- indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo));
+ if (activeInitializingShards.containsKey(shardId)) {
+ indexShardRoutingBuilder.addShard(activeInitializingShards.get(shardId));
+ } else {
+ indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(shardId, true, recoverySource, unassignedInfo));
+ }
shards.put(shardNumber, indexShardRoutingBuilder.build());
}
return this;
diff --git a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java
index 1bee5d8176a0f..7934649a6d3eb 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/RoutingTable.java
@@ -562,9 +562,13 @@ public Builder addAsFromOpenToClose(IndexMetadata indexMetadata) {
return add(indexRoutingBuilder);
}
- public Builder addAsRemoteStoreRestore(IndexMetadata indexMetadata, RemoteStoreRecoverySource recoverySource) {
+ public Builder addAsRemoteStoreRestore(
+ IndexMetadata indexMetadata,
+ RemoteStoreRecoverySource recoverySource,
+ Map activeInitializingShards
+ ) {
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetadata.getIndex())
- .initializeAsRemoteStoreRestore(indexMetadata, recoverySource);
+ .initializeAsRemoteStoreRestore(indexMetadata, recoverySource, activeInitializingShards);
add(indexRoutingBuilder);
return this;
}
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java
index fca6745167bb4..414c82b4a470f 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestRestoreRemoteStoreAction.java
@@ -44,6 +44,7 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
request.paramAsTime("cluster_manager_timeout", restoreRemoteStoreRequest.masterNodeTimeout())
);
restoreRemoteStoreRequest.waitForCompletion(request.paramAsBoolean("wait_for_completion", false));
+ restoreRemoteStoreRequest.restoreAllShards(request.paramAsBoolean("restore_all_shards", false));
request.applyContentParser(p -> restoreRemoteStoreRequest.source(p.mapOrdered()));
return channel -> client.admin().cluster().restoreRemoteStore(restoreRemoteStoreRequest, new RestToXContentListener<>(channel));
}
diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
index 54d55f67ccdcd..d7e89172c5837 100644
--- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java
@@ -62,6 +62,7 @@
import org.opensearch.cluster.metadata.MetadataIndexUpgradeService;
import org.opensearch.cluster.metadata.RepositoriesMetadata;
import org.opensearch.cluster.node.DiscoveryNode;
+import org.opensearch.cluster.routing.IndexShardRoutingTable;
import org.opensearch.cluster.routing.RecoverySource;
import org.opensearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.opensearch.cluster.routing.RecoverySource.RemoteStoreRecoverySource;
@@ -234,21 +235,34 @@ public ClusterState execute(ClusterState currentState) {
continue;
}
if (currentIndexMetadata.getSettings().getAsBoolean(SETTING_REMOTE_STORE_ENABLED, false)) {
- if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) {
- throw new IllegalStateException(
- "cannot restore index ["
- + index
- + "] because an open index "
- + "with same name already exists in the cluster. Close the existing index"
- );
+ IndexMetadata updatedIndexMetadata = currentIndexMetadata;
+ Map activeInitializingShards = new HashMap<>();
+ if (request.restoreAllShards()) {
+ if (currentIndexMetadata.getState() != IndexMetadata.State.CLOSE) {
+ throw new IllegalStateException(
+ "cannot restore index ["
+ + index
+ + "] because an open index "
+ + "with same name already exists in the cluster. Close the existing index"
+ );
+ }
+ updatedIndexMetadata = IndexMetadata.builder(currentIndexMetadata)
+ .state(IndexMetadata.State.OPEN)
+ .version(1 + currentIndexMetadata.getVersion())
+ .mappingVersion(1 + currentIndexMetadata.getMappingVersion())
+ .settingsVersion(1 + currentIndexMetadata.getSettingsVersion())
+ .aliasesVersion(1 + currentIndexMetadata.getAliasesVersion())
+ .build();
+ } else {
+ activeInitializingShards = currentState.routingTable()
+ .index(index)
+ .shards()
+ .values()
+ .stream()
+ .map(IndexShardRoutingTable::primaryShard)
+ .filter(shardRouting -> shardRouting.unassigned() == false)
+ .collect(Collectors.toMap(ShardRouting::shardId, Function.identity()));
}
- IndexMetadata updatedIndexMetadata = IndexMetadata.builder(currentIndexMetadata)
- .state(IndexMetadata.State.OPEN)
- .version(1 + currentIndexMetadata.getVersion())
- .mappingVersion(1 + currentIndexMetadata.getMappingVersion())
- .settingsVersion(1 + currentIndexMetadata.getSettingsVersion())
- .aliasesVersion(1 + currentIndexMetadata.getAliasesVersion())
- .build();
IndexId indexId = new IndexId(index, updatedIndexMetadata.getIndexUUID());
@@ -257,7 +271,7 @@ public ClusterState execute(ClusterState currentState) {
updatedIndexMetadata.getCreationVersion(),
indexId
);
- rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource);
+ rtBuilder.addAsRemoteStoreRestore(updatedIndexMetadata, recoverySource, activeInitializingShards);
blocks.updateBlocks(updatedIndexMetadata);
mdBuilder.put(updatedIndexMetadata, true);
indicesToBeRestored.add(index);
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java
index 81d7074977253..2edfa23286658 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/restore/RestoreRemoteStoreRequestTests.java
@@ -38,6 +38,7 @@ private RestoreRemoteStoreRequest randomState(RestoreRemoteStoreRequest instance
}
instance.waitForCompletion(randomBoolean());
+ instance.restoreAllShards(randomBoolean());
if (randomBoolean()) {
instance.masterNodeTimeout(randomTimeValue());
@@ -76,6 +77,7 @@ public void testSource() throws IOException {
RestoreRemoteStoreRequest processed = new RestoreRemoteStoreRequest();
processed.masterNodeTimeout(original.masterNodeTimeout());
processed.waitForCompletion(original.waitForCompletion());
+ processed.restoreAllShards(original.restoreAllShards());
processed.source(map);
assertEquals(original, processed);
diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java
index 6b869ffed7d23..0ff9d6f07751a 100644
--- a/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java
+++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingTableTests.java
@@ -49,10 +49,13 @@
import org.junit.Before;
import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.function.Predicate;
+import static org.mockito.Mockito.mock;
import static org.opensearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
@@ -502,13 +505,40 @@ public void testAddAsRemoteStoreRestore() {
Version.CURRENT,
new IndexId(TEST_INDEX_1, "1")
);
- final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore(indexMetadata, remoteStoreRecoverySource)
- .build();
+ final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore(
+ indexMetadata,
+ remoteStoreRecoverySource,
+ new HashMap<>()
+ ).build();
assertTrue(routingTable.hasIndex(TEST_INDEX_1));
assertEquals(this.numberOfShards, routingTable.allShards(TEST_INDEX_1).size());
assertEquals(this.numberOfShards, routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size());
}
+ public void testAddAsRemoteStoreRestoreWithActiveShards() {
+ final IndexMetadata indexMetadata = createIndexMetadata(TEST_INDEX_1).state(IndexMetadata.State.OPEN).build();
+ final RemoteStoreRecoverySource remoteStoreRecoverySource = new RemoteStoreRecoverySource(
+ "restore_uuid",
+ Version.CURRENT,
+ new IndexId(TEST_INDEX_1, "1")
+ );
+ Map activeInitializingShards = new HashMap<>();
+ for (int i = 0; i < randomIntBetween(1, this.numberOfShards); i++) {
+ activeInitializingShards.put(new ShardId(indexMetadata.getIndex(), i), mock(ShardRouting.class));
+ }
+ final RoutingTable routingTable = new RoutingTable.Builder().addAsRemoteStoreRestore(
+ indexMetadata,
+ remoteStoreRecoverySource,
+ activeInitializingShards
+ ).build();
+ assertTrue(routingTable.hasIndex(TEST_INDEX_1));
+ assertEquals(this.numberOfShards, routingTable.allShards(TEST_INDEX_1).size());
+ assertEquals(
+ this.numberOfShards - activeInitializingShards.size(),
+ routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size()
+ );
+ }
+
/** reverse engineer the in sync aid based on the given indexRoutingTable **/
public static IndexMetadata updateActiveAllocations(IndexRoutingTable indexRoutingTable, IndexMetadata indexMetadata) {
IndexMetadata.Builder imdBuilder = IndexMetadata.builder(indexMetadata);
diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
index 0c68512f93ea6..cc4fa6f28bafc 100644
--- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java
@@ -190,7 +190,6 @@ public void testIsSegmentReplicationAllowed_WrongEngineType() throws IOException
* reader close operation on replica shard deletes the segment files copied in current round of segment replication.
* It does this by blocking the finalizeReplication on replica shard and performing close operation on acquired
* searcher that triggers the reader close operation.
- * @throws Exception
*/
public void testSegmentReplication_With_ReaderClosedConcurrently() throws Exception {
String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}";
@@ -240,7 +239,6 @@ public void testSegmentReplication_With_ReaderClosedConcurrently() throws Except
/**
* Similar to test above, this test shows the issue where an engine close operation during active segment replication
* can result in Lucene CorruptIndexException.
- * @throws Exception
*/
public void testSegmentReplication_With_EngineClosedConcurrently() throws Exception {
String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}";
@@ -289,7 +287,6 @@ public void testSegmentReplication_With_EngineClosedConcurrently() throws Except
/**
* Verifies that commits on replica engine resulting from engine or reader close does not cleanup the temporary
* replication files from ongoing round of segment replication
- * @throws Exception
*/
public void testTemporaryFilesNotCleanup() throws Exception {
String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}";
From 91bc891c1e771dda8742105051731247e4198c73 Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Thu, 27 Jul 2023 13:30:36 -0400
Subject: [PATCH 21/71] Add 2.9.1 to BWC and known versions (#8923) (#8929)
Signed-off-by: Andriy Redko
(cherry picked from commit 212dba457d6b2d540694e040a0485529d6e52e12)
---
.ci/bwcVersions | 1 +
libs/core/src/main/java/org/opensearch/Version.java | 1 +
2 files changed, 2 insertions(+)
diff --git a/.ci/bwcVersions b/.ci/bwcVersions
index 31b1cb5efe3a1..8cfd636b9fd2e 100644
--- a/.ci/bwcVersions
+++ b/.ci/bwcVersions
@@ -21,4 +21,5 @@ BWC_VERSION:
- "2.8.0"
- "2.8.1"
- "2.9.0"
+ - "2.9.1"
- "2.10.0"
diff --git a/libs/core/src/main/java/org/opensearch/Version.java b/libs/core/src/main/java/org/opensearch/Version.java
index 9329f221922ea..3f83282245fd8 100644
--- a/libs/core/src/main/java/org/opensearch/Version.java
+++ b/libs/core/src/main/java/org/opensearch/Version.java
@@ -90,6 +90,7 @@ public class Version implements Comparable, ToXContentFragment {
public static final Version V_2_8_0 = new Version(2080099, org.apache.lucene.util.Version.LUCENE_9_6_0);
public static final Version V_2_8_1 = new Version(2080199, org.apache.lucene.util.Version.LUCENE_9_6_0);
public static final Version V_2_9_0 = new Version(2090099, org.apache.lucene.util.Version.LUCENE_9_7_0);
+ public static final Version V_2_9_1 = new Version(2090199, org.apache.lucene.util.Version.LUCENE_9_7_0);
public static final Version V_2_10_0 = new Version(2100099, org.apache.lucene.util.Version.LUCENE_9_7_0);
public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_8_0);
public static final Version CURRENT = V_3_0_0;
From e2a664c49d5a182a2351293ec56bf9dc9b3311d9 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Thu, 27 Jul 2023 16:38:44 -0500
Subject: [PATCH 22/71] Fix flakiness in
MasterServiceTests.testThrottlingForMultipleTaskTypes (#8901)
* Fix flakiness in MasterServiceTests.testThrottlingForMultipleTaskTypes
The test configured a [timeout duration of zero][1] for certain tasks
and asserted that all tasks were throttled or timed out. This is not a
valid assertion because it is possible for a task to complete before the
[asynchronous timeout operation runs][2], which means the task would
complete successfully. The fix is to adjust the assertion to allow for
successful tasks in this case.
[1]: https://github.com/opensearch-project/OpenSearch/blob/60985bc300d9eafd36c1ab25d46235e1c925c565/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java#L941
[2]: https://github.com/opensearch-project/OpenSearch/blob/9fc3f4096958159ec9b53012fc7ced19fd793e1b/server/src/main/java/org/opensearch/common/util/concurrent/PrioritizedOpenSearchThreadPoolExecutor.java#L266
Signed-off-by: Andrew Ross
* Add a deterministic test case for timeout
Signed-off-by: Andrew Ross
---------
Signed-off-by: Andrew Ross
---
.../cluster/service/MasterServiceTests.java | 77 ++++++++++++++++++-
1 file changed, 76 insertions(+), 1 deletion(-)
diff --git a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java
index 3c27748daa87d..d4804b18bd160 100644
--- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java
+++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java
@@ -86,6 +86,8 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
+import com.carrotsearch.randomizedtesting.annotations.Timeout;
+
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.hamcrest.Matchers.anyOf;
@@ -863,6 +865,7 @@ public ClusterManagerTaskThrottler.ThrottlingKey getClusterManagerThrottlingKey(
AtomicInteger throttledTask3 = new AtomicInteger();
AtomicInteger succeededTask1 = new AtomicInteger();
AtomicInteger succeededTask2 = new AtomicInteger();
+ AtomicInteger succeededTask3 = new AtomicInteger();
AtomicInteger timedOutTask3 = new AtomicInteger();
final ClusterStateTaskListener listener = new ClusterStateTaskListener() {
@@ -880,6 +883,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS
succeededTask1.incrementAndGet();
} else if (source.equals(task2)) {
succeededTask2.incrementAndGet();
+ } else if (source.equals(task3)) {
+ succeededTask3.incrementAndGet();
}
latch.countDown();
}
@@ -955,7 +960,7 @@ public void run() {
assertEquals(numberOfTask1, throttledTask1.get() + succeededTask1.get());
assertEquals(numberOfTask2, succeededTask2.get());
assertEquals(0, throttledTask2.get());
- assertEquals(numberOfTask3, throttledTask3.get() + timedOutTask3.get());
+ assertEquals(numberOfTask3, throttledTask3.get() + timedOutTask3.get() + succeededTask3.get());
masterService.close();
}
@@ -1378,6 +1383,76 @@ public void testDeprecatedMasterServiceUpdateTaskThreadName() {
assertThrows(AssertionError.class, () -> MasterService.assertClusterManagerUpdateThread());
}
+ @Timeout(millis = 5_000)
+ public void testTaskTimeout() throws InterruptedException {
+ try (ClusterManagerService clusterManagerService = createClusterManagerService(true)) {
+ final AtomicInteger failureCount = new AtomicInteger();
+ final AtomicInteger successCount = new AtomicInteger();
+ final CountDownLatch taskStartLatch = new CountDownLatch(1);
+ final CountDownLatch blockingTaskLatch = new CountDownLatch(1);
+ final CountDownLatch timeoutLatch = new CountDownLatch(1);
+ final ClusterStateTaskListener blockingListener = new ClusterStateTaskListener() {
+ @Override
+ public void onFailure(String source, Exception e) {
+ fail("Unexpected failure");
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ successCount.incrementAndGet();
+ taskStartLatch.countDown();
+ try {
+ blockingTaskLatch.await();
+ } catch (InterruptedException e) {
+ fail("Interrupted");
+ }
+ }
+ };
+ final ClusterStateTaskListener timeoutListener = new ClusterStateTaskListener() {
+ @Override
+ public void onFailure(String source, Exception e) {
+ assertEquals("timeout", source);
+ failureCount.incrementAndGet();
+ timeoutLatch.countDown();
+ }
+
+ @Override
+ public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
+ fail("Unexpected success");
+ }
+ };
+
+ final ClusterStateTaskExecutor executor = (currentState, tasks) -> ClusterStateTaskExecutor.ClusterTasksResult.builder()
+ .successes(tasks)
+ .build(currentState);
+
+ // start a task and wait for it to start and block on the clusterStateProcessed callback
+ clusterManagerService.submitStateUpdateTask(
+ "success",
+ new Object(),
+ ClusterStateTaskConfig.build(randomFrom(Priority.values())),
+ executor,
+ blockingListener
+ );
+ taskStartLatch.await();
+
+ // start a second task that is guaranteed to timeout as the first task is still running
+ clusterManagerService.submitStateUpdateTask(
+ "timeout",
+ new Object(),
+ ClusterStateTaskConfig.build(randomFrom(Priority.values()), TimeValue.timeValueMillis(1L)),
+ executor,
+ timeoutListener
+ );
+
+ // wait for the timeout to happen, then unblock and assert one success and one failure
+ timeoutLatch.await();
+ blockingTaskLatch.countDown();
+ assertEquals(1, failureCount.get());
+ assertEquals(1, successCount.get());
+ }
+ }
+
/**
* Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer)
*/
From 4ad418210a51c518119a4c9c565fbf7e9bc4b5c1 Mon Sep 17 00:00:00 2001
From: Marc Handalian
Date: Thu, 27 Jul 2023 15:17:15 -0700
Subject: [PATCH 23/71] Fix
SegmentReplicationIT.testReplicaHasDiffFilesThanPrimary for node-node
replication (#8912)
* Fix SegmentReplicationIT.testReplicaHasDiffFilesThanPrimary
This test is now failing for node-node replication. On the primary shard the prepareSegmentReplication method should cancel any ongoing replication if it is running and start a new sync. This is incorrectly using Map.compute which will not replace the existing handler entry in the allocationIdToHandlers map. It will only cancel the existing source handler. As a result this can leave the copyState map with an entry and hold an index commit while the test is cleaning up. The copyState is only cleared when a handler is cancelled directly or from a cluster state update.
Signed-off-by: Marc Handalian
* PR feedback.
Signed-off-by: Marc Handalian
---------
Signed-off-by: Marc Handalian
---
.../OngoingSegmentReplications.java | 26 ++++++++++----
.../OngoingSegmentReplicationsTests.java | 34 +++++++++++++++++++
2 files changed, 53 insertions(+), 7 deletions(-)
diff --git a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
index e0e356f1531e1..4712ae6c18759 100644
--- a/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
+++ b/server/src/main/java/org/opensearch/indices/replication/OngoingSegmentReplications.java
@@ -139,13 +139,25 @@ void startSegmentCopy(GetSegmentFilesRequest request, ActionListener {
- if (segrepHandler != null) {
- logger.warn("Override handler for allocation id {}", request.getTargetAllocationId());
- cancelHandlers(handler -> handler.getAllocationId().equals(request.getTargetAllocationId()), "cancel due to retry");
- }
- return createTargetHandler(request.getTargetNode(), copyState, request.getTargetAllocationId(), fileChunkWriter);
- });
+ final SegmentReplicationSourceHandler newHandler = createTargetHandler(
+ request.getTargetNode(),
+ copyState,
+ request.getTargetAllocationId(),
+ fileChunkWriter
+ );
+ final SegmentReplicationSourceHandler existingHandler = allocationIdToHandlers.putIfAbsent(
+ request.getTargetAllocationId(),
+ newHandler
+ );
+ // If we are already replicating to this allocation Id, cancel the old and replace with a new execution.
+ // This will clear the old handler & referenced copy state holding an incref'd indexCommit.
+ if (existingHandler != null) {
+ logger.warn("Override handler for allocation id {}", request.getTargetAllocationId());
+ cancelHandlers(handler -> handler.getAllocationId().equals(request.getTargetAllocationId()), "cancel due to retry");
+ assert allocationIdToHandlers.containsKey(request.getTargetAllocationId()) == false;
+ allocationIdToHandlers.put(request.getTargetAllocationId(), newHandler);
+ }
+ assert allocationIdToHandlers.containsKey(request.getTargetAllocationId());
return copyState;
}
diff --git a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java
index 3b289114f5ca1..84a53ae22a6bc 100644
--- a/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/OngoingSegmentReplicationsTests.java
@@ -403,4 +403,38 @@ public void testCancelForMissingIds() throws IOException {
assertEquals(0, replications.cachedCopyStateSize());
closeShards(replica_2);
}
+
+ public void testPrepareForReplicationAlreadyReplicating() throws IOException {
+ OngoingSegmentReplications replications = new OngoingSegmentReplications(mockIndicesService, recoverySettings);
+ final String replicaAllocationId = replica.routingEntry().allocationId().getId();
+ final CheckpointInfoRequest request = new CheckpointInfoRequest(1L, replicaAllocationId, primaryDiscoveryNode, testCheckpoint);
+
+ final CopyState copyState = replications.prepareForReplication(request, mock(FileChunkWriter.class));
+
+ final SegmentReplicationSourceHandler handler = replications.getHandlers().get(replicaAllocationId);
+ assertEquals(handler.getCopyState(), copyState);
+ assertEquals(1, copyState.refCount());
+
+ ReplicationCheckpoint secondCheckpoint = new ReplicationCheckpoint(
+ testCheckpoint.getShardId(),
+ testCheckpoint.getPrimaryTerm(),
+ testCheckpoint.getSegmentsGen(),
+ testCheckpoint.getSegmentInfosVersion() + 1,
+ testCheckpoint.getCodec()
+ );
+
+ final CheckpointInfoRequest secondRequest = new CheckpointInfoRequest(
+ 1L,
+ replicaAllocationId,
+ primaryDiscoveryNode,
+ secondCheckpoint
+ );
+
+ final CopyState secondCopyState = replications.prepareForReplication(secondRequest, mock(FileChunkWriter.class));
+ final SegmentReplicationSourceHandler secondHandler = replications.getHandlers().get(replicaAllocationId);
+ assertEquals(secondHandler.getCopyState(), secondCopyState);
+ assertEquals("New copy state is incref'd", 1, secondCopyState.refCount());
+ assertEquals("Old copy state is cleaned up", 0, copyState.refCount());
+
+ }
}
From 5495c641f894bc79aec5bbe4767b3fc7710ecbe0 Mon Sep 17 00:00:00 2001
From: Andrew Ross
Date: Thu, 27 Jul 2023 17:56:38 -0500
Subject: [PATCH 24/71] Configure test-retry plugin filter properly (#8933)
The intent of #8825 was to retry only specified tests. The wrong
parameter was configured though: ['filter' should be set][1], not
'classRetry'.
[1]: https://github.com/gradle/test-retry-gradle-plugin/blob/main/README.adoc#filtering
Signed-off-by: Andrew Ross
---
build.gradle | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/build.gradle b/build.gradle
index c7b6987b1103f..12499bbf6a817 100644
--- a/build.gradle
+++ b/build.gradle
@@ -470,7 +470,7 @@ subprojects {
maxFailures = 10
}
failOnPassedAfterRetry = false
- classRetry {
+ filter {
includeClasses.add("org.opensearch.action.admin.cluster.node.tasks.ResourceAwareTasksTests")
includeClasses.add("org.opensearch.action.admin.cluster.tasks.PendingTasksBlocksIT")
includeClasses.add("org.opensearch.action.admin.indices.create.CreateIndexIT")
From 3c973bafe709565380eb4224242e860499fb4acc Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Thu, 27 Jul 2023 18:56:42 -0500
Subject: [PATCH 25/71] [Refactor] MediaTypeParserRegistry to MediaTypeRegistry
(#8940)
This commit rote refactors MediaTypeParserRegistry to MediaTypeRegistry
to make the class naming align with the intention of the logic. The
MediaTypeRegistry is a mechanism for downstream extensions to register
concrete MediaTypes thus having Parser in the name is unneeded.
Signed-off-by: Nicholas Walter Knize
---
.../client/indices/CreateIndexRequest.java | 4 +--
.../client/indices/PutMappingRequest.java | 4 +--
.../core/common/io/stream/StreamInput.java | 4 +--
.../opensearch/core/xcontent/MediaType.java | 4 +--
...erRegistry.java => MediaTypeRegistry.java} | 2 +-
.../common/xcontent/XContentType.java | 4 +--
.../common/xcontent/MediaTypeParserTests.java | 27 +++++++++----------
.../opensearch/rest/AbstractRestChannel.java | 4 +--
.../transport/TransportService.java | 4 +--
9 files changed, 27 insertions(+), 30 deletions(-)
rename libs/core/src/main/java/org/opensearch/core/xcontent/{MediaTypeParserRegistry.java => MediaTypeRegistry.java} (99%)
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java
index 3405e7e81e122..16915b32c16fe 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexRequest.java
@@ -47,7 +47,7 @@
import org.opensearch.core.ParseField;
import org.opensearch.core.xcontent.DeprecationHandler;
import org.opensearch.core.xcontent.MediaType;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.NamedXContentRegistry;
import org.opensearch.core.xcontent.ToXContentObject;
import org.opensearch.core.xcontent.XContentBuilder;
@@ -187,7 +187,7 @@ public CreateIndexRequest mapping(XContentBuilder source) {
*/
public CreateIndexRequest mapping(Map source) {
try {
- XContentBuilder builder = XContentFactory.contentBuilder(MediaTypeParserRegistry.getDefaultMediaType());
+ XContentBuilder builder = XContentFactory.contentBuilder(MediaTypeRegistry.getDefaultMediaType());
builder.map(source);
return mapping(BytesReference.bytes(builder), builder.contentType());
} catch (IOException e) {
diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java
index d17dc54713789..721d6094f7502 100644
--- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java
+++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutMappingRequest.java
@@ -40,7 +40,7 @@
import org.opensearch.core.common.bytes.BytesReference;
import org.opensearch.common.xcontent.XContentFactory;
import org.opensearch.core.xcontent.MediaType;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.ToXContent;
import org.opensearch.core.xcontent.ToXContentObject;
import org.opensearch.core.xcontent.XContentBuilder;
@@ -111,7 +111,7 @@ public MediaType mediaType() {
*/
public PutMappingRequest source(Map mappingSource) {
try {
- XContentBuilder builder = XContentFactory.contentBuilder(MediaTypeParserRegistry.getDefaultMediaType());
+ XContentBuilder builder = XContentFactory.contentBuilder(MediaTypeRegistry.getDefaultMediaType());
builder.map(mappingSource);
return source(builder);
} catch (IOException e) {
diff --git a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java
index 1d7321bf2c6de..d9040da569345 100644
--- a/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java
+++ b/libs/core/src/main/java/org/opensearch/core/common/io/stream/StreamInput.java
@@ -54,7 +54,7 @@
import org.opensearch.core.common.Strings;
import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
import org.opensearch.core.xcontent.MediaType;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import java.io.ByteArrayInputStream;
import java.io.EOFException;
@@ -347,7 +347,7 @@ public BigInteger readBigInteger() throws IOException {
}
public MediaType readMediaType() throws IOException {
- return MediaTypeParserRegistry.fromMediaType(readString());
+ return MediaTypeRegistry.fromMediaType(readString());
}
@Nullable
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java
index c1409e551e47d..7193cd3bd97bb 100644
--- a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java
+++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaType.java
@@ -82,7 +82,7 @@ default String mediaType() {
* This method will return {@code null} if no match is found
*/
static MediaType fromFormat(String mediaType) {
- return MediaTypeParserRegistry.fromFormat(mediaType);
+ return MediaTypeRegistry.fromFormat(mediaType);
}
/**
@@ -93,7 +93,7 @@ static MediaType fromFormat(String mediaType) {
*/
static MediaType fromMediaType(String mediaTypeHeaderValue) {
mediaTypeHeaderValue = removeVersionInMediaType(mediaTypeHeaderValue);
- return MediaTypeParserRegistry.fromMediaType(mediaTypeHeaderValue);
+ return MediaTypeRegistry.fromMediaType(mediaTypeHeaderValue);
}
/**
diff --git a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeParserRegistry.java b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeRegistry.java
similarity index 99%
rename from libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeParserRegistry.java
rename to libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeRegistry.java
index 62a26b4458b09..8ac92504a12d8 100644
--- a/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeParserRegistry.java
+++ b/libs/core/src/main/java/org/opensearch/core/xcontent/MediaTypeRegistry.java
@@ -41,7 +41,7 @@
*
* @opensearch.internal
*/
-public final class MediaTypeParserRegistry {
+public final class MediaTypeRegistry {
private static Map formatToMediaType = Map.of();
private static Map typeWithSubtypeToMediaType = Map.of();
diff --git a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java
index 023caa49e1f39..9291981f32113 100644
--- a/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java
+++ b/libs/x-content/src/main/java/org/opensearch/common/xcontent/XContentType.java
@@ -38,7 +38,7 @@
import org.opensearch.common.xcontent.yaml.YamlXContent;
import org.opensearch.core.common.io.stream.StreamOutput;
import org.opensearch.core.xcontent.MediaType;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.XContent;
import java.io.IOException;
@@ -133,7 +133,7 @@ public XContent xContent() {
static {
/** a parser of media types */
- MediaTypeParserRegistry.register(XContentType.values(), Map.of("application/*", JSON, "application/x-ndjson", JSON));
+ MediaTypeRegistry.register(XContentType.values(), Map.of("application/*", JSON, "application/x-ndjson", JSON));
}
private int index;
diff --git a/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java
index 15492b7351984..64d36f0a8b78f 100644
--- a/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java
+++ b/libs/x-content/src/test/java/org/opensearch/common/xcontent/MediaTypeParserTests.java
@@ -32,7 +32,7 @@
package org.opensearch.common.xcontent;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.test.OpenSearchTestCase;
import java.util.Collections;
@@ -46,40 +46,37 @@ public class MediaTypeParserTests extends OpenSearchTestCase {
public void testJsonWithParameters() throws Exception {
String mediaType = "application/json";
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType).getParameters(), equalTo(Collections.emptyMap()));
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + ";").getParameters(), equalTo(Collections.emptyMap()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType).getParameters(), equalTo(Collections.emptyMap()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType + ";").getParameters(), equalTo(Collections.emptyMap()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; charset=UTF-8").getParameters(), equalTo(Map.of("charset", "utf-8")));
assertThat(
- MediaTypeParserRegistry.parseMediaType(mediaType + "; charset=UTF-8").getParameters(),
- equalTo(Map.of("charset", "utf-8"))
- );
- assertThat(
- MediaTypeParserRegistry.parseMediaType(mediaType + "; custom=123;charset=UTF-8").getParameters(),
+ MediaTypeRegistry.parseMediaType(mediaType + "; custom=123;charset=UTF-8").getParameters(),
equalTo(Map.of("charset", "utf-8", "custom", "123"))
);
}
public void testWhiteSpaceInTypeSubtype() {
String mediaType = " application/json ";
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType).getMediaType(), equalTo(XContentType.JSON));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType).getMediaType(), equalTo(XContentType.JSON));
assertThat(
- MediaTypeParserRegistry.parseMediaType(mediaType + "; custom=123; charset=UTF-8").getParameters(),
+ MediaTypeRegistry.parseMediaType(mediaType + "; custom=123; charset=UTF-8").getParameters(),
equalTo(Map.of("charset", "utf-8", "custom", "123"))
);
assertThat(
- MediaTypeParserRegistry.parseMediaType(mediaType + "; custom=123;\n charset=UTF-8").getParameters(),
+ MediaTypeRegistry.parseMediaType(mediaType + "; custom=123;\n charset=UTF-8").getParameters(),
equalTo(Map.of("charset", "utf-8", "custom", "123"))
);
mediaType = " application / json ";
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType), is(nullValue()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType), is(nullValue()));
}
public void testInvalidParameters() {
String mediaType = "application/json";
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + "; keyvalueNoEqualsSign"), is(nullValue()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; keyvalueNoEqualsSign"), is(nullValue()));
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + "; key = value"), is(nullValue()));
- assertThat(MediaTypeParserRegistry.parseMediaType(mediaType + "; key="), is(nullValue()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; key = value"), is(nullValue()));
+ assertThat(MediaTypeRegistry.parseMediaType(mediaType + "; key="), is(nullValue()));
}
}
diff --git a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java
index dcee6500325b9..32499b1fc155b 100644
--- a/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java
+++ b/server/src/main/java/org/opensearch/rest/AbstractRestChannel.java
@@ -36,7 +36,7 @@
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.Strings;
import org.opensearch.core.xcontent.MediaType;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.core.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentFactory;
@@ -132,7 +132,7 @@ public XContentBuilder newBuilder(@Nullable MediaType requestContentType, @Nulla
responseContentType = requestContentType;
} else {
// default to JSON output when all else fails
- responseContentType = MediaTypeParserRegistry.getDefaultMediaType();
+ responseContentType = MediaTypeRegistry.getDefaultMediaType();
}
}
diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java
index b8d7d130e846b..c3e287b458fc5 100644
--- a/server/src/main/java/org/opensearch/transport/TransportService.java
+++ b/server/src/main/java/org/opensearch/transport/TransportService.java
@@ -62,7 +62,7 @@
import org.opensearch.common.xcontent.XContentType;
import org.opensearch.core.common.Strings;
import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
-import org.opensearch.core.xcontent.MediaTypeParserRegistry;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
import org.opensearch.node.NodeClosedException;
import org.opensearch.node.ReportingService;
import org.opensearch.tasks.Task;
@@ -174,7 +174,7 @@ public void close() {}
/** Registers OpenSearch server specific exceptions (exceptions outside of core library) */
OpenSearchServerException.registerExceptions();
// set the default media type to JSON (fallback if a media type is not specified)
- MediaTypeParserRegistry.setDefaultMediaType(XContentType.JSON);
+ MediaTypeRegistry.setDefaultMediaType(XContentType.JSON);
}
/** does nothing. easy way to ensure class is loaded so the above static block is called to register the streamables */
From 8edc1ddd030e6b867db6f3029cca32a684154632 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Thu, 27 Jul 2023 19:00:31 -0700
Subject: [PATCH 26/71] [Segment Replication] Use deterministic mechanism to
have concurrent invocation of segment replication (#8937)
* [Segment Replication] Use deterministic mechanism to have concurrent invocation of segment replication
Signed-off-by: Suraj Singh
* Clean up
Signed-off-by: Suraj Singh
---------
Signed-off-by: Suraj Singh
---
.../SegmentReplicationTargetServiceTests.java | 57 +++++++++++++++----
1 file changed, 46 insertions(+), 11 deletions(-)
diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
index 4643615d45d7e..94e57f4a0d3e4 100644
--- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
+++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java
@@ -32,6 +32,7 @@
import org.opensearch.indices.recovery.ForceSyncRequest;
import org.opensearch.indices.recovery.RecoverySettings;
import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint;
+import org.opensearch.indices.replication.common.CopyState;
import org.opensearch.indices.replication.common.ReplicationCollection;
import org.opensearch.indices.replication.common.ReplicationFailedException;
import org.opensearch.indices.replication.common.ReplicationLuceneIndex;
@@ -49,6 +50,7 @@
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
+import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyLong;
import static org.mockito.Mockito.atLeastOnce;
@@ -70,10 +72,7 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase {
private IndexShard replicaShard;
private IndexShard primaryShard;
private ReplicationCheckpoint checkpoint;
- private SegmentReplicationSource replicationSource;
private SegmentReplicationTargetService sut;
-
- private ReplicationCheckpoint initialCheckpoint;
private ReplicationCheckpoint aheadCheckpoint;
private ReplicationCheckpoint newPrimaryCheckpoint;
@@ -83,11 +82,10 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase {
private DiscoveryNode localNode;
private IndicesService indicesService;
- private ClusterService clusterService;
private SegmentReplicationState state;
- private static long TRANSPORT_TIMEOUT = 30000;// 30sec
+ private static final long TRANSPORT_TIMEOUT = 30000;// 30sec
@Override
public void setUp() throws Exception {
@@ -107,9 +105,6 @@ public void setUp() throws Exception {
0L,
replicaShard.getLatestReplicationCheckpoint().getCodec()
);
- SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class);
- replicationSource = mock(SegmentReplicationSource.class);
- when(replicationSourceFactory.get(replicaShard)).thenReturn(replicationSource);
testThreadPool = new TestThreadPool("test", Settings.EMPTY);
localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
@@ -126,7 +121,7 @@ public void setUp() throws Exception {
transportService.acceptIncomingRequests();
indicesService = mock(IndicesService.class);
- clusterService = mock(ClusterService.class);
+ ClusterService clusterService = mock(ClusterService.class);
ClusterState clusterState = mock(ClusterState.class);
RoutingTable mockRoutingTable = mock(RoutingTable.class);
when(clusterService.state()).thenReturn(clusterState);
@@ -135,7 +130,7 @@ public void setUp() throws Exception {
when(clusterState.nodes()).thenReturn(DiscoveryNodes.builder().add(localNode).build());
sut = prepareForReplication(primaryShard, replicaShard, transportService, indicesService, clusterService);
- initialCheckpoint = replicaShard.getLatestReplicationCheckpoint();
+ ReplicationCheckpoint initialCheckpoint = replicaShard.getLatestReplicationCheckpoint();
aheadCheckpoint = new ReplicationCheckpoint(
initialCheckpoint.getShardId(),
initialCheckpoint.getPrimaryTerm(),
@@ -242,7 +237,46 @@ public void testAlreadyOnNewCheckpoint() {
}
public void testShardAlreadyReplicating() {
- sut.startReplication(replicaShard, mock(SegmentReplicationTargetService.SegmentReplicationListener.class));
+ CountDownLatch blockGetCheckpointMetadata = new CountDownLatch(1);
+ SegmentReplicationSource source = new TestReplicationSource() {
+ @Override
+ public void getCheckpointMetadata(
+ long replicationId,
+ ReplicationCheckpoint checkpoint,
+ ActionListener listener
+ ) {
+ try {
+ blockGetCheckpointMetadata.await();
+ final CopyState copyState = new CopyState(
+ ReplicationCheckpoint.empty(primaryShard.shardId(), primaryShard.getLatestReplicationCheckpoint().getCodec()),
+ primaryShard
+ );
+ listener.onResponse(
+ new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes())
+ );
+ } catch (InterruptedException | IOException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Override
+ public void getSegmentFiles(
+ long replicationId,
+ ReplicationCheckpoint checkpoint,
+ List filesToFetch,
+ IndexShard indexShard,
+ ActionListener listener
+ ) {
+ listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList()));
+ }
+ };
+ final SegmentReplicationTarget target = spy(
+ new SegmentReplicationTarget(replicaShard, source, mock(SegmentReplicationTargetService.SegmentReplicationListener.class))
+ );
+ // Start first round of segment replication.
+ sut.startReplication(target);
+
+ // Start second round of segment replication, this should fail to start as first round is still in-progress
sut.startReplication(replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() {
@Override
public void onReplicationDone(SegmentReplicationState state) {
@@ -255,6 +289,7 @@ public void onReplicationFailure(SegmentReplicationState state, ReplicationFaile
assertFalse(sendShardFailure);
}
});
+ blockGetCheckpointMetadata.countDown();
}
public void testOnNewCheckpointFromNewPrimaryCancelOngoingReplication() throws InterruptedException {
From 3952d5e0be809d1fa42d37618edf5f31448e7053 Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Thu, 27 Jul 2023 21:15:01 -0500
Subject: [PATCH 27/71] Register MediaTypes through SPI (#8938)
* Register MediaTypes through SPI
This commit provides a new SPI interface, MediaContentProvider. Modules,
plugins, and extensions implement this interface and provide the concrete
MediaType implementations (and MIME aliases) through getMediaTypes and
getAdditionalMediaTypes, respectively. This enables downstream
extensions (e.g., serverless or cloud native implementations) to
register their own custom MediaType and define the serialization format
that is registered when the classloader loads the MediaTypeRegistry
instead of having to register the types explicitly in application code.
Signed-off-by: Nicholas Walter Knize