From 4a1cbd9e19521881b741a12addfbc8463965b6e0 Mon Sep 17 00:00:00 2001 From: Bharathwaj G <58062316+bharath-techie@users.noreply.github.com> Date: Thu, 19 May 2022 11:08:28 +0530 Subject: [PATCH 01/75] Create PIT API (#2745) * Create Point In Time API changes Signed-off-by: Bharathwaj G --- .../opensearch/client/RequestConverters.java | 23 +- .../client/RestHighLevelClient.java | 42 ++ .../java/org/opensearch/client/PitIT.java | 58 ++ .../client/RequestConvertersTests.java | 23 + .../java/org/opensearch/client/SearchIT.java | 35 ++ .../rest-api-spec/api/create_pit.json | 43 ++ .../search/searchafter/SearchAfterIT.java | 59 +- .../search/slice/SearchSliceIT.java | 86 ++- .../org/opensearch/action/ActionModule.java | 7 + .../action/search/CreatePitAction.java | 23 + .../action/search/CreatePitController.java | 273 ++++++++ .../action/search/CreatePitRequest.java | 195 ++++++ .../action/search/CreatePitResponse.java | 232 +++++++ .../action/search/SearchContextId.java | 2 +- .../action/search/SearchTransportService.java | 75 +++ .../opensearch/action/search/SearchUtils.java | 43 ++ .../search/TransportCreatePitAction.java | 139 ++++ .../search/UpdatePitContextRequest.java | 67 ++ .../search/UpdatePitContextResponse.java | 58 ++ .../java/org/opensearch/client/Client.java | 7 + .../client/support/AbstractClient.java | 8 + .../common/settings/ClusterSettings.java | 4 + .../common/settings/IndexScopedSettings.java | 1 + .../org/opensearch/index/IndexSettings.java | 28 + .../index/shard/SearchOperationListener.java | 44 ++ .../action/search/RestCreatePitAction.java | 57 ++ .../search/DefaultSearchContext.java | 20 +- .../org/opensearch/search/SearchService.java | 158 ++++- .../search/internal/PitReaderContext.java | 70 +++ .../search/internal/ReaderContext.java | 15 +- .../search/CreatePitControllerTests.java | 592 ++++++++++++++++++ .../search/DefaultSearchContextTests.java | 51 +- .../opensearch/search/PitMultiNodeTests.java | 211 +++++++ 
.../opensearch/search/PitSingleNodeTests.java | 575 +++++++++++++++++ .../opensearch/search/SearchServiceTests.java | 100 ++- .../search/pit/RestCreatePitActionTests.java | 78 +++ 36 files changed, 3481 insertions(+), 21 deletions(-) create mode 100644 client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitController.java create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/CreatePitResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/SearchUtils.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java create mode 100644 server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java create mode 100644 server/src/main/java/org/opensearch/search/internal/PitReaderContext.java create mode 100644 server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java create mode 100644 server/src/test/java/org/opensearch/search/PitMultiNodeTests.java create mode 100644 server/src/test/java/org/opensearch/search/PitSingleNodeTests.java create mode 100644 server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index afecdc3eea1a3..277759c921fbf 100644 --- 
a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -54,6 +54,7 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -92,6 +93,7 @@ import org.opensearch.index.reindex.ReindexRequest; import org.opensearch.index.reindex.UpdateByQueryRequest; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestSearchAction; import org.opensearch.script.mustache.MultiSearchTemplateRequest; import org.opensearch.script.mustache.SearchTemplateRequest; @@ -433,9 +435,15 @@ static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); - params.withIndicesOptions(searchRequest.indicesOptions()); + if (searchRequest.pointInTimeBuilder() == null) { + params.withIndicesOptions(searchRequest.indicesOptions()); + } params.withSearchType(searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + if (searchRequest.pointInTimeBuilder() != null) { + params.putParam("ccs_minimize_roundtrips", "false"); + } else { + params.putParam("ccs_minimize_roundtrips", Boolean.toString(searchRequest.isCcsMinimizeRoundtrips())); + } if (searchRequest.getPreFilterShardSize() != null) { params.putParam("pre_filter_shard_size", Integer.toString(searchRequest.getPreFilterShardSize())); } @@ 
-458,6 +466,17 @@ static Request searchScroll(SearchScrollRequest searchScrollRequest) throws IOEx return request; } + static Request createPit(CreatePitRequest createPitRequest) throws IOException { + Params params = new Params(); + params.putParam(RestCreatePitAction.ALLOW_PARTIAL_PIT_CREATION, Boolean.toString(createPitRequest.shouldAllowPartialPitCreation())); + params.putParam(RestCreatePitAction.KEEP_ALIVE, createPitRequest.getKeepAlive()); + params.withIndicesOptions(createPitRequest.indicesOptions()); + Request request = new Request(HttpPost.METHOD_NAME, endpoint(createPitRequest.indices(), "_search/point_in_time")); + request.addParameters(params.asMap()); + request.setEntity(createEntity(createPitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException { Request request = new Request(HttpDelete.METHOD_NAME, "/_search/scroll"); request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE)); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index d293b979debb5..f3360630a26b7 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -59,6 +59,8 @@ import org.opensearch.action.index.IndexResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -1256,6 +1258,46 @@ public final Cancellable scrollAsync( ); } + /** + * Create PIT context using create PIT API + * 
+ * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final CreatePitResponse createPit(CreatePitRequest createPitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Create PIT context using create PIT API + * + * @param createPitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable createPitAsync( + CreatePitRequest createPitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + createPitRequest, + RequestConverters::createPit, + options, + CreatePitResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. * diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java new file mode 100644 index 0000000000000..99901eabc91aa --- /dev/null +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.client; + +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.junit.Before; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.common.unit.TimeValue; + +import java.io.IOException; +import java.util.concurrent.TimeUnit; + +/** + * Tests point in time API with rest high level client + */ +public class PitIT extends OpenSearchRestHighLevelClientTestCase { + + @Before + public void indexDocuments() throws IOException { + Request doc1 = new Request(HttpPut.METHOD_NAME, "/index/_doc/1"); + doc1.setJsonEntity("{\"type\":\"type1\", \"id\":1, \"num\":10, \"num2\":50}"); + client().performRequest(doc1); + Request doc2 = new Request(HttpPut.METHOD_NAME, "/index/_doc/2"); + doc2.setJsonEntity("{\"type\":\"type1\", \"id\":2, \"num\":20, \"num2\":40}"); + client().performRequest(doc2); + Request doc3 = new Request(HttpPut.METHOD_NAME, "/index/_doc/3"); + doc3.setJsonEntity("{\"type\":\"type1\", \"id\":3, \"num\":50, \"num2\":35}"); + client().performRequest(doc3); + Request doc4 = new Request(HttpPut.METHOD_NAME, "/index/_doc/4"); + doc4.setJsonEntity("{\"type\":\"type2\", \"id\":4, \"num\":100, \"num2\":10}"); + client().performRequest(doc4); + Request doc5 = new Request(HttpPut.METHOD_NAME, "/index/_doc/5"); + doc5.setJsonEntity("{\"type\":\"type2\", \"id\":5, \"num\":100, \"num2\":10}"); + client().performRequest(doc5); + client().performRequest(new Request(HttpPost.METHOD_NAME, "/_refresh")); + } + + public void testCreatePit() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertEquals(1, pitResponse.getTotalShards()); + assertEquals(1, 
pitResponse.getSuccessfulShards()); + assertEquals(0, pitResponse.getFailedShards()); + assertEquals(0, pitResponse.getSkippedShards()); + } + /** + * Todo: add deletion logic and test cluster settings + */ +} diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 0415b864ba35e..4f0b2ac0d88a1 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -53,6 +53,7 @@ import org.opensearch.action.get.MultiGetRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -131,6 +132,7 @@ import java.util.Locale; import java.util.Map; import java.util.StringJoiner; +import java.util.concurrent.TimeUnit; import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; @@ -1303,6 +1305,27 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testCreatePit() throws IOException { + String[] indices = randomIndicesNames(0, 5); + Map expectedParams = new HashMap<>(); + expectedParams.put("keep_alive", "1d"); + expectedParams.put("allow_partial_pit_creation", "true"); + CreatePitRequest createPitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, indices); + setRandomIndicesOptions(createPitRequest::indicesOptions, createPitRequest::indicesOptions, expectedParams); + Request request = RequestConverters.createPit(createPitRequest); + 
StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/point_in_time"); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(createPitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + public void testSearchTemplate() throws Exception { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 19e287fb91be5..01a7f892c80a1 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -43,6 +43,8 @@ import org.opensearch.action.fieldcaps.FieldCapabilitiesResponse; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -89,6 +91,7 @@ import org.opensearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.opensearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.opensearch.search.aggregations.support.ValueType; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.search.fetch.subphase.FetchSourceContext; import 
org.opensearch.search.fetch.subphase.highlight.HighlightBuilder; @@ -105,6 +108,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; @@ -762,6 +766,37 @@ public void testSearchScroll() throws Exception { } } + public void testSearchWithPit() throws Exception { + for (int i = 0; i < 100; i++) { + XContentBuilder builder = jsonBuilder().startObject().field("field", i).endObject(); + Request doc = new Request(HttpPut.METHOD_NAME, "/test/_doc/" + Integer.toString(i)); + doc.setJsonEntity(Strings.toString(builder)); + client().performRequest(doc); + } + client().performRequest(new Request(HttpPost.METHOD_NAME, "/test/_refresh")); + + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "test"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().size(35) + .sort("field", SortOrder.ASC) + .pointInTimeBuilder(new PointInTimeBuilder(pitResponse.getId())); + SearchRequest searchRequest = new SearchRequest().source(searchSourceBuilder); + SearchResponse searchResponse = execute(searchRequest, highLevelClient()::search, highLevelClient()::searchAsync); + + try { + long counter = 0; + assertSearchHeader(searchResponse); + assertThat(searchResponse.getHits().getTotalHits().value, equalTo(100L)); + assertThat(searchResponse.getHits().getHits().length, equalTo(35)); + for (SearchHit hit : searchResponse.getHits()) { + assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); + } + } finally { + // TODO : Delete PIT + } + } + public void testMultiSearch() throws Exception { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); SearchRequest 
searchRequest1 = new SearchRequest("index1"); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json new file mode 100644 index 0000000000000..eb5f4977b99d9 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/create_pit.json @@ -0,0 +1,43 @@ +{ + "create_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Creates point in time context." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/{index}/_search/point_in_time", + "methods":[ + "POST" + ], + "parts":{ + "index":{ + "type":"list", + "description":"A comma-separated list of index names to search; use `_all` or empty string to perform the operation on all indices" + } + } + } + ] + }, + "params":{ + "allow_partial_pit_creation":{ + "type":"boolean", + "description":"Allow if point in time can be created with partial failures" + }, + "keep_alive":{ + "type":"string", + "description":"Specify the keep alive for point in time" + }, + "preference":{ + "type":"string", + "description":"Specify the node or shard the operation should be performed on (default: random)" + }, + "routing":{ + "type":"list", + "description":"A comma-separated list of specific routing values" + } + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java index 926e21294ffc8..c2591c28a95d4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/searchafter/SearchAfterIT.java @@ -32,15 +32,21 @@ package org.opensearch.search.searchafter; +import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.create.CreateIndexRequestBuilder; import 
org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.common.UUIDs; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortOrder; import org.opensearch.test.OpenSearchIntegTestCase; import org.hamcrest.Matchers; @@ -50,7 +56,6 @@ import java.util.Comparator; import java.util.Collections; import java.util.Arrays; - import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; import static org.opensearch.index.query.QueryBuilders.matchAllQuery; @@ -155,6 +160,58 @@ public void testsShouldFail() throws Exception { } } + public void testPitWithSearchAfter() throws Exception { + assertAcked(client().admin().indices().prepareCreate("test").setMapping("field1", "type=long", "field2", "type=keyword").get()); + ensureGreen(); + indexRandom( + true, + client().prepareIndex("test").setId("0").setSource("field1", 0), + client().prepareIndex("test").setId("1").setSource("field1", 100, "field2", "toto"), + client().prepareIndex("test").setId("2").setSource("field1", 101), + client().prepareIndex("test").setId("3").setSource("field1", 99) + ); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + 
SearchResponse sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 99 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(2, sr.getHits().getHits().length); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 100 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(1, sr.getHits().getHits().length); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 0 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(3, sr.getHits().getHits().length); + /** + * Add new data and assert PIT results remain the same and normal search results gets refreshed + */ + indexRandom(true, client().prepareIndex("test").setId("4").setSource("field1", 102)); + sr = client().prepareSearch() + .addSort("field1", SortOrder.ASC) + .setQuery(matchAllQuery()) + .searchAfter(new Object[] { 0 }) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .get(); + assertEquals(3, sr.getHits().getHits().length); + sr = client().prepareSearch().addSort("field1", SortOrder.ASC).setQuery(matchAllQuery()).searchAfter(new Object[] { 0 }).get(); + assertEquals(4, sr.getHits().getHits().length); + client().admin().indices().prepareDelete("test").get(); + } + public void testWithNullStrings() throws InterruptedException { assertAcked(client().admin().indices().prepareCreate("test").setMapping("field2", "type=keyword").get()); ensureGreen(); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java index 9c735c42052e3..eacbcc42a8157 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/search/slice/SearchSliceIT.java @@ -32,9 +32,13 @@ package org.opensearch.search.slice; +import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import org.opensearch.action.index.IndexRequestBuilder; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; @@ -46,6 +50,7 @@ import org.opensearch.search.Scroll; import org.opensearch.search.SearchException; import org.opensearch.search.SearchHit; +import org.opensearch.search.builder.PointInTimeBuilder; import org.opensearch.search.sort.SortBuilders; import org.opensearch.test.OpenSearchIntegTestCase; @@ -86,7 +91,12 @@ private void setupIndex(int numDocs, int numberOfShards) throws IOException, Exe client().admin() .indices() .prepareCreate("test") - .setSettings(Settings.builder().put("number_of_shards", numberOfShards).put("index.max_slices_per_scroll", 10000)) + .setSettings( + Settings.builder() + .put("number_of_shards", numberOfShards) + .put("index.max_slices_per_scroll", 10000) + .put("index.max_slices_per_pit", 10000) + ) .setMapping(mapping) ); ensureGreen(); @@ -129,6 +139,78 @@ public void testSearchSort() throws Exception { } } + public void testSearchSortWithoutPitOrScroll() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int fetchSize = randomIntBetween(10, 100); + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + SliceBuilder sliceBuilder = new SliceBuilder("_id", 0, 4); + SearchPhaseExecutionException ex = 
expectThrows(SearchPhaseExecutionException.class, () -> request.slice(sliceBuilder).get()); + assertTrue(ex.getMessage().contains("all shards failed")); + } + + public void testSearchSortWithPIT() throws Exception { + int numShards = randomIntBetween(1, 7); + int numDocs = randomIntBetween(100, 1000); + setupIndex(numDocs, numShards); + int max = randomIntBetween(2, numShards * 3); + CreatePitRequest pitRequest = new CreatePitRequest(TimeValue.timeValueDays(1), true); + pitRequest.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, pitRequest); + CreatePitResponse pitResponse = execute.get(); + for (String field : new String[] { "_id", "random_int", "static_int" }) { + int fetchSize = randomIntBetween(10, 100); + + // test _doc sort + SearchRequestBuilder request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("_doc")); + assertSearchSlicesWithPIT(request, field, max, numDocs); + + // test numeric sort + request = client().prepareSearch("test") + .setQuery(matchAllQuery()) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(fetchSize) + .addSort(SortBuilders.fieldSort("random_int")); + assertSearchSlicesWithPIT(request, field, max, numDocs); + } + client().admin().indices().prepareDelete("test").get(); + } + + private void assertSearchSlicesWithPIT(SearchRequestBuilder request, String field, int numSlice, int numDocs) { + int totalResults = 0; + List keys = new ArrayList<>(); + for (int id = 0; id < numSlice; id++) { + SliceBuilder sliceBuilder = new SliceBuilder(field, id, numSlice); + SearchResponse searchResponse = request.slice(sliceBuilder).setFrom(0).get(); + totalResults += searchResponse.getHits().getHits().length; + int expectedSliceResults = (int) searchResponse.getHits().getTotalHits().value; + int numSliceResults = 
searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + while (searchResponse.getHits().getHits().length > 0) { + searchResponse = request.setFrom(numSliceResults).slice(sliceBuilder).get(); + totalResults += searchResponse.getHits().getHits().length; + numSliceResults += searchResponse.getHits().getHits().length; + for (SearchHit hit : searchResponse.getHits().getHits()) { + assertTrue(keys.add(hit.getId())); + } + } + assertThat(numSliceResults, equalTo(expectedSliceResults)); + } + assertThat(totalResults, equalTo(numDocs)); + assertThat(keys.size(), equalTo(numDocs)); + assertThat(new HashSet(keys).size(), equalTo(numDocs)); + } + public void testWithPreferenceAndRoutings() throws Exception { int numShards = 10; int totalDocs = randomIntBetween(100, 1000); @@ -217,7 +299,7 @@ public void testInvalidQuery() throws Exception { ); Throwable rootCause = findRootCause(exc); assertThat(rootCause.getClass(), equalTo(SearchException.class)); - assertThat(rootCause.getMessage(), equalTo("`slice` cannot be used outside of a scroll context")); + assertThat(rootCause.getMessage(), equalTo("`slice` cannot be used outside of a scroll context or PIT context")); } private void assertSearchSlicesWithScroll(SearchRequestBuilder request, String field, int numSlice, int numDocs) { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 790f8f6cbdc36..36b7aeea3d262 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -232,10 +232,12 @@ import org.opensearch.action.main.MainAction; import org.opensearch.action.main.TransportMainAction; import org.opensearch.action.search.ClearScrollAction; +import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.MultiSearchAction; import 
org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; +import org.opensearch.action.search.TransportCreatePitAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -396,6 +398,7 @@ import org.opensearch.rest.action.ingest.RestSimulatePipelineAction; import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; +import org.opensearch.rest.action.search.RestCreatePitAction; import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; @@ -656,6 +659,7 @@ public void reg actions.register(ImportDanglingIndexAction.INSTANCE, TransportImportDanglingIndexAction.class); actions.register(DeleteDanglingIndexAction.INSTANCE, TransportDeleteDanglingIndexAction.class); actions.register(FindDanglingIndexAction.INSTANCE, TransportFindDanglingIndexAction.class); + actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); return unmodifiableMap(actions.getRegistry()); } @@ -828,6 +832,9 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestRepositoriesAction()); registerHandler.accept(new RestSnapshotAction()); registerHandler.accept(new RestTemplatesAction()); + + // Point in time API + registerHandler.accept(new RestCreatePitAction()); for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitAction.java b/server/src/main/java/org/opensearch/action/search/CreatePitAction.java new file mode 100644 index 0000000000000..1af56a044205b --- /dev/null +++ 
b/server/src/main/java/org/opensearch/action/search/CreatePitAction.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for creating PIT reader context + */ +public class CreatePitAction extends ActionType { + public static final CreatePitAction INSTANCE = new CreatePitAction(); + public static final String NAME = "indices:data/read/point_in_time"; + + private CreatePitAction() { + super(NAME, CreatePitResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java new file mode 100644 index 0000000000000..3d2ecc8b695c6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -0,0 +1,273 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.tasks.Task; +import org.opensearch.transport.Transport; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; + +/** + * Controller for creating PIT reader context + * Phase 1 of create PIT request : Create PIT reader contexts in the associated shards with a temporary keep alive + * Phase 2 of create PIT : Update PIT reader context with PIT ID and keep alive from request and + * fail user request if any of the updates in this phase are failed - we clean up PITs in case of such failures. + * This two phase approach is used to save PIT ID as part of context which is later used for other use cases like list PIT etc. 
 */
public class CreatePitController {
    private final SearchTransportService searchTransportService;
    private final ClusterService clusterService;
    private final TransportSearchAction transportSearchAction;
    private final NamedWriteableRegistry namedWriteableRegistry;
    private final Task task;
    // NOTE(review): captured but never notified inside this class - callers are
    // notified through the updatePitIdListener passed to executeCreatePit; confirm
    // this field is actually needed.
    private final ActionListener<CreatePitResponse> listener;
    private final CreatePitRequest request;
    private static final Logger logger = LogManager.getLogger(CreatePitController.class);
    /**
     * Temporary keep alive applied to shard reader contexts during phase 1; phase 2
     * replaces it with the keep alive from the user request. Node-scoped setting,
     * defaults to 30 seconds.
     */
    public static final Setting<TimeValue> PIT_INIT_KEEP_ALIVE = Setting.positiveTimeSetting(
        "pit.init.keep_alive",
        timeValueSeconds(30),
        Setting.Property.NodeScope
    );

    public CreatePitController(
        CreatePitRequest request,
        SearchTransportService searchTransportService,
        ClusterService clusterService,
        TransportSearchAction transportSearchAction,
        NamedWriteableRegistry namedWriteableRegistry,
        Task task,
        ActionListener<CreatePitResponse> listener
    ) {
        this.searchTransportService = searchTransportService;
        this.clusterService = clusterService;
        this.transportSearchAction = transportSearchAction;
        this.namedWriteableRegistry = namedWriteableRegistry;
        this.task = task;
        this.listener = listener;
        this.request = request;
    }

    /**
     * This method creates PIT reader context
     *
     * @param createPitListener   completed with the phase-1 search response carrying the PIT id
     * @param updatePitIdListener completed when every shard context has been updated (phase 2),
     *                            or failed when either phase fails
     */
    public void executeCreatePit(StepListener<SearchResponse> createPitListener, ActionListener<CreatePitResponse> updatePitIdListener) {
        // The create-PIT request is translated into a SearchRequest so that
        // preference, routing and indices options resolve exactly as a search would.
        SearchRequest searchRequest = new SearchRequest(request.getIndices());
        searchRequest.preference(request.getPreference());
        searchRequest.routing(request.getRouting());
        searchRequest.indicesOptions(request.getIndicesOptions());
        searchRequest.allowPartialSearchResults(request.shouldAllowPartialPitCreation());
        SearchTask searchTask = searchRequest.createTask(
            task.getId(),
            task.getType(),
            task.getAction(),
            task.getParentTaskId(),
            Collections.emptyMap()
        );
        /**
         * Phase 1 of create PIT
         */
        executeCreatePit(searchTask, searchRequest, createPitListener);

        /**
         * Phase 2 of create PIT where we update pit id in pit contexts
         */
        createPitListener.whenComplete(
            searchResponse -> { executeUpdatePitId(request, searchRequest, searchResponse, updatePitIdListener); },
            updatePitIdListener::onFailure
        );
    }

    /**
     * Creates PIT reader context with temporary keep alive
     */
    void executeCreatePit(Task task, SearchRequest searchRequest, StepListener<SearchResponse> createPitListener) {
        logger.debug(
            () -> new ParameterizedMessage("Executing creation of PIT context for indices [{}]", Arrays.toString(searchRequest.indices()))
        );
        transportSearchAction.executeRequest(
            task,
            searchRequest,
            TransportCreatePitAction.CREATE_PIT_ACTION,
            true,
            new TransportSearchAction.SinglePhaseSearchAction() {
                @Override
                public void executeOnShardTarget(
                    SearchTask searchTask,
                    SearchShardTarget target,
                    Transport.Connection connection,
                    ActionListener<SearchPhaseResult> searchPhaseResultActionListener
                ) {
                    // Open a reader context on each target shard with the short
                    // temporary keep alive; phase 2 extends it to the requested value.
                    searchTransportService.createPitContext(
                        connection,
                        new TransportCreatePitAction.CreateReaderContextRequest(
                            target.getShardId(),
                            PIT_INIT_KEEP_ALIVE.get(clusterService.getSettings())
                        ),
                        searchTask,
                        ActionListener.wrap(r -> searchPhaseResultActionListener.onResponse(r), searchPhaseResultActionListener::onFailure)
                    );
                }
            },
            createPitListener
        );
    }

    /**
     * Updates PIT ID, keep alive and createdTime of PIT reader context
     */
    void executeUpdatePitId(
        CreatePitRequest request,
        SearchRequest searchRequest,
        SearchResponse searchResponse,
        ActionListener<CreatePitResponse> updatePitIdListener
    ) {
        logger.debug(
            () -> new ParameterizedMessage(
                "Updating PIT context with PIT ID [{}], creation time and keep alive",
                searchResponse.pointInTimeId()
            )
        );
        /**
         * store the create time ( same create time for all PIT contexts across shards ) to be used
         * for list PIT api
         */
        final long relativeStartNanos = System.nanoTime();
        final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(
            searchRequest.getOrCreateAbsoluteStartMillis(),
            relativeStartNanos,
            System::nanoTime
        );
        final long creationTime = timeProvider.getAbsoluteStartMillis();
        CreatePitResponse createPITResponse = new CreatePitResponse(
            searchResponse.pointInTimeId(),
            creationTime,
            searchResponse.getTotalShards(),
            searchResponse.getSuccessfulShards(),
            searchResponse.getSkippedShards(),
            searchResponse.getFailedShards(),
            searchResponse.getShardFailures()
        );
        // The PIT id encodes the per-shard context ids; decode it to find every
        // shard context that has to be updated in this phase.
        SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, createPITResponse.getId());
        final StepListener<BiFunction<String, String, DiscoveryNode>> lookupListener = getConnectionLookupListener(contextId);
        lookupListener.whenComplete(nodelookup -> {
            final ActionListener<UpdatePitContextResponse> groupedActionListener = getGroupedListener(
                updatePitIdListener,
                createPITResponse,
                contextId.shards().size(),
                contextId.shards().values()
            );
            for (Map.Entry<ShardId, SearchContextIdForNode> entry : contextId.shards().entrySet()) {
                DiscoveryNode node = nodelookup.apply(entry.getValue().getClusterAlias(), entry.getValue().getNode());
                try {
                    final Transport.Connection connection = searchTransportService.getConnection(entry.getValue().getClusterAlias(), node);
                    searchTransportService.updatePitContext(
                        connection,
                        new UpdatePitContextRequest(
                            entry.getValue().getSearchContextId(),
                            createPITResponse.getId(),
                            request.getKeepAlive().millis(),
                            creationTime
                        ),
                        groupedActionListener
                    );
                } catch (Exception e) {
                    // A failure for any single shard fails the grouped listener,
                    // which then cleans up every context created in phase 1.
                    logger.error(
                        () -> new ParameterizedMessage(
                            "Create pit update phase failed for PIT ID [{}] on node [{}]",
                            searchResponse.pointInTimeId(),
                            node
                        ),
                        e
                    );
                    groupedActionListener.onFailure(
                        new OpenSearchException(
                            "Create pit update phase for PIT ID [" + searchResponse.pointInTimeId() + "] failed on node[" + node + "]",
                            e
                        )
                    );
                }
            }
        }, updatePitIdListener::onFailure);
    }

    private StepListener<BiFunction<String, String, DiscoveryNode>> getConnectionLookupListener(SearchContextId contextId) {
        ClusterState state = clusterService.state();
        // Only non-empty cluster aliases refer to remote clusters; local-cluster
        // nodes are resolved straight from the local cluster state.
        final Set<String> clusters = contextId.shards()
            .values()
            .stream()
            .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false)
            .map(SearchContextIdForNode::getClusterAlias)
            .collect(Collectors.toSet());
        return SearchUtils.getConnectionLookupListener(searchTransportService.getRemoteClusterService(), state, clusters);
    }

    private ActionListener<UpdatePitContextResponse> getGroupedListener(
        ActionListener<CreatePitResponse> updatePitIdListener,
        CreatePitResponse createPITResponse,
        int size,
        Collection<SearchContextIdForNode> contexts
    ) {
        return new GroupedActionListener<>(new ActionListener<>() {
            @Override
            public void onResponse(final Collection<UpdatePitContextResponse> responses) {
                updatePitIdListener.onResponse(createPITResponse);
            }

            @Override
            public void onFailure(final Exception e) {
                // Any shard update failure invalidates the PIT: free all created
                // contexts before propagating the failure to the caller.
                cleanupContexts(contexts);
                updatePitIdListener.onFailure(e);
            }
        }, size);
    }

    /**
     * Cleanup all created PIT contexts in case of failure
     */
    private void cleanupContexts(Collection<SearchContextIdForNode> contexts) {
        ActionListener<Integer> deleteListener = new ActionListener<>() {
            @Override
            public void onResponse(Integer freed) {
                // log the number of freed contexts - this is an invoke-and-forget call
                logger.debug(() -> new ParameterizedMessage("Cleaned up {} contexts out of {}", freed, contexts.size()));
            }

            @Override
            public void onFailure(Exception e) {
                logger.error("Cleaning up PIT contexts failed ", e);
            }
        };
        ClearScrollController.closeContexts(clusterService.state().getNodes(), searchTransportService, contexts, deleteListener);
    }
}
license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.IndicesRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.common.Nullable; +import org.opensearch.common.Strings; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * A request to make create point in time against one or more indices. + */ +public class CreatePitRequest extends ActionRequest implements IndicesRequest.Replaceable, ToXContent { + + // keep alive for pit reader context + private TimeValue keepAlive; + + // this describes whether PIT can be created with partial failures + private Boolean allowPartialPitCreation; + @Nullable + private String routing = null; + @Nullable + private String preference = null; + private String[] indices = Strings.EMPTY_ARRAY; + private IndicesOptions indicesOptions = SearchRequest.DEFAULT_INDICES_OPTIONS; + + public CreatePitRequest(TimeValue keepAlive, Boolean allowPartialPitCreation, String... 
indices) { + this.keepAlive = keepAlive; + this.allowPartialPitCreation = allowPartialPitCreation; + this.indices = indices; + } + + public CreatePitRequest(StreamInput in) throws IOException { + super(in); + indices = in.readStringArray(); + indicesOptions = IndicesOptions.readIndicesOptions(in); + routing = in.readOptionalString(); + preference = in.readOptionalString(); + keepAlive = in.readTimeValue(); + allowPartialPitCreation = in.readOptionalBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArray(indices); + indicesOptions.writeIndicesOptions(out); + out.writeOptionalString(routing); + out.writeOptionalString(preference); + out.writeTimeValue(keepAlive); + out.writeOptionalBoolean(allowPartialPitCreation); + } + + public String getRouting() { + return routing; + } + + public String getPreference() { + return preference; + } + + public String[] getIndices() { + return indices; + } + + public IndicesOptions getIndicesOptions() { + return indicesOptions; + } + + public TimeValue getKeepAlive() { + return keepAlive; + } + + /** + * Sets if this request should allow partial results. 
+ */ + public void allowPartialPitCreation(Boolean allowPartialPitCreation) { + this.allowPartialPitCreation = allowPartialPitCreation; + } + + public boolean shouldAllowPartialPitCreation() { + return allowPartialPitCreation; + } + + public void setRouting(String routing) { + this.routing = routing; + } + + public void setPreference(String preference) { + this.preference = preference; + } + + public void setIndices(String[] indices) { + this.indices = indices; + } + + public void setIndicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (keepAlive == null) { + validationException = addValidationError("keep alive not specified", validationException); + } + return validationException; + } + + @Override + public String[] indices() { + return indices; + } + + @Override + public IndicesOptions indicesOptions() { + return indicesOptions; + } + + public CreatePitRequest indicesOptions(IndicesOptions indicesOptions) { + this.indicesOptions = Objects.requireNonNull(indicesOptions, "indicesOptions must not be null"); + return this; + } + + public void setKeepAlive(TimeValue keepAlive) { + this.keepAlive = keepAlive; + } + + public final String buildDescription() { + StringBuilder sb = new StringBuilder(); + sb.append("indices["); + Strings.arrayToDelimitedString(indices, ",", sb); + sb.append("], "); + sb.append("pointintime[").append(keepAlive).append("], "); + sb.append("allowPartialPitCreation[").append(allowPartialPitCreation).append("], "); + return sb.toString(); + } + + @Override + public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { + return new Task(id, type, action, this.buildDescription(), parentTaskId, headers); + } + + private void validateIndices(String... 
indices) { + Objects.requireNonNull(indices, "indices must not be null"); + for (String index : indices) { + Objects.requireNonNull(index, "index must not be null"); + } + } + + @Override + public CreatePitRequest indices(String... indices) { + validateIndices(indices); + this.indices = indices; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("keep_alive", keepAlive); + builder.field("allow_partial_pit_creation", allowPartialPitCreation); + if (indices != null) { + builder.startArray("indices"); + for (String index : indices) { + builder.value(index); + } + builder.endArray(); + } + if (indicesOptions != null) { + indicesOptions.toXContent(builder, params); + } + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java new file mode 100644 index 0000000000000..25eb9aff9e3d7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/CreatePitResponse.java @@ -0,0 +1,232 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
 */

package org.opensearch.action.search;

import org.opensearch.action.ActionResponse;
import org.opensearch.common.ParseField;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.xcontent.StatusToXContentObject;
import org.opensearch.common.xcontent.XContentBuilder;
import org.opensearch.common.xcontent.XContentParser;
import org.opensearch.rest.RestStatus;
import org.opensearch.rest.action.RestActions;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken;

/**
 * Create point in time response with point in time id and shard success / failures
 */
public class CreatePitResponse extends ActionResponse implements StatusToXContentObject {
    private static final ParseField ID = new ParseField("id");
    private static final ParseField CREATION_TIME = new ParseField("creation_time");

    // point in time id
    private final String id;
    private final int totalShards;
    private final int successfulShards;
    // NOTE(review): stored and serialized, but getFailedShards() reports
    // shardFailures.length instead of this field - confirm they stay consistent.
    private final int failedShards;
    private final int skippedShards;
    private final ShardSearchFailure[] shardFailures;
    // epoch millis; shared by all shard contexts of this PIT (set by the controller)
    private final long creationTime;

    public CreatePitResponse(StreamInput in) throws IOException {
        super(in);
        id = in.readString();
        totalShards = in.readVInt();
        successfulShards = in.readVInt();
        failedShards = in.readVInt();
        skippedShards = in.readVInt();
        creationTime = in.readLong();
        int size = in.readVInt();
        if (size == 0) {
            shardFailures = ShardSearchFailure.EMPTY_ARRAY;
        } else {
            shardFailures = new ShardSearchFailure[size];
            for (int i = 0; i < shardFailures.length; i++) {
                shardFailures[i] = ShardSearchFailure.readShardSearchFailure(in);
            }
        }
    }

    public CreatePitResponse(
        String id,
        long creationTime,
        int totalShards,
        int successfulShards,
        int skippedShards,
        int failedShards,
        ShardSearchFailure[] shardFailures
    ) {
        this.id = id;
        this.creationTime = creationTime;
        this.totalShards = totalShards;
        this.successfulShards = successfulShards;
        this.skippedShards = skippedShards;
        this.failedShards = failedShards;
        this.shardFailures = shardFailures;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(ID.getPreferredName(), id);
        // Emits the standard "_shards" header (total/successful/skipped/failed + failures)
        RestActions.buildBroadcastShardsHeader(
            builder,
            params,
            getTotalShards(),
            getSuccessfulShards(),
            getSkippedShards(),
            getFailedShards(),
            getShardFailures()
        );
        builder.field(CREATION_TIME.getPreferredName(), creationTime);
        builder.endObject();
        return builder;
    }

    /**
     * Parse the create PIT response body into a new {@link CreatePitResponse} object
     */
    public static CreatePitResponse fromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
        parser.nextToken();
        return innerFromXContent(parser);
    }

    /**
     * Parses the fields of an already-opened response object: "id", "creation_time"
     * and the nested "_shards" header. Unknown fields are skipped for forward
     * compatibility.
     */
    public static CreatePitResponse innerFromXContent(XContentParser parser) throws IOException {
        ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser);
        String currentFieldName = parser.currentName();
        int successfulShards = -1;
        int totalShards = -1;
        int skippedShards = 0;
        int failedShards = 0;
        String id = null;
        long creationTime = 0;
        List<ShardSearchFailure> failures = new ArrayList<>();
        for (XContentParser.Token token = parser.nextToken(); token != XContentParser.Token.END_OBJECT; token = parser.nextToken()) {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (token.isValue()) {
                if (CREATION_TIME.match(currentFieldName, parser.getDeprecationHandler())) {
                    creationTime = parser.longValue();
                } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) {
                    id = parser.text();
                } else {
                    parser.skipChildren();
                }
            } else if (token == XContentParser.Token.START_OBJECT) {
                if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                    // Inner loop consumes the "_shards" object, including the
                    // optional "failures" array.
                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                        if (token == XContentParser.Token.FIELD_NAME) {
                            currentFieldName = parser.currentName();
                        } else if (token.isValue()) {
                            if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                                failedShards = parser.intValue(); // we don't need it but need to consume it
                            } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                                successfulShards = parser.intValue();
                            } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                                totalShards = parser.intValue();
                            } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                                skippedShards = parser.intValue();
                            } else {
                                parser.skipChildren();
                            }
                        } else if (token == XContentParser.Token.START_ARRAY) {
                            if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                                while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                                    failures.add(ShardSearchFailure.fromXContent(parser));
                                }
                            } else {
                                parser.skipChildren();
                            }
                        } else {
                            parser.skipChildren();
                        }
                    }
                } else {
                    parser.skipChildren();
                }
            }
        }

        return new CreatePitResponse(
            id,
            creationTime,
            totalShards,
            successfulShards,
            skippedShards,
            failedShards,
            failures.toArray(ShardSearchFailure.EMPTY_ARRAY)
        );
    }

    public long getCreationTime() {
        return creationTime;
    }

    /**
     * The failed number of shards the search was executed on.
     */
    public int getFailedShards() {
        return shardFailures.length;
    }

    /**
     * The failures that occurred during the search.
     */
    public ShardSearchFailure[] getShardFailures() {
        return this.shardFailures;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Field order must mirror the StreamInput constructor above.
        out.writeString(id);
        out.writeVInt(totalShards);
        out.writeVInt(successfulShards);
        out.writeVInt(failedShards);
        out.writeVInt(skippedShards);
        out.writeLong(creationTime);
        out.writeVInt(shardFailures.length);
        for (ShardSearchFailure shardSearchFailure : shardFailures) {
            shardSearchFailure.writeTo(out);
        }
    }

    public String getId() {
        return id;
    }

    /**
     * The total number of shards the create pit operation was executed on.
     */
    public int getTotalShards() {
        return totalShards;
    }

    /**
     * The successful number of shards the create pit operation was executed on.
     */
    public int getSuccessfulShards() {
        return successfulShards;
    }

    public int getSkippedShards() {
        return skippedShards;
    }

    @Override
    public RestStatus status() {
        // Derives the HTTP status from the shard success/failure counts.
        return RestStatus.status(successfulShards, totalShards, shardFailures);
    }
}
    /** Transport action name for creating a PIT reader context on a shard. */
    public static final String CREATE_READER_CONTEXT_ACTION_NAME = "indices:data/read/search[create_context]";
    /** Transport action name for updating an existing PIT reader context (PIT id, keep alive, creation time). */
    public static final String UPDATE_READER_CONTEXT_ACTION_NAME = "indices:data/read/search[update_context]";

    /**
     * Sends a phase-2 update to a shard's PIT reader context, storing the PIT id,
     * keep alive and creation time carried by the request.
     * Note: unlike createPitContext this uses sendRequest (no task association) -
     * the update is fired on an already-created context.
     */
    public void updatePitContext(
        Transport.Connection connection,
        UpdatePitContextRequest request,
        ActionListener<UpdatePitContextResponse> actionListener
    ) {
        transportService.sendRequest(
            connection,
            UPDATE_READER_CONTEXT_ACTION_NAME,
            request,
            TransportRequestOptions.EMPTY,
            new ActionListenerResponseHandler<>(actionListener, UpdatePitContextResponse::new)
        );
    }

    /**
     * Sends a phase-1 request to open a PIT reader context on a shard with the
     * given (temporary) keep alive. Registered as a child of the search task so
     * that cancelling the task propagates to the shard-level requests.
     */
    public void createPitContext(
        Transport.Connection connection,
        TransportCreatePitAction.CreateReaderContextRequest request,
        SearchTask task,
        ActionListener<TransportCreatePitAction.CreateReaderContextResponse> actionListener
    ) {
        transportService.sendChildRequest(
            connection,
            CREATE_READER_CONTEXT_ACTION_NAME,
            request,
            task,
            TransportRequestOptions.EMPTY,
            new ActionListenerResponseHandler<>(actionListener, TransportCreatePitAction.CreateReaderContextResponse::new)
        );
    }
SearchService.CanMatchResponse::new); + transportService.registerRequestHandler( + CREATE_READER_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + TransportCreatePitAction.CreateReaderContextRequest::new, + (request, channel, task) -> { + ChannelActionListener< + TransportCreatePitAction.CreateReaderContextResponse, + TransportCreatePitAction.CreateReaderContextRequest> listener = new ChannelActionListener<>( + channel, + CREATE_READER_CONTEXT_ACTION_NAME, + request + ); + searchService.createPitReaderContext( + request.getShardId(), + request.getKeepAlive(), + ActionListener.wrap( + r -> listener.onResponse(new TransportCreatePitAction.CreateReaderContextResponse(r)), + listener::onFailure + ) + ); + } + ); + TransportActionProxy.registerProxyAction( + transportService, + CREATE_READER_CONTEXT_ACTION_NAME, + TransportCreatePitAction.CreateReaderContextResponse::new + ); + + transportService.registerRequestHandler( + UPDATE_READER_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + UpdatePitContextRequest::new, + (request, channel, task) -> { + ChannelActionListener listener = new ChannelActionListener<>( + channel, + UPDATE_READER_CONTEXT_ACTION_NAME, + request + ); + searchService.updatePitIdAndKeepAlive(request, listener); + } + ); + TransportActionProxy.registerProxyAction(transportService, UPDATE_READER_CONTEXT_ACTION_NAME, UpdatePitContextResponse::new); + } /** diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java new file mode 100644 index 0000000000000..148d1645568b1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -0,0 +1,43 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.transport.RemoteClusterService; + +import java.util.Set; +import java.util.function.BiFunction; + +/** + * Helper class for common search functions + */ +public class SearchUtils { + + public SearchUtils() {} + + /** + * Get connection lookup listener for list of clusters passed + */ + public static StepListener> getConnectionLookupListener( + RemoteClusterService remoteClusterService, + ClusterState state, + Set clusters + ) { + final StepListener> lookupListener = new StepListener<>(); + + if (clusters.isEmpty()) { + lookupListener.onResponse((cluster, nodeId) -> state.getNodes().get(nodeId)); + } else { + remoteClusterService.collectNodes(clusters, lookupListener); + } + return lookupListener; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java new file mode 100644 index 0000000000000..3ec821dbed9c4 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportCreatePitAction.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
 */

package org.opensearch.action.search;

import org.apache.logging.log4j.message.ParameterizedMessage;
import org.opensearch.action.ActionListener;
import org.opensearch.action.StepListener;
import org.opensearch.action.support.ActionFilters;
import org.opensearch.action.support.HandledTransportAction;
import org.opensearch.cluster.service.ClusterService;
import org.opensearch.common.inject.Inject;
import org.opensearch.common.io.stream.NamedWriteableRegistry;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.unit.TimeValue;
import org.opensearch.index.shard.ShardId;
import org.opensearch.search.SearchPhaseResult;
import org.opensearch.search.internal.ShardSearchContextId;
import org.opensearch.tasks.Task;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportService;

import java.io.IOException;
import java.util.Arrays;

/**
 * Transport action for creating PIT reader context
 */
public class TransportCreatePitAction extends HandledTransportAction<CreatePitRequest, CreatePitResponse> {

    /** Name of the single-phase search action the controller runs to open shard contexts. */
    public static final String CREATE_PIT_ACTION = "create_pit";
    private final TransportService transportService;
    private final SearchTransportService searchTransportService;
    private final ClusterService clusterService;
    private final TransportSearchAction transportSearchAction;
    private final NamedWriteableRegistry namedWriteableRegistry;

    @Inject
    public TransportCreatePitAction(
        TransportService transportService,
        ActionFilters actionFilters,
        SearchTransportService searchTransportService,
        ClusterService clusterService,
        TransportSearchAction transportSearchAction,
        NamedWriteableRegistry namedWriteableRegistry
    ) {
        super(CreatePitAction.NAME, transportService, actionFilters, in -> new CreatePitRequest(in));
        this.transportService = transportService;
        this.searchTransportService = searchTransportService;
        this.clusterService = clusterService;
        this.transportSearchAction = transportSearchAction;
        this.namedWriteableRegistry = namedWriteableRegistry;
    }

    /**
     * Delegates the two-phase PIT creation to {@link CreatePitController}.
     * NOTE(review): the raw listener is both handed to the controller's constructor
     * and wrapped into updatePitIdListener below - confirm only one of the two
     * paths ever notifies it.
     */
    @Override
    protected void doExecute(Task task, CreatePitRequest request, ActionListener<CreatePitResponse> listener) {
        CreatePitController controller = new CreatePitController(
            request,
            searchTransportService,
            clusterService,
            transportSearchAction,
            namedWriteableRegistry,
            task,
            listener
        );
        // Completed with the phase-1 search response carrying the PIT id.
        final StepListener<SearchResponse> createPitListener = new StepListener<>();
        // Completes the user request after phase 2; logs before propagating failures.
        final ActionListener<CreatePitResponse> updatePitIdListener = ActionListener.wrap(r -> listener.onResponse(r), e -> {
            logger.error(
                () -> new ParameterizedMessage(
                    "PIT creation failed while updating PIT ID for indices [{}]",
                    Arrays.toString(request.indices())
                )
            );
            listener.onFailure(e);
        });
        controller.executeCreatePit(createPitListener, updatePitIdListener);
    }

    /**
     * Request to create pit reader context with keep alive
     */
    public static class CreateReaderContextRequest extends TransportRequest {
        private final ShardId shardId;
        // temporary keep alive for the new context (phase 2 sets the final value)
        private final TimeValue keepAlive;

        public CreateReaderContextRequest(ShardId shardId, TimeValue keepAlive) {
            this.shardId = shardId;
            this.keepAlive = keepAlive;
        }

        public ShardId getShardId() {
            return shardId;
        }

        public TimeValue getKeepAlive() {
            return keepAlive;
        }

        public CreateReaderContextRequest(StreamInput in) throws IOException {
            super(in);
            this.shardId = new ShardId(in);
            this.keepAlive = in.readTimeValue();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            // Order must mirror the StreamInput constructor above.
            shardId.writeTo(out);
            out.writeTimeValue(keepAlive);
        }
    }

    /**
     * Create pit reader context response which holds the contextId
     */
    public static class CreateReaderContextResponse extends SearchPhaseResult {
        public CreateReaderContextResponse(ShardSearchContextId shardSearchContextId) {
            this.contextId = shardSearchContextId;
        }

        public CreateReaderContextResponse(StreamInput in) throws IOException {
            super(in);
            contextId = new ShardSearchContextId(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            contextId.writeTo(out);
        }
    }

}
IOException { + super(in); + contextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + contextId.writeTo(out); + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java new file mode 100644 index 0000000000000..e6c9befb7938f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextRequest.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * Request used to update PIT reader contexts with pitId, keepAlive and creationTime + */ +public class UpdatePitContextRequest extends TransportRequest { + private final String pitId; + private final long keepAlive; + + private final long creationTime; + private final ShardSearchContextId searchContextId; + + public UpdatePitContextRequest(ShardSearchContextId searchContextId, String pitId, long keepAlive, long creationTime) { + this.pitId = pitId; + this.searchContextId = searchContextId; + this.keepAlive = keepAlive; + this.creationTime = creationTime; + } + + UpdatePitContextRequest(StreamInput in) throws IOException { + super(in); + pitId = in.readString(); + keepAlive = in.readLong(); + creationTime = in.readLong(); + searchContextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(pitId); + 
out.writeLong(keepAlive); + out.writeLong(creationTime); + searchContextId.writeTo(out); + } + + public ShardSearchContextId getSearchContextId() { + return searchContextId; + } + + public String getPitId() { + return pitId; + } + + public long getCreationTime() { + return creationTime; + } + + public long getKeepAlive() { + return keepAlive; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java new file mode 100644 index 0000000000000..919dd87ea3041 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +/** + * Update PIT context response with creation time, keep alive etc. 
+ */ +public class UpdatePitContextResponse extends TransportResponse { + private final String pitId; + + private final long creationTime; + + private final long keepAlive; + + UpdatePitContextResponse(StreamInput in) throws IOException { + super(in); + pitId = in.readString(); + creationTime = in.readLong(); + keepAlive = in.readLong(); + } + + public UpdatePitContextResponse(String pitId, long creationTime, long keepAlive) { + this.pitId = pitId; + this.keepAlive = keepAlive; + this.creationTime = creationTime; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + out.writeLong(creationTime); + out.writeLong(keepAlive); + } + + public String getPitId() { + return pitId; + } + + public long getKeepAlive() { + return keepAlive; + } + + public long getCreationTime() { + return creationTime; + } +} diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index 50f8f52253815..a73f8200ab277 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -58,6 +58,8 @@ import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollRequestBuilder; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -325,6 +327,11 @@ public interface Client extends OpenSearchClient, Releasable { */ SearchScrollRequestBuilder prepareSearchScroll(String scrollId); + /** + * Create point in time for one or more indices + */ + void createPit(CreatePitRequest createPITRequest, ActionListener listener); + /** * Performs multiple search requests. 
*/ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 4fdf4b1166bd6..6cc0827310bd1 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -324,6 +324,9 @@ import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.ClearScrollRequestBuilder; import org.opensearch.action.search.ClearScrollResponse; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; @@ -574,6 +577,11 @@ public SearchScrollRequestBuilder prepareSearchScroll(String scrollId) { return new SearchScrollRequestBuilder(this, SearchScrollAction.INSTANCE, scrollId); } + @Override + public void createPit(final CreatePitRequest createPITRequest, final ActionListener listener) { + execute(CreatePitAction.INSTANCE, createPITRequest, listener); + } + @Override public ActionFuture multiSearch(MultiSearchRequest request) { return execute(MultiSearchAction.INSTANCE, request); diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index be92bf1643aee..1d0039c26670a 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -32,6 +32,7 @@ package org.opensearch.common.settings; import org.apache.logging.log4j.LogManager; +import org.opensearch.action.search.CreatePitController; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import 
org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; @@ -464,6 +465,9 @@ public void apply(Settings value, Settings current, Settings previous) { SearchService.KEEPALIVE_INTERVAL_SETTING, SearchService.MAX_KEEPALIVE_SETTING, SearchService.ALLOW_EXPENSIVE_QUERIES, + SearchService.MAX_OPEN_PIT_CONTEXT, + SearchService.MAX_PIT_KEEPALIVE_SETTING, + CreatePitController.PIT_INIT_KEEP_ALIVE, MultiBucketConsumerService.MAX_BUCKET_SETTING, SearchService.LOW_LEVEL_CANCELLATION_SETTING, SearchService.MAX_OPEN_SCROLL_CONTEXT, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index ba2666b53d7a8..3eb68a7686c96 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -149,6 +149,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.INDEX_CHECK_ON_STARTUP, IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD, IndexSettings.MAX_SLICES_PER_SCROLL, + IndexSettings.MAX_SLICES_PER_PIT, IndexSettings.MAX_REGEX_LENGTH_SETTING, ShardsLimitAllocationDecider.INDEX_TOTAL_SHARDS_PER_NODE_SETTING, IndexSettings.INDEX_GC_DELETES_SETTING, diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index e40acb94ee498..2da9fc0c6d995 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -451,6 +451,17 @@ public final class IndexSettings { Property.IndexScope ); + /** + * The maximum number of slices allowed in a search request with PIT + */ + public static final Setting MAX_SLICES_PER_PIT = Setting.intSetting( + "index.max_slices_per_pit", + 1024, + 1, + Property.Dynamic, + Property.IndexScope + ); + /** * The maximum length of regex string 
allowed in a regexp query. */ @@ -603,6 +614,10 @@ private void setRetentionLeaseMillis(final TimeValue retentionLease) { * The maximum number of slices allowed in a scroll request. */ private volatile int maxSlicesPerScroll; + /** + * The maximum number of slices allowed in a PIT request. + */ + private volatile int maxSlicesPerPit; /** * The maximum length of regex string allowed in a regexp query. @@ -717,6 +732,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti maxShingleDiff = scopedSettings.get(MAX_SHINGLE_DIFF_SETTING); maxRefreshListeners = scopedSettings.get(MAX_REFRESH_LISTENERS_PER_SHARD); maxSlicesPerScroll = scopedSettings.get(MAX_SLICES_PER_SCROLL); + maxSlicesPerPit = scopedSettings.get(MAX_SLICES_PER_PIT); maxAnalyzedOffset = scopedSettings.get(MAX_ANALYZED_OFFSET_SETTING); maxTermsCount = scopedSettings.get(MAX_TERMS_COUNT_SETTING); maxRegexLength = scopedSettings.get(MAX_REGEX_LENGTH_SETTING); @@ -789,6 +805,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti scopedSettings.addSettingsUpdateConsumer(MAX_ANALYZED_OFFSET_SETTING, this::setHighlightMaxAnalyzedOffset); scopedSettings.addSettingsUpdateConsumer(MAX_TERMS_COUNT_SETTING, this::setMaxTermsCount); scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_SCROLL, this::setMaxSlicesPerScroll); + scopedSettings.addSettingsUpdateConsumer(MAX_SLICES_PER_PIT, this::setMaxSlicesPerPit); scopedSettings.addSettingsUpdateConsumer(DEFAULT_FIELD_SETTING, this::setDefaultFields); scopedSettings.addSettingsUpdateConsumer(INDEX_SEARCH_IDLE_AFTER, this::setSearchIdleAfter); scopedSettings.addSettingsUpdateConsumer(MAX_REGEX_LENGTH_SETTING, this::setMaxRegexLength); @@ -1249,6 +1266,17 @@ public int getMaxSlicesPerScroll() { return maxSlicesPerScroll; } + /** + * The maximum number of slices allowed in a PIT request. 
+ */ + public int getMaxSlicesPerPit() { + return maxSlicesPerPit; + } + + private void setMaxSlicesPerPit(int value) { + this.maxSlicesPerPit = value; + } + private void setMaxSlicesPerScroll(int value) { this.maxSlicesPerScroll = value; } diff --git a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java index d3177055a5bd8..0a7c80f5e87d3 100644 --- a/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java +++ b/server/src/main/java/org/opensearch/index/shard/SearchOperationListener.java @@ -131,6 +131,19 @@ default void onFreeScrollContext(ReaderContext readerContext) {} */ default void validateReaderContext(ReaderContext readerContext, TransportRequest transportRequest) {} + /** + * Executed when a new Point-In-Time {@link ReaderContext} was created + * @param readerContext the created reader context + */ + default void onNewPitContext(ReaderContext readerContext) {} + + /** + * Executed when a Point-In-Time search {@link SearchContext} is freed. + * This happens on deletion of a Point-In-Time or when its keep-alive expires. + * @param readerContext the freed search context + */ + default void onFreePitContext(ReaderContext readerContext) {} + + /** + * A Composite listener that multiplexes calls to each of the listeners methods.
*/ @@ -265,5 +278,36 @@ public void validateReaderContext(ReaderContext readerContext, TransportRequest } ExceptionsHelper.reThrowIfNotNull(exception); } + + /** + * Executed when a new Point-In-Time {@link ReaderContext} was created + * @param readerContext the created reader context + */ + @Override + public void onNewPitContext(ReaderContext readerContext) { + for (SearchOperationListener listener : listeners) { + try { + listener.onNewPitContext(readerContext); + } catch (Exception e) { + logger.warn("onNewPitContext listener failed", e); + } + } + } + + /** + * Executed when a Point-In-Time search {@link SearchContext} is freed. + * This happens on deletion of a Point-In-Time or when its keep-alive expires. + * @param readerContext the freed search context + */ + @Override + public void onFreePitContext(ReaderContext readerContext) { + for (SearchOperationListener listener : listeners) { + try { + listener.onFreePitContext(readerContext); + } catch (Exception e) { + logger.warn("onFreePitContext listener failed", e); + } + } + } } } diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java new file mode 100644 index 0000000000000..9439670880015 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestCreatePitAction.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.support.IndicesOptions; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.POST; + +/** + * Rest action for creating PIT context + */ +public class RestCreatePitAction extends BaseRestHandler { + public static String ALLOW_PARTIAL_PIT_CREATION = "allow_partial_pit_creation"; + public static String KEEP_ALIVE = "keep_alive"; + + @Override + public String getName() { + return "create_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + boolean allowPartialPitCreation = request.paramAsBoolean(ALLOW_PARTIAL_PIT_CREATION, true); + String[] indices = Strings.splitStringByCommaToArray(request.param("index")); + TimeValue keepAlive = request.paramAsTime(KEEP_ALIVE, null); + CreatePitRequest createPitRequest = new CreatePitRequest(keepAlive, allowPartialPitCreation, indices); + createPitRequest.setIndicesOptions(IndicesOptions.fromRequest(request, createPitRequest.indicesOptions())); + createPitRequest.setPreference(request.param("preference")); + createPitRequest.setRouting(request.param("routing")); + + return channel -> client.createPit(createPitRequest, new RestStatusToXContentListener<>(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(POST, "/{index}/_search/point_in_time"))); + } + +} diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java 
b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index d09143e3373b4..e6c9a6d0e37e9 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -75,6 +75,7 @@ import org.opensearch.search.fetch.subphase.ScriptFieldsContext; import org.opensearch.search.fetch.subphase.highlight.SearchHighlightContext; import org.opensearch.search.internal.ContextIndexSearcher; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.ScrollContext; import org.opensearch.search.internal.SearchContext; @@ -287,7 +288,7 @@ public void preProcess(boolean rewrite) { } } - if (sliceBuilder != null) { + if (sliceBuilder != null && scrollContext() != null) { int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerScroll(); int numSlices = sliceBuilder.getMax(); if (numSlices > sliceLimit) { @@ -304,6 +305,23 @@ public void preProcess(boolean rewrite) { } } + if (sliceBuilder != null && readerContext != null && readerContext instanceof PitReaderContext) { + int sliceLimit = indexService.getIndexSettings().getMaxSlicesPerPit(); + int numSlices = sliceBuilder.getMax(); + if (numSlices > sliceLimit) { + throw new IllegalArgumentException( + "The number of slices [" + + numSlices + + "] is too large. It must " + + "be less than [" + + sliceLimit + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_PIT.getKey() + + "] index level setting." 
+ ); + } + } + // initialize the filtering alias based on the provided filters try { final QueryBuilder queryBuilder = request.getAliasFilter().getQueryBuilder(); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 3b24d52bebe53..0f7a39a31535e 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -44,6 +44,8 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.TransportActions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.service.ClusterService; @@ -111,6 +113,7 @@ import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalScrollSearchRequest; import org.opensearch.search.internal.LegacyReaderContext; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.SearchContext; import org.opensearch.search.internal.ShardSearchContextId; @@ -172,6 +175,15 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv Property.NodeScope, Property.Dynamic ); + /** + * This setting will help validate the max keep alive that can be set during creation or extension for a PIT reader context + */ + public static final Setting MAX_PIT_KEEPALIVE_SETTING = Setting.positiveTimeSetting( + "pit.max_keep_alive", + timeValueHours(24), + Property.NodeScope, + Property.Dynamic + ); public static final Setting KEEPALIVE_INTERVAL_SETTING = Setting.positiveTimeSetting( "search.keep_alive_interval", timeValueMinutes(1), @@ -218,6 +230,19 @@ public class SearchService extends 
AbstractLifecycleComponent implements IndexEv Property.NodeScope ); + /** + * This setting defines the maximum number of active PIT reader contexts in the node , since each PIT context + * has a resource cost attached to it. This setting is less than scroll since users are + * encouraged to share the PIT details. + */ + public static final Setting MAX_OPEN_PIT_CONTEXT = Setting.intSetting( + "search.max_open_pit_context", + 300, + 0, + Property.Dynamic, + Property.NodeScope + ); + public static final int DEFAULT_SIZE = 10; public static final int DEFAULT_FROM = 0; @@ -243,6 +268,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile long maxKeepAlive; + private volatile long maxPitKeepAlive; + private volatile TimeValue defaultSearchTimeout; private volatile boolean defaultAllowPartialSearchResults; @@ -251,6 +278,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private volatile int maxOpenScrollContext; + private volatile int maxOpenPitContext; + private final Cancellable keepAliveReaper; private final AtomicLong idGenerator = new AtomicLong(); @@ -259,6 +288,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv private final MultiBucketConsumerService multiBucketConsumerService; + private final AtomicInteger openPitContexts = new AtomicInteger(); private final AtomicInteger openScrollContexts = new AtomicInteger(); private final String sessionId = UUIDs.randomBase64UUID(); private final Executor indexSearcherExecutor; @@ -293,9 +323,16 @@ public SearchService( TimeValue keepAliveInterval = KEEPALIVE_INTERVAL_SETTING.get(settings); setKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), MAX_KEEPALIVE_SETTING.get(settings)); - clusterService.getClusterSettings() .addSettingsUpdateConsumer(DEFAULT_KEEPALIVE_SETTING, MAX_KEEPALIVE_SETTING, this::setKeepAlives, this::validateKeepAlives); + setPitKeepAlives(DEFAULT_KEEPALIVE_SETTING.get(settings), 
MAX_PIT_KEEPALIVE_SETTING.get(settings)); + clusterService.getClusterSettings() + .addSettingsUpdateConsumer( + DEFAULT_KEEPALIVE_SETTING, + MAX_PIT_KEEPALIVE_SETTING, + this::setPitKeepAlives, + this::validatePitKeepAlives + ); this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME); @@ -309,6 +346,9 @@ public SearchService( maxOpenScrollContext = MAX_OPEN_SCROLL_CONTEXT.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_SCROLL_CONTEXT, this::setMaxOpenScrollContext); + maxOpenPitContext = MAX_OPEN_PIT_CONTEXT.get(settings); + clusterService.getClusterSettings().addSettingsUpdateConsumer(MAX_OPEN_PIT_CONTEXT, this::setMaxOpenPitContext); + lowLevelCancellation = LOW_LEVEL_CANCELLATION_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(LOW_LEVEL_CANCELLATION_SETTING, this::setLowLevelCancellation); } @@ -331,12 +371,38 @@ private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAli } } + /** + * Default keep alive search setting should be less than max PIT keep alive + */ + private void validatePitKeepAlives(TimeValue defaultKeepAlive, TimeValue maxPitKeepAlive) { + if (defaultKeepAlive.millis() > maxPitKeepAlive.millis()) { + throw new IllegalArgumentException( + "Default keep alive setting for request [" + + DEFAULT_KEEPALIVE_SETTING.getKey() + + "]" + + " should be smaller than max keep alive for PIT [" + + MAX_PIT_KEEPALIVE_SETTING.getKey() + + "], " + + "was (" + + defaultKeepAlive + + " > " + + maxPitKeepAlive + + ")" + ); + } + } + private void setKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) { validateKeepAlives(defaultKeepAlive, maxKeepAlive); this.defaultKeepAlive = defaultKeepAlive.millis(); this.maxKeepAlive = maxKeepAlive.millis(); } + private void setPitKeepAlives(TimeValue defaultKeepAlive, TimeValue maxPitKeepAlive) { + validatePitKeepAlives(defaultKeepAlive, maxPitKeepAlive); + this.maxPitKeepAlive 
= maxPitKeepAlive.millis(); + } + private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) { this.defaultSearchTimeout = defaultSearchTimeout; } @@ -353,6 +419,10 @@ private void setMaxOpenScrollContext(int maxOpenScrollContext) { this.maxOpenScrollContext = maxOpenScrollContext; } + private void setMaxOpenPitContext(int maxOpenPitContext) { + this.maxOpenPitContext = maxOpenPitContext; + } + private void setLowLevelCancellation(Boolean lowLevelCancellation) { this.lowLevelCancellation = lowLevelCancellation; } @@ -793,8 +863,8 @@ final ReaderContext createAndPutReaderContext( * Opens the reader context for given shardId. The newly opened reader context will be keep * until the {@code keepAlive} elapsed unless it is manually released. */ - public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { - checkKeepAliveLimit(keepAlive.millis()); + public void createPitReaderContext(ShardId shardId, TimeValue keepAlive, ActionListener listener) { + checkPitKeepAliveLimit(keepAlive.millis()); final IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); final IndexShard shard = indexService.getShard(shardId.id()); final SearchOperationListener searchOperationListener = shard.getSearchOperationListener(); @@ -802,13 +872,31 @@ public void openReaderContext(ShardId shardId, TimeValue keepAlive, ActionListen Engine.SearcherSupplier searcherSupplier = null; ReaderContext readerContext = null; try { + if (openPitContexts.incrementAndGet() > maxOpenPitContext) { + throw new OpenSearchRejectedExecutionException( + "Trying to create too many Point In Time contexts. Must be less than or equal to: [" + + maxOpenPitContext + + "]. " + + "This limit can be set by changing the [" + + MAX_OPEN_PIT_CONTEXT.getKey() + + "] setting." 
+ ); + } searcherSupplier = shard.acquireSearcherSupplier(); final ShardSearchContextId id = new ShardSearchContextId(sessionId, idGenerator.incrementAndGet()); - readerContext = new ReaderContext(id, indexService, shard, searcherSupplier, keepAlive.millis(), false); + readerContext = new PitReaderContext(id, indexService, shard, searcherSupplier, keepAlive.millis(), false); final ReaderContext finalReaderContext = readerContext; searcherSupplier = null; // transfer ownership to reader context + searchOperationListener.onNewReaderContext(readerContext); - readerContext.addOnClose(() -> searchOperationListener.onFreeReaderContext(finalReaderContext)); + searchOperationListener.onNewPitContext(finalReaderContext); + + readerContext.addOnClose(() -> { + openPitContexts.decrementAndGet(); + searchOperationListener.onFreeReaderContext(finalReaderContext); + searchOperationListener.onFreePitContext(finalReaderContext); + }); + // add the newly created pit reader context to active readers putReaderContext(readerContext); readerContext = null; listener.onResponse(finalReaderContext.id()); @@ -932,6 +1020,29 @@ public boolean freeReaderContext(ShardSearchContextId contextId) { return false; } + /** + * Update PIT reader with pit id, keep alive and created time etc + */ + public void updatePitIdAndKeepAlive(UpdatePitContextRequest request, ActionListener listener) { + checkPitKeepAliveLimit(request.getKeepAlive()); + PitReaderContext readerContext = getPitReaderContext(request.getSearchContextId()); + if (readerContext == null) { + throw new SearchContextMissingException(request.getSearchContextId()); + } + Releasable updatePit = null; + try { + updatePit = readerContext.updatePitIdAndKeepAlive(request.getKeepAlive(), request.getPitId(), request.getCreationTime()); + listener.onResponse(new UpdatePitContextResponse(request.getPitId(), request.getCreationTime(), request.getKeepAlive())); + } catch (Exception e) { + freeReaderContext(readerContext.id()); + 
listener.onFailure(e); + } finally { + if (updatePit != null) { + updatePit.close(); + } + } + } + public void freeAllScrollContexts() { for (ReaderContext readerContext : activeReaders.values()) { if (readerContext.scrollContext() != null) { @@ -944,7 +1055,11 @@ private long getKeepAlive(ShardSearchRequest request) { if (request.scroll() != null) { return getScrollKeepAlive(request.scroll()); } else if (request.keepAlive() != null) { - checkKeepAliveLimit(request.keepAlive().millis()); + if (getReaderContext(request.readerId()) instanceof PitReaderContext) { + checkPitKeepAliveLimit(request.keepAlive().millis()); + } else { + checkKeepAliveLimit(request.keepAlive().millis()); + } return request.keepAlive().getMillis(); } else { return request.readerId() == null ? defaultKeepAlive : -1; @@ -975,6 +1090,25 @@ private void checkKeepAliveLimit(long keepAlive) { } } + /** + * check if request keep alive is greater than max keep alive + */ + private void checkPitKeepAliveLimit(long keepAlive) { + if (keepAlive > maxPitKeepAlive) { + throw new IllegalArgumentException( + "Keep alive for request (" + + TimeValue.timeValueMillis(keepAlive) + + ") is too large. " + + "It must be less than (" + + TimeValue.timeValueMillis(maxPitKeepAlive) + + "). " + + "This limit can be set by changing the [" + + MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting." 
+ ); + } + } + private ActionListener wrapFailureListener(ActionListener listener, ReaderContext context, Releasable releasable) { return new ActionListener() { @Override @@ -1165,8 +1299,8 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc } if (source.slice() != null) { - if (context.scrollContext() == null) { - throw new SearchException(shardTarget, "`slice` cannot be used outside of a scroll context"); + if (context.scrollContext() == null && !(context.readerContext() instanceof PitReaderContext)) { + throw new SearchException(shardTarget, "`slice` cannot be used outside of a scroll context or PIT context"); } context.sliceBuilder(source.slice()); } @@ -1261,6 +1395,14 @@ public ResponseCollectorService getResponseCollectorService() { return this.responseCollectorService; } + public PitReaderContext getPitReaderContext(ShardSearchContextId id) { + ReaderContext context = activeReaders.get(id.getId()); + if (context instanceof PitReaderContext) { + return (PitReaderContext) context; + } + return null; + } + class Reaper implements Runnable { @Override public void run() { diff --git a/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java new file mode 100644 index 0000000000000..43ca7e0ebd823 --- /dev/null +++ b/server/src/main/java/org/opensearch/search/internal/PitReaderContext.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.internal; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.index.IndexService; +import org.opensearch.index.engine.Engine; +import org.opensearch.index.shard.IndexShard; + +/** + * PIT reader context containing PIT specific information such as pit id, create time etc. + */ +public class PitReaderContext extends ReaderContext { + + // Storing the encoded PIT ID as part of PIT reader context for use cases such as list pit API + private final SetOnce pitId = new SetOnce<>(); + // Creation time of PIT contexts which helps users to differentiate between multiple PIT reader contexts + private final SetOnce creationTime = new SetOnce<>(); + + public PitReaderContext( + ShardSearchContextId id, + IndexService indexService, + IndexShard indexShard, + Engine.SearcherSupplier searcherSupplier, + long keepAliveInMillis, + boolean singleSession + ) { + super(id, indexService, indexShard, searcherSupplier, keepAliveInMillis, singleSession); + } + + public String getPitId() { + return this.pitId.get(); + } + + public void setPitId(final String pitId) { + this.pitId.set(pitId); + } + + /** + * Returns a releasable to indicate that the caller has stopped using this reader. + * The pit id can be updated and time to live of the reader usage can be extended using the provided + * keepAliveInMillis. 
+ */ + public Releasable updatePitIdAndKeepAlive(long keepAliveInMillis, String pitId, long createTime) { + getRefCounted().incRef(); + tryUpdateKeepAlive(keepAliveInMillis); + setPitId(pitId); + setCreationTime(createTime); + return Releasables.releaseOnce(() -> { + getLastAccessTime().updateAndGet(curr -> Math.max(curr, nowInMillis())); + getRefCounted().decRef(); + }); + } + + public long getCreationTime() { + return this.creationTime.get(); + } + + public void setCreationTime(final long creationTime) { + this.creationTime.set(creationTime); + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java index 5bcc491f4ffdb..04791e05f603c 100644 --- a/server/src/main/java/org/opensearch/search/internal/ReaderContext.java +++ b/server/src/main/java/org/opensearch/search/internal/ReaderContext.java @@ -105,10 +105,18 @@ public void validate(TransportRequest request) { indexShard.getSearchOperationListener().validateReaderContext(this, request); } - private long nowInMillis() { + protected long nowInMillis() { return indexShard.getThreadPool().relativeTimeInMillis(); } + protected AbstractRefCounted getRefCounted() { + return refCounted; + } + + protected AtomicLong getLastAccessTime() { + return lastAccessTime; + } + @Override public final void close() { if (closed.compareAndSet(false, true)) { @@ -140,7 +148,10 @@ public Engine.Searcher acquireSearcher(String source) { return searcherSupplier.acquireSearcher(source); } - private void tryUpdateKeepAlive(long keepAlive) { + /** + * Update keep alive if it is greater than current keep alive + */ + public void tryUpdateKeepAlive(long keepAlive) { this.keepAlive.updateAndGet(curr -> Math.max(curr, keepAlive)); } diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java new file mode 100644 index 
0000000000000..f07bbe5975535 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -0,0 +1,592 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.LatchedActionListener; +import org.opensearch.action.StepListener; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import 
org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterConnectionTests; +import org.opensearch.transport.Transport; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Functional tests for various methods in create pit controller. Covers update pit phase specifically since + * integration tests don't cover it. + */ +public class CreatePitControllerTests extends OpenSearchTestCase { + + DiscoveryNode node1 = null; + DiscoveryNode node2 = null; + DiscoveryNode node3 = null; + String pitId = null; + TransportSearchAction transportSearchAction = null; + Task task = null; + DiscoveryNodes nodes = null; + NamedWriteableRegistry namedWriteableRegistry = null; + SearchResponse searchResponse = null; + ActionListener createPitListener = null; + ClusterService clusterServiceMock = null; + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes, Version version) { + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings + ) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool, settings); + } + + @Before + public void setupData() { + node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + node2 = new DiscoveryNode("node_2", 
buildNewFakeTransportAddress(), Version.CURRENT); + node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + setPitId(); + namedWriteableRegistry = new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + ) + ); + nodes = DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + transportSearchAction = mock(TransportSearchAction.class); + task = new Task( + randomLong(), + "transport", + SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + InternalSearchResponse response = new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + null, + false, + null, + 1 + ); + searchResponse = new SearchResponse( + response, + null, + 3, + 3, + 0, + 100, + ShardSearchFailure.EMPTY_ARRAY, + SearchResponse.Clusters.EMPTY, + pitId + ); + createPitListener = new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + assertEquals(3, createPITResponse.getTotalShards()); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }; + + clusterServiceMock = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + + final Settings keepAliveSettings = Settings.builder().put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), 30000).build(); + when(clusterServiceMock.getSettings()).thenReturn(keepAliveSettings); + + when(state.getMetadata()).thenReturn(Metadata.EMPTY_METADATA); + when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA); + when(clusterServiceMock.state()).thenReturn(state); + 
when(state.getNodes()).thenReturn(nodes); + } + + /** + * Test if transport call for update pit is made to all nodes present as part of PIT ID returned from phase one of create pit + */ + public void testUpdatePitAfterCreatePitSuccess() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + + /** + * Test if cleanup request is called + */ + @Override + public void sendFreeContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + + 
CountDownLatch latch = new CountDownLatch(1); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + CreatePitController controller = new CreatePitController( + request, + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + task, + createPitListener + ); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + assertEquals(3, createPITResponse.getTotalShards()); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + assertEquals(0, deleteNodesInvoked.size()); + } + } + } + + /** + * If create phase results in failure, update pit phase should not proceed and propagate the exception + */ + public void testUpdatePitAfterCreatePitFailure() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService 
searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + + @Override + public void sendFreeContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + }; + + CountDownLatch latch = new CountDownLatch(1); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + + CreatePitController controller = new CreatePitController( + request, + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + task, + createPitListener + ); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("on response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getCause().getMessage().contains("Exception occurred in phase 1")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + + controller.executeCreatePit(createListener, updatelistener); + createListener.onFailure(new Exception("Exception occurred in phase 1")); + latch.await(); + assertEquals(0, updateNodesInvoked.size()); + /** + * cleanup is not called on create pit phase one failure + */ + assertEquals(0, 
deleteNodesInvoked.size()); + } + } + } + + /** + * Testing that any update pit failures fails the request + */ + public void testUpdatePitFailureForNodeDrop() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + + updateNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId() == "node_3") { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new UpdatePitContextResponse("pitid", 500000, 500000))); + t.start(); + } + } + + @Override + public void sendFreeContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new 
SearchAsyncActionTests.MockConnection(node); + } + }; + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + CreatePitController controller = new CreatePitController( + request, + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + task, + createPitListener + ); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("node 3 down")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + /** + * check if cleanup is called for all nodes in case of update pit failure + */ + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testUpdatePitFailureWhereAllNodesDown() throws InterruptedException { + List updateNodesInvoked = new CopyOnWriteArrayList<>(); + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + 
transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void updatePitContext( + Transport.Connection connection, + UpdatePitContextRequest request, + ActionListener listener + ) { + updateNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } + + @Override + public void sendFreeContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + CreatePitController controller = new CreatePitController( + request, + searchTransportService, + clusterServiceMock, + transportSearchAction, + namedWriteableRegistry, + task, + createPitListener + ); + + CountDownLatch latch = new CountDownLatch(1); + + ActionListener updatelistener = new LatchedActionListener<>(new ActionListener() { + @Override + public void onResponse(CreatePitResponse createPITResponse) { + throw new AssertionError("response is called"); + } + + @Override + public void onFailure(Exception e) { + assertTrue(e.getMessage().contains("node down")); + } + }, latch); + + StepListener createListener = new StepListener<>(); + controller.executeCreatePit(createListener, updatelistener); + createListener.onResponse(searchResponse); + latch.await(); + assertEquals(3, updateNodesInvoked.size()); + /** + * check if cleanup is called for all nodes in case of update pit failure + */ + assertEquals(3, deleteNodesInvoked.size()); + } + } + + 
} + + QueryBuilder randomQueryBuilder() { + if (randomBoolean()) { + return new TermQueryBuilder(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } else if (randomBoolean()) { + return new MatchAllQueryBuilder(); + } else { + return new IdsQueryBuilder().addIds(randomAlphaOfLength(10)); + } + } + + private void setPitId() { + AtomicArray array = new AtomicArray<>(3); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("a", 1), + node1 + ); + testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("b", 12), + node2 + ); + testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("c", 42), + node3 + ); + testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); + array.setOnce(0, testSearchPhaseResult1); + array.setOnce(1, testSearchPhaseResult2); + array.setOnce(2, testSearchPhaseResult3); + + final Version version = Version.CURRENT; + final Map aliasFilters = new HashMap<>(); + for (SearchPhaseResult result : array.asList()) { + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if (randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } + } + pitId = SearchContextId.encode(array.asList(), 
aliasFilters, version); + } + +} diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index f6ca12f1c514c..3c83f899dd1b5 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -67,6 +67,7 @@ import org.opensearch.indices.breaker.NoneCircuitBreakerService; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.LegacyReaderContext; +import org.opensearch.search.internal.PitReaderContext; import org.opensearch.search.internal.ReaderContext; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; @@ -134,10 +135,12 @@ public void testPreProcess() throws Exception { int maxResultWindow = randomIntBetween(50, 100); int maxRescoreWindow = randomIntBetween(50, 100); int maxSlicesPerScroll = randomIntBetween(50, 100); + int maxSlicesPerPit = randomIntBetween(50, 100); Settings settings = Settings.builder() .put("index.max_result_window", maxResultWindow) .put("index.max_slices_per_scroll", maxSlicesPerScroll) .put("index.max_rescore_window", maxRescoreWindow) + .put("index.max_slices_per_pit", maxSlicesPerPit) .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2) @@ -300,13 +303,13 @@ protected Engine.Searcher acquireSearcherInternal(String source) { ); readerContext.close(); - readerContext = new ReaderContext( + readerContext = new LegacyReaderContext( newContextId(), indexService, indexShard, searcherSupplier.get(), - randomNonNegativeLong(), - false + shardSearchRequest, + randomNonNegativeLong() ); // rescore is null but sliceBuilder is not null DefaultSearchContext context2 = new DefaultSearchContext( @@ -400,6 +403,48 @@ protected Engine.Searcher 
acquireSearcherInternal(String source) { assertTrue(query1 instanceof MatchNoDocsQuery || query2 instanceof MatchNoDocsQuery); readerContext.close(); + + ReaderContext pitReaderContext = new PitReaderContext( + newContextId(), + indexService, + indexShard, + searcherSupplier.get(), + 1000, + true + ); + DefaultSearchContext context5 = new DefaultSearchContext( + pitReaderContext, + shardSearchRequest, + target, + null, + bigArrays, + null, + timeout, + null, + false, + Version.CURRENT, + false, + executor + ); + int numSlicesForPit = maxSlicesPerPit + randomIntBetween(1, 100); + when(sliceBuilder.getMax()).thenReturn(numSlicesForPit); + context5.sliceBuilder(sliceBuilder); + + exception = expectThrows(IllegalArgumentException.class, () -> context5.preProcess(false)); + assertThat( + exception.getMessage(), + equalTo( + "The number of slices [" + + numSlicesForPit + + "] is too large. It must " + + "be less than [" + + maxSlicesPerPit + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_SLICES_PER_PIT.getKey() + + "] index level setting." + ) + ); + pitReaderContext.close(); threadPool.shutdown(); } } diff --git a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java new file mode 100644 index 0000000000000..b2cdd156576d8 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java @@ -0,0 +1,211 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.SearchResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.containsString; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +/** + * Multi node integration tests for PIT creation and search operation with PIT ID. + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class PitMultiNodeTests extends OpenSearchIntegTestCase { + + @Before + public void setupIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + } + + @After + public void clearIndex() { + client().admin().indices().prepareDelete("index").get(); + } + + public void testPit() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new 
PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(2, searchResponse.getSuccessfulShards()); + assertEquals(2, searchResponse.getTotalShards()); + } + + public void testCreatePitWhileNodeDropWithAllowPartialCreationFalse() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), false); + request.setIndices(new String[] { "index" }); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + assertTrue(ex.getMessage().contains("Failed to execute phase [create_pit]")); + assertTrue(ex.getMessage().contains("Partial shards failure")); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testCreatePitWhileNodeDropWithAllowPartialCreationTrue() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + assertEquals(1, pitResponse.getSuccessfulShards()); + assertEquals(2, pitResponse.getTotalShards()); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getTotalShards()); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitSearchWithNodeDrop() throws Exception { + 
CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + SearchResponse searchResponse = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertEquals(1, searchResponse.getSuccessfulShards()); + assertEquals(1, searchResponse.getFailedShards()); + assertEquals(0, searchResponse.getSkippedShards()); + assertEquals(2, searchResponse.getTotalShards()); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitSearchWithNodeDropWithPartialSearchResultsFalse() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .setAllowPartialSearchResults(false) + .execute(); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + assertTrue(ex.getMessage().contains("Partial shards failure")); + return super.onNodeStopped(nodeName); + } + }); + } + + public void testPitInvalidDefaultKeepAlive() { + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + 
.prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("pit.max_keep_alive", "1m").put("search.default_keep_alive", "2m")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (2m > 1m)")); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "5m").put("pit.max_keep_alive", "5m")) + .get() + ); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "2m")) + .get() + ); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("pit.max_keep_alive", "2m")) + .get() + ); + + exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "3m")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (3m > 2m)")); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("search.default_keep_alive", "1m")) + .get() + ); + + exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().put("pit.max_keep_alive", "30s")) + .get() + ); + assertThat(exc.getMessage(), containsString("was (1m > 30s)")); + + assertAcked( + client().admin() + .cluster() + .prepareUpdateSettings() + .setPersistentSettings(Settings.builder().putNull("*")) + .setTransientSettings(Settings.builder().putNull("*")) + ); + + } + +} diff --git a/server/src/test/java/org/opensearch/search/PitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/PitSingleNodeTests.java new file mode 100644 index 0000000000000..2275a07e0d807 --- /dev/null +++ 
b/server/src/test/java/org/opensearch/search/PitSingleNodeTests.java @@ -0,0 +1,575 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.hamcrest.Matchers; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.search.*; +import org.opensearch.common.Priority; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.search.sort.SortOrder; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; +import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.index.query.QueryBuilders.matchAllQuery; +import static org.opensearch.index.query.QueryBuilders.queryStringQuery; +import static org.opensearch.index.query.QueryBuilders.termQuery; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; + +/** + * Single node integration tests for various PIT use cases such as create pit, search etc + */ +public class PitSingleNodeTests extends OpenSearchSingleNodeTestCase { + @Override + protected boolean resetNodeAfterTest() { + return true; + } + + @Override + protected Settings nodeSettings() { + // very frequent checks + return Settings.builder() + .put(super.nodeSettings()) + .put(SearchService.KEEPALIVE_INTERVAL_SETTING.getKey(), TimeValue.timeValueMillis(1)) + .put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), TimeValue.timeValueSeconds(1)) + .build(); + } + 
+ public void testCreatePITSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertHitCount(searchResponse, 1); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + service.doClose(); // this kills the keep-alive reaper we have to reset the node after this test + } + + public void testCreatePITWithMultipleIndicesSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + createIndex("index1", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + SearchService service = getInstanceFromNode(SearchService.class); + + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + 
CreatePitResponse response = execute.get(); + assertEquals(4, response.getSuccessfulShards()); + assertEquals(4, service.getActiveContexts()); + service.doClose(); + } + + public void testCreatePITWithShardReplicasSuccess() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchResponse searchResponse = client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + assertHitCount(searchResponse, 1); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + service.doClose(); + } + + public void testCreatePITWithNonExistentIndex() { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index", "index1" }); + SearchService service = getInstanceFromNode(SearchService.class); + + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue(ex.getMessage().contains("no such index [index1]")); + assertEquals(0, 
service.getActiveContexts()); + service.doClose(); + } + + public void testCreatePITOnCloseIndex() { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + client().admin().indices().prepareClose("index").get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue(ex.getMessage().contains("IndexClosedException")); + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testPitSearchOnDeletedIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + client().admin().indices().prepareDelete("index").get(); + + IndexNotFoundException ex = expectThrows(IndexNotFoundException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.getMessage().contains("no such index [index]")); + SearchService service = 
getInstanceFromNode(SearchService.class); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testInvalidPitId() { + createIndex("idx"); + String id = "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1"; + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(id).setKeepAlive(TimeValue.timeValueDays(1))) + .get() + ); + assertEquals("invalid id: [" + id + "]", e.getMessage()); + } + + public void testPitSearchOnCloseIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + client().admin().indices().prepareClose("index").get(); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { + SearchResponse searchResponse = client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + + // PIT reader contexts are lost after close, verifying it with open index api + client().admin().indices().prepareOpen("index").get(); + ex = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new 
PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testSearchWithFirstPhaseKeepAliveExpiry() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMillis(100), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + // since first phase temporary keep alive is set at 1 second in this test file + // and create pit request keep alive is less than that, keep alive is set to 1 second, (max of 2 keep alives) + // so reader context will clear up after 1 second + Thread.sleep(1000); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + assertEquals(0, service.getActiveContexts()); + service.doClose(); + } + + public void testSearchWithPitSecondPhaseKeepAliveExpiry() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 
0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueSeconds(2), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + Thread.sleep(1000); + assertEquals(2, service.getActiveContexts()); + Thread.sleep(1500); + assertEquals(0, service.getActiveContexts()); + client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + service.doClose(); + } + + public void testSearchWithPitKeepAliveExtension() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueSeconds(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueSeconds(3))) + .get(); + 
client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + Thread.sleep(2500); + assertEquals(2, service.getActiveContexts()); + Thread.sleep(1000); + assertEquals(0, service.getActiveContexts()); + SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { + client().prepareSearch("index") + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueMinutes(1))) + .get(); + }); + assertTrue(ex.shardFailures()[0].reason().contains("SearchContextMissingException")); + service.doClose(); + } + + public void testMaxOpenPitContexts() throws Exception { + createIndex("index"); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + + for (int i = 0; i < SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); i++) { + client().execute(CreatePitAction.INSTANCE, request).get(); + } + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + ExecutionException ex = expectThrows(ExecutionException.class, execute::get); + + assertTrue( + ex.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [search.max_open_pit_context] setting." 
+ ) + ); + service.doClose(); + } + + public void testOpenPitContextsConcurrently() throws Exception { + createIndex("index"); + final int maxPitContexts = SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY); + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + SearchService service = getInstanceFromNode(SearchService.class); + Thread[] threads = new Thread[randomIntBetween(2, 8)]; + CountDownLatch latch = new CountDownLatch(threads.length); + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (;;) { + try { + client().execute(CreatePitAction.INSTANCE, request).get(); + } catch (ExecutionException e) { + assertTrue( + e.getMessage() + .contains( + "Trying to create too many Point In Time contexts. " + + "Must be less than or equal to: [" + + SearchService.MAX_OPEN_PIT_CONTEXT.get(Settings.EMPTY) + + "]. " + + "This limit can be set by changing the [" + + SearchService.MAX_OPEN_PIT_CONTEXT.getKey() + + "] setting." 
+ ) + ); + return; + } + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + assertThat(service.getActiveContexts(), equalTo(maxPitContexts)); + service.doClose(); + } + + /** + * Point in time search should return the same results as creation time and index updates should not affect the PIT search results + */ + public void testPitAfterUpdateIndex() throws Exception { + client().admin().indices().prepareCreate("test").setSettings(Settings.builder().put("index.number_of_shards", 5)).get(); + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().get(); + + for (int i = 0; i < 50; i++) { + client().prepareIndex("test") + .setId(Integer.toString(i)) + .setSource( + jsonBuilder().startObject() + .field("user", "foobar") + .field("postDate", System.currentTimeMillis()) + .field("message", "test") + .endObject() + ) + .get(); + } + client().admin().indices().prepareRefresh().get(); + + // create pit + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueMinutes(2), true); + request.setIndices(new String[] { "test" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + SearchService service = getInstanceFromNode(SearchService.class); + + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(matchAllQuery()) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new 
PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + + // update index + SearchResponse searchResponse = client().prepareSearch() + .setQuery(queryStringQuery("user:foobar")) + .setSize(50) + .addSort("postDate", SortOrder.ASC) + .get(); + try { + do { + for (SearchHit searchHit : searchResponse.getHits().getHits()) { + Map map = searchHit.getSourceAsMap(); + map.put("message", "update"); + client().prepareIndex("test").setId(searchHit.getId()).setSource(map).get(); + } + searchResponse = client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get(); + + } while (searchResponse.getHits().getHits().length > 0); + + client().admin().indices().prepareRefresh().get(); + assertThat( + client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + /** + * assert without point in time + */ + + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get().getHits().getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "test")).get().getHits().getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + 
client().prepareSearch().setSize(0).setQuery(termQuery("message", "update")).get().getHits().getTotalHits().value, + Matchers.equalTo(50L) + ); + /** + * using point in time id will have the same search results as ones before update + */ + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "test")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(50L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + assertThat( + client().prepareSearch() + .setPointInTime(new PointInTimeBuilder(pitResponse.getId())) + .setSize(0) + .setQuery(termQuery("message", "update")) + .get() + .getHits() + .getTotalHits().value, + Matchers.equalTo(0L) + ); + } finally { + service.doClose(); + assertEquals(0, service.getActiveContexts()); + } + } + + public void testConcurrentSearches() throws Exception { + createIndex("index", Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + Thread[] threads = new Thread[5]; + CountDownLatch latch = new CountDownLatch(threads.length); + + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> 
{ + latch.countDown(); + try { + latch.await(); + for (int j = 0; j < 50; j++) { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .execute() + .get(); + } + } catch (Exception e) { + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + for (Thread thread : threads) { + thread.join(); + } + + SearchService service = getInstanceFromNode(SearchService.class); + assertEquals(2, service.getActiveContexts()); + service.doClose(); + assertEquals(0, service.getActiveContexts()); + } +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 4e342875e4599..aca537ab07a29 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -46,6 +46,8 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; @@ -1406,12 +1408,108 @@ public void testOpenReaderContext() { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); - searchService.openReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); future.actionGet(); assertThat(searchService.getActiveContexts(), equalTo(1)); 
assertTrue(searchService.freeReaderContext(future.actionGet())); } + public void testPitContextMaxKeepAlive() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueHours(25), future); + future.actionGet(); + }); + assertEquals( + "Keep alive for request (1d) is too large. " + + "It must be less than (" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.get(Settings.EMPTY) + + "). " + + "This limit can be set by changing the [" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting.", + ex.getMessage() + ); + assertThat(searchService.getActiveContexts(), equalTo(0)); + } + + public void testUpdatePitId() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + ShardSearchContextId id = future.actionGet(); + PlainActionFuture updateFuture = new PlainActionFuture<>(); + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueMinutes(between(1, 10)).millis(), + System.currentTimeMillis() + ); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + UpdatePitContextResponse updateResponse = updateFuture.actionGet(); + assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); + assertTrue(updateResponse.getCreationTime() == updateRequest.getCreationTime()); + assertTrue(updateResponse.getKeepAlive() == updateRequest.getKeepAlive()); + assertTrue(updateResponse.getPitId().equalsIgnoreCase("pitId")); + assertThat(searchService.getActiveContexts(), equalTo(1)); + 
assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + public void testUpdatePitIdMaxKeepAlive() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + ShardSearchContextId id = future.actionGet(); + + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueHours(25).millis(), + System.currentTimeMillis() + ); + IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> { + PlainActionFuture updateFuture = new PlainActionFuture<>(); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + }); + + assertEquals( + "Keep alive for request (1d) is too large. " + + "It must be less than (" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.get(Settings.EMPTY) + + "). 
" + + "This limit can be set by changing the [" + + SearchService.MAX_PIT_KEEPALIVE_SETTING.getKey() + + "] cluster level setting.", + ex.getMessage() + ); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertTrue(searchService.freeReaderContext(future.actionGet())); + } + + public void testUpdatePitIdWithInvalidReaderId() { + SearchService searchService = getInstanceFromNode(SearchService.class); + ShardSearchContextId id = new ShardSearchContextId("session", 9); + + UpdatePitContextRequest updateRequest = new UpdatePitContextRequest( + id, + "pitId", + TimeValue.timeValueHours(23).millis(), + System.currentTimeMillis() + ); + SearchContextMissingException ex = expectThrows(SearchContextMissingException.class, () -> { + PlainActionFuture updateFuture = new PlainActionFuture<>(); + searchService.updatePitIdAndKeepAlive(updateRequest, updateFuture); + }); + + assertEquals("No search context found for id [" + id.getId() + "]", ex.getMessage()); + assertThat(searchService.getActiveContexts(), equalTo(0)); + } + private ReaderContext createReaderContext(IndexService indexService, IndexShard indexShard) { return new ReaderContext( new ShardSearchContextId(UUIDs.randomBase64UUID(), randomNonNegativeLong()), diff --git a/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java new file mode 100644 index 0000000000000..5ca384daedbff --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestCreatePitActionTests.java @@ -0,0 +1,78 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.HashMap; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +/** + * Tests to verify behavior of create pit rest action + */ +public class RestCreatePitActionTests extends OpenSearchTestCase { + public void testRestCreatePit() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertFalse(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + params.put("allow_partial_pit_creation", "false"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } + + public void testRestCreatePitDefaultPartialCreation() throws Exception { + SetOnce createPitCalled = new SetOnce<>(); + RestCreatePitAction action = new RestCreatePitAction(); + try (NodeClient nodeClient = new 
NoOpNodeClient(this.getTestName()) { + @Override + public void createPit(CreatePitRequest request, ActionListener listener) { + createPitCalled.set(true); + assertThat(request.getKeepAlive().getStringRep(), equalTo("1m")); + assertTrue(request.shouldAllowPartialPitCreation()); + } + }) { + Map params = new HashMap<>(); + params.put("keep_alive", "1m"); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams(params) + .withMethod(RestRequest.Method.POST) + .build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(createPitCalled.get(), equalTo(true)); + } + } +} From 25cc2b14955bf87fc051b06ad736cb71cc6dcb9f Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Thu, 19 May 2022 12:11:23 +0530 Subject: [PATCH 02/75] Delete PIT changes Signed-off-by: Bharathwaj G --- .../opensearch/client/RequestConverters.java | 12 + .../client/RestHighLevelClient.java | 82 +++ .../java/org/opensearch/client/PitIT.java | 29 +- .../client/RequestConvertersTests.java | 22 + .../java/org/opensearch/client/SearchIT.java | 13 +- .../rest-api-spec/api/delete_all_pits.json | 19 + .../rest-api-spec/api/delete_pit.json | 22 + .../org/opensearch/action/ActionModule.java | 6 + .../action/search/CreatePitController.java | 2 +- .../action/search/DeletePitAction.java | 25 + .../action/search/DeletePitRequest.java | 136 ++++ .../action/search/DeletePitResponse.java | 91 +++ .../action/search/SearchTransportService.java | 75 +++ .../opensearch/action/search/SearchUtils.java | 58 ++ .../search/TransportDeletePitAction.java | 122 ++++ .../java/org/opensearch/client/Client.java | 7 + .../client/support/AbstractClient.java | 8 + .../action/search/RestDeletePitAction.java | 59 ++ .../org/opensearch/search/SearchService.java | 34 + .../search/CreatePitControllerTests.java | 68 +- .../action/search/PitTestsUtil.java | 84 +++ .../search/TransportDeletePitActionTests.java | 615 ++++++++++++++++++ 
...ests.java => CreatePitMultiNodeTests.java} | 2 +- ...sts.java => CreatePitSingleNodeTests.java} | 9 +- .../search/DeletePitMultiNodeTests.java | 204 ++++++ .../opensearch/search/SearchServiceTests.java | 27 + .../search/pit/RestDeletePitActionTests.java | 133 ++++ 27 files changed, 1894 insertions(+), 70 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitAction.java create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitRequest.java create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitResponse.java create mode 100644 server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java create mode 100644 server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java create mode 100644 server/src/test/java/org/opensearch/action/search/PitTestsUtil.java create mode 100644 server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java rename server/src/test/java/org/opensearch/search/{PitMultiNodeTests.java => CreatePitMultiNodeTests.java} (99%) rename server/src/test/java/org/opensearch/search/{PitSingleNodeTests.java => CreatePitSingleNodeTests.java} (98%) create mode 100644 server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java create mode 100644 server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 277759c921fbf..b872ee21f9927 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -55,6 +55,7 @@ import 
org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -477,6 +478,17 @@ static Request createPit(CreatePitRequest createPitRequest) throws IOException { return request; } + static Request deletePit(DeletePitRequest deletePitRequest) throws IOException { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time"); + request.setEntity(createEntity(deletePitRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request deleteAllPits(DeletePitRequest deletePitRequest) { + Request request = new Request(HttpDelete.METHOD_NAME, "/_search/point_in_time/_all"); + return request; + } + static Request clearScroll(ClearScrollRequest clearScrollRequest) throws IOException { Request request = new Request(HttpDelete.METHOD_NAME, "/_search/scroll"); request.setEntity(createEntity(clearScrollRequest, REQUEST_BODY_CONTENT_TYPE)); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index f3360630a26b7..4cb3ff999f793 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -61,6 +61,8 @@ import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; 
import org.opensearch.action.search.SearchRequest; @@ -1298,6 +1300,86 @@ public final Cancellable createPitAsync( ); } + /** + * Delete PIT context using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Delete PIT context using delete PIT API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable deletePitAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deletePit, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + + /** + * Delete all PIT contexts using delete all PITs API + * + * @param deletePitRequest the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + */ + public final DeletePitResponse deleteAllPits(DeletePitRequest deletePitRequest, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + deletePitRequest, + RequestConverters::deleteAllPits, + options, + DeletePitResponse::fromXContent, + emptySet() + ); + } + + /** + * Asynchronously Delete all PIT contexts using delete all PITs API + * + * @param deletePitRequest the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + * @return the response + */ + public final Cancellable deleteAllPitsAsync( + DeletePitRequest deletePitRequest, + RequestOptions options, + ActionListener listener + ) { + return performRequestAsyncAndParseEntity( + deletePitRequest, + RequestConverters::deleteAllPits, + options, + DeletePitResponse::fromXContent, + listener, + emptySet() + ); + } + /** * Clears one or more scroll ids using the Clear Scroll API. 
* diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index 99901eabc91aa..13b821eb7e44c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -13,9 +13,13 @@ import org.junit.Before; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.common.unit.TimeValue; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; /** @@ -51,8 +55,27 @@ public void testCreatePit() throws IOException { assertEquals(1, pitResponse.getSuccessfulShards()); assertEquals(0, pitResponse.getFailedShards()); assertEquals(0, pitResponse.getSkippedShards()); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); + assertTrue(deletePitResponse.isSucceeded()); + } + + public void testDeleteAllPits() throws IOException { + CreatePitRequest pitRequest = new CreatePitRequest(new TimeValue(1, TimeUnit.DAYS), true, "index"); + CreatePitResponse pitResponse = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + CreatePitResponse pitResponse1 = execute(pitRequest, highLevelClient()::createPit, highLevelClient()::createPitAsync); + assertTrue(pitResponse.getId() != null); + assertTrue(pitResponse1.getId() != null); + List pitIds = new ArrayList<>(); + pitIds.add("_all"); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + 
deletePitRequest, + highLevelClient()::deleteAllPits, + highLevelClient()::deleteAllPitsAsync + ); + assertTrue(deletePitResponse.isSucceeded()); } - /** - * Todo: add deletion logic and test cluster settings - */ } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 4f0b2ac0d88a1..f0a8d805c6d54 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -54,6 +54,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.search.ClearScrollRequest; import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchScrollRequest; @@ -1326,6 +1327,27 @@ public void testCreatePit() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testDeletePit() throws IOException { + List pitIds = new ArrayList<>(); + pitIds.add("pitid1"); + pitIds.add("pitid2"); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + Request request = RequestConverters.deletePit(deletePitRequest); + String endpoint = "/_search/point_in_time"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertToXContentBody(deletePitRequest, request.getEntity()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + } + + public void testDeleteAllPits() { + DeletePitRequest deletePitRequest = new DeletePitRequest(); + Request request = 
RequestConverters.deleteAllPits(deletePitRequest); + String endpoint = "/_search/point_in_time/_all"; + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + } + public void testSearchTemplate() throws Exception { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 01a7f892c80a1..e0b7ed81d405c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -45,6 +45,8 @@ import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchResponse; import org.opensearch.action.search.SearchRequest; @@ -103,6 +105,7 @@ import org.junit.Before; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -793,7 +796,15 @@ public void testSearchWithPit() throws Exception { assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++)); } } finally { - // TODO : Delete PIT + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); + DeletePitResponse deletePitResponse = execute( + deletePitRequest, + highLevelClient()::deletePit, + highLevelClient()::deletePitAsync + ); + assertTrue(deletePitResponse.isSucceeded()); } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json 
b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json new file mode 100644 index 0000000000000..0ee8d54dfcc59 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -0,0 +1,19 @@ +{ + "delete_all_pits":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes all active point in time contexts." + }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time/_all", + "methods":[ + "DELETE" + ] + } + ] + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json new file mode 100644 index 0000000000000..0f19e36fa4bc7 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -0,0 +1,22 @@ +{ + "delete_pit":{ + "documentation":{ + "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", + "description":"Deletes one or more point in time contexts." 
+ }, + "stability":"stable", + "url":{ + "paths":[ + { + "path":"/_search/point_in_time", + "methods":[ + "DELETE" + ] + } + ] + }, + "body":{ + "description":"A comma-separated list of pit IDs to clear" + } + } +} diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 36b7aeea3d262..4ad1c5abf0ca4 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -233,11 +233,13 @@ import org.opensearch.action.main.TransportMainAction; import org.opensearch.action.search.ClearScrollAction; import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.DeletePitAction; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.SearchAction; import org.opensearch.action.search.SearchScrollAction; import org.opensearch.action.search.TransportClearScrollAction; import org.opensearch.action.search.TransportCreatePitAction; +import org.opensearch.action.search.TransportDeletePitAction; import org.opensearch.action.search.TransportMultiSearchAction; import org.opensearch.action.search.TransportSearchAction; import org.opensearch.action.search.TransportSearchScrollAction; @@ -399,6 +401,7 @@ import org.opensearch.rest.action.search.RestClearScrollAction; import org.opensearch.rest.action.search.RestCountAction; import org.opensearch.rest.action.search.RestCreatePitAction; +import org.opensearch.rest.action.search.RestDeletePitAction; import org.opensearch.rest.action.search.RestExplainAction; import org.opensearch.rest.action.search.RestMultiSearchAction; import org.opensearch.rest.action.search.RestSearchAction; @@ -660,6 +663,7 @@ public void reg actions.register(DeleteDanglingIndexAction.INSTANCE, TransportDeleteDanglingIndexAction.class); actions.register(FindDanglingIndexAction.INSTANCE, TransportFindDanglingIndexAction.class); 
actions.register(CreatePitAction.INSTANCE, TransportCreatePitAction.class); + actions.register(DeletePitAction.INSTANCE, TransportDeletePitAction.class); return unmodifiableMap(actions.getRegistry()); } @@ -835,6 +839,8 @@ public void initRestHandlers(Supplier nodesInCluster) { // Point in time API registerHandler.accept(new RestCreatePitAction()); + registerHandler.accept(new RestDeletePitAction()); + for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( settings, diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 3d2ecc8b695c6..3aa7b60f18fc3 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -268,6 +268,6 @@ public void onFailure(Exception e) { logger.error("Cleaning up PIT contexts failed ", e); } }; - ClearScrollController.closeContexts(clusterService.state().getNodes(), searchTransportService, contexts, deleteListener); + SearchUtils.deletePits(contexts, deleteListener, clusterService.state(), searchTransportService); } } diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitAction.java b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java new file mode 100644 index 0000000000000..73932886121c5 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java @@ -0,0 +1,25 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionType; + +/** + * Action type for deleting PIT reader contexts + */ +public class DeletePitAction extends ActionType { + + public static final DeletePitAction INSTANCE = new DeletePitAction(); + public static final String NAME = "indices:admin/read/pit/delete"; + + private DeletePitAction() { + super(NAME, DeletePitResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java new file mode 100644 index 0000000000000..26b49f53aadac --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -0,0 +1,136 @@ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionRequest; +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.opensearch.action.ValidateActions.addValidationError; + +/** + * Request to delete one or more PIT contexts based on IDs. 
+ */ +public class DeletePitRequest extends ActionRequest implements ToXContentObject { + + /** + * List of PIT IDs to be deleted , and use "_all" to delete all PIT reader contexts + */ + private List pitIds; + + public DeletePitRequest(StreamInput in) throws IOException { + super(in); + pitIds = Arrays.asList(in.readStringArray()); + } + + public DeletePitRequest(String... pitIds) { + if (pitIds != null) { + this.pitIds = Arrays.asList(pitIds); + } + } + + public DeletePitRequest(List pitIds) { + if (pitIds != null) { + this.pitIds = pitIds; + } + } + + public DeletePitRequest() {} + + public List getPitIds() { + return pitIds; + } + + public void setPitIds(List pitIds) { + this.pitIds = pitIds; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = null; + if (pitIds == null || pitIds.isEmpty()) { + validationException = addValidationError("no pit ids specified", validationException); + } + return validationException; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + if (pitIds == null) { + out.writeVInt(0); + } else { + out.writeStringArray(pitIds.toArray(new String[pitIds.size()])); + } + } + + public void addPitId(String pitId) { + if (pitIds == null) { + pitIds = new ArrayList<>(); + } + pitIds.add(pitId); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.startArray("pit_id"); + for (String pitId : pitIds) { + builder.value(pitId); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + public void fromXContent(XContentParser parser) throws IOException { + pitIds = null; + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new IllegalArgumentException("Malformed content, must start with an object"); + } else { + XContentParser.Token token; + String currentFieldName = null; + while 
((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if ("pit_id".equals(currentFieldName)) { + if (token == XContentParser.Token.START_ARRAY) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id array element should only contain pit_id"); + } + addPitId(parser.text()); + } + } else { + if (token.isValue() == false) { + throw new IllegalArgumentException("pit_id element should only contain pit_id"); + } + addPitId(parser.text()); + } + } else { + throw new IllegalArgumentException( + "Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] " + ); + } + } + } + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java new file mode 100644 index 0000000000000..44fef162af623 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -0,0 +1,91 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.search; + +import org.opensearch.action.ActionResponse; +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.StatusToXContentObject; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.rest.RestStatus; + +import java.io.IOException; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.opensearch.rest.RestStatus.NOT_FOUND; +import static org.opensearch.rest.RestStatus.OK; + +/** + * Response class for delete pit flow which returns if the contexts are freed + */ +public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { + + /** + * This will be true if all PIT reader contexts are deleted. + */ + private final boolean succeeded; + + public DeletePitResponse(boolean succeeded) { + this.succeeded = succeeded; + } + + public DeletePitResponse(StreamInput in) throws IOException { + super(in); + succeeded = in.readBoolean(); + } + + /** + * @return Whether the attempt to delete PIT was successful. + */ + public boolean isSucceeded() { + return succeeded; + } + + @Override + public RestStatus status() { + return succeeded ? 
OK : NOT_FOUND; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(succeeded); + } + + private static final ParseField SUCCEEDED = new ParseField("succeeded"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit", + true, + a -> new DeletePitResponse((boolean) a[0]) + ); + static { + PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SUCCEEDED, ObjectParser.ValueType.BOOLEAN); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(SUCCEEDED.getPreferredName(), succeeded); + builder.endObject(); + return builder; + } + + /** + * Parse the delete PIT response body into a new {@link DeletePitResponse} object + */ + public static DeletePitResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index f41bd7938b3b6..a12ebe2837938 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -85,8 +85,10 @@ public class SearchTransportService { public static final String FREE_CONTEXT_SCROLL_ACTION_NAME = "indices:data/read/search[free_context/scroll]"; + public static final String FREE_PIT_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context/pit]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]"; + public static final String FREE_ALL_PIT_CONTEXTS_ACTION_NAME = "indices:data/read/search[delete_pit_contexts]"; public static final 
String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]"; public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; @@ -200,6 +202,30 @@ public void sendClearAllScrollContexts(Transport.Connection connection, final Ac ); } + public void sendFreePITContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + transportService.sendRequest( + connection, + FREE_PIT_CONTEXT_ACTION_NAME, + new PitFreeContextRequest(contextId), + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new) + ); + } + + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + transportService.sendRequest( + connection, + FREE_ALL_PIT_CONTEXTS_ACTION_NAME, + TransportRequest.Empty.INSTANCE, + TransportRequestOptions.EMPTY, + new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new) + ); + } + public void sendExecuteDfs( Transport.Connection connection, final ShardSearchRequest request, @@ -370,6 +396,32 @@ public ShardSearchContextId id() { } + /** + * Request to free the PIT context based on id + */ + static class PitFreeContextRequest extends TransportRequest { + private ShardSearchContextId contextId; + + PitFreeContextRequest(ShardSearchContextId contextId) { + this.contextId = Objects.requireNonNull(contextId); + } + + PitFreeContextRequest(StreamInput in) throws IOException { + super(in); + contextId = new ShardSearchContextId(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + contextId.writeTo(out); + } + + public ShardSearchContextId id() { + return this.contextId; + } + } + /** * A search free context request * @@ -454,6 +506,29 @@ public static void registerRequestHandler(TransportService transportService, Sea } ); 
TransportActionProxy.registerProxyAction(transportService, FREE_CONTEXT_SCROLL_ACTION_NAME, SearchFreeContextResponse::new); + + transportService.registerRequestHandler( + FREE_PIT_CONTEXT_ACTION_NAME, + ThreadPool.Names.SAME, + PitFreeContextRequest::new, + (request, channel, task) -> { + boolean freed = searchService.freeReaderContextIfFound(request.id()); + channel.sendResponse(new SearchFreeContextResponse(freed)); + } + ); + TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new); + + transportService.registerRequestHandler( + FREE_ALL_PIT_CONTEXTS_ACTION_NAME, + ThreadPool.Names.SAME, + TransportRequest.Empty::new, + (request, channel, task) -> { + boolean freed = searchService.freeAllPitContexts(); + channel.sendResponse(new SearchFreeContextResponse(freed)); + } + ); + TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, SearchFreeContextResponse::new); + transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java index 148d1645568b1..7d40978e0be43 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchUtils.java +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -8,18 +8,28 @@ package org.opensearch.action.search; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; +import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Strings; import org.opensearch.transport.RemoteClusterService; +import org.opensearch.transport.Transport; +import 
java.util.Collection; import java.util.Set; import java.util.function.BiFunction; +import java.util.stream.Collectors; /** * Helper class for common search functions */ public class SearchUtils { + private static final Logger logger = LogManager.getLogger(SearchUtils.class); public SearchUtils() {} @@ -40,4 +50,52 @@ public static StepListener> getConnect } return lookupListener; } + + /** + * Delete list of pits, return success if all reader contexts are deleted ( or not found ). + */ + public static void deletePits( + Collection contexts, + ActionListener listener, + ClusterState state, + SearchTransportService searchTransportService + ) { + final Set clusters = contexts.stream() + .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) + .map(SearchContextIdForNode::getClusterAlias) + .collect(Collectors.toSet()); + StepListener> lookupListener = getConnectionLookupListener( + searchTransportService.getRemoteClusterService(), + state, + clusters + ); + lookupListener.whenComplete(nodeLookup -> { + final GroupedActionListener groupedListener = new GroupedActionListener<>( + ActionListener.delegateFailure( + listener, + (l, result) -> l.onResponse(Math.toIntExact(result.stream().filter(r -> r).count())) + ), + contexts.size() + ); + + for (SearchContextIdForNode contextId : contexts) { + final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); + if (node == null) { + groupedListener.onFailure(new OpenSearchException("node not found")); + } else { + try { + final Transport.Connection connection = searchTransportService.getConnection(contextId.getClusterAlias(), node); + searchTransportService.sendFreePITContext( + connection, + contextId.getSearchContextId(), + ActionListener.wrap(r -> groupedListener.onResponse(r.isFreed()), e -> groupedListener.onResponse(false)) + ); + } catch (Exception e) { + logger.debug("Delete PIT failed ", e); + groupedListener.onResponse(false); + } + } + } + }, listener::onFailure); + } } 
diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java new file mode 100644 index 0000000000000..334029ca60cf3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -0,0 +1,122 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.GroupedActionListener; +import org.opensearch.action.support.HandledTransportAction; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.tasks.Task; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +/** + * Transport action for deleting pit reader context - supports deleting list and all pit contexts + */ +public class TransportDeletePitAction extends HandledTransportAction { + private final NamedWriteableRegistry namedWriteableRegistry; + private TransportSearchAction transportSearchAction; + private final ClusterService clusterService; + private final SearchTransportService searchTransportService; + private static final Logger logger = LogManager.getLogger(TransportDeletePitAction.class); + + @Inject + public TransportDeletePitAction( + TransportService transportService, + 
ActionFilters actionFilters, + NamedWriteableRegistry namedWriteableRegistry, + TransportSearchAction transportSearchAction, + ClusterService clusterService, + SearchTransportService searchTransportService + ) { + super(DeletePitAction.NAME, transportService, actionFilters, DeletePitRequest::new); + this.namedWriteableRegistry = namedWriteableRegistry; + this.transportSearchAction = transportSearchAction; + this.clusterService = clusterService; + this.searchTransportService = searchTransportService; + } + + /** + * Invoke 'delete all pits' or 'delete list of pits' workflow based on request + */ + @Override + protected void doExecute(Task task, DeletePitRequest request, ActionListener listener) { + List pitIds = request.getPitIds(); + if (pitIds.size() == 1 && "_all".equals(pitIds.get(0))) { + deleteAllPits(listener); + } else { + deletePits(listener, request); + } + } + + /** + * Deletes list of pits, return success if all reader contexts are deleted ( or not found ). + */ + private void deletePits(ActionListener listener, DeletePitRequest request) { + List contexts = new ArrayList<>(); + for (String pitId : request.getPitIds()) { + SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, pitId); + contexts.addAll(contextId.shards().values()); + } + ActionListener deleteListener = ActionListener.wrap(r -> { + if (r == contexts.size()) { + listener.onResponse(new DeletePitResponse(true)); + } else { + logger.debug(() -> new ParameterizedMessage("Delete PITs failed. 
" + "Cleared {} contexts out of {}", r, contexts.size())); + listener.onResponse(new DeletePitResponse(false)); + } + }, e -> { + logger.debug("Delete PITs failed ", e); + listener.onResponse(new DeletePitResponse(false)); + }); + SearchUtils.deletePits(contexts, deleteListener, clusterService.state(), searchTransportService); + } + + /** + * Delete all active PIT reader contexts + */ + private void deleteAllPits(ActionListener listener) { + int size = clusterService.state().getNodes().getSize(); + ActionListener groupedActionListener = new GroupedActionListener( + new ActionListener<>() { + @Override + public void onResponse(final Collection responses) { + boolean hasFailures = responses.stream().anyMatch(r -> !r.isFreed()); + listener.onResponse(new DeletePitResponse(!hasFailures)); + } + + @Override + public void onFailure(final Exception e) { + logger.debug("Delete all PITs failed ", e); + listener.onResponse(new DeletePitResponse(false)); + } + }, + size + ); + for (final DiscoveryNode node : clusterService.state().getNodes()) { + try { + Transport.Connection connection = searchTransportService.getConnection(null, node); + searchTransportService.sendFreeAllPitContexts(connection, groupedActionListener); + } catch (Exception e) { + groupedActionListener.onFailure(e); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/client/Client.java b/server/src/main/java/org/opensearch/client/Client.java index a73f8200ab277..1d3bbfcba43f9 100644 --- a/server/src/main/java/org/opensearch/client/Client.java +++ b/server/src/main/java/org/opensearch/client/Client.java @@ -60,6 +60,8 @@ import org.opensearch.action.search.ClearScrollResponse; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchRequest; import 
org.opensearch.action.search.MultiSearchRequestBuilder; import org.opensearch.action.search.MultiSearchResponse; @@ -332,6 +334,11 @@ public interface Client extends OpenSearchClient, Releasable { */ void createPit(CreatePitRequest createPITRequest, ActionListener listener); + /** + * Delete one or more point in time contexts + */ + void deletePits(DeletePitRequest deletePITRequest, ActionListener listener); + /** * Performs multiple search requests. */ diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6cc0827310bd1..f99454a8a8913 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -327,6 +327,9 @@ import org.opensearch.action.search.CreatePitAction; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; import org.opensearch.action.search.MultiSearchAction; import org.opensearch.action.search.MultiSearchRequest; import org.opensearch.action.search.MultiSearchRequestBuilder; @@ -582,6 +585,11 @@ public void createPit(final CreatePitRequest createPITRequest, final ActionListe execute(CreatePitAction.INSTANCE, createPITRequest, listener); } + @Override + public void deletePits(final DeletePitRequest deletePITRequest, final ActionListener listener) { + execute(DeletePitAction.INSTANCE, deletePITRequest, listener); + } + @Override public ActionFuture multiSearch(MultiSearchRequest request) { return execute(MultiSearchAction.INSTANCE, request); diff --git a/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java new file mode 
100644 index 0000000000000..e225dd2fed2b3 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/search/RestDeletePitAction.java @@ -0,0 +1,59 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.search; + +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestStatusToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Rest action for deleting PIT contexts + */ +public class RestDeletePitAction extends BaseRestHandler { + + @Override + public String getName() { + return "delete_pit_action"; + } + + @Override + public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + String allPitIdsQualifier = "_all"; + DeletePitRequest deletePITRequest = new DeletePitRequest(); + if (request.path().contains(allPitIdsQualifier)) { + deletePITRequest.setPitIds(asList(allPitIdsQualifier)); + } else { + request.withContentOrSourceParamParserOrNull((xContentParser -> { + if (xContentParser != null) { + try { + deletePITRequest.fromXContent(xContentParser); + } catch (IOException e) { + throw new IllegalArgumentException("Failed to parse request body", e); + } + } + })); + } + return channel -> client.deletePits(deletePITRequest, new RestStatusToXContentListener(channel)); + } + + @Override + public List routes() { + return unmodifiableList(asList(new Route(DELETE, "/_search/point_in_time"), new Route(DELETE, 
"/_search/point_in_time/_all"))); + } +} diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index 0f7a39a31535e..b3b22368bb665 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; +import org.apache.lucene.util.SetOnce; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; @@ -1020,6 +1021,39 @@ public boolean freeReaderContext(ShardSearchContextId contextId) { return false; } + /** + * Free reader context if found otherwise return false + */ + public boolean freeReaderContextIfFound(ShardSearchContextId contextId) { + try { + if (getReaderContext(contextId) != null) { + try (ReaderContext context = removeReaderContext(contextId.getId())) { + return context != null; + } + } + } catch (SearchContextMissingException e) { + return true; + } + return true; + } + + /** + * Free all active pit contexts + */ + public boolean freeAllPitContexts() { + final SetOnce isFreed = new SetOnce<>(); + for (ReaderContext readerContext : activeReaders.values()) { + if (readerContext instanceof PitReaderContext) { + final boolean succeeded = freeReaderContextIfFound(readerContext.id()); + if (!succeeded) { + isFreed.trySet(false); + } + } + } + isFreed.trySet(true); + return isFreed.get(); + } + /** * Update PIT reader with pit id, keep alive and created time etc */ diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index f07bbe5975535..528c8e991c3e1 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ 
b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -22,18 +22,13 @@ import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.index.query.IdsQueryBuilder; import org.opensearch.index.query.MatchAllQueryBuilder; import org.opensearch.index.query.QueryBuilder; import org.opensearch.index.query.TermQueryBuilder; -import org.opensearch.index.shard.ShardId; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; -import org.opensearch.search.SearchPhaseResult; -import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregations; -import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.InternalSearchResponse; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.tasks.Task; @@ -46,15 +41,14 @@ import org.opensearch.transport.Transport; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.List; -import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.opensearch.action.search.PitTestsUtil.getPitId; /** * Functional tests for various methods in create pit controller. 
Covers update pit phase specifically since @@ -100,7 +94,7 @@ public void setupData() { node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); - setPitId(); + pitId = getPitId(); namedWriteableRegistry = new NamedWriteableRegistry( Arrays.asList( new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), @@ -203,7 +197,7 @@ public void updatePitContext( * Test if cleanup request is called */ @Override - public void sendFreeContext( + public void sendFreePITContext( Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener @@ -298,7 +292,7 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod } @Override - public void sendFreeContext( + public void sendFreePITContext( Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener @@ -395,7 +389,7 @@ public void updatePitContext( } @Override - public void sendFreeContext( + public void sendFreePITContext( Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener @@ -485,7 +479,7 @@ public void updatePitContext( } @Override - public void sendFreeContext( + public void sendFreePITContext( Transport.Connection connection, ShardSearchContextId contextId, ActionListener listener @@ -537,56 +531,6 @@ public void onFailure(Exception e) { assertEquals(3, deleteNodesInvoked.size()); } } - - } - - QueryBuilder randomQueryBuilder() { - if (randomBoolean()) { - return new TermQueryBuilder(randomAlphaOfLength(10), randomAlphaOfLength(10)); - } else if (randomBoolean()) { - return new MatchAllQueryBuilder(); - } else { - return new IdsQueryBuilder().addIds(randomAlphaOfLength(10)); - } - } - - private void setPitId() { - AtomicArray array = new AtomicArray<>(3); - 
SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult( - new ShardSearchContextId("a", 1), - node1 - ); - testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); - SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult( - new ShardSearchContextId("b", 12), - node2 - ); - testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); - SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult( - new ShardSearchContextId("c", 42), - node3 - ); - testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); - array.setOnce(0, testSearchPhaseResult1); - array.setOnce(1, testSearchPhaseResult2); - array.setOnce(2, testSearchPhaseResult3); - - final Version version = Version.CURRENT; - final Map aliasFilters = new HashMap<>(); - for (SearchPhaseResult result : array.asList()) { - final AliasFilter aliasFilter; - if (randomBoolean()) { - aliasFilter = new AliasFilter(randomQueryBuilder()); - } else if (randomBoolean()) { - aliasFilter = new AliasFilter(randomQueryBuilder(), "alias-" + between(1, 10)); - } else { - aliasFilter = AliasFilter.EMPTY; - } - if (randomBoolean()) { - aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); - } - } - pitId = SearchContextId.encode(array.asList(), aliasFilters, version); } } diff --git a/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java new file mode 100644 index 0000000000000..ec83cb45697d9 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/PitTestsUtil.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.Version; +import org.opensearch.common.util.concurrent.AtomicArray; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.index.shard.ShardId; +import org.opensearch.search.SearchPhaseResult; +import org.opensearch.search.SearchShardTarget; +import org.opensearch.search.internal.AliasFilter; +import org.opensearch.search.internal.ShardSearchContextId; + +import java.util.HashMap; +import java.util.Map; + +import static org.opensearch.test.OpenSearchTestCase.between; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; +import static org.opensearch.test.OpenSearchTestCase.randomBoolean; + +/** + * Helper class for common pit tests functions + */ +public class PitTestsUtil { + private PitTestsUtil() {} + + public static QueryBuilder randomQueryBuilder() { + if (randomBoolean()) { + return new TermQueryBuilder(randomAlphaOfLength(10), randomAlphaOfLength(10)); + } else if (randomBoolean()) { + return new MatchAllQueryBuilder(); + } else { + return new IdsQueryBuilder().addIds(randomAlphaOfLength(10)); + } + } + + public static String getPitId() { + AtomicArray array = new AtomicArray<>(3); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult1 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("a", 1), + null + ); + testSearchPhaseResult1.setSearchShardTarget(new SearchShardTarget("node_1", new ShardId("idx", "uuid1", 2), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult2 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("b", 
12), + null + ); + testSearchPhaseResult2.setSearchShardTarget(new SearchShardTarget("node_2", new ShardId("idy", "uuid2", 42), null, null)); + SearchAsyncActionTests.TestSearchPhaseResult testSearchPhaseResult3 = new SearchAsyncActionTests.TestSearchPhaseResult( + new ShardSearchContextId("c", 42), + null + ); + testSearchPhaseResult3.setSearchShardTarget(new SearchShardTarget("node_3", new ShardId("idy", "uuid2", 43), null, null)); + array.setOnce(0, testSearchPhaseResult1); + array.setOnce(1, testSearchPhaseResult2); + array.setOnce(2, testSearchPhaseResult3); + + final Version version = Version.CURRENT; + final Map aliasFilters = new HashMap<>(); + for (SearchPhaseResult result : array.asList()) { + final AliasFilter aliasFilter; + if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder()); + } else if (randomBoolean()) { + aliasFilter = new AliasFilter(randomQueryBuilder(), "alias-" + between(1, 10)); + } else { + aliasFilter = AliasFilter.EMPTY; + } + if (randomBoolean()) { + aliasFilters.put(result.getSearchShardTarget().getShardId().getIndex().getUUID(), aliasFilter); + } + } + return SearchContextId.encode(array.asList(), aliasFilters, version); + } +} diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java new file mode 100644 index 0000000000000..0a0d248a874b2 --- /dev/null +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -0,0 +1,615 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ +package org.opensearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilter; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.query.IdsQueryBuilder; +import org.opensearch.index.query.MatchAllQueryBuilder; +import org.opensearch.index.query.QueryBuilder; +import org.opensearch.index.query.TermQueryBuilder; +import org.opensearch.search.SearchHit; +import org.opensearch.search.SearchHits; +import org.opensearch.search.aggregations.InternalAggregations; +import org.opensearch.search.internal.InternalSearchResponse; +import org.opensearch.search.internal.ShardSearchContextId; +import org.opensearch.tasks.Task; +import org.opensearch.tasks.TaskId; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.RemoteClusterConnectionTests; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportResponse; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.opensearch.action.search.PitTestsUtil.getPitId; +import static 
org.opensearch.action.support.PlainActionFuture.newFuture; + +/** + * Functional tests for transport delete pit action + */ +public class TransportDeletePitActionTests extends OpenSearchTestCase { + DiscoveryNode node1 = null; + DiscoveryNode node2 = null; + DiscoveryNode node3 = null; + String pitId = null; + TransportSearchAction transportSearchAction = null; + Task task = null; + DiscoveryNodes nodes = null; + NamedWriteableRegistry namedWriteableRegistry = null; + ClusterService clusterServiceMock = null; + Settings settings = Settings.builder().put("node.name", TransportMultiSearchActionTests.class.getSimpleName()).build(); + private ThreadPool threadPool = new ThreadPool(settings); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes, Version version) { + return startTransport(id, knownNodes, version, Settings.EMPTY); + } + + private MockTransportService startTransport( + final String id, + final List knownNodes, + final Version version, + final Settings settings + ) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, version, threadPool, settings); + } + + @Before + public void setupData() { + node1 = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT); + node2 = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT); + node3 = new DiscoveryNode("node_3", buildNewFakeTransportAddress(), Version.CURRENT); + pitId = getPitId(); + namedWriteableRegistry = new NamedWriteableRegistry( + Arrays.asList( + new NamedWriteableRegistry.Entry(QueryBuilder.class, TermQueryBuilder.NAME, TermQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, MatchAllQueryBuilder.NAME, MatchAllQueryBuilder::new), + new NamedWriteableRegistry.Entry(QueryBuilder.class, IdsQueryBuilder.NAME, IdsQueryBuilder::new) + ) + ); + nodes = 
DiscoveryNodes.builder().add(node1).add(node2).add(node3).build(); + transportSearchAction = mock(TransportSearchAction.class); + task = new Task( + randomLong(), + "transport", + SearchAction.NAME, + "description", + new TaskId(randomLong() + ":" + randomLong()), + Collections.emptyMap() + ); + InternalSearchResponse response = new InternalSearchResponse( + new SearchHits(new SearchHit[0], new TotalHits(0, TotalHits.Relation.EQUAL_TO), Float.NaN), + InternalAggregations.EMPTY, + null, + null, + false, + null, + 1 + ); + + clusterServiceMock = mock(ClusterService.class); + ClusterState state = mock(ClusterState.class); + + final Settings keepAliveSettings = Settings.builder().put(CreatePitController.PIT_INIT_KEEP_ALIVE.getKey(), 30000).build(); + when(clusterServiceMock.getSettings()).thenReturn(keepAliveSettings); + + when(state.getMetadata()).thenReturn(Metadata.EMPTY_METADATA); + when(state.metadata()).thenReturn(Metadata.EMPTY_METADATA); + when(clusterServiceMock.state()).thenReturn(state); + when(state.getNodes()).thenReturn(nodes); + } + + /** + * Test if transport call for update pit is made to all nodes present as part of PIT ID returned from phase one of create pit + */ + public void testDeletePitSuccess() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + 
Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(true, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + + } + } + } + + public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = 
MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(true, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + + } + } + } + + public void testDeletePitWhenNodeIsDown() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = 
MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + + if (connection.getNode().getId() == "node_3") { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(false, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeletePitWhenAllNodesAreDown() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + 
knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void sendFreePITContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(false, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeletePitFailure() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) 
{ + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreePITContext( + Transport.Connection connection, + ShardSearchContextId contextId, + ActionListener listener + ) { + deleteNodesInvoked.add(connection.getNode()); + + if (connection.getNode().getId() == "node_3") { + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(false))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(false, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitWhenNodeIsDown() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + 
MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + @Override + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId() == "node_3") { + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(false, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitWhenAllNodesAreDown() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters 
actionFilters = mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + @Override + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); + t.start(); + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(false, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + + public void testDeleteAllPitFailure() throws InterruptedException, ExecutionException { + List deleteNodesInvoked = new CopyOnWriteArrayList<>(); + ActionFilters actionFilters = 
mock(ActionFilters.class); + when(actionFilters.filters()).thenReturn(new ActionFilter[0]); + + List knownNodes = new CopyOnWriteArrayList<>(); + try ( + MockTransportService cluster1Transport = startTransport("cluster_1_node", knownNodes, Version.CURRENT); + MockTransportService cluster2Transport = startTransport("cluster_2_node", knownNodes, Version.CURRENT) + ) { + knownNodes.add(cluster1Transport.getLocalDiscoNode()); + knownNodes.add(cluster2Transport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + + try ( + MockTransportService transportService = MockTransportService.createNewService( + Settings.EMPTY, + Version.CURRENT, + threadPool, + null + ) + ) { + transportService.start(); + transportService.acceptIncomingRequests(); + SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { + + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + deleteNodesInvoked.add(connection.getNode()); + if (connection.getNode().getId() == "node_3") { + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(false))); + t.start(); + } else { + Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + t.start(); + } + } + + @Override + public Transport.Connection getConnection(String clusterAlias, DiscoveryNode node) { + return new SearchAsyncActionTests.MockConnection(node); + } + }; + TransportDeletePitAction action = new TransportDeletePitAction( + transportService, + actionFilters, + namedWriteableRegistry, + transportSearchAction, + clusterServiceMock, + searchTransportService + ); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + PlainActionFuture future = newFuture(); + action.execute(task, deletePITRequest, future); + DeletePitResponse dr = future.get(); + assertEquals(false, dr.isSucceeded()); + assertEquals(3, deleteNodesInvoked.size()); + } + } + } + +} diff --git 
a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java similarity index 99% rename from server/src/test/java/org/opensearch/search/PitMultiNodeTests.java rename to server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java index b2cdd156576d8..a72204ef426fb 100644 --- a/server/src/test/java/org/opensearch/search/PitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitMultiNodeTests.java @@ -31,7 +31,7 @@ * Multi node integration tests for PIT creation and search operation with PIT ID. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) -public class PitMultiNodeTests extends OpenSearchIntegTestCase { +public class CreatePitMultiNodeTests extends OpenSearchIntegTestCase { @Before public void setupIndex() throws ExecutionException, InterruptedException { diff --git a/server/src/test/java/org/opensearch/search/PitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java similarity index 98% rename from server/src/test/java/org/opensearch/search/PitSingleNodeTests.java rename to server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index 2275a07e0d807..d67a9595197cd 100644 --- a/server/src/test/java/org/opensearch/search/PitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -10,7 +10,12 @@ import org.hamcrest.Matchers; import org.opensearch.action.ActionFuture; -import org.opensearch.action.search.*; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitController; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchResponse; import org.opensearch.common.Priority; import 
org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -34,7 +39,7 @@ /** * Single node integration tests for various PIT use cases such as create pit, search etc */ -public class PitSingleNodeTests extends OpenSearchSingleNodeTestCase { +public class CreatePitSingleNodeTests extends OpenSearchSingleNodeTestCase { @Override protected boolean resetNodeAfterTest() { return true; diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java new file mode 100644 index 0000000000000..f6bd765ba4dc7 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -0,0 +1,204 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.ActionFuture; +import org.opensearch.action.search.CreatePitAction; +import org.opensearch.action.search.CreatePitRequest; +import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.search.builder.PointInTimeBuilder; +import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; + +/** + * Multi node integration tests for 
delete PIT use cases + */ +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.SUITE, numDataNodes = 2) +public class DeletePitMultiNodeTests extends OpenSearchIntegTestCase { + + @Before + public void setupIndex() throws ExecutionException, InterruptedException { + createIndex("index", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + } + + @After + public void clearIndex() { + client().admin().indices().prepareDelete("index").get(); + } + + private CreatePitResponse createPitOnIndex(String index) throws ExecutionException, InterruptedException { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { index }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + return execute.get(); + } + + public void testDeletePit() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + execute = client().execute(CreatePitAction.INSTANCE, request); + pitResponse = execute.get(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = deleteExecute.get(); + assertTrue(deletePITResponse.isSucceeded()); + /** + * Checking deleting the same PIT id again results in succeeded + */ + deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + deletePITResponse = deleteExecute.get(); + 
assertTrue(deletePITResponse.isSucceeded()); + + } + + public void testDeleteAllPits() throws Exception { + createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + createPitOnIndex("index1"); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + + /** + * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + assertTrue(deletePITResponse.isSucceeded()); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeletePitWhileNodeDrop() throws Exception { + CreatePitResponse pitResponse = createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + CreatePitResponse pitResponse1 = createPitOnIndex("index1"); + pitIds.add(pitResponse1.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + assertFalse(deletePITResponse.isSucceeded()); + return super.onNodeStopped(nodeName); + } + }); + + 
ensureGreen(); + /** + * When we invoke delete again, returns success after clearing the remaining readers. Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + assertTrue(deletePITResponse.isSucceeded()); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeleteAllPitsWhileNodeDrop() throws Exception { + createPitOnIndex("index"); + createIndex("index1", Settings.builder().put("index.number_of_shards", 5).put("index.number_of_replicas", 1).build()); + client().prepareIndex("index1").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).execute().get(); + ensureGreen(); + DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) throws Exception { + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + assertFalse(deletePITResponse.isSucceeded()); + return super.onNodeStopped(nodeName); + } + }); + + ensureGreen(); + /** + * When we invoke delete again, returns success after clearing the remaining readers. 
Asserting reader context + * not found exceptions don't result in failures ( as deletion in one node is successful ) + */ + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + assertTrue(deletePITResponse.isSucceeded()); + client().admin().indices().prepareDelete("index1").get(); + } + + public void testDeleteWhileSearch() throws Exception { + CreatePitResponse pitResponse = createPitOnIndex("index"); + ensureGreen(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + Thread[] threads = new Thread[5]; + CountDownLatch latch = new CountDownLatch(threads.length); + final AtomicBoolean deleted = new AtomicBoolean(false); + + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + latch.countDown(); + try { + latch.await(); + for (int j = 0; j < 30; j++) { + client().prepareSearch() + .setSize(2) + .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueDays(1))) + .execute() + .get(); + } + } catch (Exception e) { + /** + * assert for exception once delete pit goes through. throw error in case of any exception before that. 
+ */ + if (deleted.get() == true) { + if (!e.getMessage().contains("all shards failed")) throw new AssertionError(e); + return; + } + throw new AssertionError(e); + } + }); + threads[i].setName("opensearch[node_s_0][search]"); + threads[i].start(); + } + ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = execute.get(); + deleted.set(true); + assertTrue(deletePITResponse.isSucceeded()); + + for (Thread thread : threads) { + thread.join(); + } + } + +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index aca537ab07a29..f2e51b7e984f7 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1414,6 +1414,33 @@ public void testOpenReaderContext() { assertTrue(searchService.freeReaderContext(future.actionGet())); } + public void testDeletePitReaderContext() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + assertThat(searchService.getActiveContexts(), equalTo(1)); + assertTrue(searchService.freeReaderContextIfFound(future.actionGet())); + // assert true for reader context not found + assertTrue(searchService.freeReaderContextIfFound(future.actionGet())); + // adding this assert to showcase behavior difference + assertFalse(searchService.freeReaderContext(future.actionGet())); + } + + public void testDeleteAllPitReaderContexts() { + createIndex("index"); + SearchService searchService = getInstanceFromNode(SearchService.class); + PlainActionFuture future = new PlainActionFuture<>(); + searchService.createPitReaderContext(new 
ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); + future.actionGet(); + assertThat(searchService.getActiveContexts(), equalTo(2)); + searchService.freeAllPitContexts(); + assertThat(searchService.getActiveContexts(), equalTo(0)); + } + public void testPitContextMaxKeepAlive() { createIndex("index"); SearchService searchService = getInstanceFromNode(SearchService.class); diff --git a/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java new file mode 100644 index 0000000000000..36a1f91a02887 --- /dev/null +++ b/server/src/test/java/org/opensearch/search/pit/RestDeletePitActionTests.java @@ -0,0 +1,133 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.search.pit; + +import org.apache.lucene.util.SetOnce; +import org.opensearch.action.ActionListener; +import org.opensearch.action.search.DeletePitRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.client.node.NodeClient; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.search.RestDeletePitAction; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.client.NoOpNodeClient; +import org.opensearch.test.rest.FakeRestChannel; +import org.opensearch.test.rest.FakeRestRequest; + +import java.util.Collections; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * Tests to verify the behavior of rest delete pit action for list delete and delete all PIT endpoints + */ +public class RestDeletePitActionTests extends OpenSearchTestCase { + public void testParseDeletePitRequestWithInvalidJsonThrowsException() throws Exception { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{invalid_json}"), + XContentType.JSON + ).build(); + Exception e = expectThrows(IllegalArgumentException.class, () -> action.prepareRequest(request, null)); + assertThat(e.getMessage(), equalTo("Failed to parse request body")); + } + + public void testDeletePitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("BODY")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new 
FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPit() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + action.handleRequest(request, channel, nodeClient); + + assertThat(pitCalled.get(), equalTo(true)); + } + } + + public void testDeleteAllPitWithBody() throws Exception { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(1)); + assertThat(request.getPitIds().get(0), equalTo("_all")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withContent( + new BytesArray("{\"pit_id\": [\"BODY\"]}"), + XContentType.JSON + ).withPath("/_all").build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("request [GET /_all] does not support having a body")); + } 
+ } + + public void testDeletePitQueryStringParamsShouldThrowException() { + SetOnce pitCalled = new SetOnce<>(); + try (NodeClient nodeClient = new NoOpNodeClient(this.getTestName()) { + @Override + public void deletePits(DeletePitRequest request, ActionListener listener) { + pitCalled.set(true); + assertThat(request.getPitIds(), hasSize(2)); + assertThat(request.getPitIds().get(0), equalTo("QUERY_STRING")); + assertThat(request.getPitIds().get(1), equalTo("QUERY_STRING_1")); + } + }) { + RestDeletePitAction action = new RestDeletePitAction(); + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()).withParams( + Collections.singletonMap("pit_id", "QUERY_STRING,QUERY_STRING_1") + ).build(); + FakeRestChannel channel = new FakeRestChannel(request, false, 0); + IllegalArgumentException ex = expectThrows( + IllegalArgumentException.class, + () -> action.handleRequest(request, channel, nodeClient) + ); + assertTrue(ex.getMessage().contains("unrecognized param")); + } + } +} From 724dffca07f434cb7597279231faeec77683d725 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Fri, 20 May 2022 08:31:22 +0530 Subject: [PATCH 03/75] Delete PIT changes - adding tests for invalid ids Signed-off-by: Bharathwaj G --- .../search/DeletePitMultiNodeTests.java | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java index f6bd765ba4dc7..3d2b33a2a7baf 100644 --- a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -29,6 +29,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.containsString; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; /** @@ -76,7 +77,48 @@ public void testDeletePit() throws 
Exception { deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); assertTrue(deletePITResponse.isSucceeded()); + } + public void testDeletePitWithValidAndDeletedIds() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + + /** + * Delete Pit #1 + */ + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + DeletePitResponse deletePITResponse = deleteExecute.get(); + assertTrue(deletePITResponse.isSucceeded()); + + execute = client().execute(CreatePitAction.INSTANCE, request); + pitResponse = execute.get(); + pitIds.add(pitResponse.getId()); + /** + * Delete PIT with both Ids #1 (which is deleted) and #2 (which is present) + */ + deletePITRequest = new DeletePitRequest(pitIds); + deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + deletePITResponse = deleteExecute.get(); + assertTrue(deletePITResponse.isSucceeded()); + } + + public void testDeletePitWithValidAndInvalidIds() throws Exception { + CreatePitRequest request = new CreatePitRequest(TimeValue.timeValueDays(1), true); + request.setIndices(new String[] { "index" }); + ActionFuture execute = client().execute(CreatePitAction.INSTANCE, request); + CreatePitResponse pitResponse = execute.get(); + List pitIds = new ArrayList<>(); + pitIds.add(pitResponse.getId()); + pitIds.add("nondecodableid"); + DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); + ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); + Exception e = assertThrows(ExecutionException.class, () -> 
deleteExecute.get()); + assertThat(e.getMessage(), containsString("invalid id")); } public void testDeleteAllPits() throws Exception { From e48205338f4e1fa2bd266b73f3a1d7109d12ec11 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Tue, 24 May 2022 15:02:55 +0530 Subject: [PATCH 04/75] Addressing comments Signed-off-by: Bharathwaj G --- .../rest-api-spec/api/delete_all_pits.json | 2 +- .../rest-api-spec/api/delete_pit.json | 2 +- .../action/search/CreatePitController.java | 14 +++++- .../action/search/DeletePitAction.java | 2 +- .../action/search/DeletePitRequest.java | 29 +++-------- .../action/search/DeletePitResponse.java | 4 +- .../action/search/SearchTransportService.java | 49 +++++++++++-------- .../opensearch/action/search/SearchUtils.java | 32 +++++++----- .../search/TransportDeletePitAction.java | 28 ++++++++--- .../action/search/RestDeletePitAction.java | 5 +- .../org/opensearch/search/SearchService.java | 35 ++++++++++--- .../search/CreatePitControllerTests.java | 16 +++--- .../search/TransportDeletePitActionTests.java | 16 +++--- .../search/CreatePitSingleNodeTests.java | 3 +- 14 files changed, 141 insertions(+), 96 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json index 0ee8d54dfcc59..5ff01aa746df9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_all_pits.json @@ -2,7 +2,7 @@ "delete_all_pits":{ "documentation":{ "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", - "description":"Deletes all active point in time contexts." + "description":"Deletes all active point in time searches." 
}, "stability":"stable", "url":{ diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json index 0f19e36fa4bc7..d674eb80e4722 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -2,7 +2,7 @@ "delete_pit":{ "documentation":{ "url":"https://opensearch.org/docs/latest/opensearch/rest-api/point_in_time/", - "description":"Deletes one or more point in time contexts." + "description":"Deletes one or more point in time searches based on the IDs passed." }, "stability":"stable", "url":{ diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 3aa7b60f18fc3..8463518162db9 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -28,9 +28,12 @@ import org.opensearch.tasks.Task; import org.opensearch.transport.Transport; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.BiFunction; @@ -260,7 +263,7 @@ private void cleanupContexts(Collection contexts) { @Override public void onResponse(Integer freed) { // log the number of freed contexts - this is invoke and forget call - logger.debug(() -> new ParameterizedMessage("Cleaned up {} contexts out of {}", freed, contexts.size())); + logger.debug(() -> new ParameterizedMessage("Cleared contexts in {} nodes out of {}", freed, contexts.size())); } @Override @@ -268,6 +271,13 @@ public void onFailure(Exception e) { logger.error("Cleaning up PIT contexts failed ", e); } }; - SearchUtils.deletePits(contexts, deleteListener, 
clusterService.state(), searchTransportService); + + Map> nodeToContextsMap = new HashMap<>(); + for (SearchContextIdForNode context : contexts) { + List contextIdsForNode = nodeToContextsMap.getOrDefault(context.getNode(), new ArrayList<>()); + contextIdsForNode.add(context); + nodeToContextsMap.put(context.getNode(), contextIdsForNode); + } + SearchUtils.deletePitContexts(nodeToContextsMap, deleteListener, clusterService.state(), searchTransportService); } } diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitAction.java b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java index 73932886121c5..f807a82301aba 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitAction.java @@ -12,7 +12,7 @@ import org.opensearch.action.ActionType; /** - * Action type for deleting PIT reader contexts + * Action type for deleting point in time searches */ public class DeletePitAction extends ActionType { diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 26b49f53aadac..0cfc8e82fa13f 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -33,23 +33,19 @@ public class DeletePitRequest extends ActionRequest implements ToXContentObject /** * List of PIT IDs to be deleted , and use "_all" to delete all PIT reader contexts */ - private List pitIds; + private final List pitIds = new ArrayList<>(); public DeletePitRequest(StreamInput in) throws IOException { super(in); - pitIds = Arrays.asList(in.readStringArray()); + pitIds.addAll(Arrays.asList(in.readStringArray())); } public DeletePitRequest(String... 
pitIds) { - if (pitIds != null) { - this.pitIds = Arrays.asList(pitIds); - } + this.pitIds.addAll(Arrays.asList(pitIds)); } public DeletePitRequest(List pitIds) { - if (pitIds != null) { - this.pitIds = pitIds; - } + this.pitIds.addAll(pitIds); } public DeletePitRequest() {} @@ -58,10 +54,6 @@ public List getPitIds() { return pitIds; } - public void setPitIds(List pitIds) { - this.pitIds = pitIds; - } - @Override public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; @@ -81,13 +73,6 @@ public void writeTo(StreamOutput out) throws IOException { } } - public void addPitId(String pitId) { - if (pitIds == null) { - pitIds = new ArrayList<>(); - } - pitIds.add(pitId); - } - @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); @@ -101,7 +86,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par } public void fromXContent(XContentParser parser) throws IOException { - pitIds = null; + pitIds.clear(); if (parser.nextToken() != XContentParser.Token.START_OBJECT) { throw new IllegalArgumentException("Malformed content, must start with an object"); } else { @@ -116,13 +101,13 @@ public void fromXContent(XContentParser parser) throws IOException { if (token.isValue() == false) { throw new IllegalArgumentException("pit_id array element should only contain pit_id"); } - addPitId(parser.text()); + pitIds.add(parser.text()); } } else { if (token.isValue() == false) { throw new IllegalArgumentException("pit_id element should only contain pit_id"); } - addPitId(parser.text()); + pitIds.add(parser.text()); } } else { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index 44fef162af623..66eeb56ddcb37 100644 --- 
a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -27,12 +27,12 @@ import static org.opensearch.rest.RestStatus.OK; /** - * Response class for delete pit flow which returns if the contexts are freed + * Response class for delete pits flow which returns if the contexts are freed */ public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { /** - * This will be true if all PIT reader contexts are deleted. + * This will be true if PIT reader contexts are deleted and also if contexts are not found. */ private final boolean succeeded; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index a12ebe2837938..9db460f420174 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -71,7 +71,9 @@ import org.opensearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.function.BiFunction; @@ -88,7 +90,7 @@ public class SearchTransportService { public static final String FREE_PIT_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context/pit]"; public static final String FREE_CONTEXT_ACTION_NAME = "indices:data/read/search[free_context]"; public static final String CLEAR_SCROLL_CONTEXTS_ACTION_NAME = "indices:data/read/search[clear_scroll_contexts]"; - public static final String FREE_ALL_PIT_CONTEXTS_ACTION_NAME = "indices:data/read/search[delete_pit_contexts]"; + public static final String FREE_ALL_PIT_CONTEXTS_ACTION_NAME = "indices:data/read/search[free_pit_contexts]"; public static final String DFS_ACTION_NAME = "indices:data/read/search[phase/dfs]"; 
public static final String QUERY_ACTION_NAME = "indices:data/read/search[phase/query]"; public static final String QUERY_ID_ACTION_NAME = "indices:data/read/search[phase/query/id]"; @@ -202,15 +204,15 @@ public void sendClearAllScrollContexts(Transport.Connection connection, final Ac ); } - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { transportService.sendRequest( connection, FREE_PIT_CONTEXT_ACTION_NAME, - new PitFreeContextRequest(contextId), + new PitFreeContextsRequest(contextIds), TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new) ); @@ -399,26 +401,37 @@ public ShardSearchContextId id() { /** * Request to free the PIT context based on id */ - static class PitFreeContextRequest extends TransportRequest { - private ShardSearchContextId contextId; + static class PitFreeContextsRequest extends TransportRequest { + private List contextIds; - PitFreeContextRequest(ShardSearchContextId contextId) { - this.contextId = Objects.requireNonNull(contextId); + PitFreeContextsRequest(List contextIds) { + this.contextIds = new ArrayList<>(); + this.contextIds.addAll(contextIds); } - PitFreeContextRequest(StreamInput in) throws IOException { + PitFreeContextsRequest(StreamInput in) throws IOException { super(in); - contextId = new ShardSearchContextId(in); + int size = in.readVInt(); + if (size > 0) { + this.contextIds = new ArrayList<>(); + for (int i = 0; i < size; i++) { + ShardSearchContextId contextId = new ShardSearchContextId(in); + contextIds.add(contextId); + } + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - contextId.writeTo(out); + out.writeVInt(contextIds.size()); + for (ShardSearchContextId contextId : contextIds) { + contextId.writeTo(out); + } } - public ShardSearchContextId id() { - return this.contextId; + public List 
getContextIds() { + return this.contextIds; } } @@ -510,10 +523,9 @@ public static void registerRequestHandler(TransportService transportService, Sea transportService.registerRequestHandler( FREE_PIT_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, - PitFreeContextRequest::new, + PitFreeContextsRequest::new, (request, channel, task) -> { - boolean freed = searchService.freeReaderContextIfFound(request.id()); - channel.sendResponse(new SearchFreeContextResponse(freed)); + channel.sendResponse(new SearchFreeContextResponse(searchService.freeReaderContextsIfFound(request.getContextIds()))); } ); TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new); @@ -522,10 +534,7 @@ public static void registerRequestHandler(TransportService transportService, Sea FREE_ALL_PIT_CONTEXTS_ACTION_NAME, ThreadPool.Names.SAME, TransportRequest.Empty::new, - (request, channel, task) -> { - boolean freed = searchService.freeAllPitContexts(); - channel.sendResponse(new SearchFreeContextResponse(freed)); - } + (request, channel, task) -> { channel.sendResponse(new SearchFreeContextResponse(searchService.freeAllPitContexts())); } ); TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, SearchFreeContextResponse::new); diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java index 7d40978e0be43..c203ed12b1d6e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchUtils.java +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -17,10 +17,13 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Strings; +import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.Transport; import java.util.Collection; +import 
java.util.List; +import java.util.Map; import java.util.Set; import java.util.function.BiFunction; import java.util.stream.Collectors; @@ -52,15 +55,17 @@ public static StepListener> getConnect } /** - * Delete list of pits, return success if all reader contexts are deleted ( or not found ). + * Delete list of pit contexts. Returns success only if each reader context is either deleted or not found. */ - public static void deletePits( - Collection contexts, + public static void deletePitContexts( + Map> nodeToContextsMap, ActionListener listener, ClusterState state, SearchTransportService searchTransportService ) { - final Set clusters = contexts.stream() + final Set clusters = nodeToContextsMap.values() + .stream() + .flatMap(Collection::stream) .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) .map(SearchContextIdForNode::getClusterAlias) .collect(Collectors.toSet()); @@ -75,23 +80,28 @@ public static void deletePits( listener, (l, result) -> l.onResponse(Math.toIntExact(result.stream().filter(r -> r).count())) ), - contexts.size() + nodeToContextsMap.size() ); - for (SearchContextIdForNode contextId : contexts) { - final DiscoveryNode node = nodeLookup.apply(contextId.getClusterAlias(), contextId.getNode()); + for (Map.Entry> entry : nodeToContextsMap.entrySet()) { + String clusterAlias = entry.getValue().get(0).getClusterAlias(); + final DiscoveryNode node = nodeLookup.apply(clusterAlias, entry.getValue().get(0).getNode()); if (node == null) { groupedListener.onFailure(new OpenSearchException("node not found")); } else { try { - final Transport.Connection connection = searchTransportService.getConnection(contextId.getClusterAlias(), node); - searchTransportService.sendFreePITContext( + final Transport.Connection connection = searchTransportService.getConnection(clusterAlias, node); + List contextIds = entry.getValue() + .stream() + .map(r -> r.getSearchContextId()) + .collect(Collectors.toList()); + searchTransportService.sendFreePITContexts( 
connection, - contextId.getSearchContextId(), + contextIds, ActionListener.wrap(r -> groupedListener.onResponse(r.isFreed()), e -> groupedListener.onResponse(false)) ); } catch (Exception e) { - logger.debug("Delete PIT failed ", e); + logger.error("Delete PIT failed ", e); groupedListener.onResponse(false); } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index 334029ca60cf3..2a6a9dede094b 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -25,10 +25,12 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; /** - * Transport action for deleting pit reader context - supports deleting list and all pit contexts + * Transport action for deleting point in time searches - supports deleting list and all point in time searches */ public class TransportDeletePitAction extends HandledTransportAction { private final NamedWriteableRegistry namedWriteableRegistry; @@ -70,23 +72,33 @@ protected void doExecute(Task task, DeletePitRequest request, ActionListener listener, DeletePitRequest request) { - List contexts = new ArrayList<>(); + Map> nodeToContextsMap = new HashMap<>(); for (String pitId : request.getPitIds()) { SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, pitId); - contexts.addAll(contextId.shards().values()); + for (SearchContextIdForNode contextIdForNode : contextId.shards().values()) { + List contexts = nodeToContextsMap.getOrDefault(contextIdForNode.getNode(), new ArrayList<>()); + contexts.add(contextIdForNode); + nodeToContextsMap.put(contextIdForNode.getNode(), contexts); + } } ActionListener deleteListener = ActionListener.wrap(r -> { - if (r == contexts.size()) { + if (r == nodeToContextsMap.size()) 
{ listener.onResponse(new DeletePitResponse(true)); } else { - logger.debug(() -> new ParameterizedMessage("Delete PITs failed. " + "Cleared {} contexts out of {}", r, contexts.size())); + logger.debug( + () -> new ParameterizedMessage( + "Delete PITs failed. Cleared contexts in {} nodes out of {}", + r, + nodeToContextsMap.size() + ) + ); listener.onResponse(new DeletePitResponse(false)); } }, e -> { - logger.debug("Delete PITs failed ", e); + logger.error("Delete PITs failed ", e); listener.onResponse(new DeletePitResponse(false)); }); - SearchUtils.deletePits(contexts, deleteListener, clusterService.state(), searchTransportService); + SearchUtils.deletePitContexts(nodeToContextsMap, deleteListener, clusterService.state(), searchTransportService); } /** @@ -104,7 +116,7 @@ public void onResponse(final Collection { if (xContentParser != null) { try { diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index b3b22368bb665..c1c9f685f500a 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; -import org.apache.lucene.util.SetOnce; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; @@ -1022,7 +1021,7 @@ public boolean freeReaderContext(ShardSearchContextId contextId) { } /** - * Free reader context if found otherwise return false + * Free reader context if found */ public boolean freeReaderContextIfFound(ShardSearchContextId contextId) { try { @@ -1037,21 +1036,41 @@ public boolean freeReaderContextIfFound(ShardSearchContextId contextId) { return true; } + /** + * Free reader contexts if found + */ + public boolean freeReaderContextsIfFound(List contextIds) { + boolean success = 
true; + for (ShardSearchContextId contextId : contextIds) { + try { + if (getReaderContext(contextId) != null) { + try (ReaderContext context = removeReaderContext(contextId.getId())) { + boolean freed = context != null; + if (!freed) { + success = false; + } + } + } + } catch (SearchContextMissingException e) { + // do nothing in case of context not found case + } + } + return success; + } + /** * Free all active pit contexts */ public boolean freeAllPitContexts() { - final SetOnce isFreed = new SetOnce<>(); + boolean success = true; for (ReaderContext readerContext : activeReaders.values()) { if (readerContext instanceof PitReaderContext) { - final boolean succeeded = freeReaderContextIfFound(readerContext.id()); - if (!succeeded) { - isFreed.trySet(false); + if (!freeReaderContextIfFound(readerContext.id())) { + success = false; } } } - isFreed.trySet(true); - return isFreed.get(); + return success; } /** diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index 528c8e991c3e1..dacc4bf2bca05 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -197,9 +197,9 @@ public void updatePitContext( * Test if cleanup request is called */ @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -292,9 +292,9 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod } @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -389,9 +389,9 
@@ public void updatePitContext( } @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -479,9 +479,9 @@ public void updatePitContext( } @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 0a0d248a874b2..32f246c56f715 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -160,9 +160,9 @@ public void testDeletePitSuccess() throws InterruptedException, ExecutionExcepti SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -274,9 +274,9 @@ public void testDeletePitWhenNodeIsDown() throws InterruptedException, Execution SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -337,9 +337,9 @@ public void testDeletePitWhenAllNodesAreDown() throws InterruptedException, Exec transportService.acceptIncomingRequests(); 
SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextIds, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -397,9 +397,9 @@ public void testDeletePitFailure() throws InterruptedException, ExecutionExcepti SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreePITContext( + public void sendFreePITContexts( Transport.Connection connection, - ShardSearchContextId contextId, + List contextId, ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); diff --git a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java index d67a9595197cd..eb6d93e0f440a 100644 --- a/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/search/CreatePitSingleNodeTests.java @@ -280,9 +280,8 @@ public void testSearchWithPitKeepAliveExtension() throws ExecutionException, Int .setPointInTime(new PointInTimeBuilder(pitResponse.getId()).setKeepAlive(TimeValue.timeValueSeconds(3))) .get(); client().prepareIndex("index").setId("2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - Thread.sleep(2500); assertEquals(2, service.getActiveContexts()); - Thread.sleep(1000); + Thread.sleep(3500); assertEquals(0, service.getActiveContexts()); SearchPhaseExecutionException ex = expectThrows(SearchPhaseExecutionException.class, () -> { client().prepareSearch("index") From 233b15d1b4e42d838f2529adbb3ca5d568a45edc Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Tue, 24 May 2022 23:19:28 +0530 Subject: [PATCH 05/75] Addressing comments Signed-off-by: Bharathwaj G --- .../client/RestHighLevelClient.java | 8 ++--- 
.../action/search/CreatePitController.java | 1 - .../action/search/DeletePitRequest.java | 2 +- .../action/search/DeletePitResponse.java | 2 +- .../opensearch/action/search/SearchUtils.java | 5 +-- .../org/opensearch/search/SearchService.java | 31 +++---------------- .../opensearch/search/SearchServiceTests.java | 7 +++-- 7 files changed, 17 insertions(+), 39 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 4cb3ff999f793..0c4d41b595b5c 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -1301,7 +1301,7 @@ public final Cancellable createPitAsync( } /** - * Delete PIT context using delete PIT API + * Delete point in time searches using delete PIT API * * @param deletePitRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -1318,7 +1318,7 @@ public final DeletePitResponse deletePit(DeletePitRequest deletePitRequest, Requ } /** - * Asynchronously Delete PIT context using delete PIT API + * Asynchronously Delete point in time searches using delete PIT API * * @param deletePitRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -1341,7 +1341,7 @@ public final Cancellable deletePitAsync( } /** - * Delete all PIT contexts using delete all PITs API + * Delete all point in time searches using delete all PITs API * * @param deletePitRequest the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -1358,7 +1358,7 @@ public final DeletePitResponse deleteAllPits(DeletePitRequest deletePitRequest, } /** - * Asynchronously Delete all PIT contexts using delete all PITs API + * Asynchronously Delete all point in time searches using delete all PITs API * * @param deletePitRequest the request * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 8463518162db9..0c425b16a06b0 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -271,7 +271,6 @@ public void onFailure(Exception e) { logger.error("Cleaning up PIT contexts failed ", e); } }; - Map> nodeToContextsMap = new HashMap<>(); for (SearchContextIdForNode context : contexts) { List contextIdsForNode = nodeToContextsMap.getOrDefault(context.getNode(), new ArrayList<>()); diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java index 0cfc8e82fa13f..945fcfd17eb6c 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitRequest.java @@ -26,7 +26,7 @@ import static org.opensearch.action.ValidateActions.addValidationError; /** - * Request to delete one or more PIT contexts based on IDs. + * Request to delete one or more PIT search contexts based on IDs. 
*/ public class DeletePitRequest extends ActionRequest implements ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index 66eeb56ddcb37..7151f05d6683d 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -27,7 +27,7 @@ import static org.opensearch.rest.RestStatus.OK; /** - * Response class for delete pits flow which returns if the contexts are freed + * Response class for delete pits flow which clears the point in time search contexts */ public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java index c203ed12b1d6e..accf91f5ded86 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchUtils.java +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; @@ -87,7 +88,7 @@ public static void deletePitContexts( String clusterAlias = entry.getValue().get(0).getClusterAlias(); final DiscoveryNode node = nodeLookup.apply(clusterAlias, entry.getValue().get(0).getNode()); if (node == null) { - groupedListener.onFailure(new OpenSearchException("node not found")); + groupedListener.onFailure(new OpenSearchException("node [" + entry.getValue().get(0).getNode() + "] not found")); } else { try { final Transport.Connection connection = searchTransportService.getConnection(clusterAlias, node); @@ -101,7 +102,7 @@ public static void 
deletePitContexts( ActionListener.wrap(r -> groupedListener.onResponse(r.isFreed()), e -> groupedListener.onResponse(false)) ); } catch (Exception e) { - logger.error("Delete PIT failed ", e); + logger.error(() -> new ParameterizedMessage("Delete PITs failed on node [{}]", node.getName()), e); groupedListener.onResponse(false); } } diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index c1c9f685f500a..f7966d7a4413d 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -138,12 +138,7 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; +import java.util.*; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; @@ -1020,22 +1015,6 @@ public boolean freeReaderContext(ShardSearchContextId contextId) { return false; } - /** - * Free reader context if found - */ - public boolean freeReaderContextIfFound(ShardSearchContextId contextId) { - try { - if (getReaderContext(contextId) != null) { - try (ReaderContext context = removeReaderContext(contextId.getId())) { - return context != null; - } - } - } catch (SearchContextMissingException e) { - return true; - } - return true; - } - /** * Free reader contexts if found */ @@ -1062,15 +1041,13 @@ public boolean freeReaderContextsIfFound(List contextIds) * Free all active pit contexts */ public boolean freeAllPitContexts() { - boolean success = true; + List contextIds = new ArrayList<>(); for (ReaderContext readerContext : activeReaders.values()) { if (readerContext instanceof PitReaderContext) { - if (!freeReaderContextIfFound(readerContext.id())) { - success = false; - } + 
contextIds.add(readerContext.id()); } } - return success; + return freeReaderContextsIfFound(contextIds); } /** diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index f2e51b7e984f7..c96671166c269 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1419,11 +1419,12 @@ public void testDeletePitReaderContext() { SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - future.actionGet(); + List contextIds = new ArrayList<>(); + contextIds.add(future.actionGet()); assertThat(searchService.getActiveContexts(), equalTo(1)); - assertTrue(searchService.freeReaderContextIfFound(future.actionGet())); + assertTrue(searchService.freeReaderContextsIfFound(contextIds)); // assert true for reader context not found - assertTrue(searchService.freeReaderContextIfFound(future.actionGet())); + assertTrue(searchService.freeReaderContextsIfFound(contextIds)); // adding this assert to showcase behavior difference assertFalse(searchService.freeReaderContext(future.actionGet())); } From 28d8605b23721db8ee866286e3e5b4f86bcde337 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Fri, 17 Jun 2022 18:36:33 +0530 Subject: [PATCH 06/75] Addressing comments - changing delete pit response structure Signed-off-by: Bharathwaj G --- .../java/org/opensearch/client/PitIT.java | 8 +- .../java/org/opensearch/client/SearchIT.java | 3 +- .../action/search/CreatePitController.java | 24 +++-- .../action/search/DeletePitInfo.java | 82 +++++++++++++++++ .../action/search/DeletePitResponse.java | 71 +++++++++------ .../search/PitSearchContextIdForNode.java | 50 +++++++++++ 
.../action/search/SearchContextIdForNode.java | 2 +- .../action/search/SearchTransportService.java | 30 +++---- .../opensearch/action/search/SearchUtils.java | 85 ++++++++++++------ .../search/TransportDeletePitAction.java | 46 ++-------- .../org/opensearch/search/SearchService.java | 48 ++++++---- .../search/CreatePitControllerTests.java | 27 +++--- .../search/TransportDeletePitActionTests.java | 90 ++++++++++--------- .../search/DeletePitMultiNodeTests.java | 67 +++++++++++--- .../opensearch/search/SearchServiceTests.java | 25 +++--- 15 files changed, 440 insertions(+), 218 deletions(-) create mode 100644 server/src/main/java/org/opensearch/action/search/DeletePitInfo.java create mode 100644 server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index 13b821eb7e44c..6f4463d6b4c4c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -13,6 +13,7 @@ import org.junit.Before; import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; +import org.opensearch.action.search.DeletePitInfo; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; import org.opensearch.common.unit.TimeValue; @@ -59,7 +60,8 @@ public void testCreatePit() throws IOException { pitIds.add(pitResponse.getId()); DeletePitRequest deletePitRequest = new DeletePitRequest(pitIds); DeletePitResponse deletePitResponse = execute(deletePitRequest, highLevelClient()::deletePit, highLevelClient()::deletePitAsync); - assertTrue(deletePitResponse.isSucceeded()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSucceeded()); + 
assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); } public void testDeleteAllPits() throws IOException { @@ -76,6 +78,8 @@ public void testDeleteAllPits() throws IOException { highLevelClient()::deleteAllPits, highLevelClient()::deleteAllPitsAsync ); - assertTrue(deletePitResponse.isSucceeded()); + for (DeletePitInfo deletePitInfo : deletePitResponse.getDeletePitResults()) { + assertTrue(deletePitInfo.isSucceeded()); + } } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index e0b7ed81d405c..e58f2dde380de 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -804,7 +804,8 @@ public void testSearchWithPit() throws Exception { highLevelClient()::deletePit, highLevelClient()::deletePitAsync ); - assertTrue(deletePitResponse.isSucceeded()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSucceeded()); + assertTrue(deletePitResponse.getDeletePitResults().get(0).getPitId().equals(pitResponse.getId())); } } diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 0c425b16a06b0..6eb1510bab2f7 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -249,7 +249,7 @@ public void onResponse(final Collection responses) { @Override public void onFailure(final Exception e) { - cleanupContexts(contexts); + cleanupContexts(contexts, createPITResponse.getId()); updatePitIdListener.onFailure(e); } }, size); @@ -258,12 +258,18 @@ public void onFailure(final Exception e) { /** * Cleanup all created PIT contexts in case of failure */ - private void cleanupContexts(Collection 
contexts) { - ActionListener deleteListener = new ActionListener<>() { + private void cleanupContexts(Collection contexts, String pitId) { + ActionListener deleteListener = new ActionListener<>() { @Override - public void onResponse(Integer freed) { - // log the number of freed contexts - this is invoke and forget call - logger.debug(() -> new ParameterizedMessage("Cleared contexts in {} nodes out of {}", freed, contexts.size())); + public void onResponse(DeletePitResponse response) { + // this is invoke and forget call + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + if (!deletePitInfo.isSucceeded()) { + logger.debug(() -> new ParameterizedMessage("Failed to delete PIT ID {}", deletePitInfo.getPitId())); + } else { + logger.debug(() -> new ParameterizedMessage("Deleted PIT with ID {}", deletePitInfo.getPitId())); + } + } } @Override @@ -271,10 +277,10 @@ public void onFailure(Exception e) { logger.error("Cleaning up PIT contexts failed ", e); } }; - Map> nodeToContextsMap = new HashMap<>(); + Map> nodeToContextsMap = new HashMap<>(); for (SearchContextIdForNode context : contexts) { - List contextIdsForNode = nodeToContextsMap.getOrDefault(context.getNode(), new ArrayList<>()); - contextIdsForNode.add(context); + List contextIdsForNode = nodeToContextsMap.getOrDefault(context.getNode(), new ArrayList<>()); + contextIdsForNode.add(new PitSearchContextIdForNode(pitId, context)); nodeToContextsMap.put(context.getNode(), contextIdsForNode); } SearchUtils.deletePitContexts(nodeToContextsMap, deleteListener, clusterService.state(), searchTransportService); diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java new file mode 100644 index 0000000000000..25dd48b208a48 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors 
require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.ParseField; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * This class captures if deletion of pit is successful along with pit id + */ +public class DeletePitInfo extends TransportResponse implements Writeable, ToXContent { + /** + * This will be true if PIT reader contexts are deleted ond also if contexts are not found. + */ + private final boolean succeeded; + + private final String pitId; + + public DeletePitInfo(boolean succeeded, String pitId) { + this.succeeded = succeeded; + this.pitId = pitId; + } + + public DeletePitInfo(StreamInput in) throws IOException { + succeeded = in.readBoolean(); + pitId = in.readString(); + + } + + public boolean isSucceeded() { + return succeeded; + } + + public String getPitId() { + return pitId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(succeeded); + out.writeString(pitId); + } + + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit_info", + true, + args -> new DeletePitInfo((boolean) args[0], (String) args[1]) + ); + + static { + PARSER.declareBoolean(constructorArg(), new ParseField("succeeded")); + PARSER.declareString(constructorArg(), new ParseField("pitId")); + } + + private static final ParseField SUCCEEDED = new ParseField("succeeded"); + private static final 
ParseField PIT_ID = new ParseField("pitId"); + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(SUCCEEDED.getPreferredName(), succeeded); + builder.field(PIT_ID.getPreferredName(), pitId); + builder.endObject(); + return builder; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index 7151f05d6683d..cc377d99e6d2e 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -13,7 +13,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.StatusToXContentObject; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.XContentBuilder; @@ -21,6 +20,8 @@ import org.opensearch.rest.RestStatus; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; import static org.opensearch.rest.RestStatus.NOT_FOUND; @@ -31,61 +32,73 @@ */ public class DeletePitResponse extends ActionResponse implements StatusToXContentObject { - /** - * This will be true if PIT reader contexts are deleted ond also if contexts are not found. 
- */ - private final boolean succeeded; + private final List deletePitResults; - public DeletePitResponse(boolean succeeded) { - this.succeeded = succeeded; + public DeletePitResponse(List deletePitResults) { + this.deletePitResults = deletePitResults; } public DeletePitResponse(StreamInput in) throws IOException { super(in); - succeeded = in.readBoolean(); + int size = in.readVInt(); + deletePitResults = new ArrayList<>(); + for (int i = 0; i < size; i++) { + deletePitResults.add(new DeletePitInfo(in)); + } + + } + + public List getDeletePitResults() { + return deletePitResults; } /** * @return Whether the attempt to delete PIT was successful. */ - public boolean isSucceeded() { - return succeeded; - } @Override public RestStatus status() { - return succeeded ? OK : NOT_FOUND; + for (DeletePitInfo deletePitResult : deletePitResults) { + if (!deletePitResult.isSucceeded()) return NOT_FOUND; + } + return OK; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(succeeded); - } - - private static final ParseField SUCCEEDED = new ParseField("succeeded"); - - private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( - "delete_pit", - true, - a -> new DeletePitResponse((boolean) a[0]) - ); - static { - PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SUCCEEDED, ObjectParser.ValueType.BOOLEAN); + out.writeVInt(deletePitResults.size()); + for (DeletePitInfo deletePitResult : deletePitResults) { + deletePitResult.writeTo(out); + } } @Override public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { builder.startObject(); - builder.field(SUCCEEDED.getPreferredName(), succeeded); + builder.startArray("pits"); + for (DeletePitInfo response : deletePitResults) { + response.toXContent(builder, params); + } + builder.endArray(); builder.endObject(); return builder; } - /** - * Parse the delete PIT response body into a new {@link 
DeletePitResponse} object - */ + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "delete_pit_response", + true, + (Object[] parsedObjects) -> { + @SuppressWarnings("unchecked") + List deletePitInfoList = (List) parsedObjects[0]; + return new DeletePitResponse(deletePitInfoList); + } + ); + static { + PARSER.declareObjectArray(constructorArg(), DeletePitInfo.PARSER, new ParseField("pits")); + } + public static DeletePitResponse fromXContent(XContentParser parser) throws IOException { - return PARSER.apply(parser, null); + return PARSER.parse(parser, null); } } diff --git a/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java new file mode 100644 index 0000000000000..577a559beb8f9 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/search/PitSearchContextIdForNode.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.search; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Pit ID along with Id for a search context per node. 
+ * + * @opensearch.internal + */ +public class PitSearchContextIdForNode implements Writeable { + + private final String pitId; + private final SearchContextIdForNode searchContextIdForNode; + + public PitSearchContextIdForNode(String pitId, SearchContextIdForNode searchContextIdForNode) { + this.pitId = pitId; + this.searchContextIdForNode = searchContextIdForNode; + } + + PitSearchContextIdForNode(StreamInput in) throws IOException { + this.pitId = in.readString(); + this.searchContextIdForNode = new SearchContextIdForNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(pitId); + searchContextIdForNode.writeTo(out); + } + + public String getPitId() { + return pitId; + } + + public SearchContextIdForNode getSearchContextIdForNode() { + return searchContextIdForNode; + } +} diff --git a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java index 8f16a6e3ee226..7f218a3b1a17e 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java +++ b/server/src/main/java/org/opensearch/action/search/SearchContextIdForNode.java @@ -50,7 +50,7 @@ public final class SearchContextIdForNode implements Writeable { private final ShardSearchContextId searchContextId; private final String clusterAlias; - SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { + public SearchContextIdForNode(@Nullable String clusterAlias, String node, ShardSearchContextId searchContextId) { this.node = node; this.clusterAlias = clusterAlias; this.searchContextId = searchContextId; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index 9db460f420174..0bd21b8c7d4e8 100644 --- 
a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -206,25 +206,25 @@ public void sendClearAllScrollContexts(Transport.Connection connection, final Ac public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { transportService.sendRequest( connection, FREE_PIT_CONTEXT_ACTION_NAME, new PitFreeContextsRequest(contextIds), TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new) + new ActionListenerResponseHandler<>(listener, DeletePitResponse::new) ); } - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { transportService.sendRequest( connection, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE, TransportRequestOptions.EMPTY, - new ActionListenerResponseHandler<>(listener, SearchFreeContextResponse::new) + new ActionListenerResponseHandler<>(listener, DeletePitResponse::new) ); } @@ -402,9 +402,9 @@ public ShardSearchContextId id() { * Request to free the PIT context based on id */ static class PitFreeContextsRequest extends TransportRequest { - private List contextIds; + private List contextIds; - PitFreeContextsRequest(List contextIds) { + PitFreeContextsRequest(List contextIds) { this.contextIds = new ArrayList<>(); this.contextIds.addAll(contextIds); } @@ -415,7 +415,7 @@ static class PitFreeContextsRequest extends TransportRequest { if (size > 0) { this.contextIds = new ArrayList<>(); for (int i = 0; i < size; i++) { - ShardSearchContextId contextId = new ShardSearchContextId(in); + PitSearchContextIdForNode contextId = new PitSearchContextIdForNode(in); contextIds.add(contextId); } } @@ -425,12 +425,12 @@ static class 
PitFreeContextsRequest extends TransportRequest { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(contextIds.size()); - for (ShardSearchContextId contextId : contextIds) { + for (PitSearchContextIdForNode contextId : contextIds) { contextId.writeTo(out); } } - public List getContextIds() { + public List getContextIds() { return this.contextIds; } } @@ -524,19 +524,17 @@ public static void registerRequestHandler(TransportService transportService, Sea FREE_PIT_CONTEXT_ACTION_NAME, ThreadPool.Names.SAME, PitFreeContextsRequest::new, - (request, channel, task) -> { - channel.sendResponse(new SearchFreeContextResponse(searchService.freeReaderContextsIfFound(request.getContextIds()))); - } + (request, channel, task) -> { channel.sendResponse(searchService.freeReaderContextsIfFound(request.getContextIds())); } ); - TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, SearchFreeContextResponse::new); + TransportActionProxy.registerProxyAction(transportService, FREE_PIT_CONTEXT_ACTION_NAME, DeletePitResponse::new); transportService.registerRequestHandler( FREE_ALL_PIT_CONTEXTS_ACTION_NAME, ThreadPool.Names.SAME, TransportRequest.Empty::new, - (request, channel, task) -> { channel.sendResponse(new SearchFreeContextResponse(searchService.freeAllPitContexts())); } + (request, channel, task) -> { channel.sendResponse(searchService.freeAllPitContexts()); } ); - TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, SearchFreeContextResponse::new); + TransportActionProxy.registerProxyAction(transportService, FREE_ALL_PIT_CONTEXTS_ACTION_NAME, DeletePitResponse::new); transportService.registerRequestHandler( FREE_CONTEXT_ACTION_NAME, diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java index accf91f5ded86..c6abf16a0ffd1 100644 --- 
a/server/src/main/java/org/opensearch/action/search/SearchUtils.java +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -11,18 +11,18 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.StepListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.Strings; -import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.transport.RemoteClusterService; import org.opensearch.transport.Transport; +import java.util.ArrayList; import java.util.Collection; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -59,16 +59,16 @@ public static StepListener> getConnect * Delete list of pit contexts. Returns success only if each reader context is either deleted or not found. 
*/ public static void deletePitContexts( - Map> nodeToContextsMap, - ActionListener listener, + Map> nodeToContextsMap, + ActionListener listener, ClusterState state, SearchTransportService searchTransportService ) { final Set clusters = nodeToContextsMap.values() .stream() .flatMap(Collection::stream) - .filter(ctx -> Strings.isEmpty(ctx.getClusterAlias()) == false) - .map(SearchContextIdForNode::getClusterAlias) + .filter(ctx -> Strings.isEmpty(ctx.getSearchContextIdForNode().getClusterAlias()) == false) + .map(c -> c.getSearchContextIdForNode().getClusterAlias()) .collect(Collectors.toSet()); StepListener> lookupListener = getConnectionLookupListener( searchTransportService.getRemoteClusterService(), @@ -76,37 +76,72 @@ public static void deletePitContexts( clusters ); lookupListener.whenComplete(nodeLookup -> { - final GroupedActionListener groupedListener = new GroupedActionListener<>( - ActionListener.delegateFailure( - listener, - (l, result) -> l.onResponse(Math.toIntExact(result.stream().filter(r -> r).count())) - ), + final GroupedActionListener groupedListener = getDeletePitGroupedListener( + listener, nodeToContextsMap.size() ); - for (Map.Entry> entry : nodeToContextsMap.entrySet()) { - String clusterAlias = entry.getValue().get(0).getClusterAlias(); - final DiscoveryNode node = nodeLookup.apply(clusterAlias, entry.getValue().get(0).getNode()); + for (Map.Entry> entry : nodeToContextsMap.entrySet()) { + String clusterAlias = entry.getValue().get(0).getSearchContextIdForNode().getClusterAlias(); + final DiscoveryNode node = nodeLookup.apply(clusterAlias, entry.getValue().get(0).getSearchContextIdForNode().getNode()); if (node == null) { - groupedListener.onFailure(new OpenSearchException("node [" + entry.getValue().get(0).getNode() + "] not found")); + logger.error( + () -> new ParameterizedMessage("node [{}] not found", entry.getValue().get(0).getSearchContextIdForNode().getNode()) + ); + List deletePitInfos = new ArrayList<>(); + for 
(PitSearchContextIdForNode pitSearchContextIdForNode : entry.getValue()) { + deletePitInfos.add(new DeletePitInfo(false, pitSearchContextIdForNode.getPitId())); + } + groupedListener.onResponse(new DeletePitResponse(deletePitInfos)); } else { try { final Transport.Connection connection = searchTransportService.getConnection(clusterAlias, node); - List contextIds = entry.getValue() - .stream() - .map(r -> r.getSearchContextId()) - .collect(Collectors.toList()); - searchTransportService.sendFreePITContexts( - connection, - contextIds, - ActionListener.wrap(r -> groupedListener.onResponse(r.isFreed()), e -> groupedListener.onResponse(false)) - ); + searchTransportService.sendFreePITContexts(connection, entry.getValue(), groupedListener); } catch (Exception e) { logger.error(() -> new ParameterizedMessage("Delete PITs failed on node [{}]", node.getName()), e); - groupedListener.onResponse(false); + List deletePitInfos = new ArrayList<>(); + for (PitSearchContextIdForNode pitSearchContextIdForNode : entry.getValue()) { + deletePitInfos.add(new DeletePitInfo(false, pitSearchContextIdForNode.getPitId())); + } + groupedListener.onResponse(new DeletePitResponse(deletePitInfos)); } } } }, listener::onFailure); } + + public static GroupedActionListener getDeletePitGroupedListener( + ActionListener listener, + int size + ) { + return new GroupedActionListener<>(new ActionListener<>() { + @Override + public void onResponse(final Collection responses) { + Map pitIdToSucceededMap = new HashMap<>(); + for (DeletePitResponse response : responses) { + for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { + if (!pitIdToSucceededMap.containsKey(deletePitInfo.getPitId())) { + pitIdToSucceededMap.put(deletePitInfo.getPitId(), deletePitInfo.isSucceeded()); + } + if (!deletePitInfo.isSucceeded()) { + logger.debug(() -> new ParameterizedMessage("Deleting PIT with ID {} failed ", deletePitInfo.getPitId())); + pitIdToSucceededMap.put(deletePitInfo.getPitId(), 
deletePitInfo.isSucceeded()); + } + } + } + List deletePitResults = new ArrayList<>(); + for (Map.Entry entry : pitIdToSucceededMap.entrySet()) { + deletePitResults.add(new DeletePitInfo(entry.getValue(), entry.getKey())); + } + DeletePitResponse deletePitResponse = new DeletePitResponse(deletePitResults); + listener.onResponse(deletePitResponse); + } + + @Override + public void onFailure(final Exception e) { + logger.error("Delete PITs failed", e); + listener.onFailure(e); + } + }, size); + } } diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index 2a6a9dede094b..180ffbcba46e8 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -10,10 +10,8 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.HandledTransportAction; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; @@ -24,7 +22,6 @@ import org.opensearch.transport.TransportService; import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -72,33 +69,17 @@ protected void doExecute(Task task, DeletePitRequest request, ActionListener listener, DeletePitRequest request) { - Map> nodeToContextsMap = new HashMap<>(); + Map> nodeToContextsMap = new HashMap<>(); for (String pitId : request.getPitIds()) { SearchContextId contextId = SearchContextId.decode(namedWriteableRegistry, pitId); for (SearchContextIdForNode contextIdForNode : 
contextId.shards().values()) { - List contexts = nodeToContextsMap.getOrDefault(contextIdForNode.getNode(), new ArrayList<>()); - contexts.add(contextIdForNode); + PitSearchContextIdForNode pitSearchContext = new PitSearchContextIdForNode(pitId, contextIdForNode); + List contexts = nodeToContextsMap.getOrDefault(contextIdForNode.getNode(), new ArrayList<>()); + contexts.add(pitSearchContext); nodeToContextsMap.put(contextIdForNode.getNode(), contexts); } } - ActionListener deleteListener = ActionListener.wrap(r -> { - if (r == nodeToContextsMap.size()) { - listener.onResponse(new DeletePitResponse(true)); - } else { - logger.debug( - () -> new ParameterizedMessage( - "Delete PITs failed. Cleared contexts in {} nodes out of {}", - r, - nodeToContextsMap.size() - ) - ); - listener.onResponse(new DeletePitResponse(false)); - } - }, e -> { - logger.error("Delete PITs failed ", e); - listener.onResponse(new DeletePitResponse(false)); - }); - SearchUtils.deletePitContexts(nodeToContextsMap, deleteListener, clusterService.state(), searchTransportService); + SearchUtils.deletePitContexts(nodeToContextsMap, listener, clusterService.state(), searchTransportService); } /** @@ -106,22 +87,7 @@ private void deletePits(ActionListener listener, DeletePitReq */ private void deleteAllPits(ActionListener listener) { int size = clusterService.state().getNodes().getSize(); - ActionListener groupedActionListener = new GroupedActionListener( - new ActionListener<>() { - @Override - public void onResponse(final Collection responses) { - boolean hasFailures = responses.stream().anyMatch(r -> !r.isFreed()); - listener.onResponse(new DeletePitResponse(!hasFailures)); - } - - @Override - public void onFailure(final Exception e) { - logger.error("Delete all PITs failed ", e); - listener.onResponse(new DeletePitResponse(false)); - } - }, - size - ); + ActionListener groupedActionListener = SearchUtils.getDeletePitGroupedListener(listener, size); for (final DiscoveryNode node : 
clusterService.state().getNodes()) { try { Transport.Connection connection = searchTransportService.getConnection(null, node); diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index f7966d7a4413d..d833a5ddde246 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -41,6 +41,9 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.OriginalIndices; +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.PitSearchContextIdForNode; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchShardTask; import org.opensearch.action.search.SearchType; @@ -138,7 +141,13 @@ import org.opensearch.transport.TransportRequest; import java.io.IOException; -import java.util.*; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; @@ -1018,36 +1027,43 @@ public boolean freeReaderContext(ShardSearchContextId contextId) { /** * Free reader contexts if found */ - public boolean freeReaderContextsIfFound(List contextIds) { - boolean success = true; - for (ShardSearchContextId contextId : contextIds) { + public DeletePitResponse freeReaderContextsIfFound(List contextIds) { + List deleteResults = new ArrayList<>(); + for (PitSearchContextIdForNode contextId : contextIds) { try { - if (getReaderContext(contextId) != null) { - try (ReaderContext context = removeReaderContext(contextId.getId())) { - boolean freed = context != null; - if (!freed) { - success = 
false; - } + if (getReaderContext(contextId.getSearchContextIdForNode().getSearchContextId()) != null) { + try (ReaderContext context = removeReaderContext(contextId.getSearchContextIdForNode().getSearchContextId().getId())) { + PitReaderContext pitReaderContext = (PitReaderContext) context; + String pitId = pitReaderContext.getPitId(); + boolean success = context != null; + DeletePitInfo deletePitInfo = new DeletePitInfo(success, pitId); + deleteResults.add(deletePitInfo); } + } else { + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); } } catch (SearchContextMissingException e) { - // do nothing in case of context not found case + DeletePitInfo deletePitInfo = new DeletePitInfo(true, contextId.getPitId()); + deleteResults.add(deletePitInfo); } } - return success; + return new DeletePitResponse(deleteResults); } /** * Free all active pit contexts */ - public boolean freeAllPitContexts() { - List contextIds = new ArrayList<>(); + public DeletePitResponse freeAllPitContexts() { + List deleteResults = new ArrayList<>(); for (ReaderContext readerContext : activeReaders.values()) { if (readerContext instanceof PitReaderContext) { - contextIds.add(readerContext.id()); + boolean result = freeReaderContext(readerContext.id()); + DeletePitInfo deletePitInfo = new DeletePitInfo(result, ((PitReaderContext) readerContext).getPitId()); + deleteResults.add(deletePitInfo); } } - return freeReaderContextsIfFound(contextIds); + return new DeletePitResponse(deleteResults); } /** diff --git a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java index dacc4bf2bca05..50744f0a3499c 100644 --- a/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/CreatePitControllerTests.java @@ -30,7 +30,6 @@ import org.opensearch.search.SearchHits; 
import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; -import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; @@ -39,6 +38,8 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterConnectionTests; import org.opensearch.transport.Transport; + +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -199,11 +200,11 @@ public void updatePitContext( @Override public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } @@ -294,11 +295,11 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod @Override public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } }; @@ -391,11 +392,11 @@ public void updatePitContext( @Override public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new 
ArrayList<>()))); t.start(); } @@ -481,11 +482,11 @@ public void updatePitContext( @Override public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } diff --git a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java index 32f246c56f715..003cd00e2219d 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportDeletePitActionTests.java @@ -29,7 +29,6 @@ import org.opensearch.search.SearchHits; import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.internal.InternalSearchResponse; -import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; @@ -37,8 +36,8 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.RemoteClusterConnectionTests; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -46,6 +45,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; +import static org.hamcrest.Matchers.containsString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import static org.opensearch.action.search.PitTestsUtil.getPitId; @@ -162,11 +162,14 @@ public void testDeletePitSuccess() throws InterruptedException, ExecutionExcepti 
@Override public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfos = new ArrayList<>(); + deletePitInfos.add(deletePitInfo); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(deletePitInfos))); t.start(); } @@ -187,7 +190,8 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod PlainActionFuture future = newFuture(); action.execute(task, deletePITRequest, future); DeletePitResponse dr = future.get(); - assertEquals(true, dr.isSucceeded()); + assertTrue(dr.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertTrue(dr.getDeletePitResults().get(0).isSucceeded()); assertEquals(3, deleteNodesInvoked.size()); } @@ -218,9 +222,12 @@ public void testDeleteAllPITSuccess() throws InterruptedException, ExecutionExce transportService.start(); transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { deleteNodesInvoked.add(connection.getNode()); - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfos = new ArrayList<>(); + deletePitInfos.add(deletePitInfo); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(deletePitInfos))); t.start(); } @@ -241,7 +248,8 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod PlainActionFuture future = 
newFuture(); action.execute(task, deletePITRequest, future); DeletePitResponse dr = future.get(); - assertEquals(true, dr.isSucceeded()); + assertTrue(dr.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertTrue(dr.getDeletePitResults().get(0).isSucceeded()); assertEquals(3, deleteNodesInvoked.size()); } @@ -276,8 +284,8 @@ public void testDeletePitWhenNodeIsDown() throws InterruptedException, Execution @Override public void sendFreePITContexts( Transport.Connection connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); @@ -285,7 +293,7 @@ public void sendFreePITContexts( Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); t.start(); } else { - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } } @@ -306,14 +314,14 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); PlainActionFuture future = newFuture(); action.execute(task, deletePITRequest, future); - DeletePitResponse dr = future.get(); - assertEquals(false, dr.isSucceeded()); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); assertEquals(3, deleteNodesInvoked.size()); } } } - public void testDeletePitWhenAllNodesAreDown() throws InterruptedException, ExecutionException { + public void testDeletePitWhenAllNodesAreDown() { List deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); @@ -339,8 +347,8 @@ public void testDeletePitWhenAllNodesAreDown() throws InterruptedException, Exec @Override public void sendFreePITContexts( Transport.Connection 
connection, - List contextIds, - ActionListener listener + List contextIds, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); @@ -363,14 +371,14 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); PlainActionFuture future = newFuture(); action.execute(task, deletePITRequest, future); - DeletePitResponse dr = future.get(); - assertEquals(false, dr.isSucceeded()); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); assertEquals(3, deleteNodesInvoked.size()); } } } - public void testDeletePitFailure() throws InterruptedException, ExecutionException { + public void testDeletePitFailure() { List deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); @@ -399,16 +407,16 @@ public void testDeletePitFailure() throws InterruptedException, ExecutionExcepti @Override public void sendFreePITContexts( Transport.Connection connection, - List contextId, - ActionListener listener + List contextId, + ActionListener listener ) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(false))); + Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); t.start(); } else { - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } } @@ -429,14 +437,14 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod DeletePitRequest deletePITRequest = new DeletePitRequest(pitId); PlainActionFuture 
future = newFuture(); action.execute(task, deletePITRequest, future); - DeletePitResponse dr = future.get(); - assertEquals(false, dr.isSucceeded()); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node down")); assertEquals(3, deleteNodesInvoked.size()); } } } - public void testDeleteAllPitWhenNodeIsDown() throws InterruptedException, ExecutionException { + public void testDeleteAllPitWhenNodeIsDown() { List deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); @@ -462,13 +470,13 @@ public void testDeleteAllPitWhenNodeIsDown() throws InterruptedException, Execut transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 down"))); t.start(); } else { - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } } @@ -489,14 +497,14 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); PlainActionFuture future = newFuture(); action.execute(task, deletePITRequest, future); - DeletePitResponse dr = future.get(); - assertEquals(false, dr.isSucceeded()); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node 3 down")); 
assertEquals(3, deleteNodesInvoked.size()); } } } - public void testDeleteAllPitWhenAllNodesAreDown() throws InterruptedException, ExecutionException { + public void testDeleteAllPitWhenAllNodesAreDown() { List deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); @@ -523,7 +531,7 @@ public void testDeleteAllPitWhenAllNodesAreDown() throws InterruptedException, E SearchTransportService searchTransportService = new SearchTransportService(transportService, null) { @Override - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { deleteNodesInvoked.add(connection.getNode()); Thread t = new Thread(() -> listener.onFailure(new Exception("node down"))); t.start(); @@ -545,14 +553,14 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); PlainActionFuture future = newFuture(); action.execute(task, deletePITRequest, future); - DeletePitResponse dr = future.get(); - assertEquals(false, dr.isSucceeded()); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("node down")); assertEquals(3, deleteNodesInvoked.size()); } } } - public void testDeleteAllPitFailure() throws InterruptedException, ExecutionException { + public void testDeleteAllPitFailure() { List deleteNodesInvoked = new CopyOnWriteArrayList<>(); ActionFilters actionFilters = mock(ActionFilters.class); when(actionFilters.filters()).thenReturn(new ActionFilter[0]); @@ -578,13 +586,13 @@ public void testDeleteAllPitFailure() throws InterruptedException, ExecutionExce transportService.acceptIncomingRequests(); SearchTransportService searchTransportService = new 
SearchTransportService(transportService, null) { - public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { + public void sendFreeAllPitContexts(Transport.Connection connection, final ActionListener listener) { deleteNodesInvoked.add(connection.getNode()); if (connection.getNode().getId() == "node_3") { - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(false))); + Thread t = new Thread(() -> listener.onFailure(new Exception("node 3 is down"))); t.start(); } else { - Thread t = new Thread(() -> listener.onResponse(new SearchFreeContextResponse(true))); + Thread t = new Thread(() -> listener.onResponse(new DeletePitResponse(new ArrayList<>()))); t.start(); } } @@ -605,8 +613,8 @@ public Transport.Connection getConnection(String clusterAlias, DiscoveryNode nod DeletePitRequest deletePITRequest = new DeletePitRequest("_all"); PlainActionFuture future = newFuture(); action.execute(task, deletePITRequest, future); - DeletePitResponse dr = future.get(); - assertEquals(false, dr.isSucceeded()); + Exception e = assertThrows(ExecutionException.class, () -> future.get()); + assertThat(e.getMessage(), containsString("java.lang.Exception: node 3 is down")); assertEquals(3, deleteNodesInvoked.size()); } } diff --git a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java index 3d2b33a2a7baf..96f82e221f090 100644 --- a/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java +++ b/server/src/test/java/org/opensearch/search/DeletePitMultiNodeTests.java @@ -15,6 +15,7 @@ import org.opensearch.action.search.CreatePitRequest; import org.opensearch.action.search.CreatePitResponse; import org.opensearch.action.search.DeletePitAction; +import org.opensearch.action.search.DeletePitInfo; import org.opensearch.action.search.DeletePitRequest; import org.opensearch.action.search.DeletePitResponse; import 
org.opensearch.common.settings.Settings; @@ -29,7 +30,9 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; +import static org.hamcrest.Matchers.blankOrNullString; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; /** @@ -70,13 +73,20 @@ public void testDeletePit() throws Exception { DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = deleteExecute.get(); - assertTrue(deletePITResponse.isSucceeded()); + assertEquals(2, deletePITResponse.getDeletePitResults().size()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSucceeded()); + } /** * Checking deleting the same PIT id again results in succeeded */ deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); - assertTrue(deletePITResponse.isSucceeded()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSucceeded()); + } } public void testDeletePitWithValidAndDeletedIds() throws Exception { @@ -93,8 +103,10 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { DeletePitRequest deletePITRequest = new DeletePitRequest(pitIds); ActionFuture deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = deleteExecute.get(); - assertTrue(deletePITResponse.isSucceeded()); - + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSucceeded()); 
+ } execute = client().execute(CreatePitAction.INSTANCE, request); pitResponse = execute.get(); pitIds.add(pitResponse.getId()); @@ -104,7 +116,10 @@ public void testDeletePitWithValidAndDeletedIds() throws Exception { deletePITRequest = new DeletePitRequest(pitIds); deleteExecute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); deletePITResponse = deleteExecute.get(); - assertTrue(deletePITResponse.isSucceeded()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSucceeded()); + } } public void testDeletePitWithValidAndInvalidIds() throws Exception { @@ -135,7 +150,10 @@ public void testDeleteAllPits() throws Exception { */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - assertTrue(deletePITResponse.isSucceeded()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSucceeded()); + } client().admin().indices().prepareDelete("index1").get(); } @@ -153,8 +171,15 @@ public void testDeletePitWhileNodeDrop() throws Exception { @Override public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); - DeletePitResponse deletePITResponse = execute.get(); - assertFalse(deletePITResponse.isSucceeded()); + try { + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertFalse(deletePitInfo.isSucceeded()); + } + } catch (Exception e) { + throw new AssertionError(e); + } return super.onNodeStopped(nodeName); } }); @@ -166,7 +191,10 @@ public Settings onNodeStopped(String nodeName) throws Exception { */ 
ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - assertTrue(deletePITResponse.isSucceeded()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSucceeded()); + } client().admin().indices().prepareDelete("index1").get(); } @@ -180,8 +208,15 @@ public void testDeleteAllPitsWhileNodeDrop() throws Exception { @Override public Settings onNodeStopped(String nodeName) throws Exception { ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); - DeletePitResponse deletePITResponse = execute.get(); - assertFalse(deletePITResponse.isSucceeded()); + try { + DeletePitResponse deletePITResponse = execute.get(); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertFalse(deletePitInfo.isSucceeded()); + } + } catch (Exception e) { + assertTrue(e.getMessage().contains("Node not connected")); + } return super.onNodeStopped(nodeName); } }); @@ -193,7 +228,10 @@ public Settings onNodeStopped(String nodeName) throws Exception { */ ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); - assertTrue(deletePITResponse.isSucceeded()); + for (DeletePitInfo deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertThat(deletePitInfo.getPitId(), not(blankOrNullString())); + assertTrue(deletePitInfo.isSucceeded()); + } client().admin().indices().prepareDelete("index1").get(); } @@ -236,7 +274,10 @@ public void testDeleteWhileSearch() throws Exception { ActionFuture execute = client().execute(DeletePitAction.INSTANCE, deletePITRequest); DeletePitResponse deletePITResponse = execute.get(); deleted.set(true); - assertTrue(deletePITResponse.isSucceeded()); + for (DeletePitInfo 
deletePitInfo : deletePITResponse.getDeletePitResults()) { + assertTrue(pitIds.contains(deletePitInfo.getPitId())); + assertTrue(deletePitInfo.isSucceeded()); + } for (Thread thread : threads) { thread.join(); diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index c96671166c269..111bd24bf480d 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -40,14 +40,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.search.ClearScrollRequest; -import org.opensearch.action.search.SearchPhaseExecutionException; -import org.opensearch.action.search.SearchRequest; -import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.search.SearchShardTask; -import org.opensearch.action.search.SearchType; -import org.opensearch.action.search.UpdatePitContextRequest; -import org.opensearch.action.search.UpdatePitContextResponse; +import org.opensearch.action.search.*; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; @@ -1419,12 +1412,20 @@ public void testDeletePitReaderContext() { SearchService searchService = getInstanceFromNode(SearchService.class); PlainActionFuture future = new PlainActionFuture<>(); searchService.createPitReaderContext(new ShardId(resolveIndex("index"), 0), TimeValue.timeValueMinutes(between(1, 10)), future); - List contextIds = new ArrayList<>(); - contextIds.add(future.actionGet()); + List contextIds = new ArrayList<>(); + ShardSearchContextId shardSearchContextId = future.actionGet(); + PitSearchContextIdForNode pitSearchContextIdForNode = new PitSearchContextIdForNode( + "1", + new SearchContextIdForNode(null, 
"node1", shardSearchContextId) + ); + contextIds.add(pitSearchContextIdForNode); + assertThat(searchService.getActiveContexts(), equalTo(1)); - assertTrue(searchService.freeReaderContextsIfFound(contextIds)); + DeletePitResponse deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSucceeded()); // assert true for reader context not found - assertTrue(searchService.freeReaderContextsIfFound(contextIds)); + deletePitResponse = searchService.freeReaderContextsIfFound(contextIds); + assertTrue(deletePitResponse.getDeletePitResults().get(0).isSucceeded()); // adding this assert to showcase behavior difference assertFalse(searchService.freeReaderContext(future.actionGet())); } From e06bac734dd3098bc0b2e663f8ddff3a42267a98 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Mon, 20 Jun 2022 11:30:12 +0530 Subject: [PATCH 07/75] Addressing comments Signed-off-by: Bharathwaj G --- .../org/opensearch/action/search/CreatePitController.java | 1 + .../main/java/org/opensearch/action/search/SearchUtils.java | 2 +- .../opensearch/action/search/TransportDeletePitAction.java | 6 ++---- .../src/main/java/org/opensearch/search/SearchService.java | 4 ++++ 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/search/CreatePitController.java b/server/src/main/java/org/opensearch/action/search/CreatePitController.java index 6eb1510bab2f7..53410a87ecdba 100644 --- a/server/src/main/java/org/opensearch/action/search/CreatePitController.java +++ b/server/src/main/java/org/opensearch/action/search/CreatePitController.java @@ -263,6 +263,7 @@ private void cleanupContexts(Collection contexts, String @Override public void onResponse(DeletePitResponse response) { // this is invoke and forget call + if (!logger.isDebugEnabled()) return; for (DeletePitInfo deletePitInfo : response.getDeletePitResults()) { if (!deletePitInfo.isSucceeded()) { logger.debug(() -> new 
ParameterizedMessage("Failed to delete PIT ID {}", deletePitInfo.getPitId())); diff --git a/server/src/main/java/org/opensearch/action/search/SearchUtils.java b/server/src/main/java/org/opensearch/action/search/SearchUtils.java index c6abf16a0ffd1..0d4667c0821d1 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchUtils.java +++ b/server/src/main/java/org/opensearch/action/search/SearchUtils.java @@ -56,7 +56,7 @@ public static StepListener> getConnect } /** - * Delete list of pit contexts. Returns success only if each reader context is either deleted or not found. + * Delete list of pit contexts. Returns the details of success of operation per PIT ID. */ public static void deletePitContexts( Map> nodeToContextsMap, diff --git a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java index 180ffbcba46e8..89f666636ae74 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportDeletePitAction.java @@ -8,8 +8,6 @@ package org.opensearch.action.search; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; @@ -34,7 +32,6 @@ public class TransportDeletePitAction extends HandledTransportAction listener, DeletePitRequest request) { Map> nodeToContextsMap = new HashMap<>(); @@ -86,6 +83,7 @@ private void deletePits(ActionListener listener, DeletePitReq * Delete all active PIT reader contexts */ private void deleteAllPits(ActionListener listener) { + // TODO: Use list all PITs to delete all PITs in case of remote cluster use case int size = clusterService.state().getNodes().getSize(); ActionListener groupedActionListener = 
SearchUtils.getDeletePitGroupedListener(listener, size); for (final DiscoveryNode node : clusterService.state().getNodes()) { diff --git a/server/src/main/java/org/opensearch/search/SearchService.java b/server/src/main/java/org/opensearch/search/SearchService.java index d833a5ddde246..7d822496514c1 100644 --- a/server/src/main/java/org/opensearch/search/SearchService.java +++ b/server/src/main/java/org/opensearch/search/SearchService.java @@ -1026,6 +1026,7 @@ public boolean freeReaderContext(ShardSearchContextId contextId) { /** * Free reader contexts if found + * @return response with list of PIT IDs deleted and if operation is successful */ public DeletePitResponse freeReaderContextsIfFound(List contextIds) { List deleteResults = new ArrayList<>(); @@ -1040,10 +1041,12 @@ public DeletePitResponse freeReaderContextsIfFound(List deleteResults = new ArrayList<>(); From 22eb286479277f4e2676a11b1f6021f5e2020de5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 18 May 2022 16:51:54 -0400 Subject: [PATCH 08/75] Bump reactor-netty-core from 1.0.16 to 1.0.19 in /plugins/repository-azure (#3360) * Bump reactor-netty-core in /plugins/repository-azure Bumps [reactor-netty-core](https://github.com/reactor/reactor-netty) from 1.0.16 to 1.0.19. - [Release notes](https://github.com/reactor/reactor-netty/releases) - [Commits](https://github.com/reactor/reactor-netty/compare/v1.0.16...v1.0.19) --- updated-dependencies: - dependency-name: io.projectreactor.netty:reactor-netty-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/reactor-netty-core-1.0.16.jar.sha1 | 1 - .../licenses/reactor-netty-core-1.0.19.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.0.16.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index e644a4f37be25..55b4fc638f07b 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -58,7 +58,7 @@ dependencies { api 'org.reactivestreams:reactive-streams:1.0.3' api 'io.projectreactor:reactor-core:3.4.17' api 'io.projectreactor.netty:reactor-netty:1.0.18' - api 'io.projectreactor.netty:reactor-netty-core:1.0.16' + api 'io.projectreactor.netty:reactor-netty-core:1.0.19' api 'io.projectreactor.netty:reactor-netty-http:1.0.18' api "org.slf4j:slf4j-api:${versions.slf4j}" api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.16.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.16.jar.sha1 deleted file mode 100644 index 0d1a0cb20c80f..0000000000000 --- a/plugins/repository-azure/licenses/reactor-netty-core-1.0.16.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8f842a912677f2bc614ff60fb9e786d4fa429c34 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 b/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 new file mode 100644 index 0000000000000..74df264a2b908 --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-netty-core-1.0.19.jar.sha1 @@ -0,0 +1 @@ 
+adb58ba62d297b56d6b7915a50f048eddcfc81a6 \ No newline at end of file From f5fc840558d4dbde31e9a6a4882033d234e9914e Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 18 May 2022 13:54:51 -0700 Subject: [PATCH 09/75] [Type removal] _type removal from mocked responses of scroll hit tests (#3377) Signed-off-by: Suraj Singh --- .../src/test/resources/responses/failure_with_status.json | 1 - modules/reindex/src/test/resources/responses/rejection.json | 1 - .../src/test/resources/responses/scroll_fully_loaded.json | 1 - .../src/test/resources/responses/scroll_fully_loaded_1_7.json | 1 - modules/reindex/src/test/resources/responses/scroll_ok.json | 1 - modules/reindex/src/test/resources/responses/start_ok.json | 1 - 6 files changed, 6 deletions(-) diff --git a/modules/reindex/src/test/resources/responses/failure_with_status.json b/modules/reindex/src/test/resources/responses/failure_with_status.json index 314de37a6793e..f9de958a571b3 100644 --- a/modules/reindex/src/test/resources/responses/failure_with_status.json +++ b/modules/reindex/src/test/resources/responses/failure_with_status.json @@ -16,7 +16,6 @@ "max_score": 0.0, "hits": [ { "_index": "test", - "_type": "test", "_id": "10000", "_version": 1, "_score": 0.0, diff --git a/modules/reindex/src/test/resources/responses/rejection.json b/modules/reindex/src/test/resources/responses/rejection.json index 0cc48bfca4fb5..e99cf02ee8023 100644 --- a/modules/reindex/src/test/resources/responses/rejection.json +++ b/modules/reindex/src/test/resources/responses/rejection.json @@ -21,7 +21,6 @@ "max_score" : null, "hits" : [ { "_index" : "test", - "_type" : "test", "_id" : "AVToMiC250DjIiBO3yJ_", "_version" : 1, "_score" : null, diff --git a/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json b/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json index a2c1be34e5ccd..7a6b1ff748b92 100644 --- a/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json +++ 
b/modules/reindex/src/test/resources/responses/scroll_fully_loaded.json @@ -13,7 +13,6 @@ "max_score" : null, "hits" : [ { "_index" : "test", - "_type" : "test", "_id" : "AVToMiDL50DjIiBO3yKA", "_version" : 1, "_score" : null, diff --git a/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json b/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json index f8bebddecf3cf..2f5456704a6f0 100644 --- a/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json +++ b/modules/reindex/src/test/resources/responses/scroll_fully_loaded_1_7.json @@ -13,7 +13,6 @@ "max_score" : null, "hits" : [ { "_index" : "test", - "_type" : "test", "_id" : "AVToMiDL50DjIiBO3yKA", "_version" : 1, "_score" : null, diff --git a/modules/reindex/src/test/resources/responses/scroll_ok.json b/modules/reindex/src/test/resources/responses/scroll_ok.json index 5cdc4a400cbf0..ea6b473ed3e0e 100644 --- a/modules/reindex/src/test/resources/responses/scroll_ok.json +++ b/modules/reindex/src/test/resources/responses/scroll_ok.json @@ -13,7 +13,6 @@ "max_score" : null, "hits" : [ { "_index" : "test", - "_type" : "test", "_id" : "AVToMiDL50DjIiBO3yKA", "_version" : 1, "_score" : null, diff --git a/modules/reindex/src/test/resources/responses/start_ok.json b/modules/reindex/src/test/resources/responses/start_ok.json index a2988341f8caf..97ef80f2dac06 100644 --- a/modules/reindex/src/test/resources/responses/start_ok.json +++ b/modules/reindex/src/test/resources/responses/start_ok.json @@ -12,7 +12,6 @@ "max_score" : null, "hits" : [ { "_index" : "test", - "_type" : "test", "_id" : "AVToMiC250DjIiBO3yJ_", "_version" : 1, "_score" : null, From f14e0413ac86ed3ef6889c98dddff341e7d570ac Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 18 May 2022 14:01:43 -0700 Subject: [PATCH 10/75] [Type removal] Remove _type deprecation from script and conditional processor (#3239) * [Type removal] Remove _type deprecation from script and conditional processor 
Signed-off-by: Suraj Singh * Spotless check apply Signed-off-by: Suraj Singh --- .../ingest/common/ScriptProcessor.java | 11 +--- .../ingest/common/ScriptProcessorTests.java | 20 -------- .../ingest/ConditionalProcessor.java | 14 +----- .../org/opensearch/script/UpdateScript.java | 11 +--- .../ingest/ConditionalProcessorTests.java | 50 ------------------- 5 files changed, 3 insertions(+), 103 deletions(-) diff --git a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java index f2568826fa484..960ab25c72288 100644 --- a/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java +++ b/modules/ingest-common/src/main/java/org/opensearch/ingest/common/ScriptProcessor.java @@ -34,7 +34,6 @@ import org.opensearch.common.Nullable; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.util.CollectionUtils; import org.opensearch.common.xcontent.LoggingDeprecationHandler; import org.opensearch.common.xcontent.NamedXContentRegistry; @@ -45,7 +44,6 @@ import org.opensearch.ingest.AbstractProcessor; import org.opensearch.ingest.IngestDocument; import org.opensearch.ingest.Processor; -import org.opensearch.script.DynamicMap; import org.opensearch.script.IngestScript; import org.opensearch.script.Script; import org.opensearch.script.ScriptException; @@ -55,7 +53,6 @@ import java.io.InputStream; import java.util.Arrays; import java.util.Map; -import java.util.function.Function; import static org.opensearch.ingest.ConfigurationUtils.newConfigurationException; @@ -64,12 +61,6 @@ */ public final class ScriptProcessor extends AbstractProcessor { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("_type", value -> { - 
deprecationLogger.deprecate("script_processor", "[types removal] Looking up doc types [_type] in scripts is deprecated."); - return value; - }); - public static final String TYPE = "script"; private final Script script; @@ -111,7 +102,7 @@ public IngestDocument execute(IngestDocument document) { } else { ingestScript = precompiledIngestScript; } - ingestScript.execute(new DynamicMap(document.getSourceAndMetadata(), PARAMS_FUNCTIONS)); + ingestScript.execute(document.getSourceAndMetadata()); CollectionUtils.ensureNoSelfReferences(document.getSourceAndMetadata(), "ingest script"); return document; } diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java index 1aa4898441598..96d9be75c4ab7 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ScriptProcessorTests.java @@ -105,24 +105,4 @@ private void assertIngestDocument(IngestDocument ingestDocument) { int bytesTotal = ingestDocument.getFieldValue("bytes_in", Integer.class) + ingestDocument.getFieldValue("bytes_out", Integer.class); assertThat(ingestDocument.getSourceAndMetadata().get("bytes_total"), is(bytesTotal)); } - - public void testTypeDeprecation() throws Exception { - String scriptName = "script"; - ScriptService scriptService = new ScriptService( - Settings.builder().build(), - Collections.singletonMap( - Script.DEFAULT_SCRIPT_LANG, - new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> { - ctx.get("_type"); - return null; - }), Collections.emptyMap()) - ), - new HashMap<>(ScriptModule.CORE_CONTEXTS) - ); - Script script = new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()); - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), 
Collections.emptyMap()); - ScriptProcessor processor = new ScriptProcessor(randomAlphaOfLength(10), null, script, null, scriptService); - processor.execute(ingestDocument); - assertWarnings("[types removal] Looking up doc types [_type] in scripts is deprecated."); - } } diff --git a/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java b/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java index 7d4a2b32fbb5f..591a71fd72b8f 100644 --- a/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java +++ b/server/src/main/java/org/opensearch/ingest/ConditionalProcessor.java @@ -32,8 +32,6 @@ package org.opensearch.ingest; -import org.opensearch.common.logging.DeprecationLogger; -import org.opensearch.script.DynamicMap; import org.opensearch.script.IngestConditionalScript; import org.opensearch.script.Script; import org.opensearch.script.ScriptException; @@ -51,7 +49,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; -import java.util.function.Function; import java.util.function.LongSupplier; import java.util.stream.Collectors; @@ -64,15 +61,6 @@ */ public class ConditionalProcessor extends AbstractProcessor implements WrappingProcessor { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> FUNCTIONS = org.opensearch.common.collect.Map.of("_type", value -> { - deprecationLogger.deprecate( - "conditional-processor__type", - "[types removal] Looking up doc types [_type] in scripts is deprecated." 
- ); - return value; - }); - static final String TYPE = "conditional"; private final Script condition; @@ -153,7 +141,7 @@ boolean evaluate(IngestDocument ingestDocument) { IngestConditionalScript.Factory factory = scriptService.compile(condition, IngestConditionalScript.CONTEXT); script = factory.newInstance(condition.getParams()); } - return script.execute(new UnmodifiableIngestData(new DynamicMap(ingestDocument.getSourceAndMetadata(), FUNCTIONS))); + return script.execute(new UnmodifiableIngestData(ingestDocument.getSourceAndMetadata())); } public Processor getInnerProcessor() { diff --git a/server/src/main/java/org/opensearch/script/UpdateScript.java b/server/src/main/java/org/opensearch/script/UpdateScript.java index fdceadc03879e..86697e9ae550e 100644 --- a/server/src/main/java/org/opensearch/script/UpdateScript.java +++ b/server/src/main/java/org/opensearch/script/UpdateScript.java @@ -32,10 +32,7 @@ package org.opensearch.script; -import org.opensearch.common.logging.DeprecationLogger; - import java.util.Map; -import java.util.function.Function; /** * An update script. @@ -44,12 +41,6 @@ */ public abstract class UpdateScript { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DynamicMap.class); - private static final Map> PARAMS_FUNCTIONS = org.opensearch.common.collect.Map.of("_type", value -> { - deprecationLogger.deprecate("update-script", "[types removal] Looking up doc types [_type] in scripts is deprecated."); - return value; - }); - public static final String[] PARAMETERS = {}; /** The context used to compile {@link UpdateScript} factories. */ @@ -63,7 +54,7 @@ public abstract class UpdateScript { public UpdateScript(Map params, Map ctx) { this.params = params; - this.ctx = new DynamicMap(ctx, PARAMS_FUNCTIONS); + this.ctx = ctx; } /** Return the parameters for this script. 
*/ diff --git a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java index 2fc734c5abbf2..1550dd65442a4 100644 --- a/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java +++ b/server/src/test/java/org/opensearch/ingest/ConditionalProcessorTests.java @@ -163,56 +163,6 @@ public void testActsOnImmutableData() throws Exception { assertMutatingCtxThrows(ctx -> ((List) ctx.get("listField")).remove("bar")); } - public void testTypeDeprecation() throws Exception { - - ScriptService scriptService = new ScriptService( - Settings.builder().build(), - Collections.singletonMap( - Script.DEFAULT_SCRIPT_LANG, - new MockScriptEngine(Script.DEFAULT_SCRIPT_LANG, Collections.singletonMap(scriptName, ctx -> { - ctx.get("_type"); - return true; - }), Collections.emptyMap()) - ), - new HashMap<>(ScriptModule.CORE_CONTEXTS) - ); - - LongSupplier relativeTimeProvider = mock(LongSupplier.class); - when(relativeTimeProvider.getAsLong()).thenReturn(0L, TimeUnit.MILLISECONDS.toNanos(1), 0L, TimeUnit.MILLISECONDS.toNanos(2)); - ConditionalProcessor processor = new ConditionalProcessor( - randomAlphaOfLength(10), - "description", - new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, scriptName, Collections.emptyMap()), - scriptService, - new Processor() { - @Override - public IngestDocument execute(final IngestDocument ingestDocument) { - return ingestDocument; - } - - @Override - public String getType() { - return null; - } - - @Override - public String getTag() { - return null; - } - - @Override - public String getDescription() { - return null; - } - }, - relativeTimeProvider - ); - - IngestDocument ingestDocument = RandomDocumentPicks.randomIngestDocument(random(), Collections.emptyMap()); - processor.execute(ingestDocument, (result, e) -> {}); - assertWarnings("[types removal] Looking up doc types [_type] in scripts is deprecated."); - } - public void 
testPrecompiledError() { ScriptService scriptService = MockScriptService.singleContext( IngestConditionalScript.CONTEXT, From 2ffc29253f3da8180e0bb158acfa749cf86bb50c Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Wed, 18 May 2022 14:04:29 -0700 Subject: [PATCH 11/75] [Type removal] Remove _type from _bulk yaml test, scripts, unused constants (#3372) * [Type removal] Remove redundant _type deprecation checks in bulk request Signed-off-by: Suraj Singh * [Type removal] bulk yaml tests validating deprecation on _type and removal from scripts Signed-off-by: Suraj Singh --- .../ingest/common/ForEachProcessorTests.java | 1 - .../rest-api-spec/test/bulk/60_deprecated.yml | 20 ------------------- .../search/fields/SearchFieldsIT.java | 1 - .../termvectors/MultiTermVectorsResponse.java | 1 - .../action/update/UpdateHelper.java | 1 - 5 files changed, 24 deletions(-) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml diff --git a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java index 8db3cefc3a6fd..f49d5492a09b3 100644 --- a/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/opensearch/ingest/common/ForEachProcessorTests.java @@ -127,7 +127,6 @@ public void testMetadataAvailable() throws Exception { TestProcessor innerProcessor = new TestProcessor(id -> { id.setFieldValue("_ingest._value.index", id.getSourceAndMetadata().get("_index")); - id.setFieldValue("_ingest._value.type", id.getSourceAndMetadata().get("_type")); id.setFieldValue("_ingest._value.id", id.getSourceAndMetadata().get("_id")); }); ForEachProcessor processor = new ForEachProcessor("_tag", null, "values", innerProcessor, false); diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml deleted file mode 100644 index 8c8a840eb3f47..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/bulk/60_deprecated.yml +++ /dev/null @@ -1,20 +0,0 @@ - ---- -"Deprecated parameters should fail in Bulk query": - - do: - catch: bad_request - bulk: - body: | - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_version": 1 } } - { "doc": { "f1": "v1" } } - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2", "_version": 1 } } - { "doc": { "f1": "v2" } } - - - do: - catch: bad_request - bulk: - body: | - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_routing": "test1" } } - { "doc": { "f1": "v1" } } - { "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2", "_routing": "test1" } } - { "doc": { "f1": "v2" } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java index 941f4982af9cc..e1cf5fe8dca93 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/fields/SearchFieldsIT.java @@ -142,7 +142,6 @@ protected Map, Object>> pluginScripts() { scripts.put("_fields['num1'].value", vars -> fieldsScript(vars, "num1")); scripts.put("_fields._uid.value", vars -> fieldsScript(vars, "_uid")); scripts.put("_fields._id.value", vars -> fieldsScript(vars, "_id")); - scripts.put("_fields._type.value", vars -> fieldsScript(vars, "_type")); scripts.put("_source.obj1", vars -> sourceScript(vars, "obj1")); scripts.put("_source.obj1.test", vars -> sourceScript(vars, "obj1.test")); diff --git a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java 
b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java index 1a9ec0b63f46c..6f72413b935b7 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/opensearch/action/termvectors/MultiTermVectorsResponse.java @@ -161,7 +161,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws static final class Fields { static final String DOCS = "docs"; static final String _INDEX = "_index"; - static final String _TYPE = "_type"; static final String _ID = "_id"; } diff --git a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java index 09a0a6281b8fc..685b21b892fb3 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateHelper.java @@ -482,7 +482,6 @@ public static class ContextFields { public static final String SOURCE = "_source"; public static final String NOW = "_now"; public static final String INDEX = "_index"; - public static final String TYPE = "_type"; public static final String ID = "_id"; public static final String VERSION = "_version"; public static final String ROUTING = "_routing"; From 833010af6a987b833f4314dc2684f31dff993b80 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 19 May 2022 07:46:03 -0700 Subject: [PATCH 12/75] Fix Lucene-snapshots repo for jdk 17. 
(#3396) Signed-off-by: Marc Handalian --- .github/workflows/lucene-snapshots.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/lucene-snapshots.yml b/.github/workflows/lucene-snapshots.yml index 0fe025ad1aa16..78e7b7b269cf4 100644 --- a/.github/workflows/lucene-snapshots.yml +++ b/.github/workflows/lucene-snapshots.yml @@ -40,6 +40,10 @@ jobs: echo "::set-output name=REVISION::$(git rev-parse --short HEAD)" id: version + - name: Initialize gradle settings + working-directory: ./lucene + run: ./gradlew localSettings + - name: Publish Lucene to local maven repo. working-directory: ./lucene run: ./gradlew publishJarsPublicationToMavenLocal -Pversion.suffix=snapshot-${{ steps.version.outputs.REVISION }} From 44ceeedf0de88c6686a1b1f136ee71c2efe01d57 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 19 May 2022 10:23:59 -0700 Subject: [PATCH 13/75] Replace internal usages of 'master' term in 'server/src/internalClusterTest' directory (#2521) Signed-off-by: Tianli Feng --- .../cluster/tasks/PendingTasksBlocksIT.java | 4 +- ... 
=> IndexingClusterManagerFailoverIT.java} | 20 +- .../opensearch/cluster/ClusterHealthIT.java | 18 +- .../cluster/ClusterInfoServiceIT.java | 2 +- .../cluster/ClusterStateDiffIT.java | 2 +- ...java => MinimumClusterManagerNodesIT.java} | 62 +++--- ...odeIT.java => NoClusterManagerNodeIT.java} | 83 ++++---- .../cluster/SimpleClusterStateIT.java | 11 +- ...ava => SpecificClusterManagerNodesIT.java} | 76 ++++---- .../action/shard/ShardStateActionIT.java | 21 ++- .../coordination/RareClusterStateIT.java | 60 +++--- .../UnsafeBootstrapAndDetachCommandIT.java | 66 +++---- .../coordination/VotingConfigurationIT.java | 11 +- .../cluster/coordination/ZenDiscoveryIT.java | 16 +- .../cluster/routing/AllocationIdIT.java | 2 +- .../cluster/routing/PrimaryAllocationIT.java | 39 ++-- .../decider/DiskThresholdDeciderIT.java | 2 +- .../allocation/decider/MockDiskUsagesIT.java | 12 +- .../discovery/ClusterDisruptionIT.java | 29 +-- ...T.java => ClusterManagerDisruptionIT.java} | 52 ++--- .../discovery/DiscoveryDisruptionIT.java | 83 ++++---- .../discovery/SnapshotDisruptionIT.java | 34 ++-- ... 
=> StableClusterManagerDisruptionIT.java} | 156 +++++++-------- .../org/opensearch/env/NodeEnvironmentIT.java | 6 +- .../env/NodeRepurposeCommandIT.java | 4 +- .../gateway/GatewayIndexStateIT.java | 10 +- .../opensearch/gateway/MetadataNodesIT.java | 18 +- .../gateway/RecoverAfterNodesIT.java | 40 ++-- .../gateway/RecoveryFromGatewayIT.java | 4 +- .../index/mapper/DynamicMappingIT.java | 10 +- ...catedClusterManagerGetFieldMappingIT.java} | 2 +- .../mapping/UpdateMappingIntegrationIT.java | 6 +- .../indices/recovery/IndexRecoveryIT.java | 18 +- .../state/CloseWhileRelocatingShardsIT.java | 2 +- .../store/IndicesStoreIntegrationIT.java | 2 +- ...gestProcessorNotInstalledOnAllNodesIT.java | 2 +- .../persistent/PersistentTasksExecutorIT.java | 2 +- .../recovery/FullRollingRestartIT.java | 2 +- .../BlobStoreRepositoryCleanupIT.java | 20 +- .../opensearch/snapshots/CloneSnapshotIT.java | 73 ++++---- .../snapshots/ConcurrentSnapshotsIT.java | 177 ++++++++++-------- .../DedicatedClusterSnapshotRestoreIT.java | 10 +- ...etadataLoadingDuringSnapshotRestoreIT.java | 4 +- .../opensearch/snapshots/RepositoriesIT.java | 6 +- .../RepositoryFilterUserMetadataIT.java | 19 +- .../snapshots/SnapshotShardsServiceIT.java | 2 +- .../snapshots/SnapshotStatusApisIT.java | 2 +- .../ConcurrentDocumentOperationIT.java | 4 +- .../AbstractSnapshotIntegTestCase.java | 2 +- .../test/OpenSearchIntegTestCase.java | 2 +- 50 files changed, 693 insertions(+), 617 deletions(-) rename server/src/internalClusterTest/java/org/opensearch/action/support/master/{IndexingMasterFailoverIT.java => IndexingClusterManagerFailoverIT.java} (83%) rename server/src/internalClusterTest/java/org/opensearch/cluster/{MinimumMasterNodesIT.java => MinimumClusterManagerNodesIT.java} (87%) rename server/src/internalClusterTest/java/org/opensearch/cluster/{NoMasterNodeIT.java => NoClusterManagerNodeIT.java} (81%) rename server/src/internalClusterTest/java/org/opensearch/cluster/{SpecificMasterNodesIT.java => 
SpecificClusterManagerNodesIT.java} (79%) rename server/src/internalClusterTest/java/org/opensearch/discovery/{MasterDisruptionIT.java => ClusterManagerDisruptionIT.java} (85%) rename server/src/internalClusterTest/java/org/opensearch/discovery/{StableMasterDisruptionIT.java => StableClusterManagerDisruptionIT.java} (55%) rename server/src/internalClusterTest/java/org/opensearch/indices/mapping/{DedicatedMasterGetFieldMappingIT.java => DedicatedClusterManagerGetFieldMappingIT.java} (94%) diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java index 8484cce1045d2..7c10d52c7a111 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/tasks/PendingTasksBlocksIT.java @@ -91,7 +91,7 @@ public void testPendingTasksWithClusterNotRecoveredBlock() throws Exception { } // restart the cluster but prevent it from performing state recovery - final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "master:true").get().getNodes().size(); + final int nodeCount = client().admin().cluster().prepareNodesInfo("data:true", "cluster_manager:true").get().getNodes().size(); internalCluster().fullRestart(new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { @@ -107,7 +107,7 @@ public boolean validateClusterForming() { assertNotNull(client().admin().cluster().preparePendingClusterTasks().get().getPendingTasks()); // starting one more node allows the cluster to recover - internalCluster().startDataOnlyNode(); // cannot update minimum_master_nodes before the cluster has formed + internalCluster().startDataOnlyNode(); // cannot update minimum_cluster_manager_nodes before the cluster has formed ensureGreen(); } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingMasterFailoverIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingClusterManagerFailoverIT.java similarity index 83% rename from server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingMasterFailoverIT.java rename to server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingClusterManagerFailoverIT.java index b4e4c058be198..14e0dd94ea640 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingMasterFailoverIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingClusterManagerFailoverIT.java @@ -48,7 +48,7 @@ import static org.hamcrest.Matchers.equalTo; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -public class IndexingMasterFailoverIT extends OpenSearchIntegTestCase { +public class IndexingClusterManagerFailoverIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { @@ -58,12 +58,12 @@ protected Collection> nodePlugins() { } /** - * Indexing operations which entail mapping changes require a blocking request to the master node to update the mapping. - * If the master node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits. - * This retry logic is implemented in TransportMasterNodeAction and tested by the following master failover scenario. + * Indexing operations which entail mapping changes require a blocking request to the cluster-manager node to update the mapping. + * If the cluster-manager node is being disrupted or if it cannot commit cluster state changes, it needs to retry within timeout limits. + * This retry logic is implemented in TransportMasterNodeAction and tested by the following cluster-manager failover scenario. 
*/ - public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwable { - logger.info("--> start 4 nodes, 3 master, 1 data"); + public void testClusterManagerFailoverDuringIndexingWithMappingChanges() throws Throwable { + logger.info("--> start 4 nodes, 3 cluster-manager, 1 data"); internalCluster().setBootstrapClusterManagerNodeIndex(2); @@ -74,7 +74,7 @@ public void testMasterFailoverDuringIndexingWithMappingChanges() throws Throwabl logger.info("--> wait for all nodes to join the cluster"); ensureStableCluster(4); - // We index data with mapping changes into cluster and have master failover at same time + // We index data with mapping changes into cluster and have cluster-manager failover at same time client().admin() .indices() .prepareCreate("myindex") @@ -108,14 +108,14 @@ public void run() { barrier.await(); - // interrupt communication between master and other nodes in cluster - NetworkDisruption partition = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + // interrupt communication between cluster-manager and other nodes in cluster + NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(partition); logger.info("--> disrupting network"); partition.startDisrupting(); - logger.info("--> waiting for new master to be elected"); + logger.info("--> waiting for new cluster-manager to be elected"); ensureStableCluster(3, dataNode); partition.stopDisrupting(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java index 393ab8e5a7544..5381dcfe4bdd2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterHealthIT.java @@ -60,7 +60,7 @@ public class ClusterHealthIT extends OpenSearchIntegTestCase { public void testSimpleLocalHealth() { 
createIndex("test"); - ensureGreen(); // master should think it's green now. + ensureGreen(); // cluster-manager should think it's green now. for (final String node : internalCluster().getNodeNames()) { // a very high time out, which should never fire due to the local flag @@ -336,7 +336,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS assertFalse(client().admin().cluster().prepareHealth("index").setWaitForGreenStatus().get().isTimedOut()); // at this point the original health response should not have returned: there was never a point where the index was green AND - // the master had processed all pending tasks above LANGUID priority. + // the cluster-manager had processed all pending tasks above LANGUID priority. assertFalse(healthResponseFuture.isDone()); keepSubmittingTasks.set(false); assertFalse(healthResponseFuture.actionGet(TimeValue.timeValueSeconds(30)).isTimedOut()); @@ -346,14 +346,14 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } } - public void testHealthOnMasterFailover() throws Exception { + public void testHealthOnClusterManagerFailover() throws Exception { final String node = internalCluster().startDataOnlyNode(); final boolean withIndex = randomBoolean(); if (withIndex) { - // Create index with many shards to provoke the health request to wait (for green) while master is being shut down. - // Notice that this is set to 0 after the test completed starting a number of health requests and master restarts. + // Create index with many shards to provoke the health request to wait (for green) while cluster-manager is being shut down. + // Notice that this is set to 0 after the test completed starting a number of health requests and cluster-manager restarts. // This ensures that the cluster is yellow when the health request is made, making the health request wait on the observer, - // triggering a call to observer.onClusterServiceClose when master is shutdown. 
+ // triggering a call to observer.onClusterServiceClose when cluster-manager is shutdown. createIndex( "test", Settings.builder() @@ -364,8 +364,8 @@ public void testHealthOnMasterFailover() throws Exception { ); } final List> responseFutures = new ArrayList<>(); - // Run a few health requests concurrent to master fail-overs against a data-node to make sure master failover is handled - // without exceptions + // Run a few health requests concurrent to cluster-manager fail-overs against a data-node + // to make sure cluster-manager failover is handled without exceptions final int iterations = withIndex ? 10 : 20; for (int i = 0; i < iterations; ++i) { responseFutures.add( @@ -394,7 +394,7 @@ public void testHealthOnMasterFailover() throws Exception { } } - public void testWaitForEventsTimesOutIfMasterBusy() { + public void testWaitForEventsTimesOutIfClusterManagerBusy() { final AtomicBoolean keepSubmittingTasks = new AtomicBoolean(true); final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); final PlainActionFuture completionFuture = new PlainActionFuture<>(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java index e8ea0bb933a3e..dae9505fe67bf 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterInfoServiceIT.java @@ -166,7 +166,7 @@ public void testClusterInfoServiceCollectsInformation() { } ensureGreen(indexName); InternalTestCluster internalTestCluster = internalCluster(); - // Get the cluster info service on the master node + // Get the cluster info service on the cluster-manager node final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance( ClusterInfoService.class, 
internalTestCluster.getMasterName() diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java index 7654a937c8dc0..8b510c6a13829 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/ClusterStateDiffIT.java @@ -93,7 +93,7 @@ public class ClusterStateDiffIT extends OpenSearchIntegTestCase { public void testClusterStateDiffSerialization() throws Exception { NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(ClusterModule.getNamedWriteables()); - DiscoveryNode clusterManagerNode = randomNode("master"); + DiscoveryNode clusterManagerNode = randomNode("cluster-manager"); DiscoveryNode otherNode = randomNode("other"); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() .add(clusterManagerNode) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java similarity index 87% rename from server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java rename to server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java index 4981da39197c3..8f512ade7465f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/MinimumClusterManagerNodesIT.java @@ -74,7 +74,7 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -public class MinimumMasterNodesIT extends OpenSearchIntegTestCase { +public class MinimumClusterManagerNodesIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { @@ -83,7 +83,7 @@ protected Collection> nodePlugins() { 
return classes; } - public void testTwoNodesNoMasterBlock() throws Exception { + public void testTwoNodesNoClusterManagerBlock() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(1); Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build(); @@ -151,13 +151,13 @@ public void testTwoNodesNoMasterBlock() throws Exception { ); } - String masterNode = internalCluster().getMasterName(); - String otherNode = node1Name.equals(masterNode) ? node2Name : node1Name; - logger.info("--> add voting config exclusion for non-master node, to be sure it's not elected"); + String clusterManagerNode = internalCluster().getMasterName(); + String otherNode = node1Name.equals(clusterManagerNode) ? node2Name : node1Name; + logger.info("--> add voting config exclusion for non-cluster-manager node, to be sure it's not elected"); client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(otherNode)).get(); - logger.info("--> stop master node, no cluster-manager block should appear"); - Settings masterDataPathSettings = internalCluster().dataPathSettings(masterNode); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNode)); + logger.info("--> stop cluster-manager node, no cluster-manager block should appear"); + Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(clusterManagerNode); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNode)); assertBusy(() -> { ClusterState clusterState = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -170,8 +170,8 @@ public void testTwoNodesNoMasterBlock() throws Exception { assertThat(state.nodes().getSize(), equalTo(2)); assertThat(state.nodes().getMasterNode(), equalTo(null)); - logger.info("--> starting the previous master node again..."); - node2Name = 
internalCluster().startNode(Settings.builder().put(settings).put(masterDataPathSettings).build()); + logger.info("--> starting the previous cluster-manager node again..."); + node2Name = internalCluster().startNode(Settings.builder().put(settings).put(clusterManagerDataPathSettings).build()); clusterHealthResponse = client().admin() .cluster() @@ -204,11 +204,11 @@ public void testTwoNodesNoMasterBlock() throws Exception { clearRequest.setWaitForRemoval(false); client().execute(ClearVotingConfigExclusionsAction.INSTANCE, clearRequest).get(); - masterNode = internalCluster().getMasterName(); - otherNode = node1Name.equals(masterNode) ? node2Name : node1Name; - logger.info("--> add voting config exclusion for master node, to be sure it's not elected"); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(masterNode)).get(); - logger.info("--> stop non-master node, no cluster-manager block should appear"); + clusterManagerNode = internalCluster().getMasterName(); + otherNode = node1Name.equals(clusterManagerNode) ? 
node2Name : node1Name; + logger.info("--> add voting config exclusion for cluster-manager node, to be sure it's not elected"); + client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(clusterManagerNode)).get(); + logger.info("--> stop non-cluster-manager node, no cluster-manager block should appear"); Settings otherNodeDataPathSettings = internalCluster().dataPathSettings(otherNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(otherNode)); @@ -217,7 +217,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { assertThat(state1.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID), equalTo(true)); }); - logger.info("--> starting the previous master node again..."); + logger.info("--> starting the previous cluster-manager node again..."); internalCluster().startNode(Settings.builder().put(settings).put(otherNodeDataPathSettings).build()); ensureGreen(); @@ -249,7 +249,7 @@ public void testTwoNodesNoMasterBlock() throws Exception { } } - public void testThreeNodesNoMasterBlock() throws Exception { + public void testThreeNodesNoClusterManagerBlock() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(2); Settings settings = Settings.builder().put("discovery.initial_state_timeout", "500ms").build(); @@ -312,8 +312,8 @@ public void testThreeNodesNoMasterBlock() throws Exception { List nonClusterManagerNodes = new ArrayList<>( Sets.difference(Sets.newHashSet(internalCluster().getNodeNames()), Collections.singleton(internalCluster().getMasterName())) ); - Settings nonMasterDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0)); - Settings nonMasterDataPathSettings2 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(1)); + Settings nonClusterManagerDataPathSettings1 = internalCluster().dataPathSettings(nonClusterManagerNodes.get(0)); + Settings nonClusterManagerDataPathSettings2 = 
internalCluster().dataPathSettings(nonClusterManagerNodes.get(1)); internalCluster().stopRandomNonMasterNode(); internalCluster().stopRandomNonMasterNode(); @@ -325,7 +325,7 @@ public void testThreeNodesNoMasterBlock() throws Exception { }); logger.info("--> start back the 2 nodes "); - internalCluster().startNodes(nonMasterDataPathSettings1, nonMasterDataPathSettings2); + internalCluster().startNodes(nonClusterManagerDataPathSettings1, nonClusterManagerDataPathSettings2); internalCluster().validateClusterFormed(); ensureGreen(); @@ -347,17 +347,17 @@ public void testCannotCommitStateThreeNodes() throws Exception { internalCluster().startNodes(3, settings); ensureStableCluster(3); - final String master = internalCluster().getMasterName(); + final String clusterManager = internalCluster().getMasterName(); Set otherNodes = new HashSet<>(Arrays.asList(internalCluster().getNodeNames())); - otherNodes.remove(master); - NetworkDisruption partition = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + otherNodes.remove(clusterManager); + NetworkDisruption partition = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(partition); final CountDownLatch latch = new CountDownLatch(1); final AtomicReference failure = new AtomicReference<>(); logger.debug("--> submitting for cluster state to be rejected"); - final ClusterService masterClusterService = internalCluster().clusterService(master); - masterClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + final ClusterService clusterManagerClusterService = internalCluster().clusterService(clusterManager); + clusterManagerClusterService.submitStateUpdateTask("test", new ClusterStateUpdateTask() { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { latch.countDown(); @@ -387,11 +387,11 @@ public void onFailure(String source, Exception e) { assertThat(failure.get(), 
instanceOf(FailedToCommitClusterStateException.class)); logger.debug("--> check that there is no cluster-manager in minor partition"); - assertBusy(() -> assertThat(masterClusterService.state().nodes().getMasterNode(), nullValue())); + assertBusy(() -> assertThat(clusterManagerClusterService.state().nodes().getMasterNode(), nullValue())); - // let major partition to elect new master, to ensure that old master is not elected once partition is restored, - // otherwise persistent setting (which is a part of accepted state on old master) will be propagated to other nodes - logger.debug("--> wait for master to be elected in major partition"); + // let major partition to elect new cluster-manager, to ensure that old cluster-manager is not elected once partition is restored, + // otherwise persistent setting (which is a part of accepted state on old cluster-manager) will be propagated to other nodes + logger.debug("--> wait for cluster-manager to be elected in major partition"); assertBusy(() -> { DiscoveryNode clusterManagerNode = internalCluster().client(randomFrom(otherNodes)) .admin() @@ -403,7 +403,7 @@ public void onFailure(String source, Exception e) { .nodes() .getMasterNode(); assertThat(clusterManagerNode, notNullValue()); - assertThat(clusterManagerNode.getName(), not(equalTo(master))); + assertThat(clusterManagerNode.getName(), not(equalTo(clusterManager))); }); partition.stopDisrupting(); @@ -414,7 +414,7 @@ public void onFailure(String source, Exception e) { for (String node : internalCluster().getNodeNames()) { Settings nodeSetting = internalCluster().clusterService(node).state().metadata().settings(); assertThat( - node + " processed the cluster state despite of a min master node violation", + node + " processed the cluster state despite of a min cluster-manager node violation", nodeSetting.get("_SHOULD_NOT_BE_THERE_"), nullValue() ); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java similarity index 81% rename from server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java rename to server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java index 5226eed2b6610..26852e59d1c86 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/NoMasterNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/NoClusterManagerNodeIT.java @@ -75,7 +75,7 @@ import static org.hamcrest.Matchers.greaterThan; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class NoMasterNodeIT extends OpenSearchIntegTestCase { +public class NoClusterManagerNodeIT extends OpenSearchIntegTestCase { @Override protected int numberOfReplicas() { @@ -87,7 +87,7 @@ protected Collection> nodePlugins() { return Collections.singletonList(MockTransportService.TestPlugin.class); } - public void testNoMasterActions() throws Exception { + public void testNoClusterManagerActions() throws Exception { Settings settings = Settings.builder() .put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), true) .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all") @@ -107,57 +107,63 @@ public void testNoMasterActions() throws Exception { internalCluster().setDisruptionScheme(disruptionScheme); disruptionScheme.startDisrupting(); - final Client clientToMasterlessNode = client(); + final Client clientToClusterManagerlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); + ClusterState state = clientToClusterManagerlessNode.admin() + .cluster() + .prepareState() + .setLocal(true) + .execute() + .actionGet() + .getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); assertRequestBuilderThrows( - clientToMasterlessNode.prepareGet("test", "1"), + 
clientToClusterManagerlessNode.prepareGet("test", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareGet("no_index", "1"), + clientToClusterManagerlessNode.prepareGet("no_index", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareMultiGet().add("test", "1"), + clientToClusterManagerlessNode.prepareMultiGet().add("test", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareMultiGet().add("no_index", "1"), + clientToClusterManagerlessNode.prepareMultiGet().add("no_index", "1"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.admin().indices().prepareAnalyze("test", "this is a test"), + clientToClusterManagerlessNode.admin().indices().prepareAnalyze("test", "this is a test"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.admin().indices().prepareAnalyze("no_index", "this is a test"), + clientToClusterManagerlessNode.admin().indices().prepareAnalyze("no_index", "this is a test"), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareSearch("test").setSize(0), + clientToClusterManagerlessNode.prepareSearch("test").setSize(0), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); assertRequestBuilderThrows( - clientToMasterlessNode.prepareSearch("no_index").setSize(0), + clientToClusterManagerlessNode.prepareSearch("no_index").setSize(0), ClusterBlockException.class, RestStatus.SERVICE_UNAVAILABLE ); @@ -165,7 +171,7 @@ public void testNoMasterActions() throws Exception { checkUpdateAction( false, timeout, - clientToMasterlessNode.prepareUpdate("test", "1") + clientToClusterManagerlessNode.prepareUpdate("test", "1") .setScript(new 
Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())) .setTimeout(timeout) ); @@ -173,41 +179,49 @@ public void testNoMasterActions() throws Exception { checkUpdateAction( true, timeout, - clientToMasterlessNode.prepareUpdate("no_index", "1") + clientToClusterManagerlessNode.prepareUpdate("no_index", "1") .setScript(new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())) .setTimeout(timeout) ); checkWriteAction( - clientToMasterlessNode.prepareIndex("test") + clientToClusterManagerlessNode.prepareIndex("test") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout) ); checkWriteAction( - clientToMasterlessNode.prepareIndex("no_index") + clientToClusterManagerlessNode.prepareIndex("no_index") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout) ); - BulkRequestBuilder bulkRequestBuilder = clientToMasterlessNode.prepareBulk(); + BulkRequestBuilder bulkRequestBuilder = clientToClusterManagerlessNode.prepareBulk(); bulkRequestBuilder.add( - clientToMasterlessNode.prepareIndex("test").setId("1").setSource(XContentFactory.jsonBuilder().startObject().endObject()) + clientToClusterManagerlessNode.prepareIndex("test") + .setId("1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()) ); bulkRequestBuilder.add( - clientToMasterlessNode.prepareIndex("test").setId("2").setSource(XContentFactory.jsonBuilder().startObject().endObject()) + clientToClusterManagerlessNode.prepareIndex("test") + .setId("2") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()) ); bulkRequestBuilder.setTimeout(timeout); checkWriteAction(bulkRequestBuilder); - bulkRequestBuilder = clientToMasterlessNode.prepareBulk(); + bulkRequestBuilder = clientToClusterManagerlessNode.prepareBulk(); bulkRequestBuilder.add( - 
clientToMasterlessNode.prepareIndex("no_index").setId("1").setSource(XContentFactory.jsonBuilder().startObject().endObject()) + clientToClusterManagerlessNode.prepareIndex("no_index") + .setId("1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()) ); bulkRequestBuilder.add( - clientToMasterlessNode.prepareIndex("no_index").setId("2").setSource(XContentFactory.jsonBuilder().startObject().endObject()) + clientToClusterManagerlessNode.prepareIndex("no_index") + .setId("2") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()) ); bulkRequestBuilder.setTimeout(timeout); checkWriteAction(bulkRequestBuilder); @@ -216,7 +230,7 @@ public void testNoMasterActions() throws Exception { } void checkUpdateAction(boolean autoCreateIndex, TimeValue timeout, ActionRequestBuilder builder) { - // we clean the metadata when loosing a master, therefore all operations on indices will auto create it, if allowed + // we clean the metadata when loosing a cluster-manager, therefore all operations on indices will auto create it, if allowed try { builder.get(); fail("expected ClusterBlockException or MasterNotDiscoveredException"); @@ -239,7 +253,7 @@ void checkWriteAction(ActionRequestBuilder builder) { } } - public void testNoMasterActionsWriteMasterBlock() throws Exception { + public void testNoClusterManagerActionsWriteClusterManagerBlock() throws Exception { Settings settings = Settings.builder() .put(AutoCreateIndex.AUTO_CREATE_INDEX_SETTING.getKey(), false) .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write") @@ -270,31 +284,34 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { internalCluster().setDisruptionScheme(disruptionScheme); disruptionScheme.startDisrupting(); - final Client clientToMasterlessNode = client(); + final Client clientToClusterManagerlessNode = client(); assertBusy(() -> { - ClusterState state = clientToMasterlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); + 
ClusterState state = clientToClusterManagerlessNode.admin().cluster().prepareState().setLocal(true).get().getState(); assertTrue(state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)); }); - GetResponse getResponse = clientToMasterlessNode.prepareGet("test1", "1").get(); + GetResponse getResponse = clientToClusterManagerlessNode.prepareGet("test1", "1").get(); assertExists(getResponse); - SearchResponse countResponse = clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true).setSize(0).get(); + SearchResponse countResponse = clientToClusterManagerlessNode.prepareSearch("test1") + .setAllowPartialSearchResults(true) + .setSize(0) + .get(); assertHitCount(countResponse, 1L); logger.info("--> here 3"); - SearchResponse searchResponse = clientToMasterlessNode.prepareSearch("test1").setAllowPartialSearchResults(true).get(); + SearchResponse searchResponse = clientToClusterManagerlessNode.prepareSearch("test1").setAllowPartialSearchResults(true).get(); assertHitCount(searchResponse, 1L); - countResponse = clientToMasterlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0).get(); + countResponse = clientToClusterManagerlessNode.prepareSearch("test2").setAllowPartialSearchResults(true).setSize(0).get(); assertThat(countResponse.getTotalShards(), equalTo(3)); assertThat(countResponse.getSuccessfulShards(), equalTo(1)); TimeValue timeout = TimeValue.timeValueMillis(200); long now = System.currentTimeMillis(); try { - clientToMasterlessNode.prepareUpdate("test1", "1") + clientToClusterManagerlessNode.prepareUpdate("test1", "1") .setDoc(Requests.INDEX_CONTENT_TYPE, "field", "value2") .setTimeout(timeout) .get(); @@ -308,7 +325,7 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { } try { - clientToMasterlessNode.prepareIndex("test1") + clientToClusterManagerlessNode.prepareIndex("test1") .setId("1") .setSource(XContentFactory.jsonBuilder().startObject().endObject()) .setTimeout(timeout) 
@@ -321,7 +338,7 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { internalCluster().clearDisruptionScheme(true); } - public void testNoMasterActionsMetadataWriteMasterBlock() throws Exception { + public void testNoClusterManagerActionsMetadataWriteClusterManagerBlock() throws Exception { Settings settings = Settings.builder() .put(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write") .put(MappingUpdatedAction.INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING.getKey(), "100ms") diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java index 19f7b0b4c630c..e3adeb1ad8d82 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SimpleClusterStateIT.java @@ -286,7 +286,12 @@ public void testLargeClusterStatePublishing() throws Exception { .get() ); ensureGreen(); // wait for green state, so its both green, and there are no more pending events - MappingMetadata masterMappingMetadata = client().admin().indices().prepareGetMappings("test").get().getMappings().get("test"); + MappingMetadata clusterManagerMappingMetadata = client().admin() + .indices() + .prepareGetMappings("test") + .get() + .getMappings() + .get("test"); for (Client client : clients()) { MappingMetadata mappingMetadata = client.admin() .indices() @@ -295,8 +300,8 @@ public void testLargeClusterStatePublishing() throws Exception { .get() .getMappings() .get("test"); - assertThat(mappingMetadata.source().string(), equalTo(masterMappingMetadata.source().string())); - assertThat(mappingMetadata, equalTo(masterMappingMetadata)); + assertThat(mappingMetadata.source().string(), equalTo(clusterManagerMappingMetadata.source().string())); + assertThat(mappingMetadata, equalTo(clusterManagerMappingMetadata)); } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java similarity index 79% rename from server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java rename to server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java index e72ce5d85303d..a58a195939db0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/SpecificClusterManagerNodesIT.java @@ -53,11 +53,11 @@ import static org.hamcrest.Matchers.nullValue; @ClusterScope(scope = Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -public class SpecificMasterNodesIT extends OpenSearchIntegTestCase { +public class SpecificClusterManagerNodesIT extends OpenSearchIntegTestCase { - public void testSimpleOnlyMasterNodeElection() throws IOException { + public void testSimpleOnlyClusterManagerNodeElection() throws IOException { internalCluster().setBootstrapClusterManagerNodeIndex(0); - logger.info("--> start data node / non master node"); + logger.info("--> start data node / non cluster-manager node"); internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { assertThat( @@ -72,12 +72,12 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .getMasterNodeId(), nullValue() ); - fail("should not be able to find master"); + fail("should not be able to find cluster-manager"); } catch (MasterNotDiscoveredException e) { // all is well, no cluster-manager elected } - logger.info("--> start master node"); - final String masterNodeName = internalCluster().startClusterManagerOnlyNode(); + logger.info("--> start cluster-manager node"); + final String clusterManagerNodeName = internalCluster().startClusterManagerOnlyNode(); assertThat( 
internalCluster().nonMasterClient() .admin() @@ -89,7 +89,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); assertThat( internalCluster().masterClient() @@ -102,11 +102,11 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); - logger.info("--> stop master node"); - Settings masterDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); + logger.info("--> stop cluster-manager node"); + Settings clusterManagerDataPathSettings = internalCluster().dataPathSettings(internalCluster().getMasterName()); internalCluster().stopCurrentMasterNode(); try { @@ -122,14 +122,14 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .getMasterNodeId(), nullValue() ); - fail("should not be able to find master"); + fail("should not be able to find cluster-manager"); } catch (MasterNotDiscoveredException e) { // all is well, no cluster-manager elected } - logger.info("--> start previous master node again"); - final String nextMasterEligibleNodeName = internalCluster().startNode( - Settings.builder().put(nonDataNode(masterNode())).put(masterDataPathSettings) + logger.info("--> start previous cluster-manager node again"); + final String nextClusterManagerEligibleNodeName = internalCluster().startNode( + Settings.builder().put(nonDataNode(masterNode())).put(clusterManagerDataPathSettings) ); assertThat( internalCluster().nonMasterClient() @@ -142,7 +142,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .nodes() .getMasterNode() .getName(), - equalTo(nextMasterEligibleNodeName) + equalTo(nextClusterManagerEligibleNodeName) ); assertThat( internalCluster().masterClient() @@ -155,13 +155,13 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .nodes() 
.getMasterNode() .getName(), - equalTo(nextMasterEligibleNodeName) + equalTo(nextClusterManagerEligibleNodeName) ); } - public void testElectOnlyBetweenMasterNodes() throws Exception { + public void testElectOnlyBetweenClusterManagerNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); - logger.info("--> start data node / non master node"); + logger.info("--> start data node / non cluster-manager node"); internalCluster().startNode(Settings.builder().put(dataOnlyNode()).put("discovery.initial_state_timeout", "1s")); try { assertThat( @@ -176,12 +176,12 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .getMasterNodeId(), nullValue() ); - fail("should not be able to find master"); + fail("should not be able to find cluster-manager"); } catch (MasterNotDiscoveredException e) { // all is well, no cluster-manager elected } - logger.info("--> start master node (1)"); - final String masterNodeName = internalCluster().startClusterManagerOnlyNode(); + logger.info("--> start cluster-manager node (1)"); + final String clusterManagerNodeName = internalCluster().startClusterManagerOnlyNode(); assertThat( internalCluster().nonMasterClient() .admin() @@ -193,7 +193,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); assertThat( internalCluster().masterClient() @@ -206,11 +206,11 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); - logger.info("--> start master node (2)"); - final String nextMasterEligableNodeName = internalCluster().startClusterManagerOnlyNode(); + logger.info("--> start cluster-manager node (2)"); + final String nextClusterManagerEligableNodeName = internalCluster().startClusterManagerOnlyNode(); assertThat( internalCluster().nonMasterClient() .admin() @@ -222,7 +222,7 @@ public 
void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); assertThat( internalCluster().nonMasterClient() @@ -235,7 +235,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); assertThat( internalCluster().masterClient() @@ -248,12 +248,12 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(masterNodeName) + equalTo(clusterManagerNodeName) ); - logger.info("--> closing master node (1)"); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(masterNodeName)).get(); - // removing the master from the voting configuration immediately triggers the master to step down + logger.info("--> closing cluster-manager node (1)"); + client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(clusterManagerNodeName)).get(); + // removing the cluster-manager from the voting configuration immediately triggers the cluster-manager to step down assertBusy(() -> { assertThat( internalCluster().nonMasterClient() @@ -266,7 +266,7 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(nextMasterEligableNodeName) + equalTo(nextClusterManagerEligableNodeName) ); assertThat( internalCluster().masterClient() @@ -279,10 +279,10 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(nextMasterEligableNodeName) + equalTo(nextClusterManagerEligableNodeName) ); }); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodeName)); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNodeName)); assertThat( internalCluster().nonMasterClient() .admin() @@ -294,7 +294,7 @@ public void 
testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(nextMasterEligableNodeName) + equalTo(nextClusterManagerEligableNodeName) ); assertThat( internalCluster().masterClient() @@ -307,16 +307,16 @@ public void testElectOnlyBetweenMasterNodes() throws Exception { .nodes() .getMasterNode() .getName(), - equalTo(nextMasterEligableNodeName) + equalTo(nextClusterManagerEligableNodeName) ); } public void testAliasFilterValidation() { internalCluster().setBootstrapClusterManagerNodeIndex(0); - logger.info("--> start master node / non data"); + logger.info("--> start cluster-manager node / non data"); internalCluster().startClusterManagerOnlyNode(); - logger.info("--> start data node / non master node"); + logger.info("--> start data node / non cluster-manager node"); internalCluster().startDataOnlyNode(); assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java index dc1814c132d96..b7e895f38ba19 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/action/shard/ShardStateActionIT.java @@ -119,10 +119,13 @@ public void testFollowupRerouteCanBeSetToHigherPriority() { .setPersistentSettings(Settings.builder().put(ShardStateAction.FOLLOW_UP_REROUTE_PRIORITY_SETTING.getKey(), "urgent")) ); - // ensure that the master always has a HIGH priority pending task - final AtomicBoolean stopSpammingMaster = new AtomicBoolean(); - final ClusterService masterClusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); - masterClusterService.submitStateUpdateTask("spam", new ClusterStateUpdateTask(Priority.HIGH) { + // ensure that the cluster-manager always has a HIGH priority pending task + final AtomicBoolean 
stopSpammingClusterManager = new AtomicBoolean(); + final ClusterService clusterManagerClusterService = internalCluster().getInstance( + ClusterService.class, + internalCluster().getMasterName() + ); + clusterManagerClusterService.submitStateUpdateTask("spam", new ClusterStateUpdateTask(Priority.HIGH) { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -135,18 +138,18 @@ public void onFailure(String source, Exception e) { @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (stopSpammingMaster.get() == false) { - masterClusterService.submitStateUpdateTask("spam", this); + if (stopSpammingClusterManager.get() == false) { + clusterManagerClusterService.submitStateUpdateTask("spam", this); } } }); - // even with the master under such pressure, all shards of the index can be assigned; in particular, after the primaries have - // started there's a follow-up reroute at a higher priority than the spam + // even with the cluster-manager under such pressure, all shards of the index can be assigned; + // in particular, after the primaries have started there's a follow-up reroute at a higher priority than the spam createIndex("test"); assertFalse(client().admin().cluster().prepareHealth().setWaitForGreenStatus().get().isTimedOut()); - stopSpammingMaster.set(true); + stopSpammingClusterManager.set(true); assertFalse(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut()); assertAcked( diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index 2e0ff9aeb5956..61b186c951ce8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ 
-105,9 +105,9 @@ public void testAssignmentWithJustAddedNodes() { // close to have some unassigned started shards shards.. client().admin().indices().prepareClose(index).get(); - final String masterName = internalCluster().getMasterName(); - final ClusterService clusterService = internalCluster().clusterService(masterName); - final AllocationService allocationService = internalCluster().getInstance(AllocationService.class, masterName); + final String clusterManagerName = internalCluster().getMasterName(); + final ClusterService clusterService = internalCluster().clusterService(clusterManagerName); + final AllocationService allocationService = internalCluster().getInstance(AllocationService.class, clusterManagerName); clusterService.submitStateUpdateTask("test-inject-node-and-reroute", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { @@ -159,16 +159,16 @@ private ActionFuture { - assertFalse(masterCoordinator.publicationInProgress()); - final long applierVersion = masterCoordinator.getApplierState().version(); + assertFalse(clusterManagerCoordinator.publicationInProgress()); + final long applierVersion = clusterManagerCoordinator.getApplierState().version(); for (Discovery instance : internalCluster().getInstances(Discovery.class)) { assertEquals(((Coordinator) instance).getApplierState().version(), applierVersion); } }); ActionFuture future = req.execute(); - assertBusy(() -> assertTrue(masterCoordinator.cancelCommittedPublication())); + assertBusy(() -> assertTrue(clusterManagerCoordinator.cancelCommittedPublication())); return future; } @@ -179,7 +179,7 @@ public void testDeleteCreateInOneBulk() throws Exception { prepareCreate("test").setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)).get(); ensureGreen("test"); - // block none master node. + // block none cluster-manager node. 
BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(dataNode, random()); internalCluster().setDisruptionScheme(disruption); logger.info("--> indexing a doc"); @@ -202,9 +202,9 @@ public void testDeleteCreateInOneBulk() throws Exception { ensureGreen(TimeValue.timeValueMinutes(30), "test"); // due to publish_timeout of 0, wait for data node to have cluster state fully applied assertBusy(() -> { - long masterClusterStateVersion = internalCluster().clusterService(internalCluster().getMasterName()).state().version(); + long clusterManagerClusterStateVersion = internalCluster().clusterService(internalCluster().getMasterName()).state().version(); long dataClusterStateVersion = internalCluster().clusterService(dataNode).state().version(); - assertThat(masterClusterStateVersion, equalTo(dataClusterStateVersion)); + assertThat(clusterManagerClusterStateVersion, equalTo(dataClusterStateVersion)); }); assertHitCount(client().prepareSearch("test").get(), 0); } @@ -212,7 +212,7 @@ public void testDeleteCreateInOneBulk() throws Exception { public void testDelayedMappingPropagationOnPrimary() throws Exception { // Here we want to test that things go well if there is a first request // that adds mappings but before mappings are propagated to all nodes - // another index request introduces the same mapping. The master node + // another index request introduces the same mapping. 
The cluster-manager node // will reply immediately since it did not change the cluster state // but the change might not be on the node that performed the indexing // operation yet @@ -220,36 +220,36 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); - final String master = internalCluster().getMasterName(); - assertThat(nodeNames, hasItem(master)); + final String clusterManager = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(clusterManager)); String otherNode = null; for (String node : nodeNames) { - if (node.equals(master) == false) { + if (node.equals(clusterManager) == false) { otherNode = node; break; } } assertNotNull(otherNode); - // Don't allocate the shard on the master node + // Don't allocate the shard on the cluster-manager node assertAcked( prepareCreate("index").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put("index.routing.allocation.exclude._name", master) + .put("index.routing.allocation.exclude._name", clusterManager) ).get() ); ensureGreen(); // Check routing tables ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertEquals(master, state.nodes().getMasterNode().getName()); + assertEquals(clusterManager, state.nodes().getMasterNode().getName()); List shards = state.routingTable().allShards("index"); assertThat(shards, hasSize(1)); for (ShardRouting shard : shards) { if (shard.primary()) { - // primary must not be on the master node + // primary must not be on the cluster-manager node assertFalse(state.nodes().getMasterNodeId().equals(shard.currentNodeId())); } else { fail(); // only primaries @@ -266,7 +266,7 @@ public void testDelayedMappingPropagationOnPrimary() throws Exception { 
client().admin().indices().preparePutMapping("index").setSource("field", "type=long") ); - // ...and wait for mappings to be available on master + // ...and wait for mappings to be available on cluster-manager assertBusy(() -> { MappingMetadata typeMappings = client().admin().indices().prepareGetMappings("index").get().getMappings().get("index"); assertNotNull(typeMappings); @@ -308,24 +308,24 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { final List nodeNames = internalCluster().startNodes(2); assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); - final String master = internalCluster().getMasterName(); - assertThat(nodeNames, hasItem(master)); + final String clusterManager = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(clusterManager)); String otherNode = null; for (String node : nodeNames) { - if (node.equals(master) == false) { + if (node.equals(clusterManager) == false) { otherNode = node; break; } } assertNotNull(otherNode); - // Force allocation of the primary on the master node by first only allocating on the master + // Force allocation of the primary on the cluster-manager node by first only allocating on the cluster-manager // and then allowing all nodes so that the replica gets allocated on the other node prepareCreate("index").setSettings( Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) - .put("index.routing.allocation.include._name", master) + .put("index.routing.allocation.include._name", clusterManager) ).get(); client().admin() .indices() @@ -336,12 +336,12 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { // Check routing tables ClusterState state = client().admin().cluster().prepareState().get().getState(); - assertEquals(master, state.nodes().getMasterNode().getName()); + assertEquals(clusterManager, state.nodes().getMasterNode().getName()); List shards = 
state.routingTable().allShards("index"); assertThat(shards, hasSize(2)); for (ShardRouting shard : shards) { if (shard.primary()) { - // primary must be on the master + // primary must be on the cluster-manager assertEquals(state.nodes().getMasterNodeId(), shard.currentNodeId()); } else { assertTrue(shard.active()); @@ -357,9 +357,9 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { ); final Index index = resolveIndex("index"); - // Wait for mappings to be available on master + // Wait for mappings to be available on cluster-manager assertBusy(() -> { - final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, clusterManager); final IndexService indexService = indicesService.indexServiceSafe(index); assertNotNull(indexService); final MapperService mapperService = indexService.mapperService(); @@ -381,9 +381,9 @@ public void testDelayedMappingPropagationOnReplica() throws Exception { client().prepareIndex("index").setId("2").setSource("field2", 42) ); - // ...and wait for second mapping to be available on master + // ...and wait for second mapping to be available on cluster-manager assertBusy(() -> { - final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, clusterManager); final IndexService indexService = indicesService.indexServiceSafe(index); assertNotNull(indexService); final MapperService mapperService = indexService.mapperService(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java index 5fc0317ad8417..b0153a1306928 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/UnsafeBootstrapAndDetachCommandIT.java @@ -167,7 +167,7 @@ private void removeBlock() { } } - public void testBootstrapNotMasterEligible() { + public void testBootstrapNotClusterManagerEligible() { final Environment environment = TestEnvironment.newEnvironment( Settings.builder().put(nonMasterNode(internalCluster().getDefaultSettings())).build() ); @@ -283,12 +283,12 @@ public void testDetachAbortedByUser() throws IOException { expectThrows(() -> detachCluster(environment, true), OpenSearchNodeCommand.ABORTED_BY_USER_MSG); } - public void test3MasterNodes2Failed() throws Exception { + public void test3ClusterManagerNodes2Failed() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(2); - List masterNodes = new ArrayList<>(); + List clusterManagerNodes = new ArrayList<>(); logger.info("--> start 1st cluster-manager-eligible node"); - masterNodes.add( + clusterManagerNodes.add( internalCluster().startClusterManagerOnlyNode( Settings.builder().put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s").build() ) @@ -300,12 +300,12 @@ public void test3MasterNodes2Failed() throws Exception { ); // node ordinal 1 logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and bootstrap"); - masterNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 + clusterManagerNodes.addAll(internalCluster().startMasterOnlyNodes(2)); // node ordinals 2 and 3 logger.info("--> wait for all nodes to join the cluster"); ensureStableCluster(4); - List currentClusterNodes = new ArrayList<>(masterNodes); + List currentClusterNodes = new ArrayList<>(clusterManagerNodes); currentClusterNodes.add(dataNode); currentClusterNodes.forEach(node -> ensureReadOnlyBlock(false, node)); @@ -313,14 +313,14 @@ public void test3MasterNodes2Failed() throws 
Exception { createIndex("test"); ensureGreen("test"); - Settings master1DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(0)); - Settings master2DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(1)); - Settings master3DataPathSettings = internalCluster().dataPathSettings(masterNodes.get(2)); + Settings clusterManager1DataPathSettings = internalCluster().dataPathSettings(clusterManagerNodes.get(0)); + Settings clusterManager2DataPathSettings = internalCluster().dataPathSettings(clusterManagerNodes.get(1)); + Settings clusterManager3DataPathSettings = internalCluster().dataPathSettings(clusterManagerNodes.get(2)); Settings dataNodeDataPathSettings = internalCluster().dataPathSettings(dataNode); - logger.info("--> stop 2nd and 3d master eligible node"); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(1))); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(2))); + logger.info("--> stop 2nd and 3rd cluster-manager eligible node"); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNodes.get(1))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNodes.get(2))); logger.info("--> ensure NO_MASTER_BLOCK on data-only node"); assertBusy(() -> { @@ -336,19 +336,19 @@ public void test3MasterNodes2Failed() throws Exception { }); logger.info("--> try to unsafely bootstrap 1st cluster-manager-eligible node, while node lock is held"); - Environment environmentMaster1 = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(master1DataPathSettings).build() + Environment environmentClusterManager1 = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManager1DataPathSettings).build() ); - expectThrows(() -> unsafeBootstrap(environmentMaster1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); + expectThrows(()
-> unsafeBootstrap(environmentClusterManager1), UnsafeBootstrapMasterCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG); logger.info("--> stop 1st cluster-manager-eligible node and data-only node"); NodeEnvironment nodeEnvironment = internalCluster().getMasterNodeInstance(NodeEnvironment.class); - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(masterNodes.get(0))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(clusterManagerNodes.get(0))); assertBusy(() -> internalCluster().getInstance(GatewayMetaState.class, dataNode).allPendingAsyncStatesWritten()); internalCluster().stopRandomDataNode(); logger.info("--> unsafely-bootstrap 1st cluster-manager-eligible node"); - MockTerminal terminal = unsafeBootstrap(environmentMaster1, false, true); + MockTerminal terminal = unsafeBootstrap(environmentClusterManager1, false, true); Metadata metadata = OpenSearchNodeCommand.createPersistedClusterStateService(Settings.EMPTY, nodeEnvironment.nodeDataPaths()) .loadBestOnDiskState().metadata; assertThat( @@ -364,7 +364,7 @@ public void test3MasterNodes2Failed() throws Exception { ); logger.info("--> start 1st cluster-manager-eligible node"); - String masterNode2 = internalCluster().startClusterManagerOnlyNode(master1DataPathSettings); + String clusterManagerNode2 = internalCluster().startClusterManagerOnlyNode(clusterManager1DataPathSettings); logger.info("--> detach-cluster on data-only node"); Environment environmentData = TestEnvironment.newEnvironment( @@ -391,7 +391,7 @@ public void test3MasterNodes2Failed() throws Exception { List bootstrappedNodes = new ArrayList<>(); bootstrappedNodes.add(dataNode2); - bootstrappedNodes.add(masterNode2); + bootstrappedNodes.add(clusterManagerNode2); bootstrappedNodes.forEach(node -> ensureReadOnlyBlock(true, node)); logger.info("--> ensure index test is green"); @@ -400,30 +400,30 @@ public void test3MasterNodes2Failed() throws Exception { 
assertThat(indexMetadata.getSettings().get(IndexMetadata.SETTING_HISTORY_UUID), notNullValue()); logger.info("--> detach-cluster on 2nd and 3rd cluster-manager-eligible nodes"); - Environment environmentMaster2 = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(master2DataPathSettings).build() + Environment environmentClusterManager2 = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManager2DataPathSettings).build() ); - detachCluster(environmentMaster2, false); - Environment environmentMaster3 = TestEnvironment.newEnvironment( - Settings.builder().put(internalCluster().getDefaultSettings()).put(master3DataPathSettings).build() + detachCluster(environmentClusterManager2, false); + Environment environmentClusterManager3 = TestEnvironment.newEnvironment( + Settings.builder().put(internalCluster().getDefaultSettings()).put(clusterManager3DataPathSettings).build() ); - detachCluster(environmentMaster3, false); + detachCluster(environmentClusterManager3, false); logger.info("--> start 2nd and 3rd cluster-manager-eligible nodes and ensure 4 nodes stable cluster"); - bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(master2DataPathSettings)); - bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(master3DataPathSettings)); + bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(clusterManager2DataPathSettings)); + bootstrappedNodes.add(internalCluster().startClusterManagerOnlyNode(clusterManager3DataPathSettings)); ensureStableCluster(4); bootstrappedNodes.forEach(node -> ensureReadOnlyBlock(true, node)); removeBlock(); } - public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Exception { + public void testAllClusterManagerEligibleNodesFailedDanglingIndexImport() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); Settings settings = 
Settings.builder().put(AUTO_IMPORT_DANGLING_INDICES_SETTING.getKey(), true).build(); logger.info("--> start mixed data and cluster-manager-eligible node and bootstrap cluster"); - String masterNode = internalCluster().startNode(settings); // node ordinal 0 + String clusterManagerNode = internalCluster().startNode(settings); // node ordinal 0 logger.info("--> start data-only node and ensure 2 nodes stable cluster"); String dataNode = internalCluster().startDataOnlyNode(settings); // node ordinal 1 @@ -458,7 +458,7 @@ public void testAllMasterEligibleNodesFailedDanglingIndexImport() throws Excepti detachCluster(environment, false); logger.info("--> stop cluster-manager-eligible node, clear its data and start it again - new cluster should form"); - internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() { + internalCluster().restartNode(clusterManagerNode, new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { return true; @@ -490,7 +490,7 @@ public void testNoInitialBootstrapAfterDetach() throws Exception { String node = internalCluster().startClusterManagerOnlyNode( Settings.builder() - // give the cluster 2 seconds to elect the master (it should not) + // give the cluster 2 seconds to elect the cluster-manager (it should not) .put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "2s") .put(clusterManagerNodeDataPathSettings) .build() @@ -524,9 +524,9 @@ public void testCanRunUnsafeBootstrapAfterErroneousDetachWithoutLoosingMetadata( detachCluster(environment); unsafeBootstrap(environment); // read-only block will remain same as one before bootstrap, in this case it is false - String masterNode2 = internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings); + String clusterManagerNode2 = internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings); ensureGreen(); - ensureReadOnlyBlock(false, masterNode2); + ensureReadOnlyBlock(false, 
clusterManagerNode2); state = internalCluster().client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.metadata().settings().get(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.getKey()), equalTo("1234kb")); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java index ef2d52e2de7b9..544565f4c1cd4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/VotingConfigurationIT.java @@ -64,12 +64,12 @@ protected Collection> nodePlugins() { public void testAbdicateAfterVotingConfigExclusionAdded() throws ExecutionException, InterruptedException { internalCluster().setBootstrapClusterManagerNodeIndex(0); internalCluster().startNodes(2); - final String originalMaster = internalCluster().getMasterName(); + final String originalClusterManager = internalCluster().getMasterName(); - logger.info("--> excluding master node {}", originalMaster); - client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(originalMaster)).get(); + logger.info("--> excluding cluster-manager node {}", originalClusterManager); + client().execute(AddVotingConfigExclusionsAction.INSTANCE, new AddVotingConfigExclusionsRequest(originalClusterManager)).get(); client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); - assertNotEquals(originalMaster, internalCluster().getMasterName()); + assertNotEquals(originalClusterManager, internalCluster().getMasterName()); } public void testElectsNodeNotInVotingConfiguration() throws Exception { @@ -77,7 +77,8 @@ public void testElectsNodeNotInVotingConfiguration() throws Exception { final List nodeNames = internalCluster().startNodes(4); // a 4-node cluster settles on a 3-node 
configuration; we then prevent the nodes in the configuration from winning an election - // by failing at the pre-voting stage, so that the extra node must be elected instead when the master shuts down. This extra node + // by failing at the pre-voting stage, so that the extra node must be elected instead when the cluster-manager shuts down. This + // extra node // should then add itself into the voting configuration. assertFalse( diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java index d70da69853f17..84bf25141d5e0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/ZenDiscoveryIT.java @@ -71,10 +71,10 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 0) public class ZenDiscoveryIT extends OpenSearchIntegTestCase { - public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Exception { + public void testNoShardRelocationsOccurWhenElectedClusterManagerNodeFails() throws Exception { - Settings masterNodeSettings = clusterManagerOnlyNode(); - internalCluster().startNodes(2, masterNodeSettings); + Settings clusterManagerNodeSettings = clusterManagerOnlyNode(); + internalCluster().startNodes(2, clusterManagerNodeSettings); Settings dateNodeSettings = dataNode(); internalCluster().startNodes(2, dateNodeSettings); ClusterHealthResponse clusterHealthResponse = client().admin() @@ -89,20 +89,20 @@ public void testNoShardRelocationsOccurWhenElectedMasterNodeFails() throws Excep createIndex("test"); ensureSearchable("test"); RecoveryResponse r = client().admin().indices().prepareRecoveries("test").get(); - int numRecoveriesBeforeNewMaster = r.shardRecoveryStates().get("test").size(); + int 
numRecoveriesBeforeNewClusterManager = r.shardRecoveryStates().get("test").size(); - final String oldMaster = internalCluster().getMasterName(); + final String oldClusterManager = internalCluster().getMasterName(); internalCluster().stopCurrentMasterNode(); assertBusy(() -> { String current = internalCluster().getMasterName(); assertThat(current, notNullValue()); - assertThat(current, not(equalTo(oldMaster))); + assertThat(current, not(equalTo(oldClusterManager))); }); ensureSearchable("test"); r = client().admin().indices().prepareRecoveries("test").get(); - int numRecoveriesAfterNewMaster = r.shardRecoveryStates().get("test").size(); - assertThat(numRecoveriesAfterNewMaster, equalTo(numRecoveriesBeforeNewMaster)); + int numRecoveriesAfterNewClusterManager = r.shardRecoveryStates().get("test").size(); + assertThat(numRecoveriesAfterNewClusterManager, equalTo(numRecoveriesBeforeNewClusterManager)); } public void testHandleNodeJoin_incompatibleClusterState() throws InterruptedException, ExecutionException, TimeoutException { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java index 7b0ffa0ceea32..a57ea705f4f88 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/AllocationIdIT.java @@ -135,7 +135,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale // create fake corrupted marker on node1 putFakeCorruptionMarker(indexSettings, shardId, indexPath); - // thanks to master node1 is out of sync + // thanks to cluster-manager node1 is out of sync node1 = internalCluster().startNode(node1DataPathSettings); // there is only _stale_ primary diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java index 00e603e196a0c..0dbdf57e34269 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/PrimaryAllocationIT.java @@ -110,7 +110,7 @@ protected boolean addMockInternalEngine() { } public void testBulkWeirdScenario() throws Exception { - String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked( @@ -149,7 +149,7 @@ public void testBulkWeirdScenario() throws Exception { } // returns data paths settings of in-sync shard copy - private Settings createStaleReplicaScenario(String master) throws Exception { + private Settings createStaleReplicaScenario(String clusterManager) throws Exception { client().prepareIndex("test").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); refresh(); ClusterState state = client().admin().cluster().prepareState().all().get().getState(); @@ -167,14 +167,14 @@ private Settings createStaleReplicaScenario(String master) throws Exception { } NetworkDisruption partition = new NetworkDisruption( - new TwoPartitions(Sets.newHashSet(master, replicaNode), Collections.singleton(primaryNode)), + new TwoPartitions(Sets.newHashSet(clusterManager, replicaNode), Collections.singleton(primaryNode)), NetworkDisruption.DISCONNECT ); internalCluster().setDisruptionScheme(partition); logger.info("--> partitioning node with primary shard from rest of cluster"); partition.startDisrupting(); - ensureStableCluster(2, master); + ensureStableCluster(2, clusterManager); logger.info("--> index a document into previous replica shard (that is now primary)"); client(replicaNode).prepareIndex("test").setSource(jsonBuilder().startObject().field("field", 
"value1").endObject()).get(); @@ -183,27 +183,30 @@ private Settings createStaleReplicaScenario(String master) throws Exception { final Settings inSyncDataPathSettings = internalCluster().dataPathSettings(replicaNode); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); - ensureStableCluster(1, master); + ensureStableCluster(1, clusterManager); partition.stopDisrupting(); logger.info("--> waiting for node with old primary shard to rejoin the cluster"); - ensureStableCluster(2, master); + ensureStableCluster(2, clusterManager); logger.info("--> check that old primary shard does not get promoted to primary again"); // kick reroute and wait for all shard states to be fetched - client(master).admin().cluster().prepareReroute().get(); + client(clusterManager).admin().cluster().prepareReroute().get(); assertBusy( - () -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetches(), equalTo(0)) + () -> assertThat(internalCluster().getInstance(GatewayAllocator.class, clusterManager).getNumberOfInFlightFetches(), equalTo(0)) ); // kick reroute a second time and check that all shards are unassigned - assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + assertThat( + client(clusterManager).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), + equalTo(2) + ); return inSyncDataPathSettings; } public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { - logger.info("--> starting 3 nodes, 1 master, 2 data"); - String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + logger.info("--> starting 3 nodes, 1 cluster-manager, 2 data"); + String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked( client().admin() @@ -213,7 +216,7 @@ public void 
testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception .get() ); ensureGreen(); - final Settings inSyncDataPathSettings = createStaleReplicaScenario(master); + final Settings inSyncDataPathSettings = createStaleReplicaScenario(clusterManager); logger.info("--> starting node that reuses data folder with the up-to-date primary shard"); internalCluster().startDataOnlyNode(inSyncDataPathSettings); @@ -291,7 +294,7 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce } public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { - logger.info("--> starting 3 nodes, 1 master, 2 data"); + logger.info("--> starting 3 nodes, 1 cluster-manager, 2 data"); String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked( @@ -657,7 +660,7 @@ public void testForceAllocatePrimaryOnNoDecision() throws Exception { * This test asserts that replicas failed to execute resync operations will be failed but not marked as stale. */ public void testPrimaryReplicaResyncFailed() throws Exception { - String master = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + String clusterManager = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); final int numberOfReplicas = between(2, 3); final String oldPrimary = internalCluster().startDataOnlyNode(); assertAcked( @@ -671,7 +674,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { ensureGreen(); String timeout = randomFrom("0s", "1s", "2s"); assertAcked( - client(master).admin() + client(clusterManager).admin() .cluster() .prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "none")) @@ -700,7 +703,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { internalCluster().stopRandomNode(InternalTestCluster.nameFilter(oldPrimary)); // Checks that we fails replicas in one side but not mark them as stale. 
assertBusy(() -> { - ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + ClusterState state = client(clusterManager).admin().cluster().prepareState().get().getState(); final IndexShardRoutingTable shardRoutingTable = state.routingTable().shardRoutingTable(shardId); final String newPrimaryNode = state.getRoutingNodes().node(shardRoutingTable.primary.currentNodeId()).node().getName(); assertThat(newPrimaryNode, not(equalTo(oldPrimary))); @@ -712,7 +715,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { assertThat(state.metadata().index("test").inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1)); }, 1, TimeUnit.MINUTES); assertAcked( - client(master).admin() + client(clusterManager).admin() .cluster() .prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.routing.allocation.enable", "all")) @@ -722,7 +725,7 @@ public void testPrimaryReplicaResyncFailed() throws Exception { partition.ensureHealthy(internalCluster()); logger.info("--> stop disrupting network and re-enable allocation"); assertBusy(() -> { - ClusterState state = client(master).admin().cluster().prepareState().get().getState(); + ClusterState state = client(clusterManager).admin().cluster().prepareState().get().getState(); assertThat(state.routingTable().shardRoutingTable(shardId).activeShards(), hasSize(numberOfReplicas)); assertThat(state.metadata().index("test").inSyncAllocationIds(shardId.id()), hasSize(numberOfReplicas + 1)); for (String node : replicaNodes) { diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java index 2fe7efdf8fc3a..b5d260f5c3314 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderIT.java @@ -353,7 +353,7 @@ private void refreshDiskUsage() { private void assertBusyWithDiskUsageRefresh(String nodeName, String indexName, Matcher> matcher) throws Exception { assertBusy(() -> { - // refresh the master's ClusterInfoService before checking the assigned shards because DiskThresholdMonitor might still + // refresh the cluster-manager's ClusterInfoService before checking the assigned shards because DiskThresholdMonitor might still // be processing a previous ClusterInfo update and will skip the new one (see DiskThresholdMonitor#onNewInfo(ClusterInfo) // and its internal checkInProgress flag) refreshDiskUsage(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java index 05b0f10be02f3..520ad75535033 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/allocation/decider/MockDiskUsagesIT.java @@ -269,9 +269,9 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() throws Exception final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); - final AtomicReference masterAppliedClusterState = new AtomicReference<>(); + final AtomicReference clusterManagerAppliedClusterState = new AtomicReference<>(); internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { - masterAppliedClusterState.set(event.state()); + clusterManagerAppliedClusterState.set(event.state()); clusterInfoService.refresh(); // so that a subsequent reroute sees disk usage according to the current state }); @@ -326,7 +326,7 @@ public void testOnlyMovesEnoughShardsToDropBelowHighWatermark() 
throws Exception fsInfoPath, 1000L, discoveryNode.getId().equals(nodeIds.get(2)) - ? 101L - masterAppliedClusterState.get().getRoutingNodes().node(nodeIds.get(2)).numberOfOwningShards() + ? 101L - clusterManagerAppliedClusterState.get().getRoutingNodes().node(nodeIds.get(2)).numberOfOwningShards() : 1000L ) ); @@ -349,7 +349,7 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { internalCluster().startNode(Settings.builder().put(Environment.PATH_DATA_SETTING.getKey(), createTempDir())); } - final AtomicReference masterAppliedClusterState = new AtomicReference<>(); + final AtomicReference clusterManagerAppliedClusterState = new AtomicReference<>(); final MockInternalClusterInfoService clusterInfoService = getMockInternalClusterInfoService(); @@ -360,7 +360,7 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { internalCluster().getCurrentMasterNodeInstance(ClusterService.class).addListener(event -> { assertThat(event.state().getRoutingNodes().node(nodeIds.get(2)).size(), lessThanOrEqualTo(1)); - masterAppliedClusterState.set(event.state()); + clusterManagerAppliedClusterState.set(event.state()); clusterInfoService.refresh(); // so that a subsequent reroute sees disk usage according to the current state }); @@ -385,7 +385,7 @@ public void testDoesNotExceedLowWatermarkWhenRebalancing() throws Exception { fsInfoPath, 1000L, discoveryNode.getId().equals(nodeIds.get(2)) - ? 150L - masterAppliedClusterState.get().getRoutingNodes().node(nodeIds.get(2)).numberOfOwningShards() + ? 
150L - clusterManagerAppliedClusterState.get().getRoutingNodes().node(nodeIds.get(2)).numberOfOwningShards() : 1000L ) ); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index c52918ee80fe0..7a8b6b447a68d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -240,7 +240,7 @@ public void testAckedIndexing() throws Exception { node ); } - // in case of a bridge partition, shard allocation can fail "index.allocation.max_retries" times if the master + // in case of a bridge partition, shard allocation can fail "index.allocation.max_retries" times if the cluster-manager // is the super-connected node and recovery source and target are on opposite sides of the bridge if (disruptionScheme instanceof NetworkDisruption && ((NetworkDisruption) disruptionScheme).getDisruptedLinks() instanceof Bridge) { @@ -409,7 +409,7 @@ public void onFailure(Exception e) { } } - public void testCannotJoinIfMasterLostDataFolder() throws Exception { + public void testCannotJoinIfClusterManagerLostDataFolder() throws Exception { String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); String dataNode = internalCluster().startDataOnlyNode(); @@ -424,7 +424,7 @@ public Settings onNodeStopped(String nodeName) { return Settings.builder() .put(ClusterBootstrapService.INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey(), nodeName) /* - * the data node might join while the master is still not fully established as master just yet and bypasses the join + * the data node might join while the cluster-manager is still not fully established as cluster-manager just yet and bypasses the join * validation that is done before adding the node to the cluster. 
Only the join validation when handling the publish * request takes place, but at this point the cluster state has been successfully committed, and will subsequently be * exposed to the applier. The health check below therefore sees the cluster state with the 2 nodes and thinks all is @@ -458,34 +458,35 @@ public boolean validateClusterForming() { } /** - * Tests that indices are properly deleted even if there is a master transition in between. + * Tests that indices are properly deleted even if there is a cluster-manager transition in between. * Test for https://github.com/elastic/elasticsearch/issues/11665 */ public void testIndicesDeleted() throws Exception { final String idxName = "test"; - final List allMasterEligibleNodes = internalCluster().startMasterOnlyNodes(2); + final List allClusterManagerEligibleNodes = internalCluster().startMasterOnlyNodes(2); final String dataNode = internalCluster().startDataOnlyNode(); ensureStableCluster(3); assertAcked(prepareCreate("test")); - final String masterNode1 = internalCluster().getMasterName(); + final String clusterManagerNode1 = internalCluster().getMasterName(); NetworkDisruption networkDisruption = new NetworkDisruption( - new TwoPartitions(masterNode1, dataNode), + new TwoPartitions(clusterManagerNode1, dataNode), NetworkDisruption.UNRESPONSIVE ); internalCluster().setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); // We know this will time out due to the partition, we check manually below to not proceed until - // the delete has been applied to the master node and the master eligible node. - internalCluster().client(masterNode1).admin().indices().prepareDelete(idxName).setTimeout("0s").get(); - // Don't restart the master node until we know the index deletion has taken effect on master and the master eligible node. + // the delete has been applied to the cluster-manager node and the cluster-manager eligible node. 
+ internalCluster().client(clusterManagerNode1).admin().indices().prepareDelete(idxName).setTimeout("0s").get(); + // Don't restart the cluster-manager node until we know the index deletion has taken effect on cluster-manager and the + // cluster-manager eligible node. assertBusy(() -> { - for (String masterNode : allMasterEligibleNodes) { - final ClusterState masterState = internalCluster().clusterService(masterNode).state(); - assertTrue("index not deleted on " + masterNode, masterState.metadata().hasIndex(idxName) == false); + for (String clusterManagerNode : allClusterManagerEligibleNodes) { + final ClusterState clusterManagerState = internalCluster().clusterService(clusterManagerNode).state(); + assertTrue("index not deleted on " + clusterManagerNode, clusterManagerState.metadata().hasIndex(idxName) == false); } }); - internalCluster().restartNode(masterNode1, InternalTestCluster.EMPTY_CALLBACK); + internalCluster().restartNode(clusterManagerNode1, InternalTestCluster.EMPTY_CALLBACK); ensureYellow(); assertFalse(client().admin().indices().prepareExists(idxName).get().isExists()); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java similarity index 85% rename from server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java rename to server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 090423b380bf7..4515e0828be2e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/MasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -60,20 +60,20 @@ import static org.hamcrest.Matchers.not; /** - * Tests relating to the loss of the master. + * Tests relating to the loss of the cluster-manager. 
*/ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class MasterDisruptionIT extends AbstractDisruptionTestCase { +public class ClusterManagerDisruptionIT extends AbstractDisruptionTestCase { /** - * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one + * Test that cluster recovers from a long GC on cluster-manager that causes other nodes to elect a new one */ - public void testMasterNodeGCs() throws Exception { + public void testClusterManagerNodeGCs() throws Exception { List nodes = startCluster(3); String oldClusterManagerNode = internalCluster().getMasterName(); // a very long GC, but it's OK as we remove the disruption when it has had an effect - SingleNodeDisruption masterNodeDisruption = new IntermittentLongGCDisruption( + SingleNodeDisruption clusterManagerNodeDisruption = new IntermittentLongGCDisruption( random(), oldClusterManagerNode, 100, @@ -81,38 +81,40 @@ public void testMasterNodeGCs() throws Exception { 30000, 60000 ); - internalCluster().setDisruptionScheme(masterNodeDisruption); - masterNodeDisruption.startDisrupting(); + internalCluster().setDisruptionScheme(clusterManagerNodeDisruption); + clusterManagerNodeDisruption.startDisrupting(); Set oldNonClusterManagerNodesSet = new HashSet<>(nodes); oldNonClusterManagerNodesSet.remove(oldClusterManagerNode); List oldNonClusterManagerNodes = new ArrayList<>(oldNonClusterManagerNodesSet); - logger.info("waiting for nodes to de-elect master [{}]", oldClusterManagerNode); + logger.info("waiting for nodes to de-elect cluster-manager [{}]", oldClusterManagerNode); for (String node : oldNonClusterManagerNodesSet) { assertDifferentMaster(node, oldClusterManagerNode); } - logger.info("waiting for nodes to elect a new master"); + logger.info("waiting for nodes to elect a new cluster-manager"); ensureStableCluster(2, oldNonClusterManagerNodes.get(0)); // restore GC - masterNodeDisruption.stopDisrupting(); - 
final TimeValue waitTime = new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + masterNodeDisruption.expectedTimeToHeal().millis()); + clusterManagerNodeDisruption.stopDisrupting(); + final TimeValue waitTime = new TimeValue( + DISRUPTION_HEALING_OVERHEAD.millis() + clusterManagerNodeDisruption.expectedTimeToHeal().millis() + ); ensureStableCluster(3, waitTime, false, oldNonClusterManagerNodes.get(0)); - // make sure all nodes agree on master - String newMaster = internalCluster().getMasterName(); - assertThat(newMaster, not(equalTo(oldClusterManagerNode))); - assertMaster(newMaster, nodes); + // make sure all nodes agree on cluster-manager + String newClusterManager = internalCluster().getMasterName(); + assertThat(newClusterManager, not(equalTo(oldClusterManagerNode))); + assertMaster(newClusterManager, nodes); } /** - * This test isolates the master from rest of the cluster, waits for a new master to be elected, restores the partition + * This test isolates the cluster-manager from rest of the cluster, waits for a new cluster-manager to be elected, restores the partition * and verifies that all node agree on the new cluster state */ - public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception { + public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exception { final List nodes = startCluster(3); assertAcked( @@ -169,7 +171,7 @@ public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception { try { assertEquals("unequal versions", state.version(), nodeState.version()); assertEquals("unequal node count", state.nodes().getSize(), nodeState.nodes().getSize()); - assertEquals("different masters ", state.nodes().getMasterNodeId(), nodeState.nodes().getMasterNodeId()); + assertEquals("different cluster-managers ", state.nodes().getMasterNodeId(), nodeState.nodes().getMasterNodeId()); assertEquals("different meta data version", state.metadata().version(), nodeState.metadata().version()); assertEquals("different 
routing", state.routingTable().toString(), nodeState.routingTable().toString()); } catch (AssertionError t) { @@ -193,7 +195,7 @@ public void testIsolateMasterAndVerifyClusterStateConsensus() throws Exception { } /** - * Verify that the proper block is applied when nodes lose their master + * Verify that the proper block is applied when nodes lose their cluster-manager */ public void testVerifyApiBlocksDuringPartition() throws Exception { internalCluster().startNodes(3, Settings.builder().putNull(NoMasterBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey()).build()); @@ -221,13 +223,13 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // Simulate a network issue between the unlucky node and the rest of the cluster. networkDisruption.startDisrupting(); - // The unlucky node must report *no* master node, since it can't connect to master and in fact it should + // The unlucky node must report *no* cluster-manager node, since it can't connect to cluster-manager and in fact it should // continuously ping until network failures have been resolved. However - // It may a take a bit before the node detects it has been cut off from the elected master + // It may a take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30)); - logger.info("wait until elected master has been removed and a new 2 node cluster was from (via [{}])", isolatedNode); + logger.info("wait until elected cluster-manager has been removed and a new 2 node cluster was from (via [{}])", isolatedNode); ensureStableCluster(2, nonIsolatedNode); for (String node : partitions.getMajoritySide()) { @@ -251,7 +253,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { networkDisruption.stopDisrupting(); - // Wait until the master node sees al 3 nodes again. 
+ // Wait until the cluster-manager node sees al 3 nodes again. ensureStableCluster(3, new TimeValue(DISRUPTION_HEALING_OVERHEAD.millis() + networkDisruption.expectedTimeToHeal().millis())); logger.info( @@ -267,9 +269,9 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { networkDisruption.startDisrupting(); - // The unlucky node must report *no* master node, since it can't connect to master and in fact it should + // The unlucky node must report *no* cluster-manager node, since it can't connect to cluster-manager and in fact it should // continuously ping until network failures have been resolved. However - // It may a take a bit before the node detects it has been cut off from the elected master + // It may a take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index 6fb311ba9a7b2..a4667d62a878c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -69,22 +69,26 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { DiscoveryNodes discoveryNodes = internalCluster().getInstance(ClusterService.class, nonClusterManagerNode).state().nodes(); - TransportService masterTranspotService = internalCluster().getInstance( + TransportService clusterManagerTranspotService = internalCluster().getInstance( TransportService.class, discoveryNodes.getMasterNode().getName() ); - logger.info("blocking requests from non master [{}] to master [{}]", nonClusterManagerNode, clusterManagerNode); - 
MockTransportService nonMasterTransportService = (MockTransportService) internalCluster().getInstance( + logger.info("blocking requests from non cluster-manager [{}] to cluster-manager [{}]", nonClusterManagerNode, clusterManagerNode); + MockTransportService nonClusterManagerTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, nonClusterManagerNode ); - nonMasterTransportService.addFailToSendNoConnectRule(masterTranspotService); + nonClusterManagerTransportService.addFailToSendNoConnectRule(clusterManagerTranspotService); assertNoMaster(nonClusterManagerNode); - logger.info("blocking cluster state publishing from master [{}] to non master [{}]", clusterManagerNode, nonClusterManagerNode); - MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance( + logger.info( + "blocking cluster state publishing from cluster-manager [{}] to non cluster-manager [{}]", + clusterManagerNode, + nonClusterManagerNode + ); + MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, clusterManagerNode ); @@ -93,31 +97,40 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { discoveryNodes.getLocalNode().getName() ); if (randomBoolean()) { - masterTransportService.addFailToSendNoConnectRule(localTransportService, PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME); + clusterManagerTransportService.addFailToSendNoConnectRule( + localTransportService, + PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME + ); } else { - masterTransportService.addFailToSendNoConnectRule(localTransportService, PublicationTransportHandler.COMMIT_STATE_ACTION_NAME); + clusterManagerTransportService.addFailToSendNoConnectRule( + localTransportService, + PublicationTransportHandler.COMMIT_STATE_ACTION_NAME + ); } logger.info( - "allowing requests from non master [{}] to master [{}], waiting for two join request", + "allowing 
requests from non cluster-manager [{}] to cluster-manager [{}], waiting for two join request", nonClusterManagerNode, clusterManagerNode ); final CountDownLatch countDownLatch = new CountDownLatch(2); - nonMasterTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> { - if (action.equals(JoinHelper.JOIN_ACTION_NAME)) { - countDownLatch.countDown(); + nonClusterManagerTransportService.addSendBehavior( + clusterManagerTransportService, + (connection, requestId, action, request, options) -> { + if (action.equals(JoinHelper.JOIN_ACTION_NAME)) { + countDownLatch.countDown(); + } + connection.sendRequest(requestId, action, request, options); } - connection.sendRequest(requestId, action, request, options); - }); + ); - nonMasterTransportService.addConnectBehavior(masterTransportService, Transport::openConnection); + nonClusterManagerTransportService.addConnectBehavior(clusterManagerTransportService, Transport::openConnection); countDownLatch.await(); logger.info("waiting for cluster to reform"); - masterTransportService.clearOutboundRules(localTransportService); - nonMasterTransportService.clearOutboundRules(localTransportService); + clusterManagerTransportService.clearOutboundRules(localTransportService); + nonClusterManagerTransportService.clearOutboundRules(localTransportService); ensureStableCluster(2); @@ -141,7 +154,7 @@ public void testClusterFormingWithASlowNode() { ensureStableCluster(3); } - public void testElectMasterWithLatestVersion() throws Exception { + public void testElectClusterManagerWithLatestVersion() throws Exception { final Set nodes = new HashSet<>(internalCluster().startNodes(3)); ensureStableCluster(3); ServiceDisruptionScheme isolateAllNodes = new NetworkDisruption( @@ -150,22 +163,22 @@ public void testElectMasterWithLatestVersion() throws Exception { ); internalCluster().setDisruptionScheme(isolateAllNodes); - logger.info("--> forcing a complete election to make sure \"preferred\" master 
is elected"); + logger.info("--> forcing a complete election to make sure \"preferred\" cluster-manager is elected"); isolateAllNodes.startDisrupting(); for (String node : nodes) { assertNoMaster(node); } internalCluster().clearDisruptionScheme(); ensureStableCluster(3); - final String preferredMasterName = internalCluster().getMasterName(); - final DiscoveryNode preferredMaster = internalCluster().clusterService(preferredMasterName).localNode(); + final String preferredClusterManagerName = internalCluster().getMasterName(); + final DiscoveryNode preferredClusterManager = internalCluster().clusterService(preferredClusterManagerName).localNode(); - logger.info("--> preferred master is {}", preferredMaster); + logger.info("--> preferred cluster-manager is {}", preferredClusterManager); final Set nonPreferredNodes = new HashSet<>(nodes); - nonPreferredNodes.remove(preferredMasterName); - final ServiceDisruptionScheme isolatePreferredMaster = isolateMasterDisruption(NetworkDisruption.DISCONNECT); - internalCluster().setDisruptionScheme(isolatePreferredMaster); - isolatePreferredMaster.startDisrupting(); + nonPreferredNodes.remove(preferredClusterManagerName); + final ServiceDisruptionScheme isolatePreferredClusterManager = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); + internalCluster().setDisruptionScheme(isolatePreferredClusterManager); + isolatePreferredClusterManager.startDisrupting(); client(randomFrom(nonPreferredNodes)).admin() .indices() @@ -194,11 +207,11 @@ public void testElectMasterWithLatestVersion() throws Exception { } /** - * Adds an asymmetric break between a master and one of the nodes and makes + * Adds an asymmetric break between a cluster-manager and one of the nodes and makes * sure that the node is removed form the cluster, that the node start pinging and that * the cluster reforms when healed. 
*/ - public void testNodeNotReachableFromMaster() throws Exception { + public void testNodeNotReachableFromClusterManager() throws Exception { startCluster(3); String clusterManagerNode = internalCluster().getMasterName(); @@ -210,15 +223,19 @@ public void testNodeNotReachableFromMaster() throws Exception { } } - logger.info("blocking request from master [{}] to [{}]", clusterManagerNode, nonClusterManagerNode); - MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance( + logger.info("blocking request from cluster-manager [{}] to [{}]", clusterManagerNode, nonClusterManagerNode); + MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, clusterManagerNode ); if (randomBoolean()) { - masterTransportService.addUnresponsiveRule(internalCluster().getInstance(TransportService.class, nonClusterManagerNode)); + clusterManagerTransportService.addUnresponsiveRule( + internalCluster().getInstance(TransportService.class, nonClusterManagerNode) + ); } else { - masterTransportService.addFailToSendNoConnectRule(internalCluster().getInstance(TransportService.class, nonClusterManagerNode)); + clusterManagerTransportService.addFailToSendNoConnectRule( + internalCluster().getInstance(TransportService.class, nonClusterManagerNode) + ); } logger.info("waiting for [{}] to be removed from cluster", nonClusterManagerNode); @@ -228,7 +245,7 @@ public void testNodeNotReachableFromMaster() throws Exception { assertNoMaster(nonClusterManagerNode); logger.info("healing partition and checking cluster reforms"); - masterTransportService.clearAllRules(); + clusterManagerTransportService.clearAllRules(); ensureStableCluster(3); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java index e6ddfd94871ce..3324b7de077fe 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/SnapshotDisruptionIT.java @@ -94,12 +94,12 @@ public void testDisruptionAfterFinalization() throws Exception { createRepository("test-repo", "fs"); - final String masterNode1 = internalCluster().getMasterName(); + final String clusterManagerNode1 = internalCluster().getMasterName(); - NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.UNRESPONSIVE); + NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.UNRESPONSIVE); internalCluster().setDisruptionScheme(networkDisruption); - ClusterService clusterService = internalCluster().clusterService(masterNode1); + ClusterService clusterService = internalCluster().clusterService(clusterManagerNode1); CountDownLatch disruptionStarted = new CountDownLatch(1); clusterService.addListener(new ClusterStateListener() { @Override @@ -124,7 +124,7 @@ public void clusterChanged(ClusterChangedEvent event) { final String snapshot = "test-snap"; logger.info("--> starting snapshot"); - ActionFuture future = client(masterNode1).admin() + ActionFuture future = client(clusterManagerNode1).admin() .cluster() .prepareCreateSnapshot("test-repo", snapshot) .setWaitForCompletion(true) @@ -147,7 +147,7 @@ public void clusterChanged(ClusterChangedEvent event) { logger.info("--> stopping disrupting"); networkDisruption.stopDisrupting(); - ensureStableCluster(4, masterNode1); + ensureStableCluster(4, clusterManagerNode1); logger.info("--> done"); try { @@ -158,7 +158,7 @@ public void clusterChanged(ClusterChangedEvent event) { assertNotNull(sne); assertThat( sne.getMessage(), - either(endsWith(" Failed to update cluster state during snapshot finalization")).or(endsWith(" no longer master")) + either(endsWith(" Failed to update cluster state during snapshot finalization")).or(endsWith(" no longer cluster-manager")) ); 
assertThat(sne.getSnapshotName(), is(snapshot)); } @@ -177,13 +177,13 @@ public void testDisruptionAfterShardFinalization() throws Exception { final String repoName = "test-repo"; createRepository(repoName, "mock"); - final String masterNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getMasterName(); blockAllDataNodes(repoName); final String snapshot = "test-snap"; logger.info("--> starting snapshot"); - ActionFuture future = client(masterNode).admin() + ActionFuture future = client(clusterManagerNode).admin() .cluster() .prepareCreateSnapshot(repoName, snapshot) .setWaitForCompletion(true) @@ -191,7 +191,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { waitForBlockOnAnyDataNode(repoName, TimeValue.timeValueSeconds(10L)); - NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); @@ -203,7 +203,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { networkDisruption.stopDisrupting(); unblockAllDataNodes(repoName); - ensureStableCluster(2, masterNode); + ensureStableCluster(2, clusterManagerNode); logger.info("--> done"); logger.info("--> recreate the index with potentially different shard counts"); @@ -213,17 +213,17 @@ public void testDisruptionAfterShardFinalization() throws Exception { logger.info("--> run a snapshot that fails to finalize but succeeds on the data node"); blockMasterFromFinalizingSnapshotOnIndexFile(repoName); - final ActionFuture snapshotFuture = client(masterNode).admin() + final ActionFuture snapshotFuture = client(clusterManagerNode).admin() .cluster() .prepareCreateSnapshot(repoName, "snapshot-2") .setWaitForCompletion(true) .execute(); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(10L)); - 
unblockNode(repoName, masterNode); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(10L)); + unblockNode(repoName, clusterManagerNode); assertFutureThrows(snapshotFuture, SnapshotException.class); logger.info("--> create a snapshot expected to be successful"); - final CreateSnapshotResponse successfulSnapshot = client(masterNode).admin() + final CreateSnapshotResponse successfulSnapshot = client(clusterManagerNode).admin() .cluster() .prepareCreateSnapshot(repoName, "snapshot-2") .setWaitForCompletion(true) @@ -235,7 +235,7 @@ public void testDisruptionAfterShardFinalization() throws Exception { assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, "snapshot-2").get()); } - public void testMasterFailOverDuringShardSnapshots() throws Exception { + public void testClusterManagerFailOverDuringShardSnapshots() throws Exception { internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); ensureStableCluster(4); @@ -258,7 +258,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { waitForBlock(dataNode, repoName, TimeValue.timeValueSeconds(30L)); - final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); ensureStableCluster(3, dataNode); @@ -267,7 +267,7 @@ public void testMasterFailOverDuringShardSnapshots() throws Exception { networkDisruption.stopDisrupting(); awaitNoMoreRunningOperations(dataNode); - logger.info("--> make sure isolated master responds to snapshot request"); + logger.info("--> make sure isolated cluster-manager responds to snapshot request"); final SnapshotException sne = expectThrows( SnapshotException.class, () -> snapshotResponse.actionGet(TimeValue.timeValueSeconds(30L)) diff --git 
a/server/src/internalClusterTest/java/org/opensearch/discovery/StableMasterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java similarity index 55% rename from server/src/internalClusterTest/java/org/opensearch/discovery/StableMasterDisruptionIT.java rename to server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java index 614c5a13c3253..fcc2b7a7d5ad4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/StableClusterManagerDisruptionIT.java @@ -77,7 +77,7 @@ * not detect a cluster-manager failure too quickly. */ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) -public class StableMasterDisruptionIT extends OpenSearchIntegTestCase { +public class StableClusterManagerDisruptionIT extends OpenSearchIntegTestCase { @Override protected Collection> nodePlugins() { @@ -87,43 +87,43 @@ protected Collection> nodePlugins() { /** * Test that no split brain occurs under partial network partition. See https://github.com/elastic/elasticsearch/issues/2488 */ - public void testFailWithMinimumMasterNodesConfigured() throws Exception { + public void testFailWithMinimumClusterManagerNodesConfigured() throws Exception { List nodes = internalCluster().startNodes(3); ensureStableCluster(3); - // Figure out what is the elected master node - final String masterNode = internalCluster().getMasterName(); - logger.info("---> legit elected master node={}", masterNode); + // Figure out what is the elected cluster-manager node + final String clusterManagerNode = internalCluster().getMasterName(); + logger.info("---> legit elected cluster-manager node={}", clusterManagerNode); - // Pick a node that isn't the elected master. 
- Set nonMasters = new HashSet<>(nodes); - nonMasters.remove(masterNode); - final String unluckyNode = randomFrom(nonMasters.toArray(Strings.EMPTY_ARRAY)); + // Pick a node that isn't the elected cluster-manager. + Set nonClusterManagers = new HashSet<>(nodes); + nonClusterManagers.remove(clusterManagerNode); + final String unluckyNode = randomFrom(nonClusterManagers.toArray(Strings.EMPTY_ARRAY)); - // Simulate a network issue between the unlucky node and elected master node in both directions. + // Simulate a network issue between the unlucky node and elected cluster-manager node in both directions. NetworkDisruption networkDisconnect = new NetworkDisruption( - new NetworkDisruption.TwoPartitions(masterNode, unluckyNode), + new NetworkDisruption.TwoPartitions(clusterManagerNode, unluckyNode), NetworkDisruption.DISCONNECT ); setDisruptionScheme(networkDisconnect); networkDisconnect.startDisrupting(); - // Wait until elected master has removed that the unlucky node... - ensureStableCluster(2, masterNode); + // Wait until elected cluster-manager has removed that the unlucky node... + ensureStableCluster(2, clusterManagerNode); - // The unlucky node must report *no* master node, since it can't connect to master and in fact it should + // The unlucky node must report *no* cluster-manager node, since it can't connect to cluster-manager and in fact it should // continuously ping until network failures have been resolved. However - // It may a take a bit before the node detects it has been cut off from the elected master + // It may a take a bit before the node detects it has been cut off from the elected cluster-manager ensureNoMaster(unluckyNode); networkDisconnect.stopDisrupting(); - // Wait until the master node sees all 3 nodes again. + // Wait until the cluster-manager node sees all 3 nodes again. 
ensureStableCluster(3); - // The elected master shouldn't have changed, since the unlucky node never could have elected itself as master - assertThat(internalCluster().getMasterName(), equalTo(masterNode)); + // The elected cluster-manager shouldn't have changed, since the unlucky node never could have elected itself as cluster-manager + assertThat(internalCluster().getMasterName(), equalTo(clusterManagerNode)); } private void ensureNoMaster(String node) throws Exception { @@ -135,17 +135,17 @@ private void ensureNoMaster(String node) throws Exception { } /** - * Verify that nodes fault detection detects a disconnected node after master reelection + * Verify that nodes fault detection detects a disconnected node after cluster-manager reelection */ - public void testFollowerCheckerDetectsDisconnectedNodeAfterMasterReelection() throws Exception { - testFollowerCheckerAfterMasterReelection(NetworkDisruption.DISCONNECT, Settings.EMPTY); + public void testFollowerCheckerDetectsDisconnectedNodeAfterClusterManagerReelection() throws Exception { + testFollowerCheckerAfterClusterManagerReelection(NetworkDisruption.DISCONNECT, Settings.EMPTY); } /** - * Verify that nodes fault detection detects an unresponsive node after master reelection + * Verify that nodes fault detection detects an unresponsive node after cluster-manager reelection */ - public void testFollowerCheckerDetectsUnresponsiveNodeAfterMasterReelection() throws Exception { - testFollowerCheckerAfterMasterReelection( + public void testFollowerCheckerDetectsUnresponsiveNodeAfterClusterManagerReelection() throws Exception { + testFollowerCheckerAfterClusterManagerReelection( NetworkDisruption.UNRESPONSIVE, Settings.builder() .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") @@ -156,34 +156,34 @@ public void testFollowerCheckerDetectsUnresponsiveNodeAfterMasterReelection() th ); } - private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType networkLinkDisruptionType, Settings 
settings) + private void testFollowerCheckerAfterClusterManagerReelection(NetworkLinkDisruptionType networkLinkDisruptionType, Settings settings) throws Exception { internalCluster().startNodes(4, settings); ensureStableCluster(4); - logger.info("--> stopping current master"); + logger.info("--> stopping current cluster-manager"); internalCluster().stopCurrentMasterNode(); ensureStableCluster(3); - final String master = internalCluster().getMasterName(); - final List nonMasters = Arrays.stream(internalCluster().getNodeNames()) - .filter(n -> master.equals(n) == false) + final String clusterManager = internalCluster().getMasterName(); + final List nonClusterManagers = Arrays.stream(internalCluster().getNodeNames()) + .filter(n -> clusterManager.equals(n) == false) .collect(Collectors.toList()); - final String isolatedNode = randomFrom(nonMasters); - final String otherNode = nonMasters.get(nonMasters.get(0).equals(isolatedNode) ? 1 : 0); + final String isolatedNode = randomFrom(nonClusterManagers); + final String otherNode = nonClusterManagers.get(nonClusterManagers.get(0).equals(isolatedNode) ? 
1 : 0); logger.info("--> isolating [{}]", isolatedNode); final NetworkDisruption networkDisruption = new NetworkDisruption( - new TwoPartitions(singleton(isolatedNode), Sets.newHashSet(master, otherNode)), + new TwoPartitions(singleton(isolatedNode), Sets.newHashSet(clusterManager, otherNode)), networkLinkDisruptionType ); setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); - logger.info("--> waiting for master to remove it"); - ensureStableCluster(2, master); + logger.info("--> waiting for cluster-manager to remove it"); + ensureStableCluster(2, clusterManager); ensureNoMaster(isolatedNode); networkDisruption.stopDisrupting(); @@ -191,10 +191,10 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType } /** - * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are - * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. + * Tests that emulates a frozen elected cluster-manager node that unfreezes and pushes its cluster state to other nodes that already are + * following another elected cluster-manager node. These nodes should reject this cluster state and prevent them from following the stale cluster-manager. 
*/ - public void testStaleMasterNotHijackingMajority() throws Exception { + public void testStaleClusterManagerNotHijackingMajority() throws Exception { final List nodes = internalCluster().startNodes( 3, Settings.builder() @@ -204,60 +204,63 @@ public void testStaleMasterNotHijackingMajority() throws Exception { ); ensureStableCluster(3); - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); + // Save the current cluster-manager node as old cluster-manager node, because that node will get frozen + final String oldClusterManagerNode = internalCluster().getMasterName(); - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); + // Simulating a painful gc by suspending all threads for a long time on the current elected cluster-manager node. + SingleNodeDisruption clusterManagerNodeDisruption = new LongGCDisruption(random(), oldClusterManagerNode); // Save the majority side final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); + majoritySide.remove(oldClusterManagerNode); - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); + // Keeps track of the previous and current cluster-manager when a cluster-manager node transition took place on each node on the + // majority side: + final Map>> clusterManagers = Collections.synchronizedMap(new HashMap<>()); for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); + clusterManagers.put(node, new ArrayList<>()); internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode 
currentMaster = event.state().nodes().getMasterNode(); - if (!Objects.equals(previousMaster, currentMaster)) { + DiscoveryNode previousClusterManager = event.previousState().nodes().getMasterNode(); + DiscoveryNode currentClusterManager = event.state().nodes().getMasterNode(); + if (!Objects.equals(previousClusterManager, currentClusterManager)) { logger.info( "--> node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), event.previousState() ); - String previousClusterManagerNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousClusterManagerNodeName, currentMasterNodeName)); + String previousClusterManagerNodeName = previousClusterManager != null ? previousClusterManager.getName() : null; + String currentClusterManagerNodeName = currentClusterManager != null ? currentClusterManager.getName() : null; + clusterManagers.get(node).add(new Tuple<>(previousClusterManagerNodeName, currentClusterManagerNodeName)); } }); } - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { + final CountDownLatch oldClusterManagerNodeSteppedDown = new CountDownLatch(1); + internalCluster().getInstance(ClusterService.class, oldClusterManagerNode).addListener(event -> { if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); + oldClusterManagerNodeSteppedDown.countDown(); } }); - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("--> freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); + internalCluster().setDisruptionScheme(clusterManagerNodeDisruption); + logger.info("--> freezing node [{}]", oldClusterManagerNode); + clusterManagerNodeDisruption.startDisrupting(); - // Wait for majority side to elect 
a new master + // Wait for majority side to elect a new cluster-manager assertBusy(() -> { - for (final Map.Entry>> entry : masters.entrySet()) { + for (final Map.Entry>> entry : clusterManagers.entrySet()) { final List> transitions = entry.getValue(); assertTrue(entry.getKey() + ": " + transitions, transitions.stream().anyMatch(transition -> transition.v2() != null)); } }); - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and - // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is - // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. - internalCluster().getInstance(ClusterService.class, oldMasterNode) + // The old cluster-manager node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be + // queued and + // once the old cluster-manager node un-freezes it gets executed. The old cluster-manager node will send this update + the cluster + // state where it is + // flagged as cluster-manager to the other nodes that follow the new cluster-manager. These nodes should ignore this update. 
+ internalCluster().getInstance(ClusterService.class, oldClusterManagerNode) .submitStateUpdateTask("sneaky-update", new ClusterStateUpdateTask(Priority.IMMEDIATE) { @Override public ClusterState execute(ClusterState currentState) { @@ -270,25 +273,30 @@ public void onFailure(String source, Exception e) { } }); - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("--> new detected master node [{}]", newMasterNode); + // Save the new elected cluster-manager node + final String newClusterManagerNode = internalCluster().getMasterName(majoritySide.get(0)); + logger.info("--> new detected cluster-manager node [{}]", newClusterManagerNode); // Stop disruption - logger.info("--> unfreezing node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); + logger.info("--> unfreezing node [{}]", oldClusterManagerNode); + clusterManagerNodeDisruption.stopDisrupting(); - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - logger.info("--> [{}] stepped down as master", oldMasterNode); + oldClusterManagerNodeSteppedDown.await(30, TimeUnit.SECONDS); + logger.info("--> [{}] stepped down as cluster-manager", oldClusterManagerNode); ensureStableCluster(3); - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { + assertThat(clusterManagers.size(), equalTo(2)); + for (Map.Entry>> entry : clusterManagers.entrySet()) { String nodeName = entry.getKey(); List> transitions = entry.getValue(); assertTrue( - "[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, - transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2())) + "[" + + nodeName + + "] should not apply state from old cluster-manager [" + + oldClusterManagerNode + + "] but it did: " + + transitions, + transitions.stream().noneMatch(t -> oldClusterManagerNode.equals(t.v2())) ); } } diff --git 
a/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java index 83c103bd82738..a150bf5c86a59 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeEnvironmentIT.java @@ -79,10 +79,10 @@ public void testStartFailureOnDataForNonDataNode() throws Exception { ); } - logger.info("--> restarting the node without the data and master roles"); + logger.info("--> restarting the node without the data and cluster-manager roles"); IllegalStateException ex = expectThrows( IllegalStateException.class, - "node not having the data and master roles while having existing index metadata must fail", + "node not having the data and cluster-manager roles while having existing index metadata must fail", () -> internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { @@ -100,7 +100,7 @@ public Settings onNodeStopped(String nodeName) { assertThat(ex.getMessage(), startsWith("node does not have the data role but has shard data")); } - logger.info("--> start the node again with data and master roles"); + logger.info("--> start the node again with data and cluster-manager roles"); internalCluster().startNode(dataPathSettings); logger.info("--> indexing a simple document"); diff --git a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java index 11ece43ea90d7..7a5c7ac653ab4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/env/NodeRepurposeCommandIT.java @@ -121,8 +121,8 @@ public void testRepurpose() throws Exception { executeRepurposeCommand(noClusterManagerNoDataSettingsForClusterManagerNode, 1, 
0); - // by restarting as master and data node, we can check that the index definition was really deleted and also that the tool - // does not mess things up so much that the nodes cannot boot as master or data node any longer. + // by restarting as cluster-manager and data node, we can check that the index definition was really deleted and also that the tool + // does not mess things up so much that the nodes cannot boot as cluster-manager or data node any longer. internalCluster().startClusterManagerOnlyNode(clusterManagerNodeDataPathSettings); internalCluster().startDataOnlyNode(dataNodeDataPathSettings); diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java index 1542d6800eaa1..017865a1397e8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/GatewayIndexStateIT.java @@ -241,16 +241,16 @@ public void testSimpleOpenClose() throws Exception { client().prepareIndex("test").setId("2").setSource("field1", "value1").execute().actionGet(); } - public void testJustMasterNode() throws Exception { + public void testJustClusterManagerNode() throws Exception { logger.info("--> cleaning nodes"); - logger.info("--> starting 1 master node non data"); + logger.info("--> starting 1 cluster-manager node non data"); internalCluster().startNode(nonDataNode()); logger.info("--> create an index"); client().admin().indices().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).execute().actionGet(); - logger.info("--> restarting master node"); + logger.info("--> restarting cluster-manager node"); internalCluster().fullRestart(new RestartCallback() { @Override public Settings onNodeStopped(String nodeName) { @@ -273,10 +273,10 @@ public Settings onNodeStopped(String nodeName) { 
assertThat(clusterStateResponse.getState().metadata().hasIndex("test"), equalTo(true)); } - public void testJustMasterNodeAndJustDataNode() { + public void testJustClusterManagerNodeAndJustDataNode() { logger.info("--> cleaning nodes"); - logger.info("--> starting 1 master node non data"); + logger.info("--> starting 1 cluster-manager node non data"); internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java index c96a71d5b2617..4bbd968d851b8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/MetadataNodesIT.java @@ -60,18 +60,18 @@ public class MetadataNodesIT extends OpenSearchIntegTestCase { public void testMetaWrittenAlsoOnDataNode() throws Exception { // this test checks that index state is written on data only nodes if they have a shard allocated - String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY); assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0))); index("test", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject()); ensureGreen("test"); assertIndexInMetaState(dataNode, "test"); - assertIndexInMetaState(masterNode, "test"); + assertIndexInMetaState(clusterManagerNode, "test"); } public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Exception { // this test checks that the index data is removed from a data only node once all shards have been allocated away from it - String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + String 
clusterManagerNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); List nodeNames = internalCluster().startDataOnlyNodes(2); String node1 = nodeNames.get(0); String node2 = nodeNames.get(1); @@ -90,8 +90,8 @@ public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Excepti Index resolveIndex = resolveIndex(index); assertIndexDirectoryExists(node1, resolveIndex); assertIndexDirectoryDeleted(node2, resolveIndex); - assertIndexInMetaState(masterNode, index); - assertIndexDirectoryDeleted(masterNode, resolveIndex); + assertIndexInMetaState(clusterManagerNode, index); + assertIndexDirectoryDeleted(clusterManagerNode, resolveIndex); logger.debug("relocating index..."); client().admin() @@ -104,8 +104,8 @@ public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Excepti assertIndexDirectoryDeleted(node1, resolveIndex); assertIndexInMetaState(node2, index); assertIndexDirectoryExists(node2, resolveIndex); - assertIndexInMetaState(masterNode, index); - assertIndexDirectoryDeleted(masterNode, resolveIndex); + assertIndexInMetaState(clusterManagerNode, index); + assertIndexDirectoryDeleted(clusterManagerNode, resolveIndex); client().admin().indices().prepareDelete(index).get(); assertIndexDirectoryDeleted(node1, resolveIndex); @@ -114,7 +114,7 @@ public void testIndexFilesAreRemovedIfAllShardsFromIndexRemoved() throws Excepti @SuppressWarnings("unchecked") public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { - String masterNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); final String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY); final String index = "index"; @@ -123,7 +123,7 @@ public void testMetaWrittenWhenIndexIsClosedAndMetaUpdated() throws Exception { ensureGreen(); logger.info("--> wait for meta state written for index"); assertIndexInMetaState(dataNode, index); - 
assertIndexInMetaState(masterNode, index); + assertIndexInMetaState(clusterManagerNode, index); logger.info("--> close index"); client().admin().indices().prepareClose(index).get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java index 77a9d37063c83..c18d94e02ab9c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoverAfterNodesIT.java @@ -127,12 +127,12 @@ public void testRecoverAfterNodes() throws Exception { assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clientNode3).isEmpty(), equalTo(true)); } - public void testRecoverAfterMasterNodes() throws Exception { + public void testRecoverAfterClusterManagerNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); - logger.info("--> start master_node (1)"); - Client master1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode())); + logger.info("--> start cluster_manager_node (1)"); + Client clusterManager1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode())); assertThat( - master1.admin() + clusterManager1.admin() .cluster() .prepareState() .setLocal(true) @@ -147,7 +147,7 @@ public void testRecoverAfterMasterNodes() throws Exception { logger.info("--> start data_node (1)"); Client data1 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(dataOnlyNode())); assertThat( - master1.admin() + clusterManager1.admin() .cluster() .prepareState() .setLocal(true) @@ -174,7 +174,7 @@ public void testRecoverAfterMasterNodes() throws Exception { logger.info("--> start data_node (2)"); Client data2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(dataOnlyNode())); assertThat( - master1.admin() + 
clusterManager1.admin() .cluster() .prepareState() .setLocal(true) @@ -210,20 +210,20 @@ public void testRecoverAfterMasterNodes() throws Exception { hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); - logger.info("--> start master_node (2)"); - Client master2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode())); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true)); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true)); + logger.info("--> start cluster_manager_node (2)"); + Client clusterManager2 = startNode(Settings.builder().put("gateway.recover_after_master_nodes", 2).put(clusterManagerOnlyNode())); + assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clusterManager1).isEmpty(), equalTo(true)); + assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clusterManager2).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true)); } public void testRecoverAfterDataNodes() throws Exception { internalCluster().setBootstrapClusterManagerNodeIndex(0); - logger.info("--> start master_node (1)"); - Client master1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode())); + logger.info("--> start cluster_manager_node (1)"); + Client clusterManager1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode())); assertThat( - master1.admin() + clusterManager1.admin() .cluster() .prepareState() .setLocal(true) @@ -238,7 +238,7 @@ public void testRecoverAfterDataNodes() throws Exception { logger.info("--> start data_node (1)"); Client data1 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(dataOnlyNode())); assertThat( - master1.admin() + clusterManager1.admin() .cluster() .prepareState() 
.setLocal(true) @@ -262,10 +262,10 @@ public void testRecoverAfterDataNodes() throws Exception { hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); - logger.info("--> start master_node (2)"); - Client master2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode())); + logger.info("--> start cluster_manager_node (2)"); + Client clusterManager2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(clusterManagerOnlyNode())); assertThat( - master2.admin() + clusterManager2.admin() .cluster() .prepareState() .setLocal(true) @@ -289,7 +289,7 @@ public void testRecoverAfterDataNodes() throws Exception { hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK) ); assertThat( - master2.admin() + clusterManager2.admin() .cluster() .prepareState() .setLocal(true) @@ -303,8 +303,8 @@ public void testRecoverAfterDataNodes() throws Exception { logger.info("--> start data_node (2)"); Client data2 = startNode(Settings.builder().put("gateway.recover_after_data_nodes", 2).put(dataOnlyNode())); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master1).isEmpty(), equalTo(true)); - assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, master2).isEmpty(), equalTo(true)); + assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clusterManager1).isEmpty(), equalTo(true)); + assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, clusterManager2).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data1).isEmpty(), equalTo(true)); assertThat(waitForNoBlocksOnNode(BLOCK_WAIT_TIMEOUT, data2).isEmpty(), equalTo(true)); } diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java index 79ffe12d13129..11af1fb3cbfab 100644 --- a/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/gateway/RecoveryFromGatewayIT.java @@ -445,7 +445,7 @@ public void testLatestVersionLoaded() throws Exception { .setSource(jsonBuilder().startObject().field("field", "value3").endObject()) .execute() .actionGet(); - // TODO: remove once refresh doesn't fail immediately if there a master block: + // TODO: remove once refresh doesn't fail immediately if there a cluster-manager block: // https://github.com/elastic/elasticsearch/issues/9997 // client().admin().cluster().prepareHealth("test").setWaitForYellowStatus().get(); logger.info("--> refreshing all indices after indexing is complete"); @@ -665,7 +665,7 @@ public void assertSyncIdsNotNull() { } public void testStartedShardFoundIfStateNotYetProcessed() throws Exception { - // nodes may need to report the shards they processed the initial recovered cluster state from the master + // nodes may need to report the shards they processed the initial recovered cluster state from the cluster-manager final String nodeName = internalCluster().startNode(); createIndex("test", Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).build()); final String customDataPath = IndexMetadata.INDEX_DATA_PATH_SETTING.get( diff --git a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java index d5924155e2ec7..c90c5f45af176 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/mapper/DynamicMappingIT.java @@ -76,7 +76,7 @@ public void testConflictingDynamicMappings() { assertThat(e.getMessage(), Matchers.containsString("failed to parse field [foo] of type [long]")); } catch (IllegalArgumentException e) { // rare case: the node that processes the index request doesn't have the mappings - // yet and sends a mapping update to the master node to map "bar" as "text". 
This + // yet and sends a mapping update to the cluster-manager node to map "bar" as "text". This // fails as it had been already mapped as a long by the previous index request. assertThat(e.getMessage(), Matchers.containsString("mapper [foo] cannot be changed from type [long] to [text]")); } @@ -140,19 +140,19 @@ public void run() { } } - public void testPreflightCheckAvoidsMaster() throws InterruptedException { + public void testPreflightCheckAvoidsClusterManager() throws InterruptedException { createIndex("index", Settings.builder().put(INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), 2).build()); ensureGreen("index"); client().prepareIndex("index").setId("1").setSource("field1", "value1").get(); - final CountDownLatch masterBlockedLatch = new CountDownLatch(1); + final CountDownLatch clusterManagerBlockedLatch = new CountDownLatch(1); final CountDownLatch indexingCompletedLatch = new CountDownLatch(1); internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()) .submitStateUpdateTask("block-state-updates", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { - masterBlockedLatch.countDown(); + clusterManagerBlockedLatch.countDown(); indexingCompletedLatch.await(); return currentState; } @@ -163,7 +163,7 @@ public void onFailure(String source, Exception e) { } }); - masterBlockedLatch.await(); + clusterManagerBlockedLatch.await(); final IndexRequestBuilder indexRequestBuilder = client().prepareIndex("index").setId("2").setSource("field2", "value2"); try { assertThat( diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/DedicatedMasterGetFieldMappingIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/DedicatedClusterManagerGetFieldMappingIT.java similarity index 94% rename from server/src/internalClusterTest/java/org/opensearch/indices/mapping/DedicatedMasterGetFieldMappingIT.java rename to 
server/src/internalClusterTest/java/org/opensearch/indices/mapping/DedicatedClusterManagerGetFieldMappingIT.java index a4123ccc46ab6..72f7bd44541a6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/DedicatedMasterGetFieldMappingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/DedicatedClusterManagerGetFieldMappingIT.java @@ -38,7 +38,7 @@ import static org.opensearch.test.OpenSearchIntegTestCase.Scope; @ClusterScope(scope = Scope.TEST, numDataNodes = 0) -public class DedicatedMasterGetFieldMappingIT extends SimpleGetFieldMappingsIT { +public class DedicatedClusterManagerGetFieldMappingIT extends SimpleGetFieldMappingsIT { @Before public void before1() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java index 0a29794add5a8..51ff5de34240a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -386,13 +386,13 @@ private void assertConcreteMappingsOnAll(final String index, final String... fie assertNotNull("field " + fieldName + " doesn't exists on " + node, fieldType); } } - assertMappingOnMaster(index, fieldNames); + assertMappingOnClusterManager(index, fieldNames); } /** - * Waits for the given mapping type to exists on the master node. + * Waits for the given mapping type to exists on the cluster-manager node. */ - private void assertMappingOnMaster(final String index, final String... fieldNames) { + private void assertMappingOnClusterManager(final String index, final String... 
fieldNames) { GetMappingsResponse response = client().admin().indices().prepareGetMappings(index).get(); MappingMetadata mappings = response.getMappings().get(index); assertThat(mappings, notNullValue()); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 09caf8f1e4358..4650000f1e20a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -852,7 +852,7 @@ public void testTransientErrorsDuringRecoveryAreRetried() throws Exception { .put(NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.getKey(), "500ms") .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "10s") .build(); - // start a master node + // start a cluster-manager node internalCluster().startNode(nodeSettings); final String blueNodeName = internalCluster().startNode( @@ -1054,7 +1054,7 @@ public void testDisconnectsWhileRecovering() throws Exception { .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s") .put(NodeConnectionsService.CLUSTER_NODE_RECONNECT_INTERVAL_SETTING.getKey(), "1s") .build(); - // start a master node + // start a cluster-manager node internalCluster().startNode(nodeSettings); final String blueNodeName = internalCluster().startNode( @@ -1211,8 +1211,8 @@ public void testDisconnectsDuringRecovery() throws Exception { ) .build(); TimeValue disconnectAfterDelay = TimeValue.timeValueMillis(randomIntBetween(0, 100)); - // start a master node - String masterNodeName = internalCluster().startClusterManagerOnlyNode(nodeSettings); + // start a cluster-manager node + String clusterManagerNodeName = internalCluster().startClusterManagerOnlyNode(nodeSettings); final String blueNodeName = internalCluster().startNode( 
Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build() @@ -1239,9 +1239,9 @@ public void testDisconnectsDuringRecovery() throws Exception { ensureSearchable(indexName); assertHitCount(client().prepareSearch(indexName).get(), numDocs); - MockTransportService masterTransportService = (MockTransportService) internalCluster().getInstance( + MockTransportService clusterManagerTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, - masterNodeName + clusterManagerNodeName ); MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance( TransportService.class, @@ -1312,7 +1312,7 @@ public void sendRequest( }); for (MockTransportService mockTransportService : Arrays.asList(redMockTransportService, blueMockTransportService)) { - mockTransportService.addSendBehavior(masterTransportService, (connection, requestId, action, request, options) -> { + mockTransportService.addSendBehavior(clusterManagerTransportService, (connection, requestId, action, request, options) -> { logger.info("--> sending request {} on {}", action, connection.getNode()); if ((primaryRelocation && finalized.get()) == false) { assertNotEquals(action, ShardStateAction.SHARD_FAILED_ACTION_NAME); @@ -1466,8 +1466,8 @@ public void testDoNotInfinitelyWaitForMapping() { assertHitCount(client().prepareSearch().get(), numDocs); } - /** Makes sure the new master does not repeatedly fetch index metadata from recovering replicas */ - public void testOngoingRecoveryAndMasterFailOver() throws Exception { + /** Makes sure the new cluster-manager does not repeatedly fetch index metadata from recovering replicas */ + public void testOngoingRecoveryAndClusterManagerFailOver() throws Exception { String indexName = "test"; internalCluster().startNodes(2); String nodeWithPrimary = internalCluster().startDataOnlyNode(); diff --git 
a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java index caf741e9b8882..3d70622e122c0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -147,7 +147,7 @@ public void testCloseWhileRelocatingShards() throws Exception { ); final String targetNode = internalCluster().startDataOnlyNode(); - ensureClusterSizeConsistency(); // wait for the master to finish processing join. + ensureClusterSizeConsistency(); // wait for the cluster-manager to finish processing join. try { final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java index 2e35b7159b6aa..79975a7d080ee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/store/IndicesStoreIntegrationIT.java @@ -105,7 +105,7 @@ protected Collection> nodePlugins() { @Override protected void ensureClusterStateConsistency() throws IOException { - // testShardActiveElseWhere might change the state of a non-master node + // testShardActiveElseWhere might change the state of a non-cluster-manager node // so we cannot check state consistency of this cluster } diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 1aed4de79cd63..a615cceffb5df 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -91,7 +91,7 @@ public void testFailPipelineCreation() throws Exception { } } - public void testFailPipelineCreationProcessorNotInstalledOnMasterNode() throws Exception { + public void testFailPipelineCreationProcessorNotInstalledOnClusterManagerNode() throws Exception { internalCluster().startNode(); installPlugin = true; internalCluster().startNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java index 9ea80ae7dbd89..d4ce36ff0575e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/persistent/PersistentTasksExecutorIT.java @@ -403,7 +403,7 @@ public void testUnassignRunningPersistentTask() throws Exception { PlainActionFuture> unassignmentFuture = new PlainActionFuture<>(); - // Disallow re-assignment after it is unallocated to verify master and node state + // Disallow re-assignment after it is unallocated to verify cluster-manager and node state TestPersistentTasksExecutor.setNonClusterStateCondition(false); persistentTasksClusterService.unassignPersistentTask(taskId, task.getAllocationId() + 1, "unassignment test", unassignmentFuture); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java index a8423312de271..f636185fd4649 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/FullRollingRestartIT.java @@ -199,7 +199,7 @@ public void 
testNoRebalanceOnRollingRestart() throws Exception { internalCluster().startClusterManagerOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(3); /** - * We start 3 nodes and a dedicated master. Restart on of the data-nodes and ensure that we got no relocations. + * We start 3 nodes and a dedicated cluster-manager. Restart one of the data-nodes and ensure that we got no relocations. * Yet we have 6 shards 0 replica so that means if the restarting node comes back both other nodes are subject * to relocating to the restarting node since all had 2 shards and now one node has nothing allocated. * We have a fix for this to wait until we have allocated unallocated shards now so this shouldn't happen. diff --git a/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java b/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java index 7208a608e1ea1..6450644314c08 100644 --- a/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryCleanupIT.java @@ -51,11 +51,11 @@ @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class BlobStoreRepositoryCleanupIT extends AbstractSnapshotIntegTestCase { - public void testMasterFailoverDuringCleanup() throws Exception { + public void testClusterManagerFailoverDuringCleanup() throws Exception { startBlockedCleanup("test-repo"); final int nodeCount = internalCluster().numDataAndMasterNodes(); - logger.info("--> stopping master node"); + logger.info("--> stopping cluster-manager node"); internalCluster().stopCurrentMasterNode(); ensureStableCluster(nodeCount - 1); @@ -67,7 +67,7 @@ public void testMasterFailoverDuringCleanup() throws Exception { } public void testRepeatCleanupsDontRemove() throws Exception { - final String
masterNode = startBlockedCleanup("test-repo"); + final String clusterManagerNode = startBlockedCleanup("test-repo"); logger.info("--> sending another cleanup"); assertFutureThrows(client().admin().cluster().prepareCleanupRepository("test-repo").execute(), IllegalStateException.class); @@ -81,8 +81,8 @@ public void testRepeatCleanupsDontRemove() throws Exception { .custom(RepositoryCleanupInProgress.TYPE); assertTrue(cleanup.hasCleanupInProgress()); - logger.info("--> unblocking master node"); - unblockNode("test-repo", masterNode); + logger.info("--> unblocking cluster-manager node"); + unblockNode("test-repo", clusterManagerNode); logger.info("--> wait for cleanup to finish and disappear from cluster state"); awaitClusterState( @@ -91,7 +91,7 @@ public void testRepeatCleanupsDontRemove() throws Exception { } private String startBlockedCleanup(String repoName) throws Exception { - logger.info("--> starting two master nodes and one data node"); + logger.info("--> starting two cluster-manager nodes and one data node"); internalCluster().startMasterOnlyNodes(2); internalCluster().startDataOnlyNodes(1); @@ -117,17 +117,17 @@ private String startBlockedCleanup(String repoName) throws Exception { ); garbageFuture.get(); - final String masterNode = blockMasterFromFinalizingSnapshotOnIndexFile(repoName); + final String clusterManagerNode = blockMasterFromFinalizingSnapshotOnIndexFile(repoName); logger.info("--> starting repository cleanup"); client().admin().cluster().prepareCleanupRepository(repoName).execute(); - logger.info("--> waiting for block to kick in on " + masterNode); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(60)); + logger.info("--> waiting for block to kick in on " + clusterManagerNode); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(60)); awaitClusterState( state -> state.custom(RepositoryCleanupInProgress.TYPE, RepositoryCleanupInProgress.EMPTY).hasCleanupInProgress() ); - return masterNode; + return 
clusterManagerNode; } public void testCleanupOldIndexN() throws ExecutionException, InterruptedException { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 7dc33294ce783..d5f36608941d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -234,7 +234,7 @@ public void testConcurrentCloneAndSnapshot() throws Exception { public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception { // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations - final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -245,9 +245,9 @@ public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception { createFullSnapshot(repoName, sourceSnapshot); final String targetSnapshot = "target-snapshot"; - blockMasterOnShardClone(repoName); + blockClusterManagerOnShardClone(repoName); final ActionFuture cloneFuture = startClone(repoName, sourceSnapshot, targetSnapshot, indexSlow); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); final String indexFast = "index-fast"; createIndexWithRandomDocs(indexFast, randomIntBetween(20, 100)); @@ -257,7 +257,7 @@ public void testLongRunningCloneAllowsConcurrentSnapshot() throws Exception { ); assertThat(cloneFuture.isDone(), is(false)); - unblockNode(repoName, masterNode); + unblockNode(repoName, clusterManagerNode); assertAcked(cloneFuture.get()); } @@ 
-323,7 +323,7 @@ public void testDeletePreventsClone() throws Exception { public void testBackToBackClonesForIndexNotInCluster() throws Exception { // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations - final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -336,9 +336,9 @@ public void testBackToBackClonesForIndexNotInCluster() throws Exception { assertAcked(admin().indices().prepareDelete(indexBlocked).get()); final String targetSnapshot1 = "target-snapshot"; - blockMasterOnShardClone(repoName); + blockClusterManagerOnShardClone(repoName); final ActionFuture cloneFuture1 = startClone(repoName, sourceSnapshot, targetSnapshot1, indexBlocked); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); assertThat(cloneFuture1.isDone(), is(false)); final int extraClones = randomIntBetween(1, 5); @@ -366,7 +366,7 @@ public void testBackToBackClonesForIndexNotInCluster() throws Exception { assertFalse(extraSnapshotFuture.isDone()); } - unblockNode(repoName, masterNode); + unblockNode(repoName, clusterManagerNode); assertAcked(cloneFuture1.get()); for (ActionFuture extraCloneFuture : extraCloneFutures) { @@ -377,7 +377,7 @@ public void testBackToBackClonesForIndexNotInCluster() throws Exception { } } - public void testMasterFailoverDuringCloneStep1() throws Exception { + public void testClusterManagerFailoverDuringCloneStep1() throws Exception { internalCluster().startMasterOnlyNodes(3); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; @@ -388,13 +388,13 @@ public void testMasterFailoverDuringCloneStep1() throws Exception { 
final String sourceSnapshot = "source-snapshot"; createFullSnapshot(repoName, sourceSnapshot); - blockMasterOnReadIndexMeta(repoName); + blockClusterManagerOnReadIndexMeta(repoName); final String cloneName = "target-snapshot"; final ActionFuture cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, cloneName, testIndex); awaitNumberOfSnapshotsInProgress(1); - final String masterNode = internalCluster().getMasterName(); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); - internalCluster().restartNode(masterNode); + final String clusterManagerNode = internalCluster().getMasterName(); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + internalCluster().restartNode(clusterManagerNode); boolean cloneSucceeded = false; try { cloneFuture.actionGet(TimeValue.timeValueSeconds(30L)); @@ -406,7 +406,8 @@ public void testMasterFailoverDuringCloneStep1() throws Exception { awaitNoMoreRunningOperations(internalCluster().getMasterName()); - // Check if the clone operation worked out by chance as a result of the clone request being retried because of the master failover + // Check if the clone operation worked out by chance as a result of the clone request being retried + // because of the cluster-manager failover cloneSucceeded = cloneSucceeded || getRepositoryData(repoName).getSnapshotIds().stream().anyMatch(snapshotId -> snapshotId.getName().equals(cloneName)); assertAllSnapshotsSuccessful(getRepositoryData(repoName), cloneSucceeded ? 
2 : 1); @@ -430,7 +431,7 @@ public void testFailsOnCloneMissingIndices() { ); } - public void testMasterFailoverDuringCloneStep2() throws Exception { + public void testClusterManagerFailoverDuringCloneStep2() throws Exception { // large snapshot pool so blocked snapshot threads from cloning don't prevent concurrent snapshot finalizations internalCluster().startMasterOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); @@ -443,12 +444,12 @@ public void testMasterFailoverDuringCloneStep2() throws Exception { createFullSnapshot(repoName, sourceSnapshot); final String targetSnapshot = "target-snapshot"; - blockMasterOnShardClone(repoName); + blockClusterManagerOnShardClone(repoName); final ActionFuture cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, targetSnapshot, testIndex); awaitNumberOfSnapshotsInProgress(1); - final String masterNode = internalCluster().getMasterName(); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); - internalCluster().restartNode(masterNode); + final String clusterManagerNode = internalCluster().getMasterName(); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + internalCluster().restartNode(clusterManagerNode); expectThrows(SnapshotException.class, cloneFuture::actionGet); awaitNoMoreRunningOperations(internalCluster().getMasterName()); @@ -471,9 +472,9 @@ public void testExceptionDuringShardClone() throws Exception { blockMasterFromFinalizingSnapshotOnSnapFile(repoName); final ActionFuture cloneFuture = startCloneFromDataNode(repoName, sourceSnapshot, targetSnapshot, testIndex); awaitNumberOfSnapshotsInProgress(1); - final String masterNode = internalCluster().getMasterName(); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); - unblockNode(repoName, masterNode); + final String clusterManagerNode = internalCluster().getMasterName(); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + 
unblockNode(repoName, clusterManagerNode); expectThrows(SnapshotException.class, cloneFuture::actionGet); awaitNoMoreRunningOperations(internalCluster().getMasterName()); assertAllSnapshotsSuccessful(getRepositoryData(repoName), 1); @@ -490,8 +491,8 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final String sourceSnapshot = "source-snapshot"; blockDataNode(repoName, dataNode); - final Client masterClient = internalCluster().masterClient(); - final ActionFuture sourceSnapshotFuture = masterClient.admin() + final Client clusterManagerClient = internalCluster().masterClient(); + final ActionFuture sourceSnapshotFuture = clusterManagerClient.admin() .cluster() .prepareCreateSnapshot(repoName, sourceSnapshot) .setWaitForCompletion(true) @@ -503,7 +504,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { final SnapshotException sne = expectThrows( SnapshotException.class, - () -> startClone(masterClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( + () -> startClone(clusterManagerClient, repoName, sourceSnapshot, "target-snapshot", testIndex).actionGet( TimeValue.timeValueSeconds(30L) ) ); @@ -516,7 +517,7 @@ public void testDoesNotStartOnBrokenSourceSnapshot() throws Exception { } public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throws Exception { - final String masterName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -530,15 +531,15 @@ public void testStartSnapshotWithSuccessfulShardClonePendingFinalization() throw blockMasterOnWriteIndexFile(repoName); final String cloneName = "clone-blocked"; final ActionFuture blockedClone = startClone(repoName, sourceSnapshot, cloneName, indexName); - 
waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); awaitNumberOfSnapshotsInProgress(1); blockNodeOnAnyFiles(repoName, dataNode); final ActionFuture otherSnapshot = startFullSnapshot(repoName, "other-snapshot"); awaitNumberOfSnapshotsInProgress(2); assertFalse(blockedClone.isDone()); - unblockNode(repoName, masterName); + unblockNode(repoName, clusterManagerName); awaitNumberOfSnapshotsInProgress(1); - awaitMasterFinishRepoOperations(); + awaitClusterManagerFinishRepoOperations(); unblockNode(repoName, dataNode); assertAcked(blockedClone.get()); assertEquals(getSnapshot(repoName, cloneName).state(), SnapshotState.SUCCESS); @@ -568,7 +569,7 @@ public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws E assertFalse(blockedClone.isDone()); unblockNode(repoName, clusterManagerName); awaitNoMoreRunningOperations(clusterManagerName); - awaitMasterFinishRepoOperations(); + awaitClusterManagerFinishRepoOperations(); assertAcked(blockedClone.get()); assertAcked(otherClone.get()); assertEquals(getSnapshot(repoName, cloneName).state(), SnapshotState.SUCCESS); @@ -576,7 +577,7 @@ public void testStartCloneWithSuccessfulShardClonePendingFinalization() throws E } public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throws Exception { - final String masterName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + final String clusterManagerName = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); @@ -589,7 +590,7 @@ public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throw blockMasterOnWriteIndexFile(repoName); final ActionFuture blockedSnapshot = startFullSnapshot(repoName, "snap-blocked"); - waitForBlock(masterName, repoName, 
TimeValue.timeValueSeconds(30L)); + waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); awaitNumberOfSnapshotsInProgress(1); final String cloneName = "clone"; final ActionFuture clone = startClone(repoName, sourceSnapshot, cloneName, indexName); @@ -602,11 +603,11 @@ public void testStartCloneWithSuccessfulShardSnapshotPendingFinalization() throw }); assertFalse(blockedSnapshot.isDone()); } finally { - unblockNode(repoName, masterName); + unblockNode(repoName, clusterManagerName); } awaitNoMoreRunningOperations(); - awaitMasterFinishRepoOperations(); + awaitClusterManagerFinishRepoOperations(); assertSuccessful(blockedSnapshot); assertAcked(clone.get()); @@ -641,12 +642,12 @@ private static ActionFuture startClone( return client.admin().cluster().prepareCloneSnapshot(repoName, sourceSnapshot, targetSnapshot).setIndices(indices).execute(); } - private void blockMasterOnReadIndexMeta(String repoName) { + private void blockClusterManagerOnReadIndexMeta(String repoName) { ((MockRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName)) .setBlockOnReadIndexMeta(); } - private void blockMasterOnShardClone(String repoName) { + private void blockClusterManagerOnShardClone(String repoName) { ((MockRepository) internalCluster().getCurrentMasterNodeInstance(RepositoriesService.class).repository(repoName)) .setBlockOnWriteShardLevelMeta(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index edae4fa4a6b5e..04ec3f027f908 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -280,7 +280,7 @@ public void testMultipleReposAreIndependent2() throws Exception { } public void testMultipleReposAreIndependent3() throws Exception 
{ - final String masterNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); internalCluster().startDataOnlyNode(); final String blockedRepoName = "test-repo-blocked"; final String otherRepoName = "test-repo"; @@ -289,14 +289,14 @@ public void testMultipleReposAreIndependent3() throws Exception { createIndexWithContent("test-index"); createFullSnapshot(blockedRepoName, "blocked-snapshot"); - blockNodeOnAnyFiles(blockedRepoName, masterNode); + blockNodeOnAnyFiles(blockedRepoName, clusterManagerNode); final ActionFuture slowDeleteFuture = startDeleteSnapshot(blockedRepoName, "*"); logger.info("--> waiting for concurrent snapshot(s) to finish"); createNSnapshots(otherRepoName, randomIntBetween(1, 5)); assertAcked(startDeleteSnapshot(otherRepoName, "*").get()); - unblockNode(blockedRepoName, masterNode); + unblockNode(blockedRepoName, clusterManagerNode); assertAcked(slowDeleteFuture.actionGet()); } @@ -447,7 +447,7 @@ public void testCascadedAborts() throws Exception { assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); } - public void testMasterFailOverWithQueuedDeletes() throws Exception { + public void testClusterManagerFailOverWithQueuedDeletes() throws Exception { internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; @@ -458,7 +458,10 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { final String firstSnapshot = "snapshot-one"; blockDataNode(repoName, dataNode); - final ActionFuture firstSnapshotResponse = startFullSnapshotFromNonMasterClient(repoName, firstSnapshot); + final ActionFuture firstSnapshotResponse = startFullSnapshotFromNonClusterManagerClient( + repoName, + firstSnapshot + ); waitForBlock(dataNode, repoName, TimeValue.timeValueSeconds(30L)); final 
String dataNode2 = internalCluster().startDataOnlyNode(); @@ -475,11 +478,14 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { return snapshotsInProgress.entries().size() == 2 && snapshotHasCompletedShard(secondSnapshot, snapshotsInProgress); }); - final ActionFuture firstDeleteFuture = startDeleteFromNonMasterClient(repoName, firstSnapshot); + final ActionFuture firstDeleteFuture = startDeleteFromNonClusterManagerClient(repoName, firstSnapshot); awaitNDeletionsInProgress(1); blockNodeOnAnyFiles(repoName, dataNode2); - final ActionFuture snapshotThreeFuture = startFullSnapshotFromNonMasterClient(repoName, "snapshot-three"); + final ActionFuture snapshotThreeFuture = startFullSnapshotFromNonClusterManagerClient( + repoName, + "snapshot-three" + ); waitForBlock(dataNode2, repoName, TimeValue.timeValueSeconds(30L)); assertThat(firstSnapshotResponse.isDone(), is(false)); @@ -488,7 +494,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { logger.info("--> waiting for all three snapshots to show up as in-progress"); assertBusy(() -> assertThat(currentSnapshots(repoName), hasSize(3)), 30L, TimeUnit.SECONDS); - final ActionFuture deleteAllSnapshots = startDeleteFromNonMasterClient(repoName, "*"); + final ActionFuture deleteAllSnapshots = startDeleteFromNonClusterManagerClient(repoName, "*"); logger.info("--> wait for delete to be enqueued in cluster state"); awaitClusterState(state -> { final SnapshotDeletionsInProgress deletionsInProgress = state.custom(SnapshotDeletionsInProgress.TYPE); @@ -506,7 +512,7 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { } }, 30L, TimeUnit.SECONDS); - logger.info("--> stopping current master node"); + logger.info("--> stopping current cluster-manager node"); internalCluster().stopCurrentMasterNode(); unblockNode(repoName, dataNode); @@ -516,13 +522,14 @@ public void testMasterFailOverWithQueuedDeletes() throws Exception { try { assertAcked(deleteFuture.actionGet()); } catch 
(RepositoryException rex) { - // rarely the master node fails over twice when shutting down the initial master and fails the transport listener + // rarely the cluster-manager node fails over twice + // when shutting down the initial cluster-manager and fails the transport listener assertThat(rex.repository(), is("_all")); assertThat(rex.getMessage(), endsWith("Failed to update cluster state during repository operation")); } catch (SnapshotMissingException sme) { - // very rarely a master node fail-over happens at such a time that the client on the data-node sees a disconnect exception - // after the master has already started the delete, leading to the delete retry to run into a situation where the - // snapshot has already been deleted potentially + // very rarely a cluster-manager node fail-over happens at such a time + // that the client on the data-node sees a disconnect exception after the cluster-manager has already started the delete, + // leading to the delete retry to run into a situation where the snapshot has already been deleted potentially assertThat(sme.getSnapshotName(), is(firstSnapshot)); } } @@ -551,7 +558,10 @@ public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception { index(testIndex, "_doc", "some_id", "foo", "bar"); blockDataNode(repoName, dataNode); - final ActionFuture firstSnapshotResponse = startFullSnapshotFromMasterClient(repoName, "snapshot-one"); + final ActionFuture firstSnapshotResponse = startFullSnapshotFromClusterManagerClient( + repoName, + "snapshot-one" + ); waitForBlock(dataNode, repoName, TimeValue.timeValueSeconds(30L)); internalCluster().startDataOnlyNode(); @@ -559,7 +569,10 @@ public void testAssertMultipleSnapshotsAndPrimaryFailOver() throws Exception { ensureGreen(testIndex); final String secondSnapshot = "snapshot-two"; - final ActionFuture secondSnapshotResponse = startFullSnapshotFromMasterClient(repoName, secondSnapshot); + final ActionFuture secondSnapshotResponse = 
startFullSnapshotFromClusterManagerClient( + repoName, + secondSnapshot + ); // make sure second snapshot is in progress before restarting data node waitUntilInprogress(repoName, secondSnapshot, TimeValue.timeValueSeconds(5L)); @@ -627,7 +640,7 @@ public void testQueuedDeletesWithOverlap() throws Exception { assertThat(client().admin().cluster().prepareGetSnapshots(repoName).get().getSnapshots(), empty()); } - public void testQueuedOperationsOnMasterRestart() throws Exception { + public void testQueuedOperationsOnClusterManagerRestart() throws Exception { internalCluster().startMasterOnlyNodes(3); internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; @@ -648,7 +661,7 @@ public void testQueuedOperationsOnMasterRestart() throws Exception { awaitNoMoreRunningOperations(); } - public void testQueuedOperationsOnMasterDisconnect() throws Exception { + public void testQueuedOperationsOnClusterManagerDisconnect() throws Exception { internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; @@ -656,25 +669,25 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - final String masterNode = internalCluster().getMasterName(); - final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + final String clusterManagerNode = internalCluster().getMasterName(); + final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); - blockNodeOnAnyFiles(repoName, masterNode); - ActionFuture firstDeleteFuture = client(masterNode).admin() + blockNodeOnAnyFiles(repoName, clusterManagerNode); + ActionFuture firstDeleteFuture = client(clusterManagerNode).admin() .cluster() .prepareDeleteSnapshot(repoName, "*") .execute(); - 
waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); - final ActionFuture createThirdSnapshot = client(masterNode).admin() + final ActionFuture createThirdSnapshot = client(clusterManagerNode).admin() .cluster() .prepareCreateSnapshot(repoName, "snapshot-three") .setWaitForCompletion(true) .execute(); awaitNumberOfSnapshotsInProgress(1); - final ActionFuture secondDeleteFuture = client(masterNode).admin() + final ActionFuture secondDeleteFuture = client(clusterManagerNode).admin() .cluster() .prepareDeleteSnapshot(repoName, "*") .execute(); @@ -682,7 +695,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { networkDisruption.startDisrupting(); ensureStableCluster(3, dataNode); - unblockNode(repoName, masterNode); + unblockNode(repoName, clusterManagerNode); networkDisruption.stopDisrupting(); logger.info("--> make sure all failing requests get a response"); @@ -693,7 +706,7 @@ public void testQueuedOperationsOnMasterDisconnect() throws Exception { awaitNoMoreRunningOperations(); } - public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Exception { + public void testQueuedOperationsOnClusterManagerDisconnectAndRepoFailure() throws Exception { internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; @@ -701,23 +714,23 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - final String masterNode = internalCluster().getMasterName(); - final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + final String clusterManagerNode = internalCluster().getMasterName(); + final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); 
internalCluster().setDisruptionScheme(networkDisruption); blockMasterFromFinalizingSnapshotOnIndexFile(repoName); - final ActionFuture firstFailedSnapshotFuture = startFullSnapshotFromMasterClient( + final ActionFuture firstFailedSnapshotFuture = startFullSnapshotFromClusterManagerClient( repoName, "failing-snapshot-1" ); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); - final ActionFuture secondFailedSnapshotFuture = startFullSnapshotFromMasterClient( + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); + final ActionFuture secondFailedSnapshotFuture = startFullSnapshotFromClusterManagerClient( repoName, "failing-snapshot-2" ); awaitNumberOfSnapshotsInProgress(2); - final ActionFuture failedDeleteFuture = client(masterNode).admin() + final ActionFuture failedDeleteFuture = client(clusterManagerNode).admin() .cluster() .prepareDeleteSnapshot(repoName, "*") .execute(); @@ -725,7 +738,7 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except networkDisruption.startDisrupting(); ensureStableCluster(3, dataNode); - unblockNode(repoName, masterNode); + unblockNode(repoName, clusterManagerNode); networkDisruption.stopDisrupting(); logger.info("--> make sure all failing requests get a response"); @@ -736,7 +749,7 @@ public void testQueuedOperationsOnMasterDisconnectAndRepoFailure() throws Except awaitNoMoreRunningOperations(); } - public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception { + public void testQueuedOperationsAndBrokenRepoOnClusterManagerFailOver() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); internalCluster().startMasterOnlyNodes(3); @@ -755,7 +768,7 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception client().admin().cluster().prepareCreateSnapshot(repoName, "snapshot-three").setWaitForCompletion(false).get(); - final ActionFuture deleteFuture = 
startDeleteFromNonMasterClient(repoName, "*"); + final ActionFuture deleteFuture = startDeleteFromNonClusterManagerClient(repoName, "*"); awaitNDeletionsInProgress(2); internalCluster().stopCurrentMasterNode(); @@ -765,7 +778,7 @@ public void testQueuedOperationsAndBrokenRepoOnMasterFailOver() throws Exception expectThrows(RepositoryException.class, deleteFuture::actionGet); } - public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws Exception { + public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); internalCluster().startMasterOnlyNodes(3); @@ -777,14 +790,14 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws E createNSnapshots(repoName, randomIntBetween(2, 5)); final long generation = getRepositoryData(repoName).getGenId(); - final String masterNode = internalCluster().getMasterName(); - blockNodeOnAnyFiles(repoName, masterNode); - final ActionFuture snapshotThree = startFullSnapshotFromNonMasterClient(repoName, "snapshot-three"); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); + final String clusterManagerNode = internalCluster().getMasterName(); + blockNodeOnAnyFiles(repoName, clusterManagerNode); + final ActionFuture snapshotThree = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-three"); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); corruptIndexN(repoPath, generation); - final ActionFuture snapshotFour = startFullSnapshotFromNonMasterClient(repoName, "snapshot-four"); + final ActionFuture snapshotFour = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-four"); internalCluster().stopCurrentMasterNode(); ensureStableCluster(3); @@ -793,7 +806,7 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver() throws E expectThrows(OpenSearchException.class, snapshotFour::actionGet); } - 
public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver2() throws Exception { + public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOver2() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); internalCluster().startMasterOnlyNodes(3); @@ -805,28 +818,28 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOver2() throws createNSnapshots(repoName, randomIntBetween(2, 5)); final long generation = getRepositoryData(repoName).getGenId(); - final String masterNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getMasterName(); blockMasterFromFinalizingSnapshotOnIndexFile(repoName); - final ActionFuture snapshotThree = startFullSnapshotFromNonMasterClient(repoName, "snapshot-three"); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); + final ActionFuture snapshotThree = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-three"); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); corruptIndexN(repoPath, generation); - final ActionFuture snapshotFour = startFullSnapshotFromNonMasterClient(repoName, "snapshot-four"); + final ActionFuture snapshotFour = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-four"); awaitNumberOfSnapshotsInProgress(2); - final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); ensureStableCluster(3, dataNode); - unblockNode(repoName, masterNode); + unblockNode(repoName, clusterManagerNode); networkDisruption.stopDisrupting(); awaitNoMoreRunningOperations(); expectThrows(OpenSearchException.class, snapshotThree::actionGet); expectThrows(OpenSearchException.class, snapshotFour::actionGet); } - 
public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRepos() throws Exception { + public void testQueuedSnapshotOperationsAndBrokenRepoOnClusterManagerFailOverMultipleRepos() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); internalCluster().startMasterOnlyNodes(3, LARGE_SNAPSHOT_POOL_SETTINGS); @@ -837,30 +850,30 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRep createIndexWithContent("index-one"); createNSnapshots(repoName, randomIntBetween(2, 5)); - final String masterNode = internalCluster().getMasterName(); + final String clusterManagerNode = internalCluster().getMasterName(); final String blockedRepoName = "repo-blocked"; createRepository(blockedRepoName, "mock"); createNSnapshots(blockedRepoName, randomIntBetween(1, 5)); - blockNodeOnAnyFiles(blockedRepoName, masterNode); - final ActionFuture deleteFuture = startDeleteFromNonMasterClient(blockedRepoName, "*"); - waitForBlock(masterNode, blockedRepoName, TimeValue.timeValueSeconds(30L)); + blockNodeOnAnyFiles(blockedRepoName, clusterManagerNode); + final ActionFuture deleteFuture = startDeleteFromNonClusterManagerClient(blockedRepoName, "*"); + waitForBlock(clusterManagerNode, blockedRepoName, TimeValue.timeValueSeconds(30L)); awaitNDeletionsInProgress(1); - final ActionFuture createBlockedSnapshot = startFullSnapshotFromNonMasterClient( + final ActionFuture createBlockedSnapshot = startFullSnapshotFromNonClusterManagerClient( blockedRepoName, "queued-snapshot" ); awaitNumberOfSnapshotsInProgress(1); final long generation = getRepositoryData(repoName).getGenId(); - blockNodeOnAnyFiles(repoName, masterNode); - final ActionFuture snapshotThree = startFullSnapshotFromNonMasterClient(repoName, "snapshot-three"); - waitForBlock(masterNode, repoName, TimeValue.timeValueSeconds(30L)); + blockNodeOnAnyFiles(repoName, clusterManagerNode); + final ActionFuture snapshotThree = 
startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-three"); + waitForBlock(clusterManagerNode, repoName, TimeValue.timeValueSeconds(30L)); awaitNumberOfSnapshotsInProgress(2); corruptIndexN(repoPath, generation); - final ActionFuture snapshotFour = startFullSnapshotFromNonMasterClient(repoName, "snapshot-four"); + final ActionFuture snapshotFour = startFullSnapshotFromNonClusterManagerClient(repoName, "snapshot-four"); awaitNumberOfSnapshotsInProgress(3); internalCluster().stopCurrentMasterNode(); ensureStableCluster(3); @@ -872,8 +885,8 @@ public void testQueuedSnapshotOperationsAndBrokenRepoOnMasterFailOverMultipleRep try { createBlockedSnapshot.actionGet(); } catch (OpenSearchException ex) { - // Ignored, thrown most of the time but due to retries when shutting down the master could randomly pass when the request is - // retried and gets executed after the above delete + // Ignored, thrown most of the time but due to retries when shutting down the cluster-manager could randomly pass + // when the request is retried and gets executed after the above delete } } @@ -1011,13 +1024,13 @@ public void testQueuedOperationsAfterFinalizationFailure() throws Exception { final ActionFuture snapshotThree = startAndBlockFailingFullSnapshot(repoName, "snap-other"); - final String masterName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getMasterName(); final String snapshotOne = snapshotNames.get(0); final ActionFuture deleteSnapshotOne = startDeleteSnapshot(repoName, snapshotOne); awaitNDeletionsInProgress(1); - unblockNode(repoName, masterName); + unblockNode(repoName, clusterManagerName); expectThrows(SnapshotException.class, snapshotThree::actionGet); assertAcked(deleteSnapshotOne.get()); @@ -1067,20 +1080,20 @@ public void testEquivalentDeletesAreDeduplicated() throws Exception { } } - public void testMasterFailoverOnFinalizationLoop() throws Exception { + public void testClusterManagerFailoverOnFinalizationLoop() 
throws Exception { internalCluster().startMasterOnlyNodes(3); final String dataNode = internalCluster().startDataOnlyNode(); final String repoName = "test-repo"; createRepository(repoName, "mock"); createIndexWithContent("index-test"); - final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.DISCONNECT); + final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.DISCONNECT); internalCluster().setDisruptionScheme(networkDisruption); final List snapshotNames = createNSnapshots(repoName, randomIntBetween(2, 5)); - final String masterName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getMasterName(); blockMasterFromDeletingIndexNFile(repoName); - final ActionFuture snapshotThree = startFullSnapshotFromMasterClient(repoName, "snap-other"); - waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); + final ActionFuture snapshotThree = startFullSnapshotFromClusterManagerClient(repoName, "snap-other"); + waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); final String snapshotOne = snapshotNames.get(0); final ActionFuture deleteSnapshotOne = startDeleteSnapshot(repoName, snapshotOne); @@ -1088,7 +1101,7 @@ public void testMasterFailoverOnFinalizationLoop() throws Exception { networkDisruption.startDisrupting(); ensureStableCluster(3, dataNode); - unblockNode(repoName, masterName); + unblockNode(repoName, clusterManagerName); networkDisruption.stopDisrupting(); ensureStableCluster(4); @@ -1180,7 +1193,7 @@ public void testInterleavedAcrossMultipleRepos() throws Exception { assertSuccessful(createSlowFuture3); } - public void testMasterFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos() throws Exception { + public void testClusterManagerFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos() throws Exception { disableRepoConsistencyCheck("This test corrupts the repository on purpose"); internalCluster().startMasterOnlyNodes(3, 
LARGE_SNAPSHOT_POOL_SETTINGS); @@ -1206,9 +1219,9 @@ public void testMasterFailoverAndMultipleQueuedUpSnapshotsAcrossTwoRepos() throw client().admin().cluster().prepareCreateSnapshot(otherRepoName, "snapshot-other-blocked-2").setWaitForCompletion(false).get(); awaitNumberOfSnapshotsInProgress(4); - final String initialMaster = internalCluster().getMasterName(); - waitForBlock(initialMaster, repoName, TimeValue.timeValueSeconds(30L)); - waitForBlock(initialMaster, otherRepoName, TimeValue.timeValueSeconds(30L)); + final String initialClusterManager = internalCluster().getMasterName(); + waitForBlock(initialClusterManager, repoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(initialClusterManager, otherRepoName, TimeValue.timeValueSeconds(30L)); internalCluster().stopCurrentMasterNode(); ensureStableCluster(3, dataNode); @@ -1384,7 +1397,7 @@ public void testStartWithSuccessfulShardSnapshotPendingFinalization() throws Exc unblockNode(repoName, clusterManagerName); awaitNumberOfSnapshotsInProgress(1); - awaitMasterFinishRepoOperations(); + awaitClusterManagerFinishRepoOperations(); unblockNode(repoName, dataNode); assertSuccessful(blockedSnapshot); @@ -1416,13 +1429,13 @@ private List createNSnapshots(String repoName, int count) { return snapshotNames; } - private ActionFuture startDeleteFromNonMasterClient(String repoName, String snapshotName) { - logger.info("--> deleting snapshot [{}] from repo [{}] from non master client", snapshotName, repoName); + private ActionFuture startDeleteFromNonClusterManagerClient(String repoName, String snapshotName) { + logger.info("--> deleting snapshot [{}] from repo [{}] from non cluster-manager client", snapshotName, repoName); return internalCluster().nonMasterClient().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(); } - private ActionFuture startFullSnapshotFromNonMasterClient(String repoName, String snapshotName) { - logger.info("--> creating full snapshot [{}] to repo [{}] from non master 
client", snapshotName, repoName); + private ActionFuture startFullSnapshotFromNonClusterManagerClient(String repoName, String snapshotName) { + logger.info("--> creating full snapshot [{}] to repo [{}] from non cluster-manager client", snapshotName, repoName); return internalCluster().nonMasterClient() .admin() .cluster() @@ -1431,8 +1444,8 @@ private ActionFuture startFullSnapshotFromNonMasterClien .execute(); } - private ActionFuture startFullSnapshotFromMasterClient(String repoName, String snapshotName) { - logger.info("--> creating full snapshot [{}] to repo [{}] from master client", snapshotName, repoName); + private ActionFuture startFullSnapshotFromClusterManagerClient(String repoName, String snapshotName) { + logger.info("--> creating full snapshot [{}] to repo [{}] from cluster-manager client", snapshotName, repoName); return internalCluster().masterClient() .admin() .cluster() @@ -1488,10 +1501,10 @@ private static List currentSnapshots(String repoName) { private ActionFuture startAndBlockOnDeleteSnapshot(String repoName, String snapshotName) throws InterruptedException { - final String masterName = internalCluster().getMasterName(); - blockNodeOnAnyFiles(repoName, masterName); + final String clusterManagerName = internalCluster().getMasterName(); + blockNodeOnAnyFiles(repoName, clusterManagerName); final ActionFuture fut = startDeleteSnapshot(repoName, snapshotName); - waitForBlock(masterName, repoName, TimeValue.timeValueSeconds(30L)); + waitForBlock(clusterManagerName, repoName, TimeValue.timeValueSeconds(30L)); return fut; } diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 46638f31b6cca..2eca8555e1388 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -835,7 +835,7 @@ public void sendResponse(RestResponse response) { } } - public void testMasterShutdownDuringSnapshot() throws Exception { + public void testClusterManagerShutdownDuringSnapshot() throws Exception { logger.info("--> starting two cluster-manager nodes and two data nodes"); internalCluster().startMasterOnlyNodes(2); internalCluster().startDataOnlyNodes(2); @@ -873,7 +873,7 @@ public void testMasterShutdownDuringSnapshot() throws Exception { assertEquals(0, snapshotInfo.failedShards()); } - public void testMasterAndDataShutdownDuringSnapshot() throws Exception { + public void testClusterManagerAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> starting three cluster-manager nodes and two data nodes"); internalCluster().startMasterOnlyNodes(3); internalCluster().startDataOnlyNodes(2); @@ -890,7 +890,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { final int numberOfShards = getNumShards("test-idx").numPrimaries; logger.info("number of shards: {}", numberOfShards); - final String masterNode = blockMasterFromFinalizingSnapshotOnSnapFile("test-repo"); + final String clusterManagerNode = blockMasterFromFinalizingSnapshotOnSnapFile("test-repo"); final String dataNode = blockNodeWithIndex("test-repo", "test-idx"); dataNodeClient().admin() @@ -902,7 +902,7 @@ public void testMasterAndDataShutdownDuringSnapshot() throws Exception { logger.info("--> stopping data node {}", dataNode); stopNode(dataNode); - logger.info("--> stopping cluster-manager node {} ", masterNode); + logger.info("--> stopping cluster-manager node {} ", clusterManagerNode); internalCluster().stopCurrentMasterNode(); logger.info("--> wait until the snapshot is done"); @@ -1143,7 +1143,7 @@ public void testDeduplicateIndexMetadata() throws Exception { assertThat(snapshot3IndexMetaFiles, hasSize(1)); // should have deleted the metadata blob 
referenced by the first two snapshots } - public void testDataNodeRestartWithBusyMasterDuringSnapshot() throws Exception { + public void testDataNodeRestartWithBusyClusterManagerDuringSnapshot() throws Exception { logger.info("--> starting a cluster-manager node and two data nodes"); internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNodes(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java index 608a439b40fec..0750675d46b9c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/MetadataLoadingDuringSnapshotRestoreIT.java @@ -198,8 +198,8 @@ private void assertIndexMetadataLoads(final String snapshot, final String index, } private CountingMockRepository getCountingMockRepository() { - String master = internalCluster().getMasterName(); - RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, master); + String clusterManager = internalCluster().getMasterName(); + RepositoriesService repositoriesService = internalCluster().getInstance(RepositoriesService.class, clusterManager); Repository repository = repositoriesService.repository("repository"); assertThat(repository, instanceOf(CountingMockRepository.class)); return (CountingMockRepository) repository; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index 662e97dd84fda..e72110f4c4efd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -177,15 +177,15 @@ public void 
testResidualStaleIndicesAreDeletedByConsecutiveDelete() throws Excep // Make repository to throw exception when trying to delete stale indices // This will make sure stale indices stays in repository after snapshot delete - String masterNode = internalCluster().getMasterName(); - ((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterNode).repository("test-repo")) + String clusterManagerNode = internalCluster().getMasterName(); + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerNode).repository("test-repo")) .setThrowExceptionWhileDelete(true); logger.info("--> delete the bulk of the snapshots"); client.admin().cluster().prepareDeleteSnapshot(repositoryName, bulkSnapshotsPattern).get(); // Make repository to work normally - ((MockRepository) internalCluster().getInstance(RepositoriesService.class, masterNode).repository("test-repo")) + ((MockRepository) internalCluster().getInstance(RepositoriesService.class, clusterManagerNode).repository("test-repo")) .setThrowExceptionWhileDelete(false); // This snapshot should delete last snapshot's residual stale indices as well diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java index 9ee479cdd7fe0..d84eb9ea1e269 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoryFilterUserMetadataIT.java @@ -70,7 +70,7 @@ protected Collection> nodePlugins() { } public void testFilteredRepoMetadataIsUsed() { - final String masterName = internalCluster().getMasterName(); + final String clusterManagerName = internalCluster().getMasterName(); final String repoName = "test-repo"; assertAcked( client().admin() @@ -78,7 +78,9 @@ public void testFilteredRepoMetadataIsUsed() { 
.preparePutRepository(repoName) .setType(MetadataFilteringPlugin.TYPE) .setSettings( - Settings.builder().put("location", randomRepoPath()).put(MetadataFilteringPlugin.MASTER_SETTING_VALUE, masterName) + Settings.builder() + .put("location", randomRepoPath()) + .put(MetadataFilteringPlugin.CLUSTER_MANAGER_SETTING_VALUE, clusterManagerName) ) ); createIndex("test-idx"); @@ -88,15 +90,18 @@ public void testFilteredRepoMetadataIsUsed() { .setWaitForCompletion(true) .get() .getSnapshotInfo(); - assertThat(snapshotInfo.userMetadata(), is(Collections.singletonMap(MetadataFilteringPlugin.MOCK_FILTERED_META, masterName))); + assertThat( + snapshotInfo.userMetadata(), + is(Collections.singletonMap(MetadataFilteringPlugin.MOCK_FILTERED_META, clusterManagerName)) + ); } - // Mock plugin that stores the name of the master node that started a snapshot in each snapshot's metadata + // Mock plugin that stores the name of the cluster-manager node that started a snapshot in each snapshot's metadata public static final class MetadataFilteringPlugin extends org.opensearch.plugins.Plugin implements RepositoryPlugin { private static final String MOCK_FILTERED_META = "mock_filtered_meta"; - private static final String MASTER_SETTING_VALUE = "initial_master"; + private static final String CLUSTER_MANAGER_SETTING_VALUE = "initial_cluster_manager"; private static final String TYPE = "mock_meta_filtering"; @@ -112,8 +117,8 @@ public Map getRepositories( metadata -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) { // Storing the initially expected metadata value here to verify that #filterUserMetadata is only called once on the - // initial master node starting the snapshot - private final String initialMetaValue = metadata.settings().get(MASTER_SETTING_VALUE); + // initial cluster-manager node starting the snapshot + private final String initialMetaValue = metadata.settings().get(CLUSTER_MANAGER_SETTING_VALUE); @Override public void 
finalizeSnapshot( diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotShardsServiceIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotShardsServiceIT.java index 4543a3e0a1b6d..ef7c61205855e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotShardsServiceIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotShardsServiceIT.java @@ -83,7 +83,7 @@ public void testRetryPostingSnapshotStatusMessages() throws Exception { final SnapshotId snapshotId = getSnapshot("test-repo", "test-snap").snapshotId(); logger.info("--> start disrupting cluster"); - final NetworkDisruption networkDisruption = isolateMasterDisruption(NetworkDisruption.NetworkDelay.random(random())); + final NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.NetworkDelay.random(random())); internalCluster().setDisruptionScheme(networkDisruption); networkDisruption.startDisrupting(); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index 6c697439b241d..1376961825e8b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -237,7 +237,7 @@ public void testCorrectCountsForDoneShards() throws Exception { blockDataNode(repoName, dataNodeOne); final String snapshotOne = "snap-1"; - // restarting a data node below so using a master client here + // restarting a data node below so using a cluster-manager client here final ActionFuture responseSnapshotOne = internalCluster().masterClient() .admin() .cluster() diff --git a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java 
b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java index e433a489ad572..661efbaf9cd01 100644 --- a/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/versioning/ConcurrentDocumentOperationIT.java @@ -77,9 +77,9 @@ public void onFailure(Exception e) { client().admin().indices().prepareRefresh().execute().actionGet(); logger.info("done indexing, check all have the same field value"); - Map masterSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); + Map clusterManagerSource = client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(); for (int i = 0; i < (cluster().size() * 5); i++) { - assertThat(client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(), equalTo(masterSource)); + assertThat(client().prepareGet("test", "1").execute().actionGet().getSourceAsMap(), equalTo(clusterManagerSource)); } } } diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 70839be718e47..3594bf9f53ca4 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -627,7 +627,7 @@ protected SnapshotInfo getSnapshot(String repository, String snapshot) { return snapshotInfos.get(0); } - protected void awaitMasterFinishRepoOperations() throws Exception { + protected void awaitClusterManagerFinishRepoOperations() throws Exception { logger.info("--> waiting for cluster-manager to finish all repo operations on its SNAPSHOT pool"); final ThreadPool clusterManagerThreadPool = internalCluster().getMasterNodeInstance(ThreadPool.class); assertBusy(() -> { diff --git 
a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 1af6b03ff24af..76c401a82646f 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -712,7 +712,7 @@ public void setDisruptionScheme(ServiceDisruptionScheme scheme) { * @param disruptionType type of disruption to create * @return disruption */ - protected static NetworkDisruption isolateMasterDisruption(NetworkDisruption.NetworkLinkDisruptionType disruptionType) { + protected static NetworkDisruption isolateClusterManagerDisruption(NetworkDisruption.NetworkLinkDisruptionType disruptionType) { final String clusterManagerNode = internalCluster().getMasterName(); return new NetworkDisruption( new NetworkDisruption.TwoPartitions( From a70edc681f794c4071b70c05a5495af945de767f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 19 May 2022 17:20:26 -0400 Subject: [PATCH 14/75] [REMOVE] Cleanup deprecated thread pool types (FIXED_AUTO_QUEUE_SIZE) (#3369) Signed-off-by: Andriy Redko --- .../main/java/org/opensearch/threadpool/ThreadPool.java | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java index 77682a7946c8f..cc8d81d2a7b4b 100644 --- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java @@ -118,7 +118,6 @@ public enum ThreadPoolType { DIRECT("direct"), FIXED("fixed"), RESIZABLE("resizable"), - FIXED_AUTO_QUEUE_SIZE("fixed_auto_queue_size"), SCALING("scaling"); private final String type; @@ -696,7 +695,13 @@ public Info(String name, ThreadPoolType type, int min, int max, @Nullable TimeVa public Info(StreamInput in) throws IOException { name = 
in.readString(); - type = ThreadPoolType.fromType(in.readString()); + final String typeStr = in.readString(); + // Opensearch on or after 3.0.0 version doesn't know about "fixed_auto_queue_size" thread pool. Convert it to RESIZABLE. + if (typeStr.equalsIgnoreCase("fixed_auto_queue_size")) { + type = ThreadPoolType.RESIZABLE; + } else { + type = ThreadPoolType.fromType(typeStr); + } min = in.readInt(); max = in.readInt(); keepAlive = in.readOptionalTimeValue(); From 326a1a8c6d7a4f29d2e0a1739af1d12b8bbe7090 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Fri, 20 May 2022 07:29:55 -0700 Subject: [PATCH 15/75] [Type removal] _type removal from tests of yaml tests (#3406) * [Type removal] _type removal from tests of yaml tests Signed-off-by: Suraj Singh * Fix spotless failures Signed-off-by: Suraj Singh * Fix assertion failures Signed-off-by: Suraj Singh * Fix assertion failures in DoSectionTests Signed-off-by: Suraj Singh --- .../section/ClientYamlTestSuiteTests.java | 105 ++++-------------- .../rest/yaml/section/DoSectionTests.java | 52 +++------ .../rest-api-spec/test/suite1/10_basic.yml | 4 - 3 files changed, 40 insertions(+), 121 deletions(-) diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java index f995e18d0f2df..40421ef43ab6b 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java @@ -32,8 +32,6 @@ package org.opensearch.test.rest.yaml.section; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.common.ParsingException; import org.opensearch.common.xcontent.XContentLocation; @@ -76,23 +74,9 @@ public void testParseTestSetupTeardownAndSections() throws Exception { + " 
indices.get_mapping:\n" + " index: test_index\n" + "\n" - + " - match: {test_index.test_type.properties.text.type: string}\n" - + " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n" + + " - match: {test_index.properties.text.type: string}\n" + + " - match: {test_index.properties.text.analyzer: whitespace}\n" + "\n" - + "---\n" - + "\"Get type mapping - pre 6.0\":\n" - + "\n" - + " - skip:\n" - + " version: \"6.0.0 - \"\n" - + " reason: \"for newer versions the index name is always returned\"\n" - + "\n" - + " - do:\n" - + " indices.get_mapping:\n" - + " index: test_index\n" - + " type: test_type\n" - + "\n" - + " - match: {test_type.properties.text.type: string}\n" - + " - match: {test_type.properties.text.analyzer: whitespace}\n" ); ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), parser); @@ -135,7 +119,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(true)); } - assertThat(restTestSuite.getTestSections().size(), equalTo(2)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping")); assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); @@ -147,36 +131,13 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class)); MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1); - assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type")); + assertThat(matchAssertion.getField(), equalTo("test_index.properties.text.type")); 
assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string")); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class)); matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2); - assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer")); + assertThat(matchAssertion.getField(), equalTo("test_index.properties.text.analyzer")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); - assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 6.0")); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); - assertThat( - restTestSuite.getTestSections().get(1).getSkipSection().getReason(), - equalTo("for newer versions the index name is always returned") - ); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(LegacyESVersion.fromString("6.0.0"))); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); - assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); - assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); - doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0); - assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); - assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) 
restTestSuite.getTestSections().get(1).getExecutableSections().get(1); - assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.type")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string")); - assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(2), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(2); - assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.analyzer")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); } public void testParseTestSingleTestSection() throws Exception { @@ -188,24 +149,20 @@ public void testParseTestSingleTestSection() throws Exception { + " - do:\n" + " index:\n" + " index: test-weird-index-中文\n" - + " type: weird.type\n" + " id: 1\n" + " body: { foo: bar }\n" + "\n" + " - is_true: ok\n" + " - match: { _index: test-weird-index-中文 }\n" - + " - match: { _type: weird.type }\n" + " - match: { _id: \"1\"}\n" + " - match: { _version: 1}\n" + "\n" + " - do:\n" + " get:\n" + " index: test-weird-index-中文\n" - + " type: weird.type\n" + " id: 1\n" + "\n" + " - match: { _index: test-weird-index-中文 }\n" - + " - match: { _type: weird.type }\n" + " - match: { _id: \"1\"}\n" + " - match: { _version: 1}\n" + " - match: { _source: { foo: bar }}" @@ -222,12 +179,12 @@ public void testParseTestSingleTestSection() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID")); assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(12)); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(10)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); DoSection doSection = (DoSection) 
restTestSuite.getTestSections().get(0).getExecutableSections().get(0); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("index")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(IsTrueAssertion.class)); IsTrueAssertion trueAssertion = (IsTrueAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1); @@ -238,40 +195,32 @@ public void testParseTestSingleTestSection() throws Exception { assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文")); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(3), instanceOf(MatchAssertion.class)); matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(3); - assertThat(matchAssertion.getField(), equalTo("_type")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4); assertThat(matchAssertion.getField(), equalTo("_id")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(5); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4); 
assertThat(matchAssertion.getField(), equalTo("_version")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(DoSection.class)); - doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(6); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), instanceOf(DoSection.class)); + doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(5); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("get")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(6); assertThat(matchAssertion.getField(), equalTo("_index")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8); - assertThat(matchAssertion.getField(), equalTo("_type")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class)); - matchAssertion = 
(MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7); assertThat(matchAssertion.getField(), equalTo("_id")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(10), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(10); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8); assertThat(matchAssertion.getField(), equalTo("_version")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(11), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(11); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9); assertThat(matchAssertion.getField(), equalTo("_source")); assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class)); assertThat(((Map) matchAssertion.getExpectedValue()).get("foo").toString(), equalTo("bar")); @@ -287,14 +236,12 @@ public void testParseTestMultipleTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: { doc: { foo: bar } }\n" + "\n" + " - do:\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " 
id: 1\n" + " body: { doc: { foo: bar } }\n" + " ignore: 404\n" @@ -307,7 +254,6 @@ public void testParseTestMultipleTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body:\n" + " script: \"ctx._source.foo = bar\"\n" @@ -316,7 +262,6 @@ public void testParseTestMultipleTestSections() throws Exception { + " - do:\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " ignore: 404\n" + " body:\n" @@ -341,13 +286,13 @@ public void testParseTestMultipleTestSections() throws Exception { DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); assertThat(doSection.getCatch(), equalTo("missing")); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class)); doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(1); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Missing document (script)")); @@ -358,13 +303,13 @@ public void testParseTestMultipleTestSections() throws Exception { doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0); assertThat(doSection.getCatch(), equalTo("missing")); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - 
assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class)); doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(1); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); } @@ -378,7 +323,6 @@ public void testParseTestDuplicateTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: { doc: { foo: bar } }\n" + "\n" @@ -390,7 +334,6 @@ public void testParseTestDuplicateTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body:\n" + " script: \"ctx._source.foo = bar\"\n" diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java index 53cae686e3cac..1fb08934c8b8b 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java @@ -173,19 +173,15 @@ public void testIgnoreTypesWarnings() { } public void testParseDoSectionNoBody() throws Exception { - parser = createParser( - YamlXContent.yamlXContent, - "get:\n" + " index: test_index\n" + " type: test_type\n" + " id: 1" - ); + parser = createParser(YamlXContent.yamlXContent, "get:\n" + " index: test_index\n" + " id: 1"); DoSection 
doSection = DoSection.parse(parser); ApiCallSection apiCallSection = doSection.getApiCallSection(); assertThat(apiCallSection, notNullValue()); assertThat(apiCallSection.getApi(), equalTo("get")); - assertThat(apiCallSection.getParams().size(), equalTo(3)); + assertThat(apiCallSection.getParams().size(), equalTo(2)); assertThat(apiCallSection.getParams().get("index"), equalTo("test_index")); - assertThat(apiCallSection.getParams().get("type"), equalTo("test_type")); assertThat(apiCallSection.getParams().get("id"), equalTo("1")); assertThat(apiCallSection.hasBody(), equalTo(false)); } @@ -204,19 +200,15 @@ public void testParseDoSectionNoParamsNoBody() throws Exception { public void testParseDoSectionWithJsonBody() throws Exception { String body = "{ \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }"; - parser = createParser( - YamlXContent.yamlXContent, - "index:\n" + " index: test_1\n" + " type: test\n" + " id: 1\n" + " body: " + body - ); + parser = createParser(YamlXContent.yamlXContent, "index:\n" + " index: test_1\n" + " id: 1\n" + " body: " + body); DoSection doSection = DoSection.parse(parser); ApiCallSection apiCallSection = doSection.getApiCallSection(); assertThat(apiCallSection, notNullValue()); assertThat(apiCallSection.getApi(), equalTo("index")); - assertThat(apiCallSection.getParams().size(), equalTo(3)); + assertThat(apiCallSection.getParams().size(), equalTo(2)); assertThat(apiCallSection.getParams().get("index"), equalTo("test_1")); - assertThat(apiCallSection.getParams().get("type"), equalTo("test")); assertThat(apiCallSection.getParams().get("id"), equalTo("1")); assertThat(apiCallSection.hasBody(), equalTo(true)); @@ -225,9 +217,9 @@ public void testParseDoSectionWithJsonBody() throws Exception { public void testParseDoSectionWithJsonMultipleBodiesAsLongString() throws Exception { String bodies[] = new String[] { - "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }\n", + "{ 
\"index\": { \"_index\":\"test_index\", \"_id\":\"test_id\" } }\n", "{ \"f1\":\"v1\", \"f2\":42 }\n", - "{ \"index\": { \"_index\":\"test_index2\", \"_type\":\"test_type2\", \"_id\":\"test_id2\" } }\n", + "{ \"index\": { \"_index\":\"test_index2\", \"_id\":\"test_id2\" } }\n", "{ \"f1\":\"v2\", \"f2\":47 }\n" }; parser = createParser( YamlXContent.yamlXContent, @@ -284,21 +276,19 @@ public void testParseDoSectionWithYamlMultipleBodies() throws Exception { + " body:\n" + " - index:\n" + " _index: test_index\n" - + " _type: test_type\n" + " _id: test_id\n" + " - f1: v1\n" + " f2: 42\n" + " - index:\n" + " _index: test_index2\n" - + " _type: test_type2\n" + " _id: test_id2\n" + " - f1: v2\n" + " f2: 47" ); String[] bodies = new String[4]; - bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}"; + bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_id\": \"test_id\"}}"; bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }"; - bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_type\": \"test_type2\", \"_id\": \"test_id2\"}}"; + bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_id\": \"test_id2\"}}"; bodies[3] = "{ \"f1\":\"v2\", \"f2\": 47 }"; DoSection doSection = DoSection.parse(parser); @@ -322,13 +312,10 @@ public void testParseDoSectionWithYamlBodyMultiGet() throws Exception { "mget:\n" + " body:\n" + " docs:\n" - + " - { _index: test_2, _type: test, _id: 1}\n" - + " - { _index: test_1, _type: none, _id: 1}" + + " - { _index: test_2, _id: 1}\n" + + " - { _index: test_1, _id: 1}" ); - String body = "{ \"docs\": [ " - + "{\"_index\": \"test_2\", \"_type\":\"test\", \"_id\":1}, " - + "{\"_index\": \"test_1\", \"_type\":\"none\", \"_id\":1} " - + "]}"; + String body = "{ \"docs\": [ " + "{\"_index\": \"test_2\", \"_id\":1}, " + "{\"_index\": \"test_1\", \"_id\":1} " + "]}"; DoSection doSection = DoSection.parse(parser); ApiCallSection apiCallSection = doSection.getApiCallSection(); @@ -346,7 +333,6 @@ public 
void testParseDoSectionWithBodyStringified() throws Exception { YamlXContent.yamlXContent, "index:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: \"{ \\\"_source\\\": true, \\\"query\\\": { \\\"match_all\\\": {} } }\"" ); @@ -356,9 +342,8 @@ public void testParseDoSectionWithBodyStringified() throws Exception { assertThat(apiCallSection, notNullValue()); assertThat(apiCallSection.getApi(), equalTo("index")); - assertThat(apiCallSection.getParams().size(), equalTo(3)); + assertThat(apiCallSection.getParams().size(), equalTo(2)); assertThat(apiCallSection.getParams().get("index"), equalTo("test_1")); - assertThat(apiCallSection.getParams().get("type"), equalTo("test")); assertThat(apiCallSection.getParams().get("id"), equalTo("1")); assertThat(apiCallSection.hasBody(), equalTo(true)); assertThat(apiCallSection.getBodies().size(), equalTo(1)); @@ -444,16 +429,15 @@ public void testParseDoSectionWithoutClientCallSection() throws Exception { public void testParseDoSectionMultivaluedField() throws Exception { parser = createParser( YamlXContent.yamlXContent, - "indices.get_field_mapping:\n" + " index: test_index\n" + " type: test_type\n" + " field: [ text , text1 ]" + "indices.get_field_mapping:\n" + " index: test_index\n" + " field: [ text , text1 ]" ); DoSection doSection = DoSection.parse(parser); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); assertThat(doSection.getApiCallSection().getParams().get("field"), equalTo("text,text1")); 
assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); @@ -464,7 +448,6 @@ public void testParseDoSectionExpectedWarnings() throws Exception { YamlXContent.yamlXContent, "indices.get_field_mapping:\n" + " index: test_index\n" - + " type: test_type\n" + "warnings:\n" + " - some test warning they are typically pretty long\n" + " - some other test warning sometimes they have [in] them" @@ -474,9 +457,8 @@ public void testParseDoSectionExpectedWarnings() throws Exception { assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1)); assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); assertThat( @@ -502,7 +484,6 @@ public void testParseDoSectionAllowedWarnings() throws Exception { YamlXContent.yamlXContent, "indices.get_field_mapping:\n" + " index: test_index\n" - + " type: test_type\n" + "allowed_warnings:\n" + " - some test warning they are typically pretty long\n" + " - some other test warning sometimes they have [in] them" @@ -512,9 +493,8 @@ public void testParseDoSectionAllowedWarnings() throws Exception { assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); + 
assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1)); assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); assertThat( diff --git a/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml b/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml index 0689f714d6416..c5fde76e94cc2 100644 --- a/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml +++ b/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml @@ -4,18 +4,15 @@ - do: index: index: test_1 - type: test id: 中文 body: { "foo": "Hello: 中文" } - do: get: index: test_1 - type: test id: 中文 - match: { _index: test_1 } - - match: { _type: test } - match: { _id: 中文 } - match: { _source: { foo: "Hello: 中文" } } @@ -26,6 +23,5 @@ id: 中文 - match: { _index: test_1 } - - match: { _type: test } - match: { _id: 中文 } - match: { _source: { foo: "Hello: 中文" } } From c5a3e017b3376e67f2d74e56515f14189bcb934e Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Fri, 20 May 2022 13:48:58 -0700 Subject: [PATCH 16/75] Add release notes for version 2.0.0 (#3410) Signed-off-by: Rabi Panda --- .../opensearch.release-notes-2.0.0.md | 189 ++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.0.0.md diff --git a/release-notes/opensearch.release-notes-2.0.0.md b/release-notes/opensearch.release-notes-2.0.0.md new file mode 100644 index 0000000000000..8880d7a7bddf9 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.0.0.md @@ -0,0 +1,189 @@ +## 2022-05-19 Version 2.0.0 Release Notes + +### Breaking Changes in 2.0 + +#### Remove Mapping types +* [Type removal] Remove redundant _type in pipeline simulate action 
([#3371](https://github.com/opensearch-project/OpenSearch/pull/3371)) +* [Type removal] Remove _type deprecation from script and conditional processor ([#3239](https://github.com/opensearch-project/OpenSearch/pull/3239)) +* [Type removal] Remove _type from _bulk yaml test, scripts, unused constants ([#3372](https://github.com/opensearch-project/OpenSearch/pull/3372)) +* [Type removal] _type removal from mocked responses of scroll hit tests ([#3377](https://github.com/opensearch-project/OpenSearch/pull/3377)) +* [Remove] TypeFieldMapper ([#3196](https://github.com/opensearch-project/OpenSearch/pull/3196)) +* [Type Removal] Remove TypeFieldMapper usage, remove support of `_type` in searches and from LeafFieldsLookup ([#3016](https://github.com/opensearch-project/OpenSearch/pull/3016)) +* [Type removal] Remove _type support in NOOP bulk indexing from client benchmark ([#3076](https://github.com/opensearch-project/OpenSearch/pull/3076)) +* [Type removal] Remove deprecation warning on use of _type in doc scripts ([#2564](https://github.com/opensearch-project/OpenSearch/pull/2564)) +* [Remove] AliasesExistAction ([#3149](https://github.com/opensearch-project/OpenSearch/pull/3149)) +* [Remove] TypesExist Action ([#3139](https://github.com/opensearch-project/OpenSearch/pull/3139)) +* [Remove] Type from nested fields using new metadata field mapper([#3004](https://github.com/opensearch-project/OpenSearch/pull/3004)) +* [Remove] types from rest-api-spec endpoints ([#2689](https://github.com/opensearch-project/OpenSearch/pull/2689)) +* [Remove] Types from PutIndexTemplateRequest and builder to reduce mapping to a string ([#2510](https://github.com/opensearch-project/OpenSearch/pull/2510)) +* [Remove] Type from Percolate query API ([#2490](https://github.com/opensearch-project/OpenSearch/pull/2490)) +* [Remove] types from CreateIndexRequest and companion Builder's mapping method ([#2498](https://github.com/opensearch-project/OpenSearch/pull/2498)) +* [Remove] Type from 
PutIndexTemplateRequest and PITRB ([#2497](https://github.com/opensearch-project/OpenSearch/pull/2497)) +* [Remove] Type metadata from ingest documents ([#2491](https://github.com/opensearch-project/OpenSearch/pull/2491)) +* [Remove] type from CIR.mapping and CIRB.mapping ([#2478](https://github.com/opensearch-project/OpenSearch/pull/2478)) +* [Remove] types based addMapping method from CreateIndexRequest and Builder ([#2460](https://github.com/opensearch-project/OpenSearch/pull/2460)) +* [Remove] type from TaskResults index and IndexMetadata.getMappings ([#2469](https://github.com/opensearch-project/OpenSearch/pull/2469)) +* [Remove] Type query ([#2448](https://github.com/opensearch-project/OpenSearch/pull/2448)) +* [Remove] Type from TermsLookUp ([#2459](https://github.com/opensearch-project/OpenSearch/pull/2459)) +* [Remove] types from Uid and remaining types/Uid from translog ([#2450](https://github.com/opensearch-project/OpenSearch/pull/2450)) +* [Remove] types from translog ([#2439](https://github.com/opensearch-project/OpenSearch/pull/2439)) +* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433)) +* [Remove] Multiple Types from IndexTemplateMetadata ([#2400](https://github.com/opensearch-project/OpenSearch/pull/2400)) + +#### Upgrades +* [Upgrade] Lucene 9.1 release ([#2560](https://github.com/opensearch-project/OpenSearch/pull/2560)) +* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) + +#### Deprecations +* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) +* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' 
([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) +* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) +* Deprecated reserved node id '_must_join_elected_master_' that used by DetachClusterCommand and replace with '_must_join_elected_cluster_manager_' ([#3138](https://github.com/opensearch-project/OpenSearch/pull/3138)) + +### Security Fixes +* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 ([#2599](https://github.com/opensearch-project/OpenSearch/pull/2599)) + +### Features/Enhancements +* Removing hard coded value of max concurrent shard requests ([#3364](https://github.com/opensearch-project/OpenSearch/pull/3364)) +* Update generated ANTLR lexer/parser to match runtime version ([#3297](https://github.com/opensearch-project/OpenSearch/pull/3297)) +* Rename BecomeMasterTask to BecomeClusterManagerTask in JoinTaskExecutor ([#3099](https://github.com/opensearch-project/OpenSearch/pull/3099)) +* Replace 'master' terminology with 'cluster manager' in log messages in 'server/src/main' directory - Part 2 ([#3174](https://github.com/opensearch-project/OpenSearch/pull/3174)) +* Remove deprecation warning of using REST API request parameter 'master_timeout' ([#2920](https://github.com/opensearch-project/OpenSearch/pull/2920)) +* Add deprecated API for creating History Ops Snapshot from translog ([#2886](https://github.com/opensearch-project/OpenSearch/pull/2886)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs ([#2682](https://github.com/opensearch-project/OpenSearch/pull/2682)) +* Change deprecation message for API parameter value 'master_node' of parameter 'metric' ([#2880](https://github.com/opensearch-project/OpenSearch/pull/2880)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs 
([#2680](https://github.com/opensearch-project/OpenSearch/pull/2680)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs ([#2678](https://github.com/opensearch-project/OpenSearch/pull/2678)) +* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal ([#2863](https://github.com/opensearch-project/OpenSearch/pull/2863)) +* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* Remove endpoint_suffix dependency on account key ([#2485](https://github.com/opensearch-project/OpenSearch/pull/2485)) +* Replace remaining 'blacklist' with 'denylist' in internal class and method names ([#2784](https://github.com/opensearch-project/OpenSearch/pull/2784)) +* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe ([#2641](https://github.com/opensearch-project/OpenSearch/pull/2641)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs ([#2660](https://github.com/opensearch-project/OpenSearch/pull/2660)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs ([#2658](https://github.com/opensearch-project/OpenSearch/pull/2658)) +* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' ([#2702](https://github.com/opensearch-project/OpenSearch/pull/2702)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Add mapping method back referenced in other repos ([#2636](https://github.com/opensearch-project/OpenSearch/pull/2636)) +* Replaced "master" terminology in Log message 
([#2575](https://github.com/opensearch-project/OpenSearch/pull/2575)) +* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) +* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) +* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) +* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) +* Add 'cluster_manager_node' into ClusterState Metric as an alternative to 'master_node' ([#2415](https://github.com/opensearch-project/OpenSearch/pull/2415)) +* Add a new node role 'cluster_manager' as the alternative for 'master' role and deprecate 'master' role ([#2424](https://github.com/opensearch-project/OpenSearch/pull/2424)) +* Replace 'master' with 'cluster_manager' in 'GET Cat Nodes' API ([#2441](https://github.com/opensearch-project/OpenSearch/pull/2441)) +* Replace 'discovered_master' with 'discovered_cluster_manager' in 'GET Cat Health' API ([#2438](https://github.com/opensearch-project/OpenSearch/pull/2438)) +* Add a field discovered_cluster_manager in get cluster health api ([#2437](https://github.com/opensearch-project/OpenSearch/pull/2437)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT Nodes API ([#2435](https://github.com/opensearch-project/OpenSearch/pull/2435)) +* Add a new REST API endpoint 'GET _cat/cluster_manager' as the replacement of 'GET _cat/master' ([#2404](https://github.com/opensearch-project/OpenSearch/pull/2404)) +* Add default for EnginePlugin.getEngineFactory ([#2419](https://github.com/opensearch-project/OpenSearch/pull/2419)) + +### Bug Fixes +* Fixing PublishTests tests (running against unclean build folders) ([#3253](https://github.com/opensearch-project/OpenSearch/pull/3253)) 
+* Fixing Scaled float field mapper to respect ignoreMalformed setting ([#2918](https://github.com/opensearch-project/OpenSearch/pull/2918)) +* Fixing plugin installation URL to consume build qualifier ([#3193](https://github.com/opensearch-project/OpenSearch/pull/3193)) +* Fix minimum index compatibility error message ([#3159](https://github.com/opensearch-project/OpenSearch/pull/3159)) +* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues ([#3048](https://github.com/opensearch-project/OpenSearch/pull/3048)) +* Adding a null pointer check to fix index_prefix query ([#2879](https://github.com/opensearch-project/OpenSearch/pull/2879)) +* Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) +* Fix InboundDecoder version compat check ([#2570](https://github.com/opensearch-project/OpenSearch/pull/2570)) +* ignore_malformed parameter on ip_range data_type throws mapper_parsing_exception ([#2429](https://github.com/opensearch-project/OpenSearch/pull/2429)) +* Discrepancy in result from _validate/query API and actual query validity ([#2416](https://github.com/opensearch-project/OpenSearch/pull/2416)) + +### Build & Infrastructure +* Allow to configure POM for ZIP publication ([#3252](https://github.com/opensearch-project/OpenSearch/pull/3252)) +* Gradle plugin `opensearch.pluginzip` Add implicit dependency. 
([#3189](https://github.com/opensearch-project/OpenSearch/pull/3189)) +* Gradle custom java zippublish plugin ([#2988](https://github.com/opensearch-project/OpenSearch/pull/2988)) +* Added Adoptium JDK8 support and updated DistroTestPlugin JDK version used by Gradle ([#3324](https://github.com/opensearch-project/OpenSearch/pull/3324)) +* Update bundled JDK to 17.0.3+7 ([#3093](https://github.com/opensearch-project/OpenSearch/pull/3093)) +* Use G1GC on JDK11+ ([#2964](https://github.com/opensearch-project/OpenSearch/pull/2964)) +* Removed java11 source folders since JDK-11 is the baseline now ([#2898](https://github.com/opensearch-project/OpenSearch/pull/2898)) +* Changed JAVA_HOME to jdk-17 ([#2656](https://github.com/opensearch-project/OpenSearch/pull/2656)) +* Fix build-tools/reaper source/target compatibility to be JDK-11 ([#2596](https://github.com/opensearch-project/OpenSearch/pull/2596)) +* Adding workflow to create documentation related issues in documentation-website repo ([#2929](https://github.com/opensearch-project/OpenSearch/pull/2929)) +* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check ([#2779](https://github.com/opensearch-project/OpenSearch/pull/2779)) +* Replace blacklist in Gradle build environment configuration ([#2752](https://github.com/opensearch-project/OpenSearch/pull/2752)) +* Update ThirdPartyAuditTask to check for and list pointless exclusions. ([#2760](https://github.com/opensearch-project/OpenSearch/pull/2760)) +* Add Shadow jar publication to lang-painless module. 
([#2681](https://github.com/opensearch-project/OpenSearch/pull/2681)) +* Add 1.3.2 to main causing gradle check failures ([#2679](https://github.com/opensearch-project/OpenSearch/pull/2679)) +* Added jenkinsfile to run gradle check in OpenSearch ([#2166](https://github.com/opensearch-project/OpenSearch/pull/2166)) +* Gradle check retry ([#2638](https://github.com/opensearch-project/OpenSearch/pull/2638)) +* Override Default Distribution Download Url with Custom Distribution Url when it is passed from Plugin ([#2420](https://github.com/opensearch-project/OpenSearch/pull/2420)) + +### Documentation +* [Javadocs] add remaining internal classes and reenable missingJavadoc on server ([#3296](https://github.com/opensearch-project/OpenSearch/pull/3296)) +* [Javadocs] add to o.o.cluster ([#3170](https://github.com/opensearch-project/OpenSearch/pull/3170)) +* [Javadocs] add to o.o.bootstrap, cli, and client ([#3163](https://github.com/opensearch-project/OpenSearch/pull/3163)) +* [Javadocs] add to o.o.search.rescore,searchafter,slice, sort, and suggest ([#3264](https://github.com/opensearch-project/OpenSearch/pull/3264)) +* [Javadocs] add to o.o.transport ([#3220](https://github.com/opensearch-project/OpenSearch/pull/3220)) +* [Javadocs] add to o.o.action, index, and transport ([#3277](https://github.com/opensearch-project/OpenSearch/pull/3277)) +* [Javadocs] add to internal classes in o.o.http, indices, and search ([#3288](https://github.com/opensearch-project/OpenSearch/pull/3288)) +* [Javadocs] Add to remaining o.o.action classes ([#3182](https://github.com/opensearch-project/OpenSearch/pull/3182)) +* [Javadocs] add to o.o.rest, snapshots, and tasks packages ([#3219](https://github.com/opensearch-project/OpenSearch/pull/3219)) +* [Javadocs] add to o.o.common ([#3289](https://github.com/opensearch-project/OpenSearch/pull/3289)) +* [Javadocs] add to o.o.dfs,fetch,internal,lookup,profile, and query packages 
([#3261](https://github.com/opensearch-project/OpenSearch/pull/3261)) +* [Javadocs] add to o.o.search.aggs, builder, and collapse packages ([#3254](https://github.com/opensearch-project/OpenSearch/pull/3254)) +* [Javadocs] add to o.o.index and indices ([#3209](https://github.com/opensearch-project/OpenSearch/pull/3209)) +* [Javadocs] add to o.o.monitor,persistance,plugins,repo,script,threadpool,usage,watcher ([#3186](https://github.com/opensearch-project/OpenSearch/pull/3186)) +* [Javadocs] Add to o.o.disovery, env, gateway, http, ingest, lucene and node pkgs ([#3185](https://github.com/opensearch-project/OpenSearch/pull/3185)) +* [Javadocs] add to o.o.action.admin ([#3155](https://github.com/opensearch-project/OpenSearch/pull/3155)) +* [Javadocs] Add missing package-info.java files to server ([#3128](https://github.com/opensearch-project/OpenSearch/pull/3128)) + +### Maintenance +* Bump re2j from 1.1 to 1.6 in /plugins/repository-hdfs ([#3337](https://github.com/opensearch-project/OpenSearch/pull/3337)) +* Bump google-oauth-client from 1.33.1 to 1.33.2 in /plugins/discovery-gce ([#2828](https://github.com/opensearch-project/OpenSearch/pull/2828)) +* Bump protobuf-java-util from 3.19.3 to 3.20.0 in /plugins/repository-gcs ([#2834](https://github.com/opensearch-project/OpenSearch/pull/2834)) +* Bump cdi-api from 1.2 to 2.0 in /qa/wildfly ([#2835](https://github.com/opensearch-project/OpenSearch/pull/2835)) +* Bump azure-core from 1.26.0 to 1.27.0 in /plugins/repository-azure ([#2837](https://github.com/opensearch-project/OpenSearch/pull/2837)) +* Bump asm-analysis from 9.2 to 9.3 in /test/logger-usage ([#2829](https://github.com/opensearch-project/OpenSearch/pull/2829)) +* Bump protobuf-java from 3.19.3 to 3.20.0 in /plugins/repository-hdfs ([#2836](https://github.com/opensearch-project/OpenSearch/pull/2836)) +* Bump joni from 2.1.41 to 2.1.43 in /libs/grok ([#2832](https://github.com/opensearch-project/OpenSearch/pull/2832)) +* Bump geoip2 from 2.16.1 to 3.0.1 in 
/modules/ingest-geoip ([#2646](https://github.com/opensearch-project/OpenSearch/pull/2646)) +* Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic ([#2614](https://github.com/opensearch-project/OpenSearch/pull/2614)) +* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/repository-gcs ([#2616](https://github.com/opensearch-project/OpenSearch/pull/2616)) +* Bump jboss-annotations-api_1.2_spec in /qa/wildfly ([#2615](https://github.com/opensearch-project/OpenSearch/pull/2615)) +* Bump forbiddenapis in /buildSrc/src/testKit/thirdPartyAudit ([#2611](https://github.com/opensearch-project/OpenSearch/pull/2611)) +* Bump json-schema-validator from 1.0.67 to 1.0.68 in /buildSrc ([#2610](https://github.com/opensearch-project/OpenSearch/pull/2610)) +* Bump htrace-core4 from 4.1.0-incubating to 4.2.0-incubating in /plugins/repository-hdfs ([#2618](https://github.com/opensearch-project/OpenSearch/pull/2618)) +* Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless ([#2617](https://github.com/opensearch-project/OpenSearch/pull/2617)) +* Bump antlr4 from 4.5.3 to 4.9.3 in /modules/lang-painless ([#2537](https://github.com/opensearch-project/OpenSearch/pull/2537)) +* Bump commons-lang3 from 3.7 to 3.12.0 in /plugins/repository-hdfs ([#2552](https://github.com/opensearch-project/OpenSearch/pull/2552)) +* Bump gson from 2.8.9 to 2.9.0 in /plugins/repository-gcs ([#2550](https://github.com/opensearch-project/OpenSearch/pull/2550)) +* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/discovery-gce ([#2524](https://github.com/opensearch-project/OpenSearch/pull/2524)) +* Bump google-cloud-core from 1.93.3 to 2.5.10 in /plugins/repository-gcs ([#2536](https://github.com/opensearch-project/OpenSearch/pull/2536)) +* Bump wiremock-jre8-standalone from 2.23.2 to 2.32.0 in /buildSrc ([#2525](https://github.com/opensearch-project/OpenSearch/pull/2525)) +* Bump com.gradle.enterprise from 3.8.1 to 3.9 
([#2523](https://github.com/opensearch-project/OpenSearch/pull/2523)) +* Bump commons-io from 2.7 to 2.11.0 in /plugins/discovery-azure-classic ([#2527](https://github.com/opensearch-project/OpenSearch/pull/2527)) +* Bump asm-analysis from 7.1 to 9.2 in /test/logger-usage ([#2273](https://github.com/opensearch-project/OpenSearch/pull/2273)) +* Bump asm-commons from 7.2 to 9.2 in /modules/lang-painless ([#2234](https://github.com/opensearch-project/OpenSearch/pull/2234)) +* Bump jna from 5.5.0 to 5.10.0 in /buildSrc ([#2512](https://github.com/opensearch-project/OpenSearch/pull/2512)) +* Bump jsr305 from 1.3.9 to 3.0.2 in /plugins/discovery-gce ([#2137](https://github.com/opensearch-project/OpenSearch/pull/2137)) +* Bump json-schema-validator from 1.0.36 to 1.0.67 in /buildSrc ([#2454](https://github.com/opensearch-project/OpenSearch/pull/2454)) +* Bump woodstox-core from 6.1.1 to 6.2.8 in /plugins/repository-azure ([#2456](https://github.com/opensearch-project/OpenSearch/pull/2456)) +* Bump commons-lang3 from 3.4 to 3.12.0 in /plugins/repository-azure ([#2455](https://github.com/opensearch-project/OpenSearch/pull/2455)) +* Update azure-storage-blob to 12.15.0 ([#2774](https://github.com/opensearch-project/OpenSearch/pull/2774)) +* Move Jackson-databind to 2.13.2 ([#2548](https://github.com/opensearch-project/OpenSearch/pull/2548)) +* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473)) +* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546)) +* Sync maintainers with actual permissions. 
([#3127](https://github.com/opensearch-project/OpenSearch/pull/3127)) + +### Refactoring +* [Remove] remaining AllFieldMapper references ([#3007](https://github.com/opensearch-project/OpenSearch/pull/3007)) +* Clear up some confusing code in IndexShardHotSpotTests ([#1534](https://github.com/opensearch-project/OpenSearch/pull/1534)) +* [Remove] ShrinkAction, ShardUpgradeRequest, UpgradeSettingsRequestBuilder ([#3169](https://github.com/opensearch-project/OpenSearch/pull/3169)) +* [Rename] ESTestCase stragglers to OpenSearchTestCase ([#3053](https://github.com/opensearch-project/OpenSearch/pull/3053)) +* [Remove] MainResponse version override cluster setting ([#3031](https://github.com/opensearch-project/OpenSearch/pull/3031)) +* [Version] Don't spoof major for 3.0+ clusters ([#2722](https://github.com/opensearch-project/OpenSearch/pull/2722)) +* Centralize codes related to 'master_timeout' deprecation for eaiser removal - in CAT Nodes API ([#2670](https://github.com/opensearch-project/OpenSearch/pull/2670)) +* Rename reference to project OpenSearch was forked from ([#2483](https://github.com/opensearch-project/OpenSearch/pull/2483)) +* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421)) +* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396)) +* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452)) + +### Tests +* Add type mapping removal bwc tests for indexing, searching, snapshots ([#2901](https://github.com/opensearch-project/OpenSearch/pull/2901)) +* Removing SLM check in tests for OpenSearch versions ([#2604](https://github.com/opensearch-project/OpenSearch/pull/2604)) +* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531)) +* Use Hamcrest matchers and assertThat() in ReindexRenamedSettingTests 
([#2503](https://github.com/opensearch-project/OpenSearch/pull/2503)) +* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488)) +* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484)) From 72a65ae2e536dbf89e45130a828e12f6f11ece75 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Fri, 20 May 2022 16:29:51 -0500 Subject: [PATCH 17/75] [Upgrade] Lucene-9.2.0-snapshot-ba8c3a8 (#3416) Upgrades to latest snapshot of lucene 9.2.0 in preparation for GA release. Signed-off-by: Nicholas Walter Knize --- .../forbidden/opensearch-test-signatures.txt | 1 - buildSrc/version.properties | 2 +- .../opensearch/core/internal/io/IOUtilsTests.java | 11 ++++------- ...ucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...cene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...cene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...alysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...alysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...e-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...e-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...e-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...e-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + 
.../lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ne-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ne-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../java/org/opensearch/common/lucene/Lucene.java | 2 +- .../org/opensearch/index/codec/CodecService.java | 8 ++++---- .../codec/PerFieldMappingPostingFormatCodec.java | 4 ++-- .../fielddata/ordinals/GlobalOrdinalMapping.java | 4 ++++ .../index/fielddata/ordinals/MultiOrdinals.java | 5 +++++ .../search/aggregations/support/MissingValues.java | 10 ++++++++++ .../java/org/opensearch/index/codec/CodecTests.java | 12 ++++++------ .../index/engine/CompletionStatsCacheTests.java | 4 ++-- .../org/opensearch/search/MultiValueModeTests.java | 5 +++++ .../bucket/range/BinaryRangeAggregatorTests.java | 4 ++++ .../aggregations/support/IncludeExcludeTests.java | 4 ++++ 
.../aggregations/support/MissingValuesTests.java | 10 ++++++++++ .../indices/analysis/AnalysisFactoryTestCase.java | 1 + 60 files changed, 85 insertions(+), 46 deletions(-) create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 
100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 
delete mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 diff --git a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt index 03dead38bd8b4..43568b3209baf 100644 --- a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt @@ -19,7 +19,6 @@ com.carrotsearch.randomizedtesting.annotations.Seed @ Don't commit hardcoded see com.carrotsearch.randomizedtesting.annotations.Repeat @ Don't commit hardcoded repeats org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead -org.apache.lucene.tests.util.LuceneTestCase$Slow @ Don't write slow tests org.junit.Ignore @ Use AwaitsFix instead org.apache.lucene.tests.util.LuceneTestCase$Nightly @ We don't run nightly tests at this point! com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly tests at this point! 
diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 121f88dd0aac0..7a8a9531ebda8 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.2.0-snapshot-f4f1f70 +lucene = 9.2.0-snapshot-ba8c3a8 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java index f1c8642b73044..e1f3cb7520a7e 100644 --- a/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java @@ -40,7 +40,6 @@ import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; -import java.net.URI; import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.AccessDeniedException; @@ -172,10 +171,8 @@ public void runTestRm(final boolean exception) throws IOException { for (int i = 0; i < numberOfLocations; i++) { if (exception && randomBoolean()) { final Path location = createTempDir(); - final FileSystem fs = new AccessDeniedWhileDeletingFileSystem(location.getFileSystem()).getFileSystem( - URI.create("file:///") - ); - final Path wrapped = new FilterPath(location, fs); + final FilterFileSystemProvider ffsp = new AccessDeniedWhileDeletingFileSystem(location.getFileSystem()); + final Path wrapped = ffsp.wrapPath(location); locations[i] = wrapped.resolve(randomAlphaOfLength(8)); Files.createDirectory(locations[i]); locationsThrowingException.add(locations[i]); @@ -256,8 +253,8 @@ public FileChannel newFileChannel(final Path path, final Set codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene91Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene91Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene92Codec()); + 
codecs.put(BEST_COMPRESSION_CODEC, new Lucene92Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index 52e940a25ddd6..fd0c66983208a 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -36,7 +36,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene91.Lucene91Codec; +import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene91Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene92Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java index 8fc6eb1a74056..884e0d66ffd8d 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java @@ -112,4 +112,8 @@ public long cost() { return values.cost(); } + @Override + 
public long docValueCount() { + return values.docValueCount(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java index 6131bc33841b6..6e3f83690a872 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java @@ -229,5 +229,10 @@ public long nextOrd() throws IOException { public BytesRef lookupOrd(long ord) { return values.lookupOrd(ord); } + + @Override + public long docValueCount() { + return currentEndOffset - currentOffset; + } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index 6e0778f9a0a2d..179e4f18a1ea1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -310,6 +310,11 @@ public boolean advanceExact(int doc) throws IOException { return true; } + @Override + public long docValueCount() { + return values.docValueCount(); + } + @Override public String toString() { return "anon AbstractSortedDocValues of [" + super.toString() + "]"; @@ -340,6 +345,11 @@ public long getValueCount() { return 1 + values.getValueCount(); } + @Override + public long docValueCount() { + return values.docValueCount(); + } + @Override public long nextOrd() throws IOException { if (hasOrds) { diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 94b78da402b44..0275066f9af1b 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; 
import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene91.Lucene91Codec; +import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -65,21 +65,21 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene91Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene92Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService().codec("default"); - assertStoredFieldsCompressionEquals(Lucene91Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService().codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene91Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_COMPRESSION, codec); } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene91Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene92Codec.Mode expected, Codec actual) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(null); iwc.setCodec(actual); @@ -91,7 +91,7 @@ private void assertStoredFieldsCompressionEquals(Lucene91Codec.Mode expected, Co SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, 
Lucene91Codec.Mode.valueOf(v)); + assertEquals(expected, Lucene92Codec.Mode.valueOf(v)); ir.close(); dir.close(); } diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index 66b066b907100..340811352a203 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,7 +32,7 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene91.Lucene91Codec; +import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -70,7 +70,7 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - indexWriterConfig.setCodec(new Lucene91Codec() { + indexWriterConfig.setCodec(new Lucene92Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java index bfb4466e53e43..525621c02fd32 100644 --- a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java +++ b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java @@ -763,6 +763,11 @@ public BytesRef lookupOrd(long ord) { public long getValueCount() { return 1 << 20; } + + @Override + public long docValueCount() { + return array[doc].length; + } }; verifySortedSet(multiValues, numDocs); final FixedBitSet rootDocs = randomRootDocs(numDocs); diff 
--git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java index 9c2578a2378cc..ea4dc09e6a601 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java @@ -83,6 +83,10 @@ public long getValueCount() { return terms.length; } + @Override + public long docValueCount() { + return ords.length; + } } private void doTestSortedSetRangeLeafCollector(int maxNumValuesPerDoc) throws Exception { diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index d0995abd07f32..51f135ec0b56b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -98,6 +98,10 @@ public long getValueCount() { return 1; } + @Override + public long docValueCount() { + return 1; + } }; IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java index 598c1323fc13f..0eca61d825a2d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java @@ -155,6 +155,11 @@ public long nextOrd() { return NO_MORE_ORDS; } } + + @Override + public long docValueCount() { + return ords[doc].length; 
+ } }; final BytesRef existingMissing = RandomPicks.randomFrom(random(), values); @@ -257,6 +262,11 @@ public BytesRef lookupOrd(long ord) throws IOException { return values[Math.toIntExact(ord)]; } + @Override + public long docValueCount() { + throw new UnsupportedOperationException(); + } + @Override public long getValueCount() { return values.length; diff --git a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java index fd762289caddb..27f3312626e48 100644 --- a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java +++ b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java @@ -221,6 +221,7 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase { .put("spanishpluralstem", Void.class) // LUCENE-10352 .put("daitchmokotoffsoundex", Void.class) + .put("persianstem", Void.class) .immutableMap(); static final Map> KNOWN_CHARFILTERS = new MapBuilder>() From cd8259331f666fbcf850760155ce44d16713de50 Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Fri, 20 May 2022 15:29:56 -0700 Subject: [PATCH 18/75] Fix release notes for 2.0.0-rc1 version (#3418) This change removes some old commits from the 2.0.0-rc1 release notes. These commits were already released as part of 1.x releases. 
Add back some missing type removal commits to the 2.0.0 release notes Signed-off-by: Rabi Panda --- .../opensearch.release-notes-2.0.0-rc1.md | 640 +++--------------- .../opensearch.release-notes-2.0.0.md | 23 +- 2 files changed, 112 insertions(+), 551 deletions(-) diff --git a/release-notes/opensearch.release-notes-2.0.0-rc1.md b/release-notes/opensearch.release-notes-2.0.0-rc1.md index 5171424203c62..26a721d013bb6 100644 --- a/release-notes/opensearch.release-notes-2.0.0-rc1.md +++ b/release-notes/opensearch.release-notes-2.0.0-rc1.md @@ -45,63 +45,39 @@ #### Upgrades - -* [Upgrade] 1.2 BWC to Lucene 8.10.1 ([#1460](https://github.com/opensearch-project/OpenSearch/pull/1460)) -* [Upgrade] Lucene 9.1 release (#2560) ([#2565](https://github.com/opensearch-project/OpenSearch/pull/2565)) -* [Upgrade] Lucene 9.1.0-snapshot-ea989fe8f30 ([#2487](https://github.com/opensearch-project/OpenSearch/pull/2487)) -* [Upgrade] Lucene 9.0.0 release ([#1109](https://github.com/opensearch-project/OpenSearch/pull/1109)) -* Set target and source compatibility to 11, required by Lucene 9. ([#2407](https://github.com/opensearch-project/OpenSearch/pull/2407)) -* Upgrade to Lucene 8.10.1 ([#1440](https://github.com/opensearch-project/OpenSearch/pull/1440)) -* Upgrade to Lucene 8.9 ([#1080](https://github.com/opensearch-project/OpenSearch/pull/1080)) -* Update lucene version to 8.8.2 ([#557](https://github.com/opensearch-project/OpenSearch/pull/557)) -* Support Gradle 7. Fixing 'eclipse' plugin dependencies ([#1648](https://github.com/opensearch-project/OpenSearch/pull/1648)) -* Update to Gradle 7.3.3 ([#1803](https://github.com/opensearch-project/OpenSearch/pull/1803)) -* Support Gradle 7. More reliable tasks dependencies for Maven plugins publishing ([#1630](https://github.com/opensearch-project/OpenSearch/pull/1630)) -* Support Gradle 7. 
Fixing publishing to Maven Local for plugins ([#1624](https://github.com/opensearch-project/OpenSearch/pull/1624)) -* Support Gradle 7 ([#1609](https://github.com/opensearch-project/OpenSearch/pull/1609)) +* [Upgrade] Lucene 9.1 release ([#2560](https://github.com/opensearch-project/OpenSearch/pull/2560)) +* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) #### Deprecations - -* [Remove] Deprecated Synced Flush API ([#1761](https://github.com/opensearch-project/OpenSearch/pull/1761)) -* Remove deprecated search.remote settings ([#1870](https://github.com/opensearch-project/OpenSearch/pull/1870)) -* [Remove] Default Mapping ([#2151](https://github.com/opensearch-project/OpenSearch/pull/2151)) -* Remove Deprecated SimpleFS ([#1639](https://github.com/opensearch-project/OpenSearch/pull/1639)) -* [Remove] Deprecated Zen1 Discovery ([#1216](https://github.com/opensearch-project/OpenSearch/pull/1216)) -* Remove LegacyESVersion.V_6_8_x constants ([#1869](https://github.com/opensearch-project/OpenSearch/pull/1869)) -* Remove LegacyESVersion.V_6_7_x constants ([#1807](https://github.com/opensearch-project/OpenSearch/pull/1807)) -* Remove LegacyESVersion.V_6_6_x constants ([#1804](https://github.com/opensearch-project/OpenSearch/pull/1804)) -* Remove LegacyESVersion.V_6_5_x constants ([#1794](https://github.com/opensearch-project/OpenSearch/pull/1794)) -* Remove deprecated transport client ([#1781](https://github.com/opensearch-project/OpenSearch/pull/1781)) -* Remove LegacyVersion.v6.4.x constants ([#1787](https://github.com/opensearch-project/OpenSearch/pull/1787)) -* Remove LegacyESVersion.V_6_3_x constants ([#1691](https://github.com/opensearch-project/OpenSearch/pull/1691)) -* Remove LegacyESVersion.V_6_2_x constants ([#1686](https://github.com/opensearch-project/OpenSearch/pull/1686)) -* Remove LegacyESVersion.V_6_1_x constants ([#1681](https://github.com/opensearch-project/OpenSearch/pull/1681)) -* Remove 6.0.* 
version constants ([#1658](https://github.com/opensearch-project/OpenSearch/pull/1658)) -* [Remove] 6x skip from yml ([#2153](https://github.com/opensearch-project/OpenSearch/pull/2153)) +* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) +* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) +* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) ### Security Fixes - -* [CVE] Upgrade dependencies for Azure related plugins to mitigate CVEs ([#688](https://github.com/opensearch-project/OpenSearch/pull/688)) -* [CVE] Upgrade dependencies to mitigate CVEs ([#657](https://github.com/opensearch-project/OpenSearch/pull/657)) -* [CVE-2018-11765] Upgrade hadoop dependencies for hdfs plugin ([#654](https://github.com/opensearch-project/OpenSearch/pull/654)) -* [CVE-2020-7692] Upgrade google-oauth clients for goolge cloud plugins ([#662](https://github.com/opensearch-project/OpenSearch/pull/662)) -* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 (#2599) ([#2647](https://github.com/opensearch-project/OpenSearch/pull/2647)) -* Remove old ES libraries used in reindex due to CVEs ([#1359](https://github.com/opensearch-project/OpenSearch/pull/1359)) +* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 ([#2599](https://github.com/opensearch-project/OpenSearch/pull/2599)) ### Features/Enhancements - -* Allowing custom folder name for plugin installation ([#848](https://github.com/opensearch-project/OpenSearch/pull/848)) -* A CLI tool to assist during an upgrade to OpenSearch. 
([#846](https://github.com/opensearch-project/OpenSearch/pull/846)) -* Enable adding experimental features through sandbox modules ([#691](https://github.com/opensearch-project/OpenSearch/pull/691)) -* Rank feature - unknown field linear ([#983](https://github.com/opensearch-project/OpenSearch/pull/983)) -* [FEATURE] Add OPENSEARCH_JAVA_HOME env to override JAVA_HOME ([#2001](https://github.com/opensearch-project/OpenSearch/pull/2001)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs (#2682) ([#2891](https://github.com/opensearch-project/OpenSearch/pull/2891)) -* Change deprecation message for API parameter value 'master_node' of parameter 'metric' (#2880) ([#2882](https://github.com/opensearch-project/OpenSearch/pull/2882)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs (#2680) ([#2871](https://github.com/opensearch-project/OpenSearch/pull/2871)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs (#2678) ([#2867](https://github.com/opensearch-project/OpenSearch/pull/2867)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs (#2660) ([#2771](https://github.com/opensearch-project/OpenSearch/pull/2771)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs (#2658) ([#2755](https://github.com/opensearch-project/OpenSearch/pull/2755)) -* [Backport 2.0] Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Remove deprecation warning of using REST API request parameter 'master_timeout' ([#2920](https://github.com/opensearch-project/OpenSearch/pull/2920)) +* Add deprecated API for creating History Ops Snapshot from translog 
([#2886](https://github.com/opensearch-project/OpenSearch/pull/2886)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs ([#2682](https://github.com/opensearch-project/OpenSearch/pull/2682)) +* Change deprecation message for API parameter value 'master_node' of parameter 'metric' ([#2880](https://github.com/opensearch-project/OpenSearch/pull/2880)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs ([#2680](https://github.com/opensearch-project/OpenSearch/pull/2680)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs ([#2678](https://github.com/opensearch-project/OpenSearch/pull/2678)) +* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal ([#2863](https://github.com/opensearch-project/OpenSearch/pull/2863)) +* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* Remove endpoint_suffix dependency on account key ([#2485](https://github.com/opensearch-project/OpenSearch/pull/2485)) +* Replace remaining 'blacklist' with 'denylist' in internal class and method names ([#2784](https://github.com/opensearch-project/OpenSearch/pull/2784)) +* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe ([#2641](https://github.com/opensearch-project/OpenSearch/pull/2641)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs ([#2660](https://github.com/opensearch-project/OpenSearch/pull/2660)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs ([#2658](https://github.com/opensearch-project/OpenSearch/pull/2658)) +* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of request parameter 
'master_timeout' ([#2702](https://github.com/opensearch-project/OpenSearch/pull/2702)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Add mapping method back referenced in other repos ([#2636](https://github.com/opensearch-project/OpenSearch/pull/2636)) +* Replaced "master" terminology in Log message ([#2575](https://github.com/opensearch-project/OpenSearch/pull/2575)) +* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) +* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) +* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) +* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) * Add 'cluster_manager_node' into ClusterState Metric as an alternative to 'master_node' ([#2415](https://github.com/opensearch-project/OpenSearch/pull/2415)) * Add a new node role 'cluster_manager' as the alternative for 'master' role and deprecate 'master' role ([#2424](https://github.com/opensearch-project/OpenSearch/pull/2424)) * Replace 'master' with 'cluster_manager' in 'GET Cat Nodes' API ([#2441](https://github.com/opensearch-project/OpenSearch/pull/2441)) @@ -109,520 +85,84 @@ * Add a field discovered_cluster_manager in get cluster health api ([#2437](https://github.com/opensearch-project/OpenSearch/pull/2437)) * Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT Nodes API ([#2435](https://github.com/opensearch-project/OpenSearch/pull/2435)) * Add a new REST API endpoint 'GET _cat/cluster_manager' as the replacement of 'GET _cat/master' 
([#2404](https://github.com/opensearch-project/OpenSearch/pull/2404)) -* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) -* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) -* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) -* Replace remaining 'blacklist' with 'denylist' in internal class and method names (#2784) ([#2813](https://github.com/opensearch-project/OpenSearch/pull/2813)) -* Centralize codes related to 'master_timeout' deprecation for eaiser removal - in CAT Nodes API (#2670) ([#2696](https://github.com/opensearch-project/OpenSearch/pull/2696)) -* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' (#2702) ([#2741](https://github.com/opensearch-project/OpenSearch/pull/2741)) -* Replaced "master" terminology in Log message (#2575) ([#2594](https://github.com/opensearch-project/OpenSearch/pull/2594)) -* Deprecate setting 'reindex.remote.whitelist' and introduce the alternative setting 'reindex.remote.allowlist' ([#2221](https://github.com/opensearch-project/OpenSearch/pull/2221)) -* Replace exclusionary words whitelist and blacklist in the places that won't impact backwards compatibility ([#2178](https://github.com/opensearch-project/OpenSearch/pull/2178)) -* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) -* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) -* 
Add '_name' field support to score functions and provide it back in explanation response ([#2244](https://github.com/opensearch-project/OpenSearch/pull/2244)) -* Add support of SOCKS proxies for S3 repository ([#2160](https://github.com/opensearch-project/OpenSearch/pull/2160)) -* Case Insensitive Support in Regexp Interval ([#2237](https://github.com/opensearch-project/OpenSearch/pull/2237)) -* Support unordered non-overlapping intervals ([#2103](https://github.com/opensearch-project/OpenSearch/pull/2103)) -* Support _first and _last parameter for missing bucket ordering in composite aggregation ([#1942](https://github.com/opensearch-project/OpenSearch/pull/1942)) -* Concurrent Searching (Experimental): modify profiling implementation to support concurrent data collection ([#1673](https://github.com/opensearch-project/OpenSearch/pull/1673)) -* Changes to support retrieval of operations from translog based on specified range ([#1210](https://github.com/opensearch-project/OpenSearch/pull/1210)) -* Support for translog pruning based on retention leases ([#1038](https://github.com/opensearch-project/OpenSearch/pull/1038)) -* Support for bwc tests for plugins ([#1051](https://github.com/opensearch-project/OpenSearch/pull/1051)) -* Part 1: Support for cancel_after_timeinterval parameter in search and msearch request ([#986](https://github.com/opensearch-project/OpenSearch/pull/986)) -* alt bash path support ([#1047](https://github.com/opensearch-project/OpenSearch/pull/1047)) -* Support Data Streams in OpenSearch ([#690](https://github.com/opensearch-project/OpenSearch/pull/690)) -* Support for Heap after GC stats (correction after backport to 1.2.0) ([#1315](https://github.com/opensearch-project/OpenSearch/pull/1315)) -* Support for Heap after GC stats ([#1265](https://github.com/opensearch-project/OpenSearch/pull/1265)) -* Add deprecated API for creating History Ops Snapshot from translog (#2886) ([#2917](https://github.com/opensearch-project/OpenSearch/pull/2917)) -* 
Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) * Add default for EnginePlugin.getEngineFactory ([#2419](https://github.com/opensearch-project/OpenSearch/pull/2419)) -* Add valuesField in PercentilesAggregationBuilder streamInput constructor ([#2308](https://github.com/opensearch-project/OpenSearch/pull/2308)) -* Reintroduce negative epoch_millis #1991 ([#2232](https://github.com/opensearch-project/OpenSearch/pull/2232)) -* Install plugin command help ([#2193](https://github.com/opensearch-project/OpenSearch/pull/2193)) -* Always use Lucene index in peer recovery ([#2077](https://github.com/opensearch-project/OpenSearch/pull/2077)) -* Add Factory to enable Lucene ConcatenateGraphFilter (#1278) ([#2152](https://github.com/opensearch-project/OpenSearch/pull/2152)) -* Add proxy settings for GCS repository ([#2096](https://github.com/opensearch-project/OpenSearch/pull/2096)) -* Add proxy username and password settings for Azure repository ([#2098](https://github.com/opensearch-project/OpenSearch/pull/2098)) -* Add regexp interval source ([#1917](https://github.com/opensearch-project/OpenSearch/pull/1917)) -* Delay the request size calculation until required by the indexing pressure framework ([#1592](https://github.com/opensearch-project/OpenSearch/pull/1592)) -* Enabling Sort Optimization to make use of Lucene ([#1974](https://github.com/opensearch-project/OpenSearch/pull/1974)) -* Add max_expansions option to wildcard interval ([#1916](https://github.com/opensearch-project/OpenSearch/pull/1916)) -* Prefer adaptive replica selection over awareness attribute based routing ([#1107](https://github.com/opensearch-project/OpenSearch/pull/1107)) -* Prioritize primary shard movement during shard allocation ([#1445](https://github.com/opensearch-project/OpenSearch/pull/1445)) -* Enforce soft deletes ([#1903](https://github.com/opensearch-project/OpenSearch/pull/1903)) -* Make SortBuilders 
pluggable ([#1856](https://github.com/opensearch-project/OpenSearch/pull/1856)) -* Use try-with-resources with MockLogAppender ([#1595](https://github.com/opensearch-project/OpenSearch/pull/1595)) -* Bridging the gap in network overhead measurement in the profiler ([#1360](https://github.com/opensearch-project/OpenSearch/pull/1360)) -* Adding a cancelled field to tell if a cancellable task is cancelled ([#1732](https://github.com/opensearch-project/OpenSearch/pull/1732)) -* Avoid logging duplicate deprecation warnings multiple times ([#1660](https://github.com/opensearch-project/OpenSearch/pull/1660)) -* Added more detailed logging for SSLHandshakeException ([#1602](https://github.com/opensearch-project/OpenSearch/pull/1602)) -* Rename field_masking_span to span_field_masking ([#1606](https://github.com/opensearch-project/OpenSearch/pull/1606)) -* Giving informative error messages for double slashes in API call URLs ([#1568](https://github.com/opensearch-project/OpenSearch/pull/1568)) -* Renaming slave to replica in filebeat-6.0.template.json file. ([#1569](https://github.com/opensearch-project/OpenSearch/pull/1569)) -* Enable RestHighLevel-Client to set parameter require_alias for bulk index and reindex requests ([#1533](https://github.com/opensearch-project/OpenSearch/pull/1533)) -* Improve leader node-left logging to indicate timeout/coordination state rejection ([#1584](https://github.com/opensearch-project/OpenSearch/pull/1584)) -* Added logic to allow {dot} files on startup ([#1437](https://github.com/opensearch-project/OpenSearch/pull/1437)) -* remove codeQL warning about implicit narrowing conversion in compound assignment ([#1403](https://github.com/opensearch-project/OpenSearch/pull/1403)) -* Make TranslogDeletionPolicy abstract for extension ([#1456](https://github.com/opensearch-project/OpenSearch/pull/1456)) -* Remove deprecated settings and logic for translog pruning by retention lease. 
([#1416](https://github.com/opensearch-project/OpenSearch/pull/1416)) -* Adjust CodeCache size to eliminate JVM warnings (and crashes) ([#1426](https://github.com/opensearch-project/OpenSearch/pull/1426)) -* Add extension point for custom TranslogDeletionPolicy in EnginePlugin. ([#1404](https://github.com/opensearch-project/OpenSearch/pull/1404)) -* Update node attribute check to version update (1.2) check for shard indexing pressure serialization. ([#1395](https://github.com/opensearch-project/OpenSearch/pull/1395)) -* Add EngineConfig extensions to EnginePlugin ([#1387](https://github.com/opensearch-project/OpenSearch/pull/1387)) -* Add Shard Level Indexing Pressure ([#1336](https://github.com/opensearch-project/OpenSearch/pull/1336)) -* Making GeneralScriptException an Implementation of OpensearchWrapperException ([#1066](https://github.com/opensearch-project/OpenSearch/pull/1066)) -* Handle shard over allocation during partial zone/rack or independent node failures ([#1149](https://github.com/opensearch-project/OpenSearch/pull/1149)) -* Introduce FS Health HEALTHY threshold to fail stuck node ([#1167](https://github.com/opensearch-project/OpenSearch/pull/1167)) -* Drop mocksocket in favour of custom security manager checks (tests only) ([#1205](https://github.com/opensearch-project/OpenSearch/pull/1205)) -* Improving the Grok circular reference check to prevent stack overflow ([#1079](https://github.com/opensearch-project/OpenSearch/pull/1079)) -* Introduce replaceRoutes() method and 2 new constructors to RestHandler.java ([#947](https://github.com/opensearch-project/OpenSearch/pull/947)) -* Fail fast when BytesRestResponse ctor throws exception ([#923](https://github.com/opensearch-project/OpenSearch/pull/923)) -* Restricting logs permissions ([#966](https://github.com/opensearch-project/OpenSearch/pull/966)) -* Avoid override of routes() in BaseRestHandler to respect the default behavior defined in RestHandler 
([#889](https://github.com/opensearch-project/OpenSearch/pull/889)) -* Replacing docs-beta links with /docs ([#957](https://github.com/opensearch-project/OpenSearch/pull/957)) -* Adding broken links checker ([#877](https://github.com/opensearch-project/OpenSearch/pull/877)) -* Pass interceptor to super constructor ([#876](https://github.com/opensearch-project/OpenSearch/pull/876)) -* Add 'tagline' back to MainResponse in server that was removed in PR #427 ([#913](https://github.com/opensearch-project/OpenSearch/pull/913)) -* Remove distribution from main response in compatibility mode ([#898](https://github.com/opensearch-project/OpenSearch/pull/898)) -* Replace metadata keys in OpenSearchException during serialization and deserialization ([#905](https://github.com/opensearch-project/OpenSearch/pull/905)) -* Add cluster setting to spoof version number returned from MainResponse ([#847](https://github.com/opensearch-project/OpenSearch/pull/847)) -* Add URL for lucene snapshots ([#858](https://github.com/opensearch-project/OpenSearch/pull/858)) -* Decouple throttling limits for new and old indices. ([#778](https://github.com/opensearch-project/OpenSearch/pull/778)) -* Verbose plugin not found exception ([#849](https://github.com/opensearch-project/OpenSearch/pull/849)) -* Enable BWC checks ([#796](https://github.com/opensearch-project/OpenSearch/pull/796)) -* Add a method to use fallback setting to set the memory size ([#755](https://github.com/opensearch-project/OpenSearch/pull/755)) -* An allocation constraint mechanism, that de-prioritizes nodes from getting picked for allocation if they breach certain constraints ([#680](https://github.com/opensearch-project/OpenSearch/pull/680)) -* Create group settings with fallback. 
([#743](https://github.com/opensearch-project/OpenSearch/pull/743)) -* Add timeout on cat/stats API ([#552](https://github.com/opensearch-project/OpenSearch/pull/552)) -* Make allocation decisions at node level first for pending task optimi… ([#534](https://github.com/opensearch-project/OpenSearch/pull/534)) -* Decouples primaries_recoveries limit from concurrent recoveries limit. ([#546](https://github.com/opensearch-project/OpenSearch/pull/546)) -* Merging javadoc feature branch changes to main ([#715](https://github.com/opensearch-project/OpenSearch/pull/715)) -* Add read_only block argument to opensearch-node unsafe-bootstrap command ([#599](https://github.com/opensearch-project/OpenSearch/pull/599)) -* Catch runtime exceptions to make class loader race conditions easier to debug. ([#608](https://github.com/opensearch-project/OpenSearch/pull/608)) -* Remove URL content from Reindex error response ([#630](https://github.com/opensearch-project/OpenSearch/pull/630)) -* Standardize int, long, double and float Setting constructors. ([#665](https://github.com/opensearch-project/OpenSearch/pull/665)) -* Add Remote Reindex SPI extension ([#547](https://github.com/opensearch-project/OpenSearch/pull/547)) -* Make default number of shards configurable ([#625](https://github.com/opensearch-project/OpenSearch/pull/625)) -* Converted all .asciidoc to .md. ([#658](https://github.com/opensearch-project/OpenSearch/pull/658)) -* Make -Dtests.output=always actually work. 
([#648](https://github.com/opensearch-project/OpenSearch/pull/648)) -* Handle inefficiencies while fetching the delayed unassigned shards during cluster health ([#588](https://github.com/opensearch-project/OpenSearch/pull/588)) -* Replace elastic.co with opensearch.org ([#611](https://github.com/opensearch-project/OpenSearch/pull/611)) -* Speedup lang-painless tests ([#605](https://github.com/opensearch-project/OpenSearch/pull/605)) -* Speedup snapshot stale indices delete ([#613](https://github.com/opensearch-project/OpenSearch/pull/613)) -* Speed ups to test suite and precommit tasks. ([#580](https://github.com/opensearch-project/OpenSearch/pull/580)) -* [Versioning] Rebase to OpenSearch version 1.0.0 ([#555](https://github.com/opensearch-project/OpenSearch/pull/555)) -* Prevent setting maxParallelForks=0 on single-cpu machines ([#558](https://github.com/opensearch-project/OpenSearch/pull/558)) -* Use alternate example data in OpenSearch test cases. ([#454](https://github.com/opensearch-project/OpenSearch/pull/454)) ### Bug Fixes - -* Adding a null pointer check to fix index_prefix query (#2879) ([#2903](https://github.com/opensearch-project/OpenSearch/pull/2903)) -* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check (#2779) ([#2794](https://github.com/opensearch-project/OpenSearch/pull/2794)) -* [Bug] Fix InboundDecoder version compat check (#2570) ([#2573](https://github.com/opensearch-project/OpenSearch/pull/2573)) -* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484)) -* Fixing the --release flag usage for javac (#2343) ([#2352](https://github.com/opensearch-project/OpenSearch/pull/2352)) -* Fix flaky test case - string profiler via global ordinals ([#2226](https://github.com/opensearch-project/OpenSearch/pull/2226)) -* Fixing the indentation in version.yml ([#2163](https://github.com/opensearch-project/OpenSearch/pull/2163)) -* Fixing 
org.opensearch.monitor.os.OsProbeTests::testLogWarnCpuMessageOnlyOnes when CGroups are not available ([#2101](https://github.com/opensearch-project/OpenSearch/pull/2101)) -* Fix integration tests failure ([#2067](https://github.com/opensearch-project/OpenSearch/pull/2067)) -* Another attempt to fix o.o.transport.netty4.OpenSearchLoggingHandlerIT fails w/ stack overflow ([#2051](https://github.com/opensearch-project/OpenSearch/pull/2051)) -* Fix AssertionError message ([#2044](https://github.com/opensearch-project/OpenSearch/pull/2044)) -* Fix composite aggregation failed test cases introduce by missing_order parameter (#1942) ([#2005](https://github.com/opensearch-project/OpenSearch/pull/2005)) -* Fixing allocation filters to persist existing state on settings update ([#1718](https://github.com/opensearch-project/OpenSearch/pull/1718)) -* Fix more failing tests as a result of renaming ([#457](https://github.com/opensearch-project/OpenSearch/pull/457)) -* Fix failing rest-api-spec tests as part of renaming. ([#451](https://github.com/opensearch-project/OpenSearch/pull/451)) -* Fix multiple failing server tests. ([#453](https://github.com/opensearch-project/OpenSearch/pull/453)) -* [TEST] Fix FsHealthServiceTest by increasing the timeout period before checking the FS health after restoring the FS status ([#1813](https://github.com/opensearch-project/OpenSearch/pull/1813)) -* [BUG] Wait for outstanding requests to complete in LastSuccessfulSett… ([#1939](https://github.com/opensearch-project/OpenSearch/pull/1939)) -* [Bug] Wait for outstanding requests to complete ([#1925](https://github.com/opensearch-project/OpenSearch/pull/1925)) -* [BUG] Serialization bugs can cause node drops ([#1885](https://github.com/opensearch-project/OpenSearch/pull/1885)) -* [BUG] Docker distribution builds are failing. 
Switching to http://vault.centos.org ([#2024](https://github.com/opensearch-project/OpenSearch/pull/2024)) -* [BUG] SymbolicLinkPreservingUntarTransform fails on Windows ([#1433](https://github.com/opensearch-project/OpenSearch/pull/1433)) -* [BUG] ConcurrentSnapshotsIT#testAssertMultipleSnapshotsAndPrimaryFailOver fails intermittently ([#1311](https://github.com/opensearch-project/OpenSearch/pull/1311)) -* [Bug] Fix InstallPluginCommand to use proper key signatures ([#1233](https://github.com/opensearch-project/OpenSearch/pull/1233)) -* [Bug] Fix mixed cluster support for OpenSearch 2+ ([#1191](https://github.com/opensearch-project/OpenSearch/pull/1191)) -* [BUG] Fix cat.health test failures in pre 1.0.0 mixed cluster test ([#928](https://github.com/opensearch-project/OpenSearch/pull/928)) -* [BUG] Fix versioning issues discovered through version bump ([#884](https://github.com/opensearch-project/OpenSearch/pull/884)) -* [BUG] fix MainResponse to spoof version number for legacy clients ([#708](https://github.com/opensearch-project/OpenSearch/pull/708)) -* [Bug] Fix gradle build on Windows failing from a recent change ([#758](https://github.com/opensearch-project/OpenSearch/pull/758)) -* Apply fix for health API response to distinguish no master ([#656](https://github.com/opensearch-project/OpenSearch/pull/656)) -* Rename translog pruning setting to CCR specific setting and addressed Bug in the test case ([#1243](https://github.com/opensearch-project/OpenSearch/pull/1243)) -* fix gradle check fail due to renameing -min in #1094 ([#1289](https://github.com/opensearch-project/OpenSearch/pull/1289)) -* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues (#3048) ([#3050](https://github.com/opensearch-project/OpenSearch/pull/3050)) -* [Backport] [2.0] Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) -* [Bug] Change 1.0.0 
version check in PluginInfo -* TEST BUG: MergeSchedulerSettingsTests fails always on small machines ([#559](https://github.com/opensearch-project/OpenSearch/pull/559)) -* Fix bwcVersions after bumping version 1.3.1 ([#2532](https://github.com/opensearch-project/OpenSearch/pull/2532)) -* Fixing bwcVersions and bwc builds (#2430) - adding 1.4.0 into main bwcVersions -* Fixing invalid Java code example in JavaDoc ([#2008](https://github.com/opensearch-project/OpenSearch/pull/2008)) -* Fixing org.opensearch.common.network.InetAddressesTests.testForStringIPv6WithScopeIdInput ([#1913](https://github.com/opensearch-project/OpenSearch/pull/1913)) -* Fix o.o.transport.netty4.OpenSearchLoggingHandlerIT stack overflow test failure ([#1900](https://github.com/opensearch-project/OpenSearch/pull/1900)) -* Fix verifyVersions gradle task and cleanup bwcVersions ([#1878](https://github.com/opensearch-project/OpenSearch/pull/1878)) -* Attempt to fix :test:fixtures:s3-fixture:composeUp fails due to HTTP connection issue ([#1866](https://github.com/opensearch-project/OpenSearch/pull/1866)) -* Fixing build failures after Flavor Serialization backport ([#1867](https://github.com/opensearch-project/OpenSearch/pull/1867)) -* Fixing auto backport workflow ([#1845](https://github.com/opensearch-project/OpenSearch/pull/1845)) -* Upgrade and fix link checker to 1.2. 
([#1811](https://github.com/opensearch-project/OpenSearch/pull/1811)) -* link checker fix - only run on opensearch-project/OpenSearch ([#1719](https://github.com/opensearch-project/OpenSearch/pull/1719)) -* Fixing .gitattributes for binary content, removing *.class files ([#1717](https://github.com/opensearch-project/OpenSearch/pull/1717)) -* Fix unit test testFailsHealthOnHungIOBeyondHealthyTimeout() by incresing the max waiting time before assertion ([#1692](https://github.com/opensearch-project/OpenSearch/pull/1692)) -* Fixing bwc test for repository-multi-version ([#1441](https://github.com/opensearch-project/OpenSearch/pull/1441)) -* Fixing support for a multi-node cluster via "gradle run" ([#1455](https://github.com/opensearch-project/OpenSearch/pull/1455)) -* Fix windows build (mostly) ([#1412](https://github.com/opensearch-project/OpenSearch/pull/1412)) -* Fixing post merge 3rd party audit issues ([#1384](https://github.com/opensearch-project/OpenSearch/pull/1384)) -* Minor fix for the flaky test to reduce concurrency (#1361) ([#1364](https://github.com/opensearch-project/OpenSearch/pull/1364)) -* Fixing org.opensearch.repositories.azure.AzureBlobContainerRetriesTests and org.opensearch.action.admin.cluster.node.stats.NodeStatsTests ([#1390](https://github.com/opensearch-project/OpenSearch/pull/1390)) -* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) -* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) -* fixed broken anchor link. ([#436](https://github.com/opensearch-project/OpenSearch/pull/436)) -* [Rename] fix painless test ([#446](https://github.com/opensearch-project/OpenSearch/pull/446)) -* Fix name of the log appender. 
([#445](https://github.com/opensearch-project/OpenSearch/pull/445)) -* [Rename] Fixing lingering rename and ./gradlew run will start ([#443](https://github.com/opensearch-project/OpenSearch/pull/443)) -* Fixed copyright to OpenSearch ([#1175](https://github.com/opensearch-project/OpenSearch/pull/1175)) -* Fix defects in code-coverage.gralde to generate code coverage report properly ([#1214](https://github.com/opensearch-project/OpenSearch/pull/1214)) -* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) -* Fix Snapshot pattern in DistributionDownloader. ([#916](https://github.com/opensearch-project/OpenSearch/pull/916)) -* Fix stragglers from renaming to OpenSearch work. ([#483](https://github.com/opensearch-project/OpenSearch/pull/483)) -* Fix rename issues and failing repository-hdfs tests. ([#518](https://github.com/opensearch-project/OpenSearch/pull/518)) -* Fix build-tools integ test failures. ([#465](https://github.com/opensearch-project/OpenSearch/pull/465)) -* Fix a few more renaming issues. 
([#464](https://github.com/opensearch-project/OpenSearch/pull/464)) -* Fix org.opensearch.index.reindex.ReindexRestClientSslTests#testClientSucceedsWithCertificateAuthorities - javax.net.ssl.SSLPeerUnverifiedException ([#1212](https://github.com/opensearch-project/OpenSearch/pull/1212)) -* Fix opensearch-env always sources the environment from hardcoded file ([#875](https://github.com/opensearch-project/OpenSearch/pull/875)) -* Fix resource leak issues suggested by Amazon CodeGuru ([#816](https://github.com/opensearch-project/OpenSearch/pull/816)) -* Fix arm architecture translation issue ([#809](https://github.com/opensearch-project/OpenSearch/pull/809)) -* Fix Javadoc errors in `client/sniffer` ([#802](https://github.com/opensearch-project/OpenSearch/pull/802)) -* [BWC] fix mixedCluster and rolling upgrades ([#775](https://github.com/opensearch-project/OpenSearch/pull/775)) -* Fix #649: Properly escape @ in JavaDoc. ([#651](https://github.com/opensearch-project/OpenSearch/pull/651)) -* Fix snapshot deletion task getting stuck in the event of exceptions ([#629](https://github.com/opensearch-project/OpenSearch/pull/629)) -* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) -* Use the correct domain to fix failing integration tests. 
([#519](https://github.com/opensearch-project/OpenSearch/pull/519)) -* Change OpenSearch Version to OpenSearch version to fix failed test case org.opensearch.plugins.ListPluginsCommandTests.testPluginWithNativeController ([#460](https://github.com/opensearch-project/OpenSearch/pull/460)) -* [Rename] Fix env variables and old es maven repo ([#439](https://github.com/opensearch-project/OpenSearch/pull/439)) +* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues ([#3048](https://github.com/opensearch-project/OpenSearch/pull/3048)) +* Adding a null pointer check to fix index_prefix query ([#2879](https://github.com/opensearch-project/OpenSearch/pull/2879)) +* Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) +* Fix InboundDecoder version compat check ([#2570](https://github.com/opensearch-project/OpenSearch/pull/2570)) * ignore_malformed parameter on ip_range data_type throws mapper_parsing_exception ([#2429](https://github.com/opensearch-project/OpenSearch/pull/2429)) * Discrepancy in result from _validate/query API and actual query validity ([#2416](https://github.com/opensearch-project/OpenSearch/pull/2416)) -* MapperService has to be passed in as null for EnginePlugins CodecService constructor ([#2177](https://github.com/opensearch-project/OpenSearch/pull/2177)) -* Adding shards per node constraint for predictability to testClusterGr… ([#2110](https://github.com/opensearch-project/OpenSearch/pull/2110)) -* Mapping update for “date_range” field type is not idempotent ([#2094](https://github.com/opensearch-project/OpenSearch/pull/2094)) -* Use Version.compareMajor instead of using equals operator ([#1876](https://github.com/opensearch-project/OpenSearch/pull/1876)) -* Execution failed for task ':test:fixtures:azure/s3/hdfs/gcs-fixture:composeDown' ([#1824](https://github.com/opensearch-project/OpenSearch/pull/1824)) -* 
RestIntegTestTask fails because of missed log4j-core dependency ([#1815](https://github.com/opensearch-project/OpenSearch/pull/1815)) -* Start MockLogAppender before adding to static context ([#1587](https://github.com/opensearch-project/OpenSearch/pull/1587)) -* Use a non-default port for upgrade-cli unit tests ([#1512](https://github.com/opensearch-project/OpenSearch/pull/1512)) -* Close first engine instance before creating second ([#1457](https://github.com/opensearch-project/OpenSearch/pull/1457)) -* Avoid crashing on using the index.lifecycle.name in the API body ([#1060](https://github.com/opensearch-project/OpenSearch/pull/1060)) -* Max scroll limit breach to throw a OpenSearchRejectedExecutionException ([#1054](https://github.com/opensearch-project/OpenSearch/pull/1054)) -* Extract excludes into a file, fix the link checker by adding http://site.icu-project.org/. ([#1189](https://github.com/opensearch-project/OpenSearch/pull/1189)) -* Prevent /_cat/master from getting tripped by the CB ([#1036](https://github.com/opensearch-project/OpenSearch/pull/1036)) -* Excluding missed broken links from link checker ([#1010](https://github.com/opensearch-project/OpenSearch/pull/1010)) -* Excluding links from link checker ([#995](https://github.com/opensearch-project/OpenSearch/pull/995)) -* Version checks are incorrectly returning versions < 1.0.0. 
([#797](https://github.com/opensearch-project/OpenSearch/pull/797)) -* Make `:server:check` pass successfully ([#471](https://github.com/opensearch-project/OpenSearch/pull/471)) -* Correct the regex pattern for class path in testDieWithDignity() ([#466](https://github.com/opensearch-project/OpenSearch/pull/466)) -* Change ESLoggingHandler to OpenSearchLoggingHandler to pass failing test case org.opensearch.transport.netty4.OpenSearchLoggingHandlerIT.testLoggingHandler due to renaming ([#461](https://github.com/opensearch-project/OpenSearch/pull/461)) - -### Infrastructure - -* Using Github App token to trigger CI for version increment PRs ([#2157](https://github.com/opensearch-project/OpenSearch/pull/2157)) -* Using Github App to trigger CI for auto-backport ([#2071](https://github.com/opensearch-project/OpenSearch/pull/2071)) -* Remove precommit and wrapper validation workflows for gradle as we migrate it to internal CI tools ([#452](https://github.com/opensearch-project/OpenSearch/pull/452)) -* Updated the url for docker distribution ([#2325](https://github.com/opensearch-project/OpenSearch/pull/2325)) -* Recommend Docker 3.6.0. ([#1427](https://github.com/opensearch-project/OpenSearch/pull/1427)) -* docker build: use OSS `log4j2.properties` ([#878](https://github.com/opensearch-project/OpenSearch/pull/878)) -* [DOCKER] add apt update to test fixture krb5kdc ([#565](https://github.com/opensearch-project/OpenSearch/pull/565)) -* Cleanup `default` flavor stragglers from docker distributions. 
([#481](https://github.com/opensearch-project/OpenSearch/pull/481)) -* Replace blacklist in Gradle build environment configuration (#2752) ([#2781](https://github.com/opensearch-project/OpenSearch/pull/2781)) -* Add 1.3.2 to main causing gradle check failures (#2679) ([#2684](https://github.com/opensearch-project/OpenSearch/pull/2684)) -* Added jenkinsfile to run gradle check in OpenSearch (#2166) ([#2629](https://github.com/opensearch-project/OpenSearch/pull/2629)) -* Gradle check retry (#2638) ([#2661](https://github.com/opensearch-project/OpenSearch/pull/2661)) -* Move Gradle wrapper and precommit checks into OpenSearch repo. ([#1664](https://github.com/opensearch-project/OpenSearch/pull/1664)) -* Enabling missingJavadoc validation in gradle check ([#721](https://github.com/opensearch-project/OpenSearch/pull/721)) -* Removing Jenkinsfile (not used), replaced by opensearch-build/jenkins/opensearch/Jenkinsfile ([#1408](https://github.com/opensearch-project/OpenSearch/pull/1408)) -* Changed JAVA_HOME to jdk-17 (#2656) ([#2671](https://github.com/opensearch-project/OpenSearch/pull/2671)) -* Adding support for JDK17 and removing JDK8 ([#2025](https://github.com/opensearch-project/OpenSearch/pull/2025)) -* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. ([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668)) -* Better JDK-18 EA (and beyond) support of SecurityManager ([#1750](https://github.com/opensearch-project/OpenSearch/pull/1750)) -* Support JDK 18 EA builds ([#1710](https://github.com/opensearch-project/OpenSearch/pull/1710)) -* Adding 1.2.2 ([#1731](https://github.com/opensearch-project/OpenSearch/pull/1731)) -* Add version 1.2.1. ([#1701](https://github.com/opensearch-project/OpenSearch/pull/1701)) -* Add version 1.2.3. ([#1760](https://github.com/opensearch-project/OpenSearch/pull/1760)) -* Modernize and consolidate JDKs usage across all stages of the build. 
Use JDK-17 as bundled JDK distribution to run tests ([#1358](https://github.com/opensearch-project/OpenSearch/pull/1358)) -* Fix build-tools/reaper source/target compatibility to be JDK-11 (#2596) ([#2606](https://github.com/opensearch-project/OpenSearch/pull/2606)) -* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. ([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668)) -* Remove Github DCO action since DCO runs via Github App now ([#2317](https://github.com/opensearch-project/OpenSearch/pull/2317)) -* Adding Github action for auto backport PR creation ([#1600](https://github.com/opensearch-project/OpenSearch/pull/1600)) -* Add a whitesource unified agent file and update the config ([#1540](https://github.com/opensearch-project/OpenSearch/pull/1540)) -* Run link checker GitHub action on schedule. ([#1221](https://github.com/opensearch-project/OpenSearch/pull/1221)) -* Clarify opensearch.version to not include -SNAPSHOT. ([#1186](https://github.com/opensearch-project/OpenSearch/pull/1186)) -* Move pr template to .github as default since folder design required manually added to url ([#458](https://github.com/opensearch-project/OpenSearch/pull/458)) -* changed label from low hanging fruit to help wanted. added link to filter for that label. 
Added link to forum ([#435](https://github.com/opensearch-project/OpenSearch/pull/435)) -* adding in untriaged label to features ([#1419](https://github.com/opensearch-project/OpenSearch/pull/1419)) -* Run spotless and exclude checkstyle on plugins module ([#1417](https://github.com/opensearch-project/OpenSearch/pull/1417)) -* Adding spotless support for subprojects under :test ([#1464](https://github.com/opensearch-project/OpenSearch/pull/1464)) -* Run spotless and exclude checkstyle on rest-api-spec module ([#1462](https://github.com/opensearch-project/OpenSearch/pull/1462)) -* Run spotless and exclude checkstyle on modules module ([#1442](https://github.com/opensearch-project/OpenSearch/pull/1442)) -* Enabling spotless, disabling checkstyle check on plugins ([#1488](https://github.com/opensearch-project/OpenSearch/pull/1488)) -* Cleanup for Checkstyle ([#1370](https://github.com/opensearch-project/OpenSearch/pull/1370)) -* Run spotless and exclude checkstyle on libs module ([#1428](https://github.com/opensearch-project/OpenSearch/pull/1428)) -* Run spotless and exclude checkstyle on client module ([#1392](https://github.com/opensearch-project/OpenSearch/pull/1392)) -* Run spotless and exclude checkstyle on server module ([#1380](https://github.com/opensearch-project/OpenSearch/pull/1380)) -* Change whitesource integration to scan on 1.x branch ([#1786](https://github.com/opensearch-project/OpenSearch/pull/1786)) -* Add .whitesource configuration file ([#1525](https://github.com/opensearch-project/OpenSearch/pull/1525)) -* add codeowners file ([#1530](https://github.com/opensearch-project/OpenSearch/pull/1530)) -* Updated links for linkchecker ([#1539](https://github.com/opensearch-project/OpenSearch/pull/1539)) -* Updating dependabot open pr limits ([#1875](https://github.com/opensearch-project/OpenSearch/pull/1875)) -* Updating .gitattributes for additional file types ([#1727](https://github.com/opensearch-project/OpenSearch/pull/1727)) -* Updating the Ivy 
repository to point to real url for Releases ([#602](https://github.com/opensearch-project/OpenSearch/pull/602)) -* build: introduce support for reproducible builds ([#1995](https://github.com/opensearch-project/OpenSearch/pull/1995)) -* Add support to generate code coverage report with JaCoCo ([#971](https://github.com/opensearch-project/OpenSearch/pull/971)) -* Support running elasticsearch-oss distribution in test cluster for BWC ([#764](https://github.com/opensearch-project/OpenSearch/pull/764)) -* FreeBSD Java support ([#1014](https://github.com/opensearch-project/OpenSearch/pull/1014)) +### Build & Infrastructure +* Gradle custom java zippublish plugin ([#2988](https://github.com/opensearch-project/OpenSearch/pull/2988)) +* Use G1GC on JDK11+ ([#2964](https://github.com/opensearch-project/OpenSearch/pull/2964)) +* Removed java11 source folders since JDK-11 is the baseline now ([#2898](https://github.com/opensearch-project/OpenSearch/pull/2898)) +* Changed JAVA_HOME to jdk-17 ([#2656](https://github.com/opensearch-project/OpenSearch/pull/2656)) +* Fix build-tools/reaper source/target compatibility to be JDK-11 ([#2596](https://github.com/opensearch-project/OpenSearch/pull/2596)) +* Adding workflow to create documentation related issues in documentation-website repo ([#2929](https://github.com/opensearch-project/OpenSearch/pull/2929)) +* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check ([#2779](https://github.com/opensearch-project/OpenSearch/pull/2779)) +* Replace blacklist in Gradle build environment configuration ([#2752](https://github.com/opensearch-project/OpenSearch/pull/2752)) +* Update ThirdPartyAuditTask to check for and list pointless exclusions. ([#2760](https://github.com/opensearch-project/OpenSearch/pull/2760)) +* Add Shadow jar publication to lang-painless module. 
([#2681](https://github.com/opensearch-project/OpenSearch/pull/2681)) +* Add 1.3.2 to main causing gradle check failures ([#2679](https://github.com/opensearch-project/OpenSearch/pull/2679)) +* Added jenkinsfile to run gradle check in OpenSearch ([#2166](https://github.com/opensearch-project/OpenSearch/pull/2166)) +* Gradle check retry ([#2638](https://github.com/opensearch-project/OpenSearch/pull/2638)) * Override Default Distribution Download Url with Custom Distribution Url when it is passed from Plugin ([#2420](https://github.com/opensearch-project/OpenSearch/pull/2420)) -* Restore Java 8 compatibility for build tools. (#2300) ([#2321](https://github.com/opensearch-project/OpenSearch/pull/2321)) -* Revert "Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url" ([#2256](https://github.com/opensearch-project/OpenSearch/pull/2256)) -* Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url ([#2086](https://github.com/opensearch-project/OpenSearch/pull/2086)) -* added config file to git issue template directory to disable blank issue creation ([#2158](https://github.com/opensearch-project/OpenSearch/pull/2158)) -* Add JetBrains Gateway setup details ([#1944](https://github.com/opensearch-project/OpenSearch/pull/1944)) -* Adding workflow to auto delete backport merged branches from backport workflow ([#2050](https://github.com/opensearch-project/OpenSearch/pull/2050)) -* Add IssueNavigationLink ([#1964](https://github.com/opensearch-project/OpenSearch/pull/1964)) -* Using pull_request_target in place of pull_request ([#1952](https://github.com/opensearch-project/OpenSearch/pull/1952)) -* Using custom branch name for auto backporting PRs ([#1862](https://github.com/opensearch-project/OpenSearch/pull/1862)) -* Added help to build distributions in docs ([#1898](https://github.com/opensearch-project/OpenSearch/pull/1898)) -* Auto-increment next development iteration. 
([#1816](https://github.com/opensearch-project/OpenSearch/pull/1816)) -* Catching Maintainers up for Q4 2021 new additions/removals ([#1841](https://github.com/opensearch-project/OpenSearch/pull/1841)) -* Added .gitattributes to manage end-of-line checks for Windows/*nix systems ([#1638](https://github.com/opensearch-project/OpenSearch/pull/1638)) -* Add staged version 1.1.1 ([#1506](https://github.com/opensearch-project/OpenSearch/pull/1506)) -* [BWC] Diable BWC tests until branch versions are synced ([#1508](https://github.com/opensearch-project/OpenSearch/pull/1508)) -* Moving DCO to workflows ([#1458](https://github.com/opensearch-project/OpenSearch/pull/1458)) -* changed work-in-progress language ([#1275](https://github.com/opensearch-project/OpenSearch/pull/1275)) -* Removed beta from new issues. ([#1071](https://github.com/opensearch-project/OpenSearch/pull/1071)) -* Include sources and javadoc artifacts while publishing to a Maven repository ([#1049](https://github.com/opensearch-project/OpenSearch/pull/1049)) -* Replaced custom built JNA by official JNA distribution. 
([#1003](https://github.com/opensearch-project/OpenSearch/pull/1003)) -* [Version] Don't spoof major for 3.0+ clusters (#2722) ([#2749](https://github.com/opensearch-project/OpenSearch/pull/2749)) -* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546)) -* Add Version.V_1_2_5 constant -* add 1.2.5 to bwcVersions -* [Deprecate] Setting explicit version on analysis component ([#1978](https://github.com/opensearch-project/OpenSearch/pull/1978)) -* [Deprecate] index.merge.policy.max_merge_at_once_explicit ([#1981](https://github.com/opensearch-project/OpenSearch/pull/1981)) -* [plugin] repository-azure: add configuration settings for connect/write/response/read timeouts ([#1789](https://github.com/opensearch-project/OpenSearch/pull/1789)) -* [plugin] repository-azure is not working properly hangs on basic operations (#1740) ([#1749](https://github.com/opensearch-project/OpenSearch/pull/1749)) -* [main] Add staged version 1.3.0 for bwc ([#1510](https://github.com/opensearch-project/OpenSearch/pull/1510)) -* [repository-azure] plugin should use Azure Storage SDK v12 for Java ([#1302](https://github.com/opensearch-project/OpenSearch/pull/1302)) -* Allow building on FreeBSD ([#1091](https://github.com/opensearch-project/OpenSearch/pull/1091)) -* initial commit to add in a dependabot.yml file ([#1353](https://github.com/opensearch-project/OpenSearch/pull/1353)) -* Rename artifact produced by the build to include -min ([#1251](https://github.com/opensearch-project/OpenSearch/pull/1251)) -* [Version] Add 1.2 for BWC testing ([#1241](https://github.com/opensearch-project/OpenSearch/pull/1241)) -* Exclude failing links from plugins/modules ([#1223](https://github.com/opensearch-project/OpenSearch/pull/1223)) -* Kept the original constructor for PluginInfo to maintain bwc ([#1206](https://github.com/opensearch-project/OpenSearch/pull/1206)) -* [Version] Increment main to 2.0 ([#1192](https://github.com/opensearch-project/OpenSearch/pull/1192)) -* Added 
all icu-project.org websites to the link checker exclusions. ([#1201](https://github.com/opensearch-project/OpenSearch/pull/1201)) -* Add 1.0.1 revision ([#1152](https://github.com/opensearch-project/OpenSearch/pull/1152)) -* distribution/packages: Fix filename format for deb archives ([#621](https://github.com/opensearch-project/OpenSearch/pull/621)) -* [Versioning] Fix Version.fromString logic for legacy version ([#604](https://github.com/opensearch-project/OpenSearch/pull/604)) -* Rename the distribution used in test clusters. ([#603](https://github.com/opensearch-project/OpenSearch/pull/603)) -* clean up rpm artifact naming ([#590](https://github.com/opensearch-project/OpenSearch/pull/590)) -* changed to point to open issues rather than the project board -* Update Plugin Signing Key ([#512](https://github.com/opensearch-project/OpenSearch/pull/512)) -* Use OpenSearch artifacts URL for official plugin installation. ([#490](https://github.com/opensearch-project/OpenSearch/pull/490)) -* Perform more renaming to OpenSearch. 
([#470](https://github.com/opensearch-project/OpenSearch/pull/470)) -* Adding instructions on License and DCO practices to PR template ([#462](https://github.com/opensearch-project/OpenSearch/pull/462)) -* Remove lingering instances of Default distribution in favour of Oss ([#440](https://github.com/opensearch-project/OpenSearch/pull/440)) -* Validation for official plugins for upgrade tool ([#973](https://github.com/opensearch-project/OpenSearch/pull/973)) -* Lower build requirement from Java 14+ to Java 11+ ([#940](https://github.com/opensearch-project/OpenSearch/pull/940)) -* Add Snapshot maven repository ([#829](https://github.com/opensearch-project/OpenSearch/pull/829)) -* distribution/packages: Fix RPM architecture name for 64-bit x86 ([#620](https://github.com/opensearch-project/OpenSearch/pull/620)) -* Update issue template with multiple labels ([#668](https://github.com/opensearch-project/OpenSearch/pull/668)) -* Renaming CPU architecture to have consistent naming ([#612](https://github.com/opensearch-project/OpenSearch/pull/612)) - -### Documentation - -* Adding workflow to create documentation related issues in documentation-website repo (#2929) ([#2976](https://github.com/opensearch-project/OpenSearch/pull/2976)) -* Updating auto backport documentation ([#1620](https://github.com/opensearch-project/OpenSearch/pull/1620)) -* Updating README and CONTRIBUTING guide to get ready for beta1 release. ([#672](https://github.com/opensearch-project/OpenSearch/pull/672)) -* Update instructions on debugging OpenSearch. 
([#689](https://github.com/opensearch-project/OpenSearch/pull/689)) -* Fixing typo in TESTING.md ([#1849](https://github.com/opensearch-project/OpenSearch/pull/1849)) -* Fix JavaDoc typo in XContentBuilder ([#1739](https://github.com/opensearch-project/OpenSearch/pull/1739)) -* Update Readme ([#433](https://github.com/opensearch-project/OpenSearch/pull/433)) -* Fix DCO CLI example in CONTRIBUTING.md ([#576](https://github.com/opensearch-project/OpenSearch/pull/576)) -* Change comment to point to DEVELOPER_GUIDE.md ([#1415](https://github.com/opensearch-project/OpenSearch/pull/1415)) -* [typos] typos in DEVELOPER_GUIDE.md ([#1381](https://github.com/opensearch-project/OpenSearch/pull/1381)) -* Adding Security Reporting Instructions in README.md file Signed-off-by: Rishikesh Reddy Pasham rishireddy1159@gmail.com ([#1326](https://github.com/opensearch-project/OpenSearch/pull/1326)) -* Add guide for generating code coverage report in TESTING.md ([#1264](https://github.com/opensearch-project/OpenSearch/pull/1264)) -* Added Eclipse import instructions to DEVELOPER_GUIDE.md ([#1215](https://github.com/opensearch-project/OpenSearch/pull/1215)) -* Update/maintainers.md ([#723](https://github.com/opensearch-project/OpenSearch/pull/723)) -* Added a link to the maintainer file in contribution guides ([#589](https://github.com/opensearch-project/OpenSearch/pull/589)) -* Updated READMEs on releasing, maintaining, admins and security. ([#853](https://github.com/opensearch-project/OpenSearch/pull/853)) -* adding components to DEVELOPER_GUIDE ([#1200](https://github.com/opensearch-project/OpenSearch/pull/1200)) -* Update developer guide reference to download JDK 14 ([#1452](https://github.com/opensearch-project/OpenSearch/pull/1452)) -* [WIP] Developer guide updates ([#595](https://github.com/opensearch-project/OpenSearch/pull/595)) -* Update README with getting started ([#549](https://github.com/opensearch-project/OpenSearch/pull/549)) -* Update Developers Guide. 
([#522](https://github.com/opensearch-project/OpenSearch/pull/522)) -* Update LICENSE.txt -* [License] Add SPDX and OpenSearch Modification license header ([#509](https://github.com/opensearch-project/OpenSearch/pull/509)) -* [License] Update SPDX License Header ([#510](https://github.com/opensearch-project/OpenSearch/pull/510)) -* Cleanup TESTING and DEVELOPER_GUIDE markdowns ([#946](https://github.com/opensearch-project/OpenSearch/pull/946)) -* Add 1.3.0 release notes in main ([#2489](https://github.com/opensearch-project/OpenSearch/pull/2489)) -* Add release notes for 1.2.4 ([#1934](https://github.com/opensearch-project/OpenSearch/pull/1934)) -* Added release notes for 1.2.3. ([#1791](https://github.com/opensearch-project/OpenSearch/pull/1791)) -* Adding release notes for 1.2.2 ([#1730](https://github.com/opensearch-project/OpenSearch/pull/1730)) -* Adding release notes for 1.2.1 ([#1725](https://github.com/opensearch-project/OpenSearch/pull/1725)) -* Add 1.2 release notes and correct 1.1 release notes. ([#1581](https://github.com/opensearch-project/OpenSearch/pull/1581)) -* Generate release notes for 1.1 ([#1230](https://github.com/opensearch-project/OpenSearch/pull/1230)) -* Update release note for GA 1.0 with new commits and removes #547 ([#953](https://github.com/opensearch-project/OpenSearch/pull/953)) -* Adding release notes for 1.0.0 ([#885](https://github.com/opensearch-project/OpenSearch/pull/885)) -* Adding release notes for 1.0.0-rc1 ([#794](https://github.com/opensearch-project/OpenSearch/pull/794)) -* Modified TESTING instructions to clarify use of testing classes ([#1930](https://github.com/opensearch-project/OpenSearch/pull/1930)) -* Clarify JDK requirement in the developer guide ([#1153](https://github.com/opensearch-project/OpenSearch/pull/1153)) -* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473)) -* Expand SearchPlugin javadocs. 
([#1909](https://github.com/opensearch-project/OpenSearch/pull/1909)) -* Linked the formatting setting file ([#1860](https://github.com/opensearch-project/OpenSearch/pull/1860)) -* Add more instructions how to install/configure git secrets ([#1202](https://github.com/opensearch-project/OpenSearch/pull/1202)) -* Add themed logo to README ([#988](https://github.com/opensearch-project/OpenSearch/pull/988)) -* Replace Elasticsearch docs links in scripts ([#994](https://github.com/opensearch-project/OpenSearch/pull/994)) -* Cleaned up developer guide, added TOC. ([#572](https://github.com/opensearch-project/OpenSearch/pull/572)) -* Document running individual tests. ([#741](https://github.com/opensearch-project/OpenSearch/pull/741)) -* [License] Add SPDX License Header to security policies ([#531](https://github.com/opensearch-project/OpenSearch/pull/531)) -* Added a maintainers file ([#523](https://github.com/opensearch-project/OpenSearch/pull/523)) -* Remove extra greater-thans from README ([#527](https://github.com/opensearch-project/OpenSearch/pull/527)) -* [Rename] Update Vagrantfile ([#515](https://github.com/opensearch-project/OpenSearch/pull/515)) -* [README] Remove stale information ([#513](https://github.com/opensearch-project/OpenSearch/pull/513)) -* [Rename] Change license header and copyright notice to SPDX ([#437](https://github.com/opensearch-project/OpenSearch/pull/437)) - ### Maintenance - -* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe (#2641) ([#2653](https://github.com/opensearch-project/OpenSearch/pull/2653)) -* Update azure-storage-blob to 12.15.0: fix test flakiness (#2795) ([#2799](https://github.com/opensearch-project/OpenSearch/pull/2799)) -* Update azure-storage-blob to 12.15.0 (#2774) ([#2778](https://github.com/opensearch-project/OpenSearch/pull/2778)) -* Update the BWC versions (post 1.x backport) ([#2390](https://github.com/opensearch-project/OpenSearch/pull/2390)) -* Update 
bwc verions for (#2237) ([#2248](https://github.com/opensearch-project/OpenSearch/pull/2248)) -* Update #2103 BWC Versions ([#2173](https://github.com/opensearch-project/OpenSearch/pull/2173)) -* Update bundled JDK distribution to 17.0.2+8 ([#2007](https://github.com/opensearch-project/OpenSearch/pull/2007)) -* Update Mockito to 4.3.1 ([#1973](https://github.com/opensearch-project/OpenSearch/pull/1973)) -* Update protobuf-java to 3.19.3 ([#1945](https://github.com/opensearch-project/OpenSearch/pull/1945)) -* Update Netty to 4.1.73.Final ([#1936](https://github.com/opensearch-project/OpenSearch/pull/1936)) -* Update FIPS API libraries of Bouncy Castle ([#1853](https://github.com/opensearch-project/OpenSearch/pull/1853)) -* Update junit to 4.13.1 ([#1837](https://github.com/opensearch-project/OpenSearch/pull/1837)) -* Update Mockito to 4.2.x ([#1830](https://github.com/opensearch-project/OpenSearch/pull/1830)) -* Upgrading bouncycastle to 1.70 ([#1832](https://github.com/opensearch-project/OpenSearch/pull/1832)) -* Updating Netty to 4.1.72.Final ([#1831](https://github.com/opensearch-project/OpenSearch/pull/1831)) -* Update to log4j 2.17.1 ([#1820](https://github.com/opensearch-project/OpenSearch/pull/1820)) -* Update to log4j 2.17.0 ([#1771](https://github.com/opensearch-project/OpenSearch/pull/1771)) -* [repository-azure] Update to the latest Azure Storage SDK v12, remove privileged runnable wrapper in favor of access helper ([#1521](https://github.com/opensearch-project/OpenSearch/pull/1521)) -* Update bundled JDK distribution to 17.0.1+12 ([#1476](https://github.com/opensearch-project/OpenSearch/pull/1476)) -* Upgrading netty version to 4.1.69.Final ([#1363](https://github.com/opensearch-project/OpenSearch/pull/1363)) -* Modernize and consolidate JDKs usage across all stages of the build. 
Update JDK-14 requirement, switch to JDK-17 instead ([#1368](https://github.com/opensearch-project/OpenSearch/pull/1368)) -* Upgrade hadoop dependencies for hdfs plugin ([#1335](https://github.com/opensearch-project/OpenSearch/pull/1335)) -* Replace securemock with mock-maker (test support), update Mockito to 3.12.4 ([#1332](https://github.com/opensearch-project/OpenSearch/pull/1332)) -* Update Jackson to 2.12.5 ([#1247](https://github.com/opensearch-project/OpenSearch/pull/1247)) -* Update DistributionDownloader to support fetching arm64 bundles. ([#929](https://github.com/opensearch-project/OpenSearch/pull/929)) -* Update favicon for OpenSearch ([#932](https://github.com/opensearch-project/OpenSearch/pull/932)) -* Update DistributionDownloader to fetch snapshots and staging bundles. ([#904](https://github.com/opensearch-project/OpenSearch/pull/904)) -* Version bump for 1.1 release ([#772](https://github.com/opensearch-project/OpenSearch/pull/772)) -* update external library 'pdfbox' version to 2.0.24 to reduce vulnerability ([#883](https://github.com/opensearch-project/OpenSearch/pull/883)) -* Update dependencies for ingest-attachment plugin. ([#666](https://github.com/opensearch-project/OpenSearch/pull/666)) -* Update hadoop-minicluster version for test fixture. ([#645](https://github.com/opensearch-project/OpenSearch/pull/645)) -* Update remote repo for BWC checks. ([#482](https://github.com/opensearch-project/OpenSearch/pull/482)) -* Update year and developer info in generated POMs. 
([#444](https://github.com/opensearch-project/OpenSearch/pull/444)) -* Refresh OpenSearch nodes version in cluster state after upgrade ([#865](https://github.com/opensearch-project/OpenSearch/pull/865)) -* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) -* Upgrade to log4j 2.16.0 ([#1721](https://github.com/opensearch-project/OpenSearch/pull/1721)) -* Upgrade to logj4 2.15.0 ([#1698](https://github.com/opensearch-project/OpenSearch/pull/1698)) -* Updating Log4j to 2.11.2 ([#1696](https://github.com/opensearch-project/OpenSearch/pull/1696)) -* Upgrade dependency ([#1571](https://github.com/opensearch-project/OpenSearch/pull/1571)) -* Upgrade apache commons-compress to 1.21 ([#1197](https://github.com/opensearch-project/OpenSearch/pull/1197)) -* Removed java11 source folders since JDK-11 is the baseline now (#2898) ([#2953](https://github.com/opensearch-project/OpenSearch/pull/2953)) -* [Remove] MainResponse version override cluster setting (#3031) ([#3033](https://github.com/opensearch-project/OpenSearch/pull/3033)) -* [Remove] remaining AllFieldMapper references (#3007) ([#3010](https://github.com/opensearch-project/OpenSearch/pull/3010)) -* [2.x] Remove deprecation warning of using REST API request parameter 'master_timeout' (#2920) ([#2931](https://github.com/opensearch-project/OpenSearch/pull/2931)) -* [Rename] ESTestCase stragglers to OpenSearchTestCase (#3053) ([#3064](https://github.com/opensearch-project/OpenSearch/pull/3064)) -* Use G1GC on JDK11+ (#2964) ([#2970](https://github.com/opensearch-project/OpenSearch/pull/2970)) -* Remove endpoint_suffix dependency on account key (#2485) ([#2808](https://github.com/opensearch-project/OpenSearch/pull/2808)) -* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) -* Upgrading Shadow plugin to 7.1.2 ([#2033](https://github.com/opensearch-project/OpenSearch/pull/2033)) -* Upgrading Jackson-Databind 
version ([#1982](https://github.com/opensearch-project/OpenSearch/pull/1982)) -* Upgrading commons-codec in hdfs-fixture and cleaning up dependencies in repository-hdfs ([#1603](https://github.com/opensearch-project/OpenSearch/pull/1603)) -* Upgrading gson to 2.8.9 ([#1541](https://github.com/opensearch-project/OpenSearch/pull/1541)) -* Upgrading dependencies ([#1491](https://github.com/opensearch-project/OpenSearch/pull/1491)) -* Upgrading dependencies in hdfs plugin ([#1466](https://github.com/opensearch-project/OpenSearch/pull/1466)) -* Upgrading mockito version to make it consistent across the repo ([#1410](https://github.com/opensearch-project/OpenSearch/pull/1410)) -* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal (#2863) ([#2865](https://github.com/opensearch-project/OpenSearch/pull/2865)) -* Update ThirdPartyAuditTask to check for and list pointless exclusions. (#2760) ([#2765](https://github.com/opensearch-project/OpenSearch/pull/2765)) -* Add Shadow jar publication to lang-painless module. 
(#2681) ([#2712](https://github.com/opensearch-project/OpenSearch/pull/2712)) -* Add mapping method back referenced in other repos (#2636) ([#2649](https://github.com/opensearch-project/OpenSearch/pull/2649)) +* Bump google-oauth-client from 1.33.1 to 1.33.2 in /plugins/discovery-gce ([#2828](https://github.com/opensearch-project/OpenSearch/pull/2828)) +* Bump protobuf-java-util from 3.19.3 to 3.20.0 in /plugins/repository-gcs ([#2834](https://github.com/opensearch-project/OpenSearch/pull/2834)) +* Bump cdi-api from 1.2 to 2.0 in /qa/wildfly ([#2835](https://github.com/opensearch-project/OpenSearch/pull/2835)) +* Bump azure-core from 1.26.0 to 1.27.0 in /plugins/repository-azure ([#2837](https://github.com/opensearch-project/OpenSearch/pull/2837)) +* Bump asm-analysis from 9.2 to 9.3 in /test/logger-usage ([#2829](https://github.com/opensearch-project/OpenSearch/pull/2829)) +* Bump protobuf-java from 3.19.3 to 3.20.0 in /plugins/repository-hdfs ([#2836](https://github.com/opensearch-project/OpenSearch/pull/2836)) +* Bump joni from 2.1.41 to 2.1.43 in /libs/grok ([#2832](https://github.com/opensearch-project/OpenSearch/pull/2832)) +* Bump geoip2 from 2.16.1 to 3.0.1 in /modules/ingest-geoip ([#2646](https://github.com/opensearch-project/OpenSearch/pull/2646)) +* Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic ([#2614](https://github.com/opensearch-project/OpenSearch/pull/2614)) +* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/repository-gcs ([#2616](https://github.com/opensearch-project/OpenSearch/pull/2616)) +* Bump jboss-annotations-api_1.2_spec in /qa/wildfly ([#2615](https://github.com/opensearch-project/OpenSearch/pull/2615)) +* Bump forbiddenapis in /buildSrc/src/testKit/thirdPartyAudit ([#2611](https://github.com/opensearch-project/OpenSearch/pull/2611)) +* Bump json-schema-validator from 1.0.67 to 1.0.68 in /buildSrc ([#2610](https://github.com/opensearch-project/OpenSearch/pull/2610)) +* Bump htrace-core4 from 
4.1.0-incubating to 4.2.0-incubating in /plugins/repository-hdfs ([#2618](https://github.com/opensearch-project/OpenSearch/pull/2618)) +* Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless ([#2617](https://github.com/opensearch-project/OpenSearch/pull/2617)) +* Bump antlr4 from 4.5.3 to 4.9.3 in /modules/lang-painless ([#2537](https://github.com/opensearch-project/OpenSearch/pull/2537)) +* Bump commons-lang3 from 3.7 to 3.12.0 in /plugins/repository-hdfs ([#2552](https://github.com/opensearch-project/OpenSearch/pull/2552)) +* Bump gson from 2.8.9 to 2.9.0 in /plugins/repository-gcs ([#2550](https://github.com/opensearch-project/OpenSearch/pull/2550)) +* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/discovery-gce ([#2524](https://github.com/opensearch-project/OpenSearch/pull/2524)) +* Bump google-cloud-core from 1.93.3 to 2.5.10 in /plugins/repository-gcs ([#2536](https://github.com/opensearch-project/OpenSearch/pull/2536)) +* Bump wiremock-jre8-standalone from 2.23.2 to 2.32.0 in /buildSrc ([#2525](https://github.com/opensearch-project/OpenSearch/pull/2525)) +* Bump com.gradle.enterprise from 3.8.1 to 3.9 ([#2523](https://github.com/opensearch-project/OpenSearch/pull/2523)) +* Bump commons-io from 2.7 to 2.11.0 in /plugins/discovery-azure-classic ([#2527](https://github.com/opensearch-project/OpenSearch/pull/2527)) +* Bump asm-analysis from 7.1 to 9.2 in /test/logger-usage ([#2273](https://github.com/opensearch-project/OpenSearch/pull/2273)) +* Bump asm-commons from 7.2 to 9.2 in /modules/lang-painless ([#2234](https://github.com/opensearch-project/OpenSearch/pull/2234)) +* Bump jna from 5.5.0 to 5.10.0 in /buildSrc ([#2512](https://github.com/opensearch-project/OpenSearch/pull/2512)) +* Bump jsr305 from 1.3.9 to 3.0.2 in /plugins/discovery-gce ([#2137](https://github.com/opensearch-project/OpenSearch/pull/2137)) +* Bump json-schema-validator from 1.0.36 to 1.0.67 in /buildSrc ([#2454](https://github.com/opensearch-project/OpenSearch/pull/2454)) 
+* Bump woodstox-core from 6.1.1 to 6.2.8 in /plugins/repository-azure ([#2456](https://github.com/opensearch-project/OpenSearch/pull/2456)) +* Bump commons-lang3 from 3.4 to 3.12.0 in /plugins/repository-azure ([#2455](https://github.com/opensearch-project/OpenSearch/pull/2455)) +* Update azure-storage-blob to 12.15.0 ([#2774](https://github.com/opensearch-project/OpenSearch/pull/2774)) * Move Jackson-databind to 2.13.2 ([#2548](https://github.com/opensearch-project/OpenSearch/pull/2548)) -* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531)) -* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488)) -* [Remove] TrimUnsafeCommit logic for legacy 6.x indexes ([#2225](https://github.com/opensearch-project/OpenSearch/pull/2225)) -* Adjust main version after backport to 1.x ([#2147](https://github.com/opensearch-project/OpenSearch/pull/2147)) -* [Remove] CircuitBreaker Accounting ([#2056](https://github.com/opensearch-project/OpenSearch/pull/2056)) -* [Remove] Segment memory estimation and tracking ([#2029](https://github.com/opensearch-project/OpenSearch/pull/2029)) -* [Remove] index.merge.policy.max_merge_at_once_explicit ([#1988](https://github.com/opensearch-project/OpenSearch/pull/1988)) -* [Remove] Setting explicit version on analysis component ([#1986](https://github.com/opensearch-project/OpenSearch/pull/1986)) -* Wildcard max_expansion version check update ([#1980](https://github.com/opensearch-project/OpenSearch/pull/1980)) -* Removing lingering transportclient ([#1955](https://github.com/opensearch-project/OpenSearch/pull/1955)) -* [BWC] Ensure 2.x compatibility with Legacy 7.10.x ([#1902](https://github.com/opensearch-project/OpenSearch/pull/1902)) -* File name correction to follow existing convention ([#1874](https://github.com/opensearch-project/OpenSearch/pull/1874)) -* [Remove] Old Translog Checkpoint Format 
([#1884](https://github.com/opensearch-project/OpenSearch/pull/1884)) -* Remove unwanted unreleased versions ([#1877](https://github.com/opensearch-project/OpenSearch/pull/1877)) -* replace with opensearch-http-channel and opensearch-http-server-channel ([#1799](https://github.com/opensearch-project/OpenSearch/pull/1799)) -* Add bwc version 1.2.4 ([#1796](https://github.com/opensearch-project/OpenSearch/pull/1796)) -* [Remove] various builder and mapping deprecations ([#1752](https://github.com/opensearch-project/OpenSearch/pull/1752)) -* [Remove] Remaining Flavor Serialization ([#1751](https://github.com/opensearch-project/OpenSearch/pull/1751)) -* [Remove] DynamicTemplate deprecations ([#1742](https://github.com/opensearch-project/OpenSearch/pull/1742)) -* [Remove] Analyzer Deprecations ([#1741](https://github.com/opensearch-project/OpenSearch/pull/1741)) -* Drop mocksocket & securemock dependencies from sniffer and rest client (no needed) ([#1174](https://github.com/opensearch-project/OpenSearch/pull/1174)) -* [BWC] Temporarily disable bwc testing while bumping 1.0.1 -* [DEPRECATE] SimpleFS in favor of NIOFS ([#1073](https://github.com/opensearch-project/OpenSearch/pull/1073)) -* Replace JCenter with Maven Central. ([#1057](https://github.com/opensearch-project/OpenSearch/pull/1057)) -* Restoring alpha/beta/rc version semantics ([#1112](https://github.com/opensearch-project/OpenSearch/pull/1112)) -* Remove `client/sniffer` from Javadoc exemption list ([#818](https://github.com/opensearch-project/OpenSearch/pull/818)) -* Removed pre-alpha notes. ([#815](https://github.com/opensearch-project/OpenSearch/pull/815)) -* Remove checks for legacy .yaml and .json config files. ([#792](https://github.com/opensearch-project/OpenSearch/pull/792)) -* Remove reference to an EC2 instance type. 
([#812](https://github.com/opensearch-project/OpenSearch/pull/812)) -* Remove all elastic.co references from javadocs ([#586](https://github.com/opensearch-project/OpenSearch/pull/586)) -* Remove the oss string from OpenSearch distributions ([#575](https://github.com/opensearch-project/OpenSearch/pull/575)) -* [Rename] Remove final references to legacy keystore ([#514](https://github.com/opensearch-project/OpenSearch/pull/514)) -* changed Apache to Apache 2.0. Numbered principles -* fixed apache to apache 2.0 -* Replace nio and nitty test endpoint ([#475](https://github.com/opensearch-project/OpenSearch/pull/475)) -* [Rename] org.elasticsearch.client.documentation.SearchDocumentationIT.testSearchRequestSuggestions ([#467](https://github.com/opensearch-project/OpenSearch/pull/467)) +* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473)) +* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546)) ### Refactoring - -* [Rename] Refactoring Elastic references in docker and kerberos builds (#428) ([#438](https://github.com/opensearch-project/OpenSearch/pull/438)) -* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452)) -* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396)) -* [Refactor] MapperService to QueryShardContext in valueFetcher ([#2027](https://github.com/opensearch-project/OpenSearch/pull/2027)) -* [Refactor] Lucene DataInput and DataOutput to StreamInput and StreamOutput ([#2035](https://github.com/opensearch-project/OpenSearch/pull/2035)) -* [Refactor] InternalEngine to always use soft deletes ([#1933](https://github.com/opensearch-project/OpenSearch/pull/1933)) -* Refactor LegacyESVersion tests from Version tests ([#1662](https://github.com/opensearch-project/OpenSearch/pull/1662)) -* Remove the IndexCommitRef class 
([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421)) -* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* [Remove] remaining AllFieldMapper references ([#3007](https://github.com/opensearch-project/OpenSearch/pull/3007)) * Clear up some confusing code in IndexShardHotSpotTests ([#1534](https://github.com/opensearch-project/OpenSearch/pull/1534)) +* [Rename] ESTestCase stragglers to OpenSearchTestCase ([#3053](https://github.com/opensearch-project/OpenSearch/pull/3053)) +* [Remove] MainResponse version override cluster setting ([#3031](https://github.com/opensearch-project/OpenSearch/pull/3031)) +* [Version] Don't spoof major for 3.0+ clusters ([#2722](https://github.com/opensearch-project/OpenSearch/pull/2722)) +* Centralize codes related to 'master_timeout' deprecation for eaiser removal - in CAT Nodes API ([#2670](https://github.com/opensearch-project/OpenSearch/pull/2670)) * Rename reference to project OpenSearch was forked from ([#2483](https://github.com/opensearch-project/OpenSearch/pull/2483)) -* Introduce RestHandler.Wrapper to help with delegate implementations ([#1004](https://github.com/opensearch-project/OpenSearch/pull/1004)) +* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421)) +* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396)) +* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452)) ### Tests - * Add type mapping removal bwc tests for indexing, searching, snapshots ([#2901](https://github.com/opensearch-project/OpenSearch/pull/2901)) -* Removing SLM check in tests for OpenSearch versions (#2604) ([#2620](https://github.com/opensearch-project/OpenSearch/pull/2620)) +* Removing SLM check in tests for OpenSearch versions 
([#2604](https://github.com/opensearch-project/OpenSearch/pull/2604)) +* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531)) * Use Hamcrest matchers and assertThat() in ReindexRenamedSettingTests ([#2503](https://github.com/opensearch-project/OpenSearch/pull/2503)) -* [Test-Failure] Mute TranslogPolicyIT ([#2342](https://github.com/opensearch-project/OpenSearch/pull/2342)) -* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#2074](https://github.com/opensearch-project/OpenSearch/pull/2074)) -* Stabilizing org.opensearch.cluster.routing.MovePrimaryFirstTests.test… ([#2048](https://github.com/opensearch-project/OpenSearch/pull/2048)) -* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#1983](https://github.com/opensearch-project/OpenSearch/pull/1983)) -* Add hook to execute logic before Integ test task starts ([#1969](https://github.com/opensearch-project/OpenSearch/pull/1969)) -* Remove transport client from tests. 
([#1809](https://github.com/opensearch-project/OpenSearch/pull/1809)) -* [Tests] ClusterHealthIT:testHealthOnMasterFailover - Increase master node timeout ([#1812](https://github.com/opensearch-project/OpenSearch/pull/1812)) -* Ignore file order in test assertion ([#1755](https://github.com/opensearch-project/OpenSearch/pull/1755)) -* Integration test that checks for settings upgrade ([#1482](https://github.com/opensearch-project/OpenSearch/pull/1482)) -* [bwc] reenable bwc testing after syncing staged branches ([#1511](https://github.com/opensearch-project/OpenSearch/pull/1511)) -* [Tests] Translog Pruning tests to MetadataCreateIndexServiceTests ([#1295](https://github.com/opensearch-project/OpenSearch/pull/1295)) -* Reduce iterations to improve test run time ([#1168](https://github.com/opensearch-project/OpenSearch/pull/1168)) -* Tune datanode count and shards count to improve test run time ([#1170](https://github.com/opensearch-project/OpenSearch/pull/1170)) -* [BWC] Re-enable bwc testing after 1.0.1 version bump -* Add unit test for RestActionListener. Validate that onFailure() sends response even when BytesRestResponse can not be constructed using passed exception. Follow up on #923. ([#1024](https://github.com/opensearch-project/OpenSearch/pull/1024)) -* [TEST] Fix failing distro tests for linux packages ([#569](https://github.com/opensearch-project/OpenSearch/pull/569)) -* [TEST] Fix failing packaging tests for OpenSearch distributions. ([#541](https://github.com/opensearch-project/OpenSearch/pull/541)) -* Remove the references to xpack and elastic in tests. 
([#516](https://github.com/opensearch-project/OpenSearch/pull/516)) +* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488)) +* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484)) diff --git a/release-notes/opensearch.release-notes-2.0.0.md b/release-notes/opensearch.release-notes-2.0.0.md index 8880d7a7bddf9..ab38069bbf67d 100644 --- a/release-notes/opensearch.release-notes-2.0.0.md +++ b/release-notes/opensearch.release-notes-2.0.0.md @@ -27,8 +27,29 @@ * [Remove] Type from TermsLookUp ([#2459](https://github.com/opensearch-project/OpenSearch/pull/2459)) * [Remove] types from Uid and remaining types/Uid from translog ([#2450](https://github.com/opensearch-project/OpenSearch/pull/2450)) * [Remove] types from translog ([#2439](https://github.com/opensearch-project/OpenSearch/pull/2439)) -* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433)) * [Remove] Multiple Types from IndexTemplateMetadata ([#2400](https://github.com/opensearch-project/OpenSearch/pull/2400)) +* Remove type mapping from document index API ([#2026](https://github.com/opensearch-project/OpenSearch/pull/2026)) +* [Remove] Type mapping parameter from document update API ([#2204](https://github.com/opensearch-project/OpenSearch/pull/2204)) +* [Remove] Types from DocWrite Request and Response ([#2239](https://github.com/opensearch-project/OpenSearch/pull/2239)) +* [Remove] Types from GET/MGET ([#2168](https://github.com/opensearch-project/OpenSearch/pull/2168)) +* [Remove] types from SearchHit and Explain API ([#2205](https://github.com/opensearch-project/OpenSearch/pull/2205)) +* [Remove] type support from Bulk API ([#2215](https://github.com/opensearch-project/OpenSearch/pull/2215)) +* Remove type end-points from no-op bulk and search action 
([#2261](https://github.com/opensearch-project/OpenSearch/pull/2261)) +* Remove type end-points from search and related APIs ([#2263](https://github.com/opensearch-project/OpenSearch/pull/2263)) +* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433)) +* Removes type mappings from mapping APIs ([#2238](https://github.com/opensearch-project/OpenSearch/pull/2238)) +* Remove type end-points from count action ([#2379](https://github.com/opensearch-project/OpenSearch/pull/2379)) +* Remove type from validate query API ([#2255](https://github.com/opensearch-project/OpenSearch/pull/2255)) +* [Remove] Type parameter from TermVectors API ([#2104](https://github.com/opensearch-project/OpenSearch/pull/2104)) +* Remove inclue_type_name parameter from rest api spec ([#2410](https://github.com/opensearch-project/OpenSearch/pull/2410)) +* [Remove] include_type_name from HLRC ([#2397](https://github.com/opensearch-project/OpenSearch/pull/2397)) +* [Remove] Type mappings from GeoShapeQueryBuilder ([#2322](https://github.com/opensearch-project/OpenSearch/pull/2322)) +* [Remove] types from PutMappingRequest ([#2335](https://github.com/opensearch-project/OpenSearch/pull/2335)) +* [Remove] deprecated getMapping API from IndicesClient ([#2262](https://github.com/opensearch-project/OpenSearch/pull/2262)) +* [Remove] remaining type usage in Client and AbstractClient ([#2258](https://github.com/opensearch-project/OpenSearch/pull/2258)) +* [Remove] Type from Client.prepare(Index,Delete,Update) ([#2253](https://github.com/opensearch-project/OpenSearch/pull/2253)) +* [Remove] Type Specific Index Stats ([#2198](https://github.com/opensearch-project/OpenSearch/pull/2198)) +* [Remove] Type from Search Internals ([#2109](https://github.com/opensearch-project/OpenSearch/pull/2109)) #### Upgrades * [Upgrade] Lucene 9.1 release ([#2560](https://github.com/opensearch-project/OpenSearch/pull/2560)) From 
8cae3a3133f64b8c28c0fe67e0610589f725bccc Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Mon, 23 May 2022 10:28:13 -0500 Subject: [PATCH 19/75] Bump version 2.1 to Lucene 9.2 after upgrade (#3424) Bumps Version.V_2_1_0 lucene version to 9.2 after backporting upgrage. Signed-off-by: Nicholas Walter Knize --- server/src/main/java/org/opensearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 4f0bb55c0f666..e309af54eac6e 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -88,7 +88,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version CURRENT = V_3_0_0; From c0f829c2e69167ca9bb187031a32f27c1e7d5266 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 May 2022 11:24:07 -0500 Subject: [PATCH 20/75] Bump com.gradle.enterprise from 3.10 to 3.10.1 (#3425) Bumps com.gradle.enterprise from 3.10 to 3.10.1. --- updated-dependencies: - dependency-name: com.gradle.enterprise dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- settings.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.gradle b/settings.gradle index 52e1e16fc1c01..a24b063f9fa96 100644 --- a/settings.gradle +++ b/settings.gradle @@ -10,7 +10,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.10" + id "com.gradle.enterprise" version "3.10.1" } rootProject.name = "OpenSearch" From 1f1526bdd3edae59c1a9613ae15b41150bcf44ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 May 2022 11:25:19 -0500 Subject: [PATCH 21/75] Bump reactor-core from 3.4.17 to 3.4.18 in /plugins/repository-azure (#3427) Bumps [reactor-core](https://github.com/reactor/reactor-core) from 3.4.17 to 3.4.18. - [Release notes](https://github.com/reactor/reactor-core/releases) - [Commits](https://github.com/reactor/reactor-core/compare/v3.4.17...v3.4.18) --- updated-dependencies: - dependency-name: io.projectreactor:reactor-core dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 | 1 - plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 create mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 55b4fc638f07b..eb5fc1650a1b4 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -56,7 +56,7 @@ dependencies { implementation project(':modules:transport-netty4') api 'com.azure:azure-storage-blob:12.16.1' api 'org.reactivestreams:reactive-streams:1.0.3' - api 'io.projectreactor:reactor-core:3.4.17' + api 'io.projectreactor:reactor-core:3.4.18' api 'io.projectreactor.netty:reactor-netty:1.0.18' api 'io.projectreactor.netty:reactor-netty-core:1.0.19' api 'io.projectreactor.netty:reactor-netty-http:1.0.18' diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 deleted file mode 100644 index 3803458775631..0000000000000 --- a/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -52176b50d2191bc32a8a235124e7aff7f291754b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 new file mode 100644 index 0000000000000..749954f62c77b --- /dev/null +++ b/plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 @@ -0,0 +1 @@ +29f4f3a4876a65861deffc0f7f189029bcaf7946 \ No newline at end of file From 3a9b37af3a1de8fbdedc79598b24ea10670b9fff Mon Sep 17 00:00:00 
2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 May 2022 11:25:57 -0500 Subject: [PATCH 22/75] Bump gax-httpjson from 0.101.0 to 0.103.1 in /plugins/repository-gcs (#3426) Bumps [gax-httpjson](https://github.com/googleapis/gax-java) from 0.101.0 to 0.103.1. - [Release notes](https://github.com/googleapis/gax-java/releases) - [Changelog](https://github.com/googleapis/gax-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/gax-java/commits) --- updated-dependencies: - dependency-name: com.google.api:gax-httpjson dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 | 1 - plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 67468639dc354..72964f9444026 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'com.google.http-client:google-http-client-appengine:1.41.8' api 'com.google.http-client:google-http-client-jackson2:1.35.0' api 'com.google.http-client:google-http-client-gson:1.41.4' - api 'com.google.api:gax-httpjson:0.101.0' + api 'com.google.api:gax-httpjson:0.103.1' api 'io.grpc:grpc-context:1.46.0' api 'io.opencensus:opencensus-api:0.18.0' api 'io.opencensus:opencensus-contrib-http-util:0.18.0' diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 
deleted file mode 100644 index f722ccbd86c54..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e056920e5df4086270e6c3d2e3a16d8a7585fd13 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 new file mode 100644 index 0000000000000..11315004e233d --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 @@ -0,0 +1 @@ +041d99172fda933bc879bdfd8de9420c5c34107e \ No newline at end of file From 146b334f44ebba4e59404b0741a3c255b6eb2681 Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Mon, 23 May 2022 12:19:54 -0700 Subject: [PATCH 23/75] [segment replication]Introducing common Replication interfaces for segment replication and recovery code paths (#3234) * RecoveryState inherits from ReplicationState + RecoveryTarget inherits from ReplicationTarget Signed-off-by: Poojita Raj * Refactoring: mixedClusterVersion error fix + move Stage to ReplicationState Signed-off-by: Poojita Raj * pull ReplicationListener into a top level class + add javadocs + address review comments Signed-off-by: Poojita Raj * fix javadoc Signed-off-by: Poojita Raj * review changes Signed-off-by: Poojita Raj * Refactoring the hierarchy relationship between repl and recovery Signed-off-by: Poojita Raj * style fix Signed-off-by: Poojita Raj * move package common under replication Signed-off-by: Poojita Raj * rename to replication Signed-off-by: Poojita Raj * rename and doc changes Signed-off-by: Poojita Raj --- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../opensearch/index/shard/IndexShard.java | 11 +- .../opensearch/indices/IndicesService.java | 3 +- .../cluster/IndicesClusterStateService.java | 42 +-- .../recovery/PeerRecoveryTargetService.java | 80 ++--- .../recovery/RecoveriesCollection.java | 332 ------------------ .../indices/recovery/RecoveryListener.java | 55 +++ 
.../indices/recovery/RecoveryState.java | 3 +- .../indices/recovery/RecoveryTarget.java | 146 +++----- .../common/ReplicationCollection.java | 297 ++++++++++++++++ .../common/ReplicationListener.java | 23 ++ .../common/ReplicationRequestTracker.java} | 6 +- .../replication/common/ReplicationState.java | 18 + .../replication/common/ReplicationTarget.java | 175 +++++++++ .../RecoveryDuringReplicationTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 7 +- .../indices/recovery/RecoveryTests.java | 26 +- ...va => ReplicationRequestTrackerTests.java} | 5 +- ...s.java => ReplicationCollectionTests.java} | 88 +++-- .../index/shard/IndexShardTestCase.java | 9 +- 20 files changed, 750 insertions(+), 582 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java create mode 100644 server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java rename server/src/main/java/org/opensearch/indices/{recovery/RecoveryRequestTracker.java => replication/common/ReplicationRequestTracker.java} (96%) create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java rename server/src/test/java/org/opensearch/indices/recovery/{RecoveryRequestTrackerTests.java => ReplicationRequestTrackerTests.java} (95%) rename server/src/test/java/org/opensearch/recovery/{RecoveriesCollectionTests.java => ReplicationCollectionTests.java} (65%) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 4650000f1e20a..0ab3be3d63091 100644 
--- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -101,8 +101,8 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.NodeIndicesStats; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.node.NodeClosedException; import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.AnalysisPlugin; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 0f088a13d5c5a..8002dfe688def 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -157,6 +157,7 @@ import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.repositories.RepositoriesService; @@ -2876,7 +2877,7 @@ protected Engine getEngineOrNull() { public void startRecovery( RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService, - PeerRecoveryTargetService.RecoveryListener recoveryListener, + RecoveryListener recoveryListener, RepositoriesService repositoriesService, Consumer mappingUpdateConsumer, IndicesService indicesService @@ -2909,7 +2910,7 @@ public void startRecovery( recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener); } catch (Exception 
e) { failShard("corrupted preexisting index", e); - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + recoveryListener.onFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); } break; case SNAPSHOT: @@ -2984,15 +2985,15 @@ public void startRecovery( private void executeRecovery( String reason, RecoveryState recoveryState, - PeerRecoveryTargetService.RecoveryListener recoveryListener, + RecoveryListener recoveryListener, CheckedConsumer, Exception> action ) { markAsRecovering(reason, recoveryState); // mark the shard as recovering on the cluster state thread threadPool.generic().execute(ActionRunnable.wrap(ActionListener.wrap(r -> { if (r) { - recoveryListener.onRecoveryDone(recoveryState); + recoveryListener.onDone(recoveryState); } - }, e -> recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true)), action)); + }, e -> recoveryListener.onFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true)), action)); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index b5da0ae1f7688..1c7e45323813c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -136,6 +136,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.node.Node; import org.opensearch.plugins.IndexStorePlugin; @@ -839,7 +840,7 @@ public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetada public IndexShard createShard( final ShardRouting shardRouting, final 
PeerRecoveryTargetService recoveryTargetService, - final PeerRecoveryTargetService.RecoveryListener recoveryListener, + final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, final Consumer globalCheckpointSyncer, diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 29f74f8a86d85..d1623df156593 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -78,8 +78,9 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFailedException; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.search.SearchService; import org.opensearch.snapshots.SnapshotShardsService; @@ -624,7 +625,7 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR indicesService.createShard( shardRouting, recoveryTargetService, - new RecoveryListener(shardRouting, primaryTerm), + new RecoveryListener(shardRouting, primaryTerm, this), repositoriesService, failedShardHandler, globalCheckpointSyncer, @@ -739,39 +740,16 @@ private static DiscoveryNode findSourceNodeForPeerRecovery( return sourceNode; } - private class RecoveryListener implements PeerRecoveryTargetService.RecoveryListener { - - /** - * ShardRouting with which the shard was created - */ - private final ShardRouting shardRouting; - - /** - * Primary term with which the shard was created - */ - private final long 
primaryTerm; - - private RecoveryListener(final ShardRouting shardRouting, final long primaryTerm) { - this.shardRouting = shardRouting; - this.primaryTerm = primaryTerm; - } - - @Override - public void onRecoveryDone(final RecoveryState state) { - shardStateAction.shardStarted(shardRouting, primaryTerm, "after " + state.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); - } - - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - handleRecoveryFailure(shardRouting, sendShardFailure, e); - } - } - // package-private for testing - synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolean sendShardFailure, Exception failure) { + public synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolean sendShardFailure, Exception failure) { failAndRemoveShard(shardRouting, sendShardFailure, "failed recovery", failure, clusterService.state()); } + public void handleRecoveryDone(ReplicationState state, ShardRouting shardRouting, long primaryTerm) { + RecoveryState RecState = (RecoveryState) state; + shardStateAction.shardStarted(shardRouting, primaryTerm, "after " + RecState.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); + } + private void failAndRemoveShard( ShardRouting shardRouting, boolean sendShardFailure, @@ -1004,7 +982,7 @@ U createIndex(IndexMetadata indexMetadata, List builtInIndex T createShard( ShardRouting shardRouting, PeerRecoveryTargetService recoveryTargetService, - PeerRecoveryTargetService.RecoveryListener recoveryListener, + RecoveryListener recoveryListener, RepositoriesService repositoriesService, Consumer onShardFailure, Consumer globalCheckpointSyncer, diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 4ae188abe5896..e13022afa81ba 100644 --- 
a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -37,10 +37,10 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.RateLimiter; +import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.ChannelActionListener; @@ -69,7 +69,8 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogCorruptedException; -import org.opensearch.indices.recovery.RecoveriesCollection.RecoveryRef; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.tasks.Task; @@ -124,7 +125,7 @@ public static class Actions { private final RecoverySettings recoverySettings; private final ClusterService clusterService; - private final RecoveriesCollection onGoingRecoveries; + private final ReplicationCollection onGoingRecoveries; public PeerRecoveryTargetService( ThreadPool threadPool, @@ -136,7 +137,7 @@ public PeerRecoveryTargetService( this.transportService = transportService; this.recoverySettings = recoverySettings; this.clusterService = clusterService; - this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool); + this.onGoingRecoveries = new ReplicationCollection<>(logger, threadPool); transportService.registerRequestHandler( 
Actions.FILES_INFO, @@ -185,13 +186,16 @@ public PeerRecoveryTargetService( @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { - onGoingRecoveries.cancelRecoveriesForShard(shardId, "shard closed"); + onGoingRecoveries.cancelForShard(shardId, "shard closed"); } } public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { // create a new recovery status, and process... - final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); + final long recoveryId = onGoingRecoveries.start( + new RecoveryTarget(indexShard, sourceNode, listener), + recoverySettings.activityTimeout() + ); // we fork off quickly here and go async but this is called from the cluster state applier thread too and that can cause // assertions to trip if we executed it on the same thread hence we fork off to the generic threadpool. 
threadPool.generic().execute(new RecoveryRunner(recoveryId)); @@ -208,9 +212,9 @@ protected void retryRecovery(final long recoveryId, final String reason, TimeVal } private void retryRecovery(final long recoveryId, final TimeValue retryAfter, final TimeValue activityTimeout) { - RecoveryTarget newTarget = onGoingRecoveries.resetRecovery(recoveryId, activityTimeout); + RecoveryTarget newTarget = onGoingRecoveries.reset(recoveryId, activityTimeout); if (newTarget != null) { - threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(newTarget.recoveryId())); + threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(newTarget.getId())); } } @@ -225,7 +229,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi final TransportRequest requestToSend; final StartRecoveryRequest startRequest; final ReplicationTimer timer; - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + try (ReplicationRef recoveryRef = onGoingRecoveries.get(recoveryId)) { if (recoveryRef == null) { logger.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId); return; @@ -248,7 +252,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi } catch (final Exception e) { // this will be logged as warning later on... 
logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); - onGoingRecoveries.failRecovery( + onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true @@ -339,28 +343,17 @@ public static StartRecoveryRequest getStartRecoveryRequest( localNode, metadataSnapshot, recoveryTarget.state().getPrimary(), - recoveryTarget.recoveryId(), + recoveryTarget.getId(), startingSeqNo ); return request; } - /** - * The recovery listener - * - * @opensearch.internal - */ - public interface RecoveryListener { - void onRecoveryDone(RecoveryState state); - - void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure); - } - class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.PREPARE_TRANSLOG, request); if (listener == null) { return; @@ -375,7 +368,7 @@ class FinalizeRecoveryRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FINALIZE, request); if (listener == null) { return; @@ -391,7 +384,7 @@ class HandoffPrimaryContextRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { recoveryRef.get().handoffPrimaryContext(request.primaryContext()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -404,7 +397,7 @@ class 
TranslogOperationsRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); final ActionListener listener = createOrFinishListener( recoveryRef, @@ -424,7 +417,7 @@ public void messageReceived(final RecoveryTranslogOperationsRequest request, fin private void performTranslogOps( final RecoveryTranslogOperationsRequest request, final ActionListener listener, - final RecoveryRef recoveryRef + final ReplicationRef recoveryRef ) { final RecoveryTarget recoveryTarget = recoveryRef.get(); @@ -439,7 +432,12 @@ private void performTranslogOps( @Override public void onNewClusterState(ClusterState state) { threadPool.generic().execute(ActionRunnable.wrap(listener, l -> { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + try ( + ReplicationRef recoveryRef = onGoingRecoveries.getSafe( + request.recoveryId(), + request.shardId() + ) + ) { performTranslogOps(request, listener, recoveryRef); } })); @@ -485,7 +483,7 @@ class FilesInfoRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILES_INFO, request); if (listener == null) { return; @@ -508,7 +506,7 @@ class CleanFilesRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.CLEAN_FILES, request); if (listener == null) { return; @@ -527,7 +525,7 @@ class FileChunkTransportRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); final ActionListener listener = 
createOrFinishListener(recoveryRef, channel, Actions.FILE_CHUNK, request); if (listener == null) { @@ -563,7 +561,7 @@ public void messageReceived(final RecoveryFileChunkRequest request, TransportCha } private ActionListener createOrFinishListener( - final RecoveryRef recoveryRef, + final ReplicationRef recoveryRef, final TransportChannel channel, final String action, final RecoveryTransportRequest request @@ -572,7 +570,7 @@ private ActionListener createOrFinishListener( } private ActionListener createOrFinishListener( - final RecoveryRef recoveryRef, + final ReplicationRef recoveryRef, final TransportChannel channel, final String action, final RecoveryTransportRequest request, @@ -609,10 +607,10 @@ class RecoveryRunner extends AbstractRunnable { @Override public void onFailure(Exception e) { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + try (ReplicationRef recoveryRef = onGoingRecoveries.get(recoveryId)) { if (recoveryRef != null) { logger.error(() -> new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e); - onGoingRecoveries.failRecovery( + onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryRef.get().state(), "unexpected error", e), true // be safe @@ -648,7 +646,7 @@ private RecoveryResponseHandler(final StartRecoveryRequest request, final Replic public void handleResponse(RecoveryResponse recoveryResponse) { final TimeValue recoveryTime = new TimeValue(timer.time()); // do this through ongoing recoveries to remove it from the collection - onGoingRecoveries.markRecoveryAsDone(recoveryId); + onGoingRecoveries.markAsDone(recoveryId); if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); sb.append('[') @@ -709,11 +707,7 @@ private void onException(Exception e) { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { // this can also come from the source wrapped in a 
RemoteTransportException - onGoingRecoveries.failRecovery( - recoveryId, - new RecoveryFailedException(request, "source has canceled the recovery", cause), - false - ); + onGoingRecoveries.fail(recoveryId, new RecoveryFailedException(request, "source has canceled the recovery", cause), false); return; } if (cause instanceof RecoveryEngineException) { @@ -766,11 +760,11 @@ private void onException(Exception e) { } if (cause instanceof AlreadyClosedException) { - onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false); + onGoingRecoveries.fail(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false); return; } - onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true); + onGoingRecoveries.fail(recoveryId, new RecoveryFailedException(request, e), true); } @Override diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java deleted file mode 100644 index 38b72dd0f7dee..0000000000000 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.recovery; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.concurrent.AutoCloseableRefCounted; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.IndexShardClosedException; -import org.opensearch.index.shard.ShardId; -import org.opensearch.threadpool.ThreadPool; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ConcurrentMap; - -/** - * This class holds a collection of all on going recoveries on the current node (i.e., the node is the target node - * of those recoveries). The class is used to guarantee concurrent semantics such that once a recoveries was done/cancelled/failed - * no other thread will be able to find it. Last, the {@link RecoveryRef} inner class verifies that recovery temporary files - * and store will only be cleared once on going usage is finished. - * - * @opensearch.internal - */ -public class RecoveriesCollection { - - /** This is the single source of truth for ongoing recoveries. 
If it's not here, it was canceled or done */ - private final ConcurrentMap onGoingRecoveries = ConcurrentCollections.newConcurrentMap(); - - private final Logger logger; - private final ThreadPool threadPool; - - public RecoveriesCollection(Logger logger, ThreadPool threadPool) { - this.logger = logger; - this.threadPool = threadPool; - } - - /** - * Starts are new recovery for the given shard, source node and state - * - * @return the id of the new recovery. - */ - public long startRecovery( - IndexShard indexShard, - DiscoveryNode sourceNode, - PeerRecoveryTargetService.RecoveryListener listener, - TimeValue activityTimeout - ) { - RecoveryTarget recoveryTarget = new RecoveryTarget(indexShard, sourceNode, listener); - startRecoveryInternal(recoveryTarget, activityTimeout); - return recoveryTarget.recoveryId(); - } - - private void startRecoveryInternal(RecoveryTarget recoveryTarget, TimeValue activityTimeout) { - RecoveryTarget existingTarget = onGoingRecoveries.putIfAbsent(recoveryTarget.recoveryId(), recoveryTarget); - assert existingTarget == null : "found two RecoveryStatus instances with the same id"; - logger.trace( - "{} started recovery from {}, id [{}]", - recoveryTarget.shardId(), - recoveryTarget.sourceNode(), - recoveryTarget.recoveryId() - ); - threadPool.schedule( - new RecoveryMonitor(recoveryTarget.recoveryId(), recoveryTarget.lastAccessTime(), activityTimeout), - activityTimeout, - ThreadPool.Names.GENERIC - ); - } - - /** - * Resets the recovery and performs a recovery restart on the currently recovering index shard - * - * @see IndexShard#performRecoveryRestart() - * @return newly created RecoveryTarget - */ - public RecoveryTarget resetRecovery(final long recoveryId, final TimeValue activityTimeout) { - RecoveryTarget oldRecoveryTarget = null; - final RecoveryTarget newRecoveryTarget; - - try { - synchronized (onGoingRecoveries) { - // swap recovery targets in a synchronized block to ensure that the newly added recovery target is picked up by 
- // cancelRecoveriesForShard whenever the old recovery target is picked up - oldRecoveryTarget = onGoingRecoveries.remove(recoveryId); - if (oldRecoveryTarget == null) { - return null; - } - - newRecoveryTarget = oldRecoveryTarget.retryCopy(); - startRecoveryInternal(newRecoveryTarget, activityTimeout); - } - - // Closes the current recovery target - boolean successfulReset = oldRecoveryTarget.resetRecovery(newRecoveryTarget.cancellableThreads()); - if (successfulReset) { - logger.trace( - "{} restarted recovery from {}, id [{}], previous id [{}]", - newRecoveryTarget.shardId(), - newRecoveryTarget.sourceNode(), - newRecoveryTarget.recoveryId(), - oldRecoveryTarget.recoveryId() - ); - return newRecoveryTarget; - } else { - logger.trace( - "{} recovery could not be reset as it is already cancelled, recovery from {}, id [{}], previous id [{}]", - newRecoveryTarget.shardId(), - newRecoveryTarget.sourceNode(), - newRecoveryTarget.recoveryId(), - oldRecoveryTarget.recoveryId() - ); - cancelRecovery(newRecoveryTarget.recoveryId(), "recovery cancelled during reset"); - return null; - } - } catch (Exception e) { - // fail shard to be safe - oldRecoveryTarget.notifyListener(new RecoveryFailedException(oldRecoveryTarget.state(), "failed to retry recovery", e), true); - return null; - } - } - - public RecoveryTarget getRecoveryTarget(long id) { - return onGoingRecoveries.get(id); - } - - /** - * gets the {@link RecoveryTarget } for a given id. The RecoveryStatus returned has it's ref count already incremented - * to make sure it's safe to use. However, you must call {@link RecoveryTarget#decRef()} when you are done with it, typically - * by using this method in a try-with-resources clause. - *

- * Returns null if recovery is not found - */ - public RecoveryRef getRecovery(long id) { - RecoveryTarget status = onGoingRecoveries.get(id); - if (status != null && status.tryIncRef()) { - return new RecoveryRef(status); - } - return null; - } - - /** Similar to {@link #getRecovery(long)} but throws an exception if no recovery is found */ - public RecoveryRef getRecoverySafe(long id, ShardId shardId) { - RecoveryRef recoveryRef = getRecovery(id); - if (recoveryRef == null) { - throw new IndexShardClosedException(shardId); - } - assert recoveryRef.get().shardId().equals(shardId); - return recoveryRef; - } - - /** cancel the recovery with the given id (if found) and remove it from the recovery collection */ - public boolean cancelRecovery(long id, String reason) { - RecoveryTarget removed = onGoingRecoveries.remove(id); - boolean cancelled = false; - if (removed != null) { - logger.trace( - "{} canceled recovery from {}, id [{}] (reason [{}])", - removed.shardId(), - removed.sourceNode(), - removed.recoveryId(), - reason - ); - removed.cancel(reason); - cancelled = true; - } - return cancelled; - } - - /** - * fail the recovery with the given id (if found) and remove it from the recovery collection - * - * @param id id of the recovery to fail - * @param e exception with reason for the failure - * @param sendShardFailure true a shard failed message should be sent to the master - */ - public void failRecovery(long id, RecoveryFailedException e, boolean sendShardFailure) { - RecoveryTarget removed = onGoingRecoveries.remove(id); - if (removed != null) { - logger.trace( - "{} failing recovery from {}, id [{}]. 
Send shard failure: [{}]", - removed.shardId(), - removed.sourceNode(), - removed.recoveryId(), - sendShardFailure - ); - removed.fail(e, sendShardFailure); - } - } - - /** mark the recovery with the given id as done (if found) */ - public void markRecoveryAsDone(long id) { - RecoveryTarget removed = onGoingRecoveries.remove(id); - if (removed != null) { - logger.trace("{} marking recovery from {} as done, id [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId()); - removed.markAsDone(); - } - } - - /** the number of ongoing recoveries */ - public int size() { - return onGoingRecoveries.size(); - } - - /** - * cancel all ongoing recoveries for the given shard - * - * @param reason reason for cancellation - * @param shardId shardId for which to cancel recoveries - * @return true if a recovery was cancelled - */ - public boolean cancelRecoveriesForShard(ShardId shardId, String reason) { - boolean cancelled = false; - List matchedRecoveries = new ArrayList<>(); - synchronized (onGoingRecoveries) { - for (Iterator it = onGoingRecoveries.values().iterator(); it.hasNext();) { - RecoveryTarget status = it.next(); - if (status.shardId().equals(shardId)) { - matchedRecoveries.add(status); - it.remove(); - } - } - } - for (RecoveryTarget removed : matchedRecoveries) { - logger.trace( - "{} canceled recovery from {}, id [{}] (reason [{}])", - removed.shardId(), - removed.sourceNode(), - removed.recoveryId(), - reason - ); - removed.cancel(reason); - cancelled = true; - } - return cancelled; - } - - /** - * a reference to {@link RecoveryTarget}, which implements {@link AutoCloseable}. closing the reference - * causes {@link RecoveryTarget#decRef()} to be called. This makes sure that the underlying resources - * will not be freed until {@link RecoveryRef#close()} is called. 
- * - * @opensearch.internal - */ - public static class RecoveryRef extends AutoCloseableRefCounted { - - /** - * Important: {@link RecoveryTarget#tryIncRef()} should - * be *successfully* called on status before - */ - public RecoveryRef(RecoveryTarget status) { - super(status); - status.setLastAccessTime(); - } - } - - private class RecoveryMonitor extends AbstractRunnable { - private final long recoveryId; - private final TimeValue checkInterval; - - private volatile long lastSeenAccessTime; - - private RecoveryMonitor(long recoveryId, long lastSeenAccessTime, TimeValue checkInterval) { - this.recoveryId = recoveryId; - this.checkInterval = checkInterval; - this.lastSeenAccessTime = lastSeenAccessTime; - } - - @Override - public void onFailure(Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); - } - - @Override - protected void doRun() throws Exception { - RecoveryTarget status = onGoingRecoveries.get(recoveryId); - if (status == null) { - logger.trace("[monitor] no status found for [{}], shutting down", recoveryId); - return; - } - long accessTime = status.lastAccessTime(); - if (accessTime == lastSeenAccessTime) { - String message = "no activity after [" + checkInterval + "]"; - failRecovery( - recoveryId, - new RecoveryFailedException(status.state(), message, new OpenSearchTimeoutException(message)), - true // to be safe, we don't know what go stuck - ); - return; - } - lastSeenAccessTime = accessTime; - logger.trace("[monitor] rescheduling check for [{}]. 
last access time is [{}]", recoveryId, lastSeenAccessTime); - threadPool.schedule(this, checkInterval, ThreadPool.Names.GENERIC); - } - } - -} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java new file mode 100644 index 0000000000000..b93c054ffa4bf --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.recovery; + +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; + +/** + * Listener that runs on changes in Recovery state + * + * @opensearch.internal + */ +public class RecoveryListener implements ReplicationListener { + + /** + * ShardRouting with which the shard was created + */ + private final ShardRouting shardRouting; + + /** + * Primary term with which the shard was created + */ + private final long primaryTerm; + + private final IndicesClusterStateService indicesClusterStateService; + + public RecoveryListener( + final ShardRouting shardRouting, + final long primaryTerm, + IndicesClusterStateService indicesClusterStateService + ) { + this.shardRouting = shardRouting; + this.primaryTerm = primaryTerm; + this.indicesClusterStateService = indicesClusterStateService; + } + + @Override + public void onDone(ReplicationState state) { + indicesClusterStateService.handleRecoveryDone(state, shardRouting, primaryTerm); + } + + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean 
sendShardFailure) { + indicesClusterStateService.handleRecoveryFailure(shardRouting, sendShardFailure, e); + } +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 35ac5cbc12bde..a3c7adb755145 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -45,6 +45,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; @@ -56,7 +57,7 @@ * * @opensearch.internal */ -public class RecoveryState implements ToXContentFragment, Writeable { +public class RecoveryState implements ReplicationState, ToXContentFragment, Writeable { /** * The stage of the recovery state diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index bb557cc6837ab..92897ab19ad64 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -32,22 +32,18 @@ package org.opensearch.indices.recovery; -import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.Assertions; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; 
import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.seqno.ReplicationTracker; @@ -56,48 +52,33 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardNotRecoveringException; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationTarget; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationCollection; import java.io.IOException; import java.nio.file.Path; import java.util.List; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; /** * Represents a recovery where the current node is the target node of the recovery. To track recoveries in a central place, instances of - * this class are created through {@link RecoveriesCollection}. + * this class are created through {@link ReplicationCollection}. 
* * @opensearch.internal */ -public class RecoveryTarget extends AbstractRefCounted implements RecoveryTargetHandler { - - private final Logger logger; - - private static final AtomicLong idGenerator = new AtomicLong(); +public class RecoveryTarget extends ReplicationTarget implements RecoveryTargetHandler { private static final String RECOVERY_PREFIX = "recovery."; - private final ShardId shardId; - private final long recoveryId; - private final IndexShard indexShard; private final DiscoveryNode sourceNode; - private final MultiFileWriter multiFileWriter; - private final RecoveryRequestTracker requestTracker = new RecoveryRequestTracker(); - private final Store store; - private final PeerRecoveryTargetService.RecoveryListener listener; - - private final AtomicBoolean finished = new AtomicBoolean(); - private final CancellableThreads cancellableThreads; - - // last time this status was accessed - private volatile long lastAccessTime = System.nanoTime(); + protected final MultiFileWriter multiFileWriter; + protected final Store store; // latch that can be used to blockingly wait for RecoveryTarget to be closed private final CountDownLatch closedLatch = new CountDownLatch(1); @@ -109,27 +90,15 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget * @param sourceNode source node of the recovery where we recover from * @param listener called when recovery is completed/failed */ - public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener) { - super("recovery_status"); + public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, ReplicationListener listener) { + super("recovery_status", indexShard, indexShard.recoveryState().getIndex(), listener); this.cancellableThreads = new CancellableThreads(); - this.recoveryId = idGenerator.incrementAndGet(); - this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); - this.indexShard = 
indexShard; this.sourceNode = sourceNode; - this.shardId = indexShard.shardId(); - final String tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + "."; - this.multiFileWriter = new MultiFileWriter( - indexShard.store(), - indexShard.recoveryState().getIndex(), - tempFilePrefix, - logger, - this::ensureRefCount - ); + indexShard.recoveryStats().incCurrentAsTarget(); this.store = indexShard.store(); - // make sure the store is not released until we are done. + final String tempFilePrefix = getPrefix() + UUIDs.randomBase64UUID() + "."; + this.multiFileWriter = new MultiFileWriter(indexShard.store(), stateIndex, tempFilePrefix, logger, this::ensureRefCount); store.incRef(); - indexShard.recoveryStats().incCurrentAsTarget(); } /** @@ -141,23 +110,15 @@ public RecoveryTarget retryCopy() { return new RecoveryTarget(indexShard, sourceNode, listener); } - public ActionListener markRequestReceivedAndCreateListener(long requestSeqNo, ActionListener listener) { - return requestTracker.markReceivedAndCreateListener(requestSeqNo, listener); - } - - public long recoveryId() { - return recoveryId; - } - - public ShardId shardId() { - return shardId; - } - public IndexShard indexShard() { ensureRefCount(); return indexShard; } + public String source() { + return sourceNode.toString(); + } + public DiscoveryNode sourceNode() { return this.sourceNode; } @@ -170,29 +131,29 @@ public CancellableThreads cancellableThreads() { return cancellableThreads; } - /** return the last time this RecoveryStatus was used (based on System.nanoTime() */ - public long lastAccessTime() { - return lastAccessTime; + public Store store() { + ensureRefCount(); + return store; } - /** sets the lasAccessTime flag to now */ - public void setLastAccessTime() { - lastAccessTime = System.nanoTime(); + public String description() { + return "recovery from " + source(); } - public Store store() { - ensureRefCount(); - return store; + @Override + public void notifyListener(Exception e, boolean 
sendShardFailure) { + listener.onFailure(state(), new RecoveryFailedException(state(), e.getMessage(), e), sendShardFailure); } /** * Closes the current recovery target and waits up to a certain timeout for resources to be freed. * Returns true if resetting the recovery was successful, false if the recovery target is already cancelled / failed or marked as done. */ - boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOException { + public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOException { + final long recoveryId = getId(); if (finished.compareAndSet(false, true)) { try { - logger.debug("reset of recovery with shard {} and id [{}]", shardId, recoveryId); + logger.debug("reset of recovery with shard {} and id [{}]", shardId(), recoveryId); } finally { // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now. decRef(); @@ -202,7 +163,7 @@ boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOE } catch (CancellableThreads.ExecutionCancelledException e) { logger.trace( "new recovery target cancelled for shard {} while waiting on old recovery target with id [{}] to close", - shardId, + shardId(), recoveryId ); return false; @@ -248,22 +209,7 @@ public void cancel(String reason) { * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure */ public void fail(RecoveryFailedException e, boolean sendShardFailure) { - if (finished.compareAndSet(false, true)) { - try { - notifyListener(e, sendShardFailure); - } finally { - try { - cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]"); - } finally { - // release the initial reference. 
recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - } - - public void notifyListener(RecoveryFailedException e, boolean sendShardFailure) { - listener.onRecoveryFailure(state(), e, sendShardFailure); + super.fail(e, sendShardFailure); } /** mark the current recovery as done */ @@ -278,7 +224,7 @@ public void markAsDone() { // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now decRef(); } - listener.onRecoveryDone(state()); + listener.onDone(state()); } } @@ -287,7 +233,6 @@ protected void closeInternal() { try { multiFileWriter.close(); } finally { - // free store. increment happens in constructor store.decRef(); indexShard.recoveryStats().decCurrentAsTarget(); closedLatch.countDown(); @@ -296,15 +241,28 @@ protected void closeInternal() { @Override public String toString() { - return shardId + " [" + recoveryId + "]"; + return shardId() + " [" + getId() + "]"; } - private void ensureRefCount() { - if (refCount() <= 0) { - throw new OpenSearchException( - "RecoveryStatus is used but it's refcount is 0. Probably a mismatch between incRef/decRef " + "calls" - ); - } + @Override + protected String getPrefix() { + return RECOVERY_PREFIX; + } + + @Override + protected void onDone() { + assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; + // this might still throw an exception ie. if the shard is CLOSED due to some other event. + // it's safer to decrement the reference in a try finally here. + indexShard.postRecovery("peer recovery done"); + } + + /** + * if {@link #cancellableThreads()} was used, the threads will be interrupted. 
+ */ + @Override + protected void onCancel(String reason) { + cancellableThreads.cancel(reason); } /*** Implementation of {@link RecoveryTargetHandler } */ @@ -374,7 +332,7 @@ public void indexTranslogOperations( translog.totalOperations(totalTranslogOps); assert indexShard().recoveryState() == state(); if (indexShard().state() != IndexShardState.RECOVERING) { - throw new IndexShardNotRecoveringException(shardId, indexShard().state()); + throw new IndexShardNotRecoveringException(shardId(), indexShard().state()); } /* * The maxSeenAutoIdTimestampOnPrimary received from the primary is at least the highest auto_id_timestamp from any operation @@ -460,7 +418,7 @@ public void cleanFiles( final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), globalCheckpoint, - shardId, + shardId(), indexShard.getPendingPrimaryTerm() ); store.associateIndexWithNewTranslog(translogUUID); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java new file mode 100644 index 0000000000000..609825eb5227b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -0,0 +1,297 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.indices.replication.common; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.common.concurrent.AutoCloseableRefCounted; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.ShardId; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentMap; + +/** + * This class holds a collection of all on going replication events on the current node (i.e., the node is the target node + * of those events). The class is used to guarantee concurrent semantics such that once an event was done/cancelled/failed + * no other thread will be able to find it. Last, the {@link ReplicationRef} inner class verifies that temporary files + * and store will only be cleared once on going usage is finished. + * + * @opensearch.internal + */ +public class ReplicationCollection { + + /** This is the single source of truth for ongoing target events. 
If it's not here, it was canceled or done */ + private final ConcurrentMap onGoingTargetEvents = ConcurrentCollections.newConcurrentMap(); + + private final Logger logger; + private final ThreadPool threadPool; + + public ReplicationCollection(Logger logger, ThreadPool threadPool) { + this.logger = logger; + this.threadPool = threadPool; + } + + /** + * Starts a new target event for the given shard, source node and state + * + * @return the id of the new target event. + */ + public long start(T target, TimeValue activityTimeout) { + startInternal(target, activityTimeout); + return target.getId(); + } + + private void startInternal(T target, TimeValue activityTimeout) { + T existingTarget = onGoingTargetEvents.putIfAbsent(target.getId(), target); + assert existingTarget == null : "found two Target instances with the same id"; + logger.trace("started {}", target.description()); + threadPool.schedule( + new ReplicationMonitor(target.getId(), target.lastAccessTime(), activityTimeout), + activityTimeout, + ThreadPool.Names.GENERIC + ); + } + + /** + * Resets the target event and performs a restart on the current index shard + * + * @see IndexShard#performRecoveryRestart() + * @return newly created Target + */ + @SuppressWarnings(value = "unchecked") + public T reset(final long id, final TimeValue activityTimeout) { + T oldTarget = null; + final T newTarget; + + try { + synchronized (onGoingTargetEvents) { + // swap targets in a synchronized block to ensure that the newly added target is picked up by + // cancelForShard whenever the old target is picked up + oldTarget = onGoingTargetEvents.remove(id); + if (oldTarget == null) { + return null; + } + + newTarget = (T) oldTarget.retryCopy(); + startInternal(newTarget, activityTimeout); + } + + // Closes the current target + boolean successfulReset = oldTarget.reset(newTarget.cancellableThreads()); + if (successfulReset) { + logger.trace("restarted {}, previous id [{}]", newTarget.description(), oldTarget.getId()); + return 
newTarget; + } else { + logger.trace( + "{} could not be reset as it is already cancelled, previous id [{}]", + newTarget.description(), + oldTarget.getId() + ); + cancel(newTarget.getId(), "cancelled during reset"); + return null; + } + } catch (Exception e) { + // fail shard to be safe + assert oldTarget != null; + oldTarget.notifyListener(e, true); + return null; + } + } + + public T getTarget(long id) { + return onGoingTargetEvents.get(id); + } + + /** + * gets the {@link ReplicationTarget } for a given id. The ShardTarget returned has it's ref count already incremented + * to make sure it's safe to use. However, you must call {@link ReplicationTarget#decRef()} when you are done with it, typically + * by using this method in a try-with-resources clause. + *

+ * Returns null if target event is not found + */ + public ReplicationRef get(long id) { + T status = onGoingTargetEvents.get(id); + if (status != null && status.tryIncRef()) { + return new ReplicationRef(status); + } + return null; + } + + /** Similar to {@link #get(long)} but throws an exception if no target is found */ + public ReplicationRef getSafe(long id, ShardId shardId) { + ReplicationRef ref = get(id); + if (ref == null) { + throw new IndexShardClosedException(shardId); + } + assert ref.get().indexShard().shardId().equals(shardId); + return ref; + } + + /** cancel the target with the given id (if found) and remove it from the target collection */ + public boolean cancel(long id, String reason) { + T removed = onGoingTargetEvents.remove(id); + boolean cancelled = false; + if (removed != null) { + logger.trace("canceled {} (reason [{}])", removed.description(), reason); + removed.cancel(reason); + cancelled = true; + } + return cancelled; + } + + /** + * fail the target with the given id (if found) and remove it from the target collection + * + * @param id id of the target to fail + * @param e exception with reason for the failure + * @param sendShardFailure true a shard failed message should be sent to the master + */ + public void fail(long id, OpenSearchException e, boolean sendShardFailure) { + T removed = onGoingTargetEvents.remove(id); + if (removed != null) { + logger.trace("failing {}. 
Send shard failure: [{}]", removed.description(), sendShardFailure); + removed.fail(e, sendShardFailure); + } + } + + /** mark the target with the given id as done (if found) */ + public void markAsDone(long id) { + T removed = onGoingTargetEvents.remove(id); + if (removed != null) { + logger.trace("Marking {} as done", removed.description()); + removed.markAsDone(); + } + } + + /** the number of ongoing target events */ + public int size() { + return onGoingTargetEvents.size(); + } + + /** + * cancel all ongoing targets for the given shard + * + * @param reason reason for cancellation + * @param shardId shardId for which to cancel targets + * @return true if a target was cancelled + */ + public boolean cancelForShard(ShardId shardId, String reason) { + boolean cancelled = false; + List matchedTargets = new ArrayList<>(); + synchronized (onGoingTargetEvents) { + for (Iterator it = onGoingTargetEvents.values().iterator(); it.hasNext();) { + T status = it.next(); + if (status.indexShard().shardId().equals(shardId)) { + matchedTargets.add(status); + it.remove(); + } + } + } + for (T removed : matchedTargets) { + logger.trace("canceled {} (reason [{}])", removed.description(), reason); + removed.cancel(reason); + cancelled = true; + } + return cancelled; + } + + /** + * a reference to {@link ReplicationTarget}, which implements {@link AutoCloseable}. closing the reference + * causes {@link ReplicationTarget#decRef()} to be called. This makes sure that the underlying resources + * will not be freed until {@link ReplicationRef#close()} is called. 
+ * + * @opensearch.internal + */ + public static class ReplicationRef extends AutoCloseableRefCounted { + + /** + * Important: {@link ReplicationTarget#tryIncRef()} should + * be *successfully* called on status before + */ + public ReplicationRef(T status) { + super(status); + status.setLastAccessTime(); + } + } + + private class ReplicationMonitor extends AbstractRunnable { + private final long id; + private final TimeValue checkInterval; + + private volatile long lastSeenAccessTime; + + private ReplicationMonitor(long id, long lastSeenAccessTime, TimeValue checkInterval) { + this.id = id; + this.checkInterval = checkInterval; + this.lastSeenAccessTime = lastSeenAccessTime; + } + + @Override + public void onFailure(Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected error while monitoring [{}]", id), e); + } + + @Override + protected void doRun() throws Exception { + T status = onGoingTargetEvents.get(id); + if (status == null) { + logger.trace("[monitor] no status found for [{}], shutting down", id); + return; + } + long accessTime = status.lastAccessTime(); + if (accessTime == lastSeenAccessTime) { + String message = "no activity after [" + checkInterval + "]"; + fail( + id, + new OpenSearchTimeoutException(message), + true // to be safe, we don't know what go stuck + ); + return; + } + lastSeenAccessTime = accessTime; + logger.trace("[monitor] rescheduling check for [{}]. 
last access time is [{}]", id, lastSeenAccessTime); + threadPool.schedule(this, checkInterval, ThreadPool.Names.GENERIC); + } + } + +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java new file mode 100644 index 0000000000000..0666f475d496a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.opensearch.OpenSearchException; + +/** + * Interface for listeners that run when there's a change in {@link ReplicationState} + * + * @opensearch.internal + */ +public interface ReplicationListener { + + void onDone(ReplicationState state); + + void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure); +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryRequestTracker.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java similarity index 96% rename from server/src/main/java/org/opensearch/indices/recovery/RecoveryRequestTracker.java rename to server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java index 71a7f2776f324..0b0d20fc9f17e 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryRequestTracker.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.indices.recovery; +package org.opensearch.indices.replication.common; import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; @@ -45,11 +45,11 @@ import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; /** - * Tracks recovery requests + * Tracks replication requests * * @opensearch.internal */ -public class RecoveryRequestTracker { +public class ReplicationRequestTracker { private final Map> ongoingRequests = Collections.synchronizedMap(new HashMap<>()); private final LocalCheckpointTracker checkpointTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java new file mode 100644 index 0000000000000..7942fa8938dd0 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication.common; + +/** + * Represents a state object used to track copying of segments from an external source + * + * @opensearch.internal + */ +public interface ReplicationState { + +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java new file mode 100644 index 0000000000000..0192270907fd2 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Represents the target of a replication operation performed on a shard + * + * @opensearch.internal + */ +public abstract class ReplicationTarget extends AbstractRefCounted { + + private static final AtomicLong ID_GENERATOR = new AtomicLong(); + + // last time the target/status was accessed + private volatile long lastAccessTime = System.nanoTime(); + private final ReplicationRequestTracker requestTracker = new ReplicationRequestTracker(); + private final long id; + + protected final AtomicBoolean finished = new AtomicBoolean(); + private final ShardId 
shardId; + protected final IndexShard indexShard; + protected final ReplicationListener listener; + protected final Logger logger; + protected final CancellableThreads cancellableThreads; + protected final ReplicationLuceneIndex stateIndex; + + protected abstract String getPrefix(); + + protected abstract void onDone(); + + protected abstract void onCancel(String reason); + + public abstract ReplicationState state(); + + public abstract ReplicationTarget retryCopy(); + + public abstract String description(); + + public ReplicationListener getListener() { + return listener; + } + + public CancellableThreads cancellableThreads() { + return cancellableThreads; + } + + public abstract void notifyListener(Exception e, boolean sendShardFailure); + + public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIndex stateIndex, ReplicationListener listener) { + super(name); + this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); + this.listener = listener; + this.id = ID_GENERATOR.incrementAndGet(); + this.stateIndex = stateIndex; + this.indexShard = indexShard; + this.shardId = indexShard.shardId(); + // make sure the store is not released until we are done. 
+ this.cancellableThreads = new CancellableThreads(); + } + + public long getId() { + return id; + } + + public abstract boolean reset(CancellableThreads newTargetCancellableThreads) throws IOException; + + /** + * return the last time this ReplicationStatus was used (based on System.nanoTime() + */ + public long lastAccessTime() { + return lastAccessTime; + } + + /** + * sets the lasAccessTime flag to now + */ + public void setLastAccessTime() { + lastAccessTime = System.nanoTime(); + } + + public ActionListener markRequestReceivedAndCreateListener(long requestSeqNo, ActionListener listener) { + return requestTracker.markReceivedAndCreateListener(requestSeqNo, listener); + } + + public IndexShard indexShard() { + ensureRefCount(); + return indexShard; + } + + public ShardId shardId() { + return shardId; + } + + /** + * mark the current replication as done + */ + public void markAsDone() { + if (finished.compareAndSet(false, true)) { + try { + onDone(); + } finally { + // release the initial reference. replication files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + listener.onDone(state()); + } + } + + /** + * cancel the replication. calling this method will clean temporary files and release the store + * unless this object is in use (in which case it will be cleaned once all ongoing users call + * {@link #decRef()} + */ + public void cancel(String reason) { + if (finished.compareAndSet(false, true)) { + try { + logger.debug("replication cancelled (reason: [{}])", reason); + onCancel(reason); + } finally { + // release the initial reference. 
replication files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + } + } + + /** + * fail the replication and call listener + * + * @param e exception that encapsulates the failure + * @param sendShardFailure indicates whether to notify the master of the shard failure + */ + public void fail(OpenSearchException e, boolean sendShardFailure) { + if (finished.compareAndSet(false, true)) { + try { + notifyListener(e, sendShardFailure); + } finally { + try { + cancellableThreads.cancel("failed" + description() + "[" + ExceptionsHelper.stackTrace(e) + "]"); + } finally { + // release the initial reference. replication files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + } + } + } + + protected void ensureRefCount() { + if (refCount() <= 0) { + throw new OpenSearchException( + "ReplicationTarget is used but it's refcount is 0. Probably a mismatch between incRef/decRef calls" + ); + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java index add2ecd34e3af..509d1f52daa0d 100644 --- a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java @@ -69,9 +69,9 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; +import org.opensearch.indices.replication.common.ReplicationListener; import java.io.IOException; import java.util.ArrayList; @@ -809,7 +809,7 @@ public BlockingTarget( CountDownLatch releaseRecovery, IndexShard shard, DiscoveryNode sourceNode, - 
PeerRecoveryTargetService.RecoveryListener listener, + ReplicationListener listener, Logger logger ) { super(shard, sourceNode, listener); diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 9d83071c177f5..97cb1dc341b13 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -32,6 +32,7 @@ package org.opensearch.indices.cluster; +import org.junit.Before; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -56,10 +57,10 @@ import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices; import org.opensearch.indices.cluster.IndicesClusterStateService.Shard; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; import java.io.IOException; import java.util.HashMap; @@ -73,9 +74,9 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.opensearch.common.collect.MapBuilder.newMapBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.opensearch.common.collect.MapBuilder.newMapBuilder; /** * Abstract base class for tests against {@link IndicesClusterStateService} @@ -253,7 +254,7 @@ public MockIndexService indexService(Index index) { public MockIndexShard createShard( final ShardRouting shardRouting, final PeerRecoveryTargetService 
recoveryTargetService, - final PeerRecoveryTargetService.RecoveryListener recoveryListener, + final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, final Consumer globalCheckpointSyncer, diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 5e09e0f2253df..5224a54a35e96 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.bulk.BulkShardRequest; @@ -68,6 +69,8 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; import java.io.IOException; import java.util.HashMap; @@ -448,20 +451,17 @@ public long addDocument(Iterable doc) throws IOExcepti IndexShard replica = group.addReplica(); expectThrows( Exception.class, - () -> group.recoverReplica( - replica, - (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, new PeerRecoveryTargetService.RecoveryListener() { - @Override - public void onRecoveryDone(RecoveryState state) { - throw new AssertionError("recovery must fail"); - } + () -> group.recoverReplica(replica, (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + throw new AssertionError("recovery must fail"); + } - @Override 
- public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); - } - }) - ) + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); + } + })) ); expectThrows(AlreadyClosedException.class, () -> replica.refresh("test")); group.removeReplica(replica); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryRequestTrackerTests.java b/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java similarity index 95% rename from server/src/test/java/org/opensearch/indices/recovery/RecoveryRequestTrackerTests.java rename to server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java index 931d36f587db8..afad385deabe4 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryRequestTrackerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.indices.replication.common.ReplicationRequestTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -44,7 +45,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; -public class RecoveryRequestTrackerTests extends OpenSearchTestCase { +public class ReplicationRequestTrackerTests extends OpenSearchTestCase { private TestThreadPool threadPool; @@ -64,7 +65,7 @@ public void testIdempotencyIsEnforced() { Set seqNosReturned = ConcurrentCollections.newConcurrentSet(); ConcurrentMap>> seqToResult = ConcurrentCollections.newConcurrentMap(); - 
RecoveryRequestTracker requestTracker = new RecoveryRequestTracker(); + ReplicationRequestTracker requestTracker = new ReplicationRequestTracker(); int numberOfRequests = randomIntBetween(100, 200); for (int j = 0; j < numberOfRequests; ++j) { diff --git a/server/src/test/java/org/opensearch/recovery/RecoveriesCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java similarity index 65% rename from server/src/test/java/org/opensearch/recovery/RecoveriesCollectionTests.java rename to server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 6a08f5115d1e2..7587f48503625 100644 --- a/server/src/test/java/org/opensearch/recovery/RecoveriesCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -38,10 +38,10 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; -import org.opensearch.indices.recovery.RecoveriesCollection; -import org.opensearch.indices.recovery.RecoveryFailedException; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryTarget; import java.util.concurrent.CountDownLatch; @@ -51,64 +51,58 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; -public class RecoveriesCollectionTests extends OpenSearchIndexLevelReplicationTestCase { - static final PeerRecoveryTargetService.RecoveryListener listener = new PeerRecoveryTargetService.RecoveryListener() { +public class ReplicationCollectionTests extends OpenSearchIndexLevelReplicationTestCase { + static final ReplicationListener listener = new 
ReplicationListener() { @Override - public void onRecoveryDone(RecoveryState state) { + public void onDone(ReplicationState state) { } @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { } }; public void testLastAccessTimeUpdate() throws Exception { try (ReplicationGroup shards = createGroup(0)) { - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); - try (RecoveriesCollection.RecoveryRef status = collection.getRecovery(recoveryId)) { + try (ReplicationCollection.ReplicationRef status = collection.get(recoveryId)) { final long lastSeenTime = status.get().lastAccessTime(); assertBusy(() -> { - try (RecoveriesCollection.RecoveryRef currentStatus = collection.getRecovery(recoveryId)) { + try (ReplicationCollection.ReplicationRef currentStatus = collection.get(recoveryId)) { assertThat("access time failed to update", lastSeenTime, lessThan(currentStatus.get().lastAccessTime())); } }); } finally { - collection.cancelRecovery(recoveryId, "life"); + collection.cancel(recoveryId, "life"); } } } public void testRecoveryTimeout() throws Exception { try (ReplicationGroup shards = createGroup(0)) { - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); final AtomicBoolean failed = new AtomicBoolean(); final CountDownLatch latch = new CountDownLatch(1); - final long recoveryId = startRecovery( - collection, - shards.getPrimaryNode(), - shards.addReplica(), - new PeerRecoveryTargetService.RecoveryListener() { - @Override - public void onRecoveryDone(RecoveryState state) { - 
latch.countDown(); - } - - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - failed.set(true); - latch.countDown(); - } - }, - TimeValue.timeValueMillis(100) - ); + final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica(), new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + latch.countDown(); + } + + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + failed.set(true); + latch.countDown(); + } + }, TimeValue.timeValueMillis(100)); try { latch.await(30, TimeUnit.SECONDS); assertTrue("recovery failed to timeout", failed.get()); } finally { - collection.cancelRecovery(recoveryId, "meh"); + collection.cancel(recoveryId, "meh"); } } @@ -116,16 +110,16 @@ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, bo public void testRecoveryCancellation() throws Exception { try (ReplicationGroup shards = createGroup(0)) { - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); - try (RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId)) { - ShardId shardId = recoveryRef.get().shardId(); - assertTrue("failed to cancel recoveries", collection.cancelRecoveriesForShard(shardId, "test")); + try (ReplicationCollection.ReplicationRef recoveryRef = collection.get(recoveryId)) { + ShardId shardId = recoveryRef.get().indexShard().shardId(); + assertTrue("failed to cancel recoveries", collection.cancelForShard(shardId, "test")); assertThat("all recoveries should be cancelled", collection.size(), equalTo(0)); } finally { - 
collection.cancelRecovery(recoveryId, "meh"); - collection.cancelRecovery(recoveryId2, "meh"); + collection.cancel(recoveryId, "meh"); + collection.cancel(recoveryId2, "meh"); } } } @@ -135,17 +129,17 @@ public void testResetRecovery() throws Exception { shards.startAll(); int numDocs = randomIntBetween(1, 15); shards.indexDocs(numDocs); - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); IndexShard shard = shards.addReplica(); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard); - RecoveryTarget recoveryTarget = collection.getRecoveryTarget(recoveryId); + RecoveryTarget recoveryTarget = collection.getTarget(recoveryId); final int currentAsTarget = shard.recoveryStats().currentAsTarget(); final int referencesToStore = recoveryTarget.store().refCount(); IndexShard indexShard = recoveryTarget.indexShard(); Store store = recoveryTarget.store(); String tempFileName = recoveryTarget.getTempNameForFile("foobar"); - RecoveryTarget resetRecovery = collection.resetRecovery(recoveryId, TimeValue.timeValueMinutes(60)); - final long resetRecoveryId = resetRecovery.recoveryId(); + RecoveryTarget resetRecovery = collection.reset(recoveryId, TimeValue.timeValueMinutes(60)); + final long resetRecoveryId = resetRecovery.getId(); assertNotSame(recoveryTarget, resetRecovery); assertNotSame(recoveryTarget.cancellableThreads(), resetRecovery.cancellableThreads()); assertSame(indexShard, resetRecovery.indexShard()); @@ -158,31 +152,31 @@ public void testResetRecovery() throws Exception { String resetTempFileName = resetRecovery.getTempNameForFile("foobar"); assertNotEquals(tempFileName, resetTempFileName); assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget()); - try (RecoveriesCollection.RecoveryRef newRecoveryRef = collection.getRecovery(resetRecoveryId)) { + try (ReplicationCollection.ReplicationRef 
newRecoveryRef = collection.get(resetRecoveryId)) { shards.recoverReplica(shard, (s, n) -> { assertSame(s, newRecoveryRef.get().indexShard()); return newRecoveryRef.get(); }, false); } shards.assertAllEqual(numDocs); - assertNull("recovery is done", collection.getRecovery(recoveryId)); + assertNull("recovery is done", collection.get(recoveryId)); } } - long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard shard) { + long startRecovery(ReplicationCollection collection, DiscoveryNode sourceNode, IndexShard shard) { return startRecovery(collection, sourceNode, shard, listener, TimeValue.timeValueMinutes(60)); } long startRecovery( - RecoveriesCollection collection, + ReplicationCollection collection, DiscoveryNode sourceNode, IndexShard indexShard, - PeerRecoveryTargetService.RecoveryListener listener, + ReplicationListener listener, TimeValue timeValue ) { final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId()); indexShard.markAsRecovering("remote", new RecoveryState(indexShard.routingEntry(), sourceNode, rNode)); indexShard.prepareForIndexRecovery(); - return collection.startRecovery(indexShard, sourceNode, listener, timeValue); + return collection.start(new RecoveryTarget(indexShard, sourceNode, listener), timeValue); } } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 509edfd1b9103..298fdcaea6465 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.Directory; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.indices.flush.FlushRequest; import 
org.opensearch.action.index.IndexRequest; @@ -93,6 +94,8 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase; @@ -138,14 +141,14 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase { } }; - protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { + protected static final ReplicationListener recoveryListener = new ReplicationListener() { @Override - public void onRecoveryDone(RecoveryState state) { + public void onDone(ReplicationState state) { } @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { throw new AssertionError(e); } }; From f0d2033c775cdc574854ca5da0e307fcdc262bad Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Mon, 23 May 2022 14:01:22 -0700 Subject: [PATCH 24/75] [Type removal] Remove type from BulkRequestParser (#3423) * [Type removal] Remove type handling in bulk request parser Signed-off-by: Suraj Singh * [Type removal] Remove testTypesStillParsedForBulkMonitoring as it is no longer present in codebase Signed-off-by: Suraj Singh --- .../opensearch/action/bulk/BulkRequest.java | 4 +- .../action/bulk/BulkRequestParser.java | 34 ++------ .../action/bulk/BulkRequestParserTests.java | 83 +++++-------------- 3 files changed, 30 insertions(+), 91 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java 
b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index 25b335eae0bf1..3af4227bf46ca 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -287,7 +287,7 @@ public BulkRequest add( String routing = valueOrDefault(defaultRouting, globalRouting); String pipeline = valueOrDefault(defaultPipeline, globalPipeline); Boolean requireAlias = valueOrDefault(defaultRequireAlias, globalRequireAlias); - new BulkRequestParser(true).parse( + new BulkRequestParser().parse( data, defaultIndex, routing, @@ -296,7 +296,7 @@ public BulkRequest add( requireAlias, allowExplicitIndex, xContentType, - (indexRequest, type) -> internalAdd(indexRequest), + this::internalAdd, this::internalAdd, this::add ); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java index 675905cc60e75..212450515b57e 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -67,7 +66,6 @@ public final class BulkRequestParser { private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); private static final ParseField OP_TYPE = new ParseField("op_type"); @@ -80,17 +78,6 @@ public final class BulkRequestParser { private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); private static final ParseField REQUIRE_ALIAS = new ParseField(DocWriteRequest.REQUIRE_ALIAS); - // TODO: 
Remove this parameter once the BulkMonitoring endpoint has been removed - private final boolean errorOnType; - - /** - * Create a new parser. - * @param errorOnType whether to allow _type information in the index line; used by BulkMonitoring - */ - public BulkRequestParser(boolean errorOnType) { - this.errorOnType = errorOnType; - } - private static int findNextMarker(byte marker, int from, BytesReference data) { final int res = data.indexOf(marker, from); if (res != -1) { @@ -136,7 +123,7 @@ public void parse( @Nullable Boolean defaultRequireAlias, boolean allowExplicitIndex, XContentType xContentType, - BiConsumer indexRequestConsumer, + Consumer indexRequestConsumer, Consumer updateRequestConsumer, Consumer deleteRequestConsumer ) throws IOException { @@ -192,7 +179,6 @@ public void parse( String action = parser.currentName(); String index = defaultIndex; - String type = null; String id = null; String routing = defaultRouting; FetchSourceContext fetchSourceContext = defaultFetchSourceContext; @@ -205,7 +191,7 @@ public void parse( String pipeline = defaultPipeline; boolean requireAlias = defaultRequireAlias != null && defaultRequireAlias; - // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id) + // at this stage, next token can either be END_OBJECT (and use default index with auto generated id) // or START_OBJECT which will have another set of parameters token = parser.nextToken(); @@ -220,13 +206,6 @@ public void parse( throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity()); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - if (errorOnType) { - throw new IllegalArgumentException( - "Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]" - ); - } - type = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity()); } else if 
(ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -322,8 +301,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias), - type + .setRequireAlias(requireAlias) ); } else { indexRequestConsumer.accept( @@ -336,8 +314,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias), - type + .setRequireAlias(requireAlias) ); } } else if ("create".equals(action)) { @@ -351,8 +328,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias), - type + .setRequireAlias(requireAlias) ); } else if ("update".equals(action)) { if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java index 239bb19c5f6ad..d3da77112408b 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java @@ -47,9 +47,9 @@ public class BulkRequestParserTests extends OpenSearchTestCase { public void testIndexRequest() throws IOException { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (indexRequest, type) -> { 
+ parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, indexRequest -> { assertFalse(parsed.get()); assertEquals("foo", indexRequest.index()); assertEquals("bar", indexRequest.id()); @@ -67,7 +67,7 @@ public void testIndexRequest() throws IOException { true, false, XContentType.JSON, - (indexRequest, type) -> { assertTrue(indexRequest.isRequireAlias()); }, + indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -82,7 +82,7 @@ public void testIndexRequest() throws IOException { null, false, XContentType.JSON, - (indexRequest, type) -> { assertTrue(indexRequest.isRequireAlias()); }, + indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -97,7 +97,7 @@ public void testIndexRequest() throws IOException { true, false, XContentType.JSON, - (indexRequest, type) -> { assertFalse(indexRequest.isRequireAlias()); }, + indexRequest -> { assertFalse(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -105,34 +105,22 @@ public void testIndexRequest() throws IOException { public void testDeleteRequest() throws IOException { BytesArray request = new BytesArray("{ \"delete\":{ \"_id\": \"bar\" } }\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse( - request, - "foo", - null, - null, - null, - null, - false, - XContentType.JSON, - (req, type) -> fail(), - req -> fail(), - deleteRequest -> { - assertFalse(parsed.get()); - assertEquals("foo", deleteRequest.index()); - assertEquals("bar", deleteRequest.id()); - parsed.set(true); - } - ); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), deleteRequest -> { + assertFalse(parsed.get()); + assertEquals("foo", deleteRequest.index()); + assertEquals("bar", deleteRequest.id()); + parsed.set(true); + }); 
assertTrue(parsed.get()); } public void testUpdateRequest() throws IOException { BytesArray request = new BytesArray("{ \"update\":{ \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (req, type) -> fail(), updateRequest -> { + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), updateRequest -> { assertFalse(parsed.get()); assertEquals("foo", updateRequest.index()); assertEquals("bar", updateRequest.id()); @@ -150,7 +138,7 @@ public void testUpdateRequest() throws IOException { true, false, XContentType.JSON, - (req, type) -> fail(), + req -> fail(), updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -165,7 +153,7 @@ public void testUpdateRequest() throws IOException { null, false, XContentType.JSON, - (req, type) -> fail(), + req -> fail(), updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -180,7 +168,7 @@ public void testUpdateRequest() throws IOException { true, false, XContentType.JSON, - (req, type) -> fail(), + req -> fail(), updateRequest -> { assertFalse(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -188,7 +176,7 @@ public void testUpdateRequest() throws IOException { public void testBarfOnLackOfTrailingNewline() { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> parser.parse( @@ -200,7 +188,7 @@ public void testBarfOnLackOfTrailingNewline() { null, false, XContentType.JSON, - (indexRequest, type) -> fail(), + indexRequest -> fail(), req -> fail(), req -> fail() ) @@ -210,46 
+198,21 @@ public void testBarfOnLackOfTrailingNewline() { public void testFailOnExplicitIndex() { BytesArray request = new BytesArray("{ \"index\":{ \"_index\": \"foo\", \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> parser.parse( - request, - null, - null, - null, - null, - null, - false, - XContentType.JSON, - (req, type) -> fail(), - req -> fail(), - req -> fail() - ) + () -> parser.parse(request, null, null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), req -> fail()) ); assertEquals("explicit index in bulk is not allowed", ex.getMessage()); } - public void testTypesStillParsedForBulkMonitoring() throws IOException { - BytesArray request = new BytesArray("{ \"index\":{ \"_type\": \"quux\", \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(false); - final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (indexRequest, type) -> { - assertFalse(parsed.get()); - assertEquals("foo", indexRequest.index()); - assertEquals("bar", indexRequest.id()); - parsed.set(true); - }, req -> fail(), req -> fail()); - assertTrue(parsed.get()); - } - public void testParseDeduplicatesParameterStrings() throws IOException { BytesArray request = new BytesArray( "{ \"index\":{ \"_index\": \"bar\", \"pipeline\": \"foo\", \"routing\": \"blub\"} }\n{}\n" + "{ \"index\":{ \"_index\": \"bar\", \"pipeline\": \"foo\", \"routing\": \"blub\" } }\n{}\n" ); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final List indexRequests = new ArrayList<>(); parser.parse( request, @@ -260,7 +223,7 @@ public void testParseDeduplicatesParameterStrings() throws IOException { null, true, XContentType.JSON, - 
(indexRequest, type) -> indexRequests.add(indexRequest), + indexRequest -> indexRequests.add(indexRequest), req -> fail(), req -> fail() ); From 87d354abf3a3763257872cd9f6921a115cbbeb4c Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Tue, 24 May 2022 14:08:14 +0000 Subject: [PATCH 25/75] Adding CheckpointRefreshListener to trigger when Segment replication is turned on and Primary shard refreshes (#3108) * Intial PR adding classes and tests related to checkpoint publishing Signed-off-by: Rishikesh1159 * Putting a Draft PR with all changes in classes. Testing is still not included in this commit. Signed-off-by: Rishikesh1159 * Wiring up index shard to new engine, spotless apply and removing unnecessary tests and logs Signed-off-by: Rishikesh1159 * Adding Unit test for checkpointRefreshListener Signed-off-by: Rishikesh1159 * Applying spotless check Signed-off-by: Rishikesh1159 * Fixing import statements * Signed-off-by: Rishikesh1159 * removing unused constructor in index shard Signed-off-by: Rishikesh1159 * Addressing comments from last commit Signed-off-by: Rishikesh1159 * Adding package-info.java files for two new packages Signed-off-by: Rishikesh1159 * Adding test for null checkpoint publisher and addreesing PR comments Signed-off-by: Rishikesh1159 * Add docs for indexshardtests and remove shard.refresh Signed-off-by: Rishikesh1159 --- .../opensearch/index/shard/IndexShardIT.java | 4 +- .../org/opensearch/index/IndexService.java | 7 +- .../shard/CheckpointRefreshListener.java | 47 +++++ .../opensearch/index/shard/IndexShard.java | 36 +++- .../org/opensearch/indices/IndicesModule.java | 5 + .../opensearch/indices/IndicesService.java | 4 +- .../cluster/IndicesClusterStateService.java | 11 +- .../checkpoint/PublishCheckpointAction.java | 173 ++++++++++++++++++ .../checkpoint/PublishCheckpointRequest.java | 53 ++++++ .../checkpoint/ReplicationCheckpoint.java | 136 ++++++++++++++ 
...SegmentReplicationCheckpointPublisher.java | 49 +++++ .../replication/checkpoint/package-info.java | 10 + .../index/shard/IndexShardTests.java | 69 +++++++ ...dicesLifecycleListenerSingleNodeTests.java | 8 +- ...actIndicesClusterStateServiceTestCase.java | 2 + ...ClusterStateServiceRandomUpdatesTests.java | 2 + .../PublishCheckpointActionTests.java | 157 ++++++++++++++++ .../snapshots/SnapshotResiliencyTests.java | 4 +- .../index/shard/IndexShardTestCase.java | 47 ++++- 19 files changed, 814 insertions(+), 10 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 5f014e89e330e..888881d43eb11 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -84,6 +84,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import 
org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.DummyShardLock; @@ -673,7 +674,8 @@ public static final IndexShard newIndexShard( Arrays.asList(listeners), () -> {}, RetentionLeaseSyncer.EMPTY, - cbs + cbs, + SegmentReplicationCheckpointPublisher.EMPTY ); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 0739e5afdffcd..0a6d1501f2bea 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -94,6 +94,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; @@ -428,7 +429,8 @@ private long getAvgShardSizeInBytes() throws IOException { public synchronized IndexShard createShard( final ShardRouting routing, final Consumer globalCheckpointSyncer, - final RetentionLeaseSyncer retentionLeaseSyncer + final RetentionLeaseSyncer retentionLeaseSyncer, + final SegmentReplicationCheckpointPublisher checkpointPublisher ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -530,7 +532,8 @@ public synchronized IndexShard createShard( indexingOperationListeners, () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, - circuitBreakerService + circuitBreakerService, + this.indexSettings.isSegRepEnabled() && routing.primary() ? 
checkpointPublisher : null ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java new file mode 100644 index 0000000000000..ac6754bf6a74a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.ReferenceManager; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; + +import java.io.IOException; + +/** + * A {@link ReferenceManager.RefreshListener} that publishes a checkpoint to be consumed by replicas. + * This class is only used with Segment Replication enabled. 
+ * + * @opensearch.internal + */ +public class CheckpointRefreshListener implements ReferenceManager.RefreshListener { + + protected static Logger logger = LogManager.getLogger(CheckpointRefreshListener.class); + + private final IndexShard shard; + private final SegmentReplicationCheckpointPublisher publisher; + + public CheckpointRefreshListener(IndexShard shard, SegmentReplicationCheckpointPublisher publisher) { + this.shard = shard; + this.publisher = publisher; + } + + @Override + public void beforeRefresh() throws IOException { + // Do nothing + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if (didRefresh) { + publisher.publish(shard); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 8002dfe688def..60a3305370c2a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -160,6 +160,9 @@ import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; +import org.opensearch.indices.replication.checkpoint.PublishCheckpointRequest; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.rest.RestStatus; @@ -299,6 +302,7 @@ Runnable getGlobalCheckpointSyncer() { private final AtomicReference pendingRefreshLocation = new AtomicReference<>(); private final RefreshPendingLocationListener refreshPendingLocationListener; private volatile boolean useRetentionLeasesInPeerRecovery; + private final ReferenceManager.RefreshListener checkpointRefreshListener; public IndexShard( final ShardRouting 
shardRouting, @@ -320,7 +324,8 @@ public IndexShard( final List listeners, final Runnable globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final CircuitBreakerService circuitBreakerService + final CircuitBreakerService circuitBreakerService, + @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -403,6 +408,11 @@ public boolean shouldCache(Query query) { persistMetadata(path, indexSettings, shardRouting, null, logger); this.useRetentionLeasesInPeerRecovery = replicationTracker.hasAllPeerRecoveryRetentionLeases(); this.refreshPendingLocationListener = new RefreshPendingLocationListener(); + if (checkpointPublisher != null) { + this.checkpointRefreshListener = new CheckpointRefreshListener(this, checkpointPublisher); + } else { + this.checkpointRefreshListener = null; + } } public ThreadPool getThreadPool() { @@ -1363,6 +1373,21 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti } } + /** + * Returns the lastest Replication Checkpoint that shard received + */ + public ReplicationCheckpoint getLatestReplicationCheckpoint() { + return new ReplicationCheckpoint(shardId, 0, 0, 0, 0); + } + + /** + * Invoked when a new checkpoint is received from a primary shard. Starts the copy process. + */ + public synchronized void onNewCheckpoint(final PublishCheckpointRequest request) { + assert shardRouting.primary() == false; + // TODO + } + /** * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard, * without having to worry about the current state of the engine and concurrent flushes. 
@@ -3106,6 +3131,13 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { } }; + final List internalRefreshListener; + if (this.checkpointRefreshListener != null) { + internalRefreshListener = Arrays.asList(new RefreshMetricUpdater(refreshMetric), checkpointRefreshListener); + } else { + internalRefreshListener = Collections.singletonList(new RefreshMetricUpdater(refreshMetric)); + } + return this.engineConfigFactory.newEngineConfig( shardId, threadPool, @@ -3122,7 +3154,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Arrays.asList(refreshListeners, refreshPendingLocationListener), - Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), + internalRefreshListener, indexSort, circuitBreakerService, globalCheckpointSupplier, diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java index f188c47e7a9de..0cb2ff958c787 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesModule.java +++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java @@ -41,6 +41,7 @@ import org.opensearch.common.inject.AbstractModule; import org.opensearch.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.io.stream.NamedWriteableRegistry.Entry; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.NamedXContentRegistry; import org.opensearch.index.mapper.BinaryFieldMapper; import org.opensearch.index.mapper.BooleanFieldMapper; @@ -73,6 +74,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.mapper.MapperRegistry; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.store.IndicesStore; import 
org.opensearch.indices.store.TransportNodesListShardStoreMetadata; import org.opensearch.plugins.MapperPlugin; @@ -278,6 +280,9 @@ protected void configure() { bind(RetentionLeaseSyncAction.class).asEagerSingleton(); bind(RetentionLeaseBackgroundSyncAction.class).asEagerSingleton(); bind(RetentionLeaseSyncer.class).asEagerSingleton(); + if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) { + bind(SegmentReplicationCheckpointPublisher.class).asEagerSingleton(); + } } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 1c7e45323813c..5ce10069aaa89 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -138,6 +138,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.node.Node; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.PluginsService; @@ -839,6 +840,7 @@ public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetada @Override public IndexShard createShard( final ShardRouting shardRouting, + final SegmentReplicationCheckpointPublisher checkpointPublisher, final PeerRecoveryTargetService recoveryTargetService, final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, @@ -853,7 +855,7 @@ public IndexShard createShard( IndexService indexService = indexService(shardRouting.index()); assert indexService != null; RecoveryState recoveryState = indexService.createRecoveryState(shardRouting, targetNode, sourceNode); - IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer); + IndexShard indexShard = 
indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index d1623df156593..7233b6893b03e 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -80,6 +80,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.search.SearchService; @@ -138,6 +139,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final Consumer globalCheckpointSyncer; private final RetentionLeaseSyncer retentionLeaseSyncer; + private final SegmentReplicationCheckpointPublisher checkpointPublisher; + @Inject public IndicesClusterStateService( final Settings settings, @@ -153,13 +156,15 @@ public IndicesClusterStateService( final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, final GlobalCheckpointSyncAction globalCheckpointSyncAction, - final RetentionLeaseSyncer retentionLeaseSyncer + final RetentionLeaseSyncer retentionLeaseSyncer, + final SegmentReplicationCheckpointPublisher checkpointPublisher ) { this( settings, indicesService, 
clusterService, threadPool, + checkpointPublisher, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, @@ -179,6 +184,7 @@ public IndicesClusterStateService( final AllocatedIndices> indicesService, final ClusterService clusterService, final ThreadPool threadPool, + final SegmentReplicationCheckpointPublisher checkpointPublisher, final PeerRecoveryTargetService recoveryTargetService, final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, @@ -191,6 +197,7 @@ public IndicesClusterStateService( final RetentionLeaseSyncer retentionLeaseSyncer ) { this.settings = settings; + this.checkpointPublisher = checkpointPublisher; this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, snapshotShardsService); this.indicesService = indicesService; this.clusterService = clusterService; @@ -624,6 +631,7 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR logger.debug("{} creating shard with primary term [{}]", shardRouting.shardId(), primaryTerm); indicesService.createShard( shardRouting, + checkpointPublisher, recoveryTargetService, new RecoveryListener(shardRouting, primaryTerm, this), repositoriesService, @@ -981,6 +989,7 @@ U createIndex(IndexMetadata indexMetadata, List builtInIndex */ T createShard( ShardRouting shardRouting, + SegmentReplicationCheckpointPublisher checkpointPublisher, PeerRecoveryTargetService recoveryTargetService, RecoveryListener recoveryListener, RepositoriesService repositoriesService, diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java new file mode 100644 index 0000000000000..b74a69971ebd5 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -0,0 +1,173 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.action.support.replication.ReplicationTask; +import org.opensearch.action.support.replication.TransportReplicationAction; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.indices.IndicesService; +import org.opensearch.node.NodeClosedException; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** + * Replication action responsible for publishing checkpoint to a replica shard. 
+ * + * @opensearch.internal + */ + +public class PublishCheckpointAction extends TransportReplicationAction< + PublishCheckpointRequest, + PublishCheckpointRequest, + ReplicationResponse> { + + public static final String ACTION_NAME = "indices:admin/publishCheckpoint"; + protected static Logger logger = LogManager.getLogger(PublishCheckpointAction.class); + + @Inject + public PublishCheckpointAction( + Settings settings, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters + ) { + super( + settings, + ACTION_NAME, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + PublishCheckpointRequest::new, + PublishCheckpointRequest::new, + ThreadPool.Names.REFRESH + ); + } + + @Override + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); + } + + @Override + protected void doExecute(Task task, PublishCheckpointRequest request, ActionListener listener) { + assert false : "use PublishCheckpointAction#publish"; + } + + /** + * Publish checkpoint request to shard + */ + final void publish(IndexShard indexShard) { + String primaryAllocationId = indexShard.routingEntry().allocationId().getId(); + long primaryTerm = indexShard.getPendingPrimaryTerm(); + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we have to execute under the system context so that if security is enabled the sync is authorized + threadContext.markAsSystemContext(); + PublishCheckpointRequest request = new PublishCheckpointRequest(indexShard.getLatestReplicationCheckpoint()); + final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); + transportService.sendChildRequest( + 
clusterService.localNode(), + transportPrimaryAction, + new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), + task, + transportOptions, + new TransportResponseHandler() { + @Override + public ReplicationResponse read(StreamInput in) throws IOException { + return newResponseInstance(in); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public void handleResponse(ReplicationResponse response) { + task.setPhase("finished"); + taskManager.unregister(task); + } + + @Override + public void handleException(TransportException e) { + task.setPhase("finished"); + taskManager.unregister(task); + if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { + // node shutting down + return; + } + if (ExceptionsHelper.unwrap( + e, + IndexNotFoundException.class, + AlreadyClosedException.class, + IndexShardClosedException.class + ) != null) { + // the index was deleted or the shard is closed + return; + } + logger.warn( + new ParameterizedMessage("{} segment replication checkpoint publishing failed", indexShard.shardId()), + e + ); + } + } + ); + } + } + + @Override + protected void shardOperationOnPrimary( + PublishCheckpointRequest request, + IndexShard primary, + ActionListener> listener + ) { + ActionListener.completeWith(listener, () -> new PrimaryResult<>(request, new ReplicationResponse())); + } + + @Override + protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexShard replica, ActionListener listener) { + Objects.requireNonNull(request); + Objects.requireNonNull(replica); + ActionListener.completeWith(listener, () -> { + logger.trace("Checkpoint received on replica {}", request); + if (request.getCheckpoint().getShardId().equals(replica.shardId())) { + replica.onNewCheckpoint(request); + } + return new ReplicaResult(); + }); + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java 
b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java new file mode 100644 index 0000000000000..740fd3bccb7c4 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.action.support.replication.ReplicationRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Replication request responsible for publishing checkpoint request to a replica shard. + * + * @opensearch.internal + */ +public class PublishCheckpointRequest extends ReplicationRequest { + + private final ReplicationCheckpoint checkpoint; + + public PublishCheckpointRequest(ReplicationCheckpoint checkpoint) { + super(checkpoint.getShardId()); + this.checkpoint = checkpoint; + } + + public PublishCheckpointRequest(StreamInput in) throws IOException { + super(in); + this.checkpoint = new ReplicationCheckpoint(in); + } + + /** + * Returns Replication Checkpoint + */ + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + checkpoint.writeTo(out); + } + + @Override + public String toString() { + return "PublishCheckpointRequest{" + "checkpoint=" + checkpoint + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java new file mode 100644 index 0000000000000..98ab9cc4c1708 --- /dev/null +++ 
b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -0,0 +1,136 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.Objects; + +/** + * Represents a Replication Checkpoint which is sent to a replica shard. + * + * @opensearch.internal + */ +public class ReplicationCheckpoint implements Writeable { + + private final ShardId shardId; + private final long primaryTerm; + private final long segmentsGen; + private final long seqNo; + private final long segmentInfosVersion; + + public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long seqNo, long segmentInfosVersion) { + this.shardId = shardId; + this.primaryTerm = primaryTerm; + this.segmentsGen = segmentsGen; + this.seqNo = seqNo; + this.segmentInfosVersion = segmentInfosVersion; + } + + public ReplicationCheckpoint(StreamInput in) throws IOException { + shardId = new ShardId(in); + primaryTerm = in.readLong(); + segmentsGen = in.readLong(); + seqNo = in.readLong(); + segmentInfosVersion = in.readLong(); + } + + /** + * The primary term of this Replication Checkpoint. 
+ * + * @return the primary term + */ + public long getPrimaryTerm() { + return primaryTerm; + } + + /** + * @return the Segments Gen number + */ + public long getSegmentsGen() { + return segmentsGen; + } + + /** + * @return the Segment Info version + */ + public long getSegmentInfosVersion() { + return segmentInfosVersion; + } + + /** + * @return the Seq number + */ + public long getSeqNo() { + return seqNo; + } + + /** + * Shard Id of primary shard. + * + * @return the Shard Id + */ + public ShardId getShardId() { + return shardId; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardId.writeTo(out); + out.writeLong(primaryTerm); + out.writeLong(segmentsGen); + out.writeLong(seqNo); + out.writeLong(segmentInfosVersion); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ReplicationCheckpoint that = (ReplicationCheckpoint) o; + return primaryTerm == that.primaryTerm + && segmentsGen == that.segmentsGen + && seqNo == that.seqNo + && segmentInfosVersion == that.segmentInfosVersion + && Objects.equals(shardId, that.shardId); + } + + @Override + public int hashCode() { + return Objects.hash(shardId, primaryTerm, segmentsGen, seqNo); + } + + /** + * Checks if other is aheadof current replication point by comparing segmentInfosVersion. 
Returns true for null + */ + public boolean isAheadOf(@Nullable ReplicationCheckpoint other) { + return other == null || segmentInfosVersion > other.getSegmentInfosVersion(); + } + + @Override + public String toString() { + return "ReplicationCheckpoint{" + + "shardId=" + + shardId + + ", primaryTerm=" + + primaryTerm + + ", segmentsGen=" + + segmentsGen + + ", seqNo=" + + seqNo + + ", version=" + + segmentInfosVersion + + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java new file mode 100644 index 0000000000000..2b09901a947fe --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.common.inject.Inject; +import org.opensearch.index.shard.IndexShard; + +import java.util.Objects; + +/** + * Publish Segment Replication Checkpoint. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationCheckpointPublisher { + + private final PublishAction publishAction; + + @Inject + public SegmentReplicationCheckpointPublisher(PublishCheckpointAction publishAction) { + this(publishAction::publish); + } + + public SegmentReplicationCheckpointPublisher(PublishAction publishAction) { + this.publishAction = Objects.requireNonNull(publishAction); + } + + public void publish(IndexShard indexShard) { + publishAction.publish(indexShard); + } + + /** + * Represents an action that is invoked to publish segment replication checkpoint to replica shard + */ + public interface PublishAction { + void publish(IndexShard indexShard); + } + + /** + * NoOp Checkpoint publisher + */ + public static final SegmentReplicationCheckpointPublisher EMPTY = new SegmentReplicationCheckpointPublisher(indexShard -> {}); +} diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java new file mode 100644 index 0000000000000..a30154ea9206a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/** Package containing classes to implement a replication checkpoint */ +package org.opensearch.indices.replication.checkpoint; diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index e54d30c626812..bf9671964a210 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; +import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.AlreadyClosedException; @@ -133,6 +134,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; @@ -198,6 +200,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.oneOf; import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.mock; import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; import static org.opensearch.common.lucene.Lucene.cleanLuceneIndex; import static org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS; @@ -3425,6 +3428,72 @@ public void testReadSnapshotConcurrently() throws IOException, InterruptedExcept closeShards(newShard); } + /** + * here we are mocking a SegmentReplicationcheckpointPublisher and testing on index shard if CheckpointRefreshListener is added to the InternalrefreshListerners List + */ + 
public void testCheckpointRefreshListener() throws IOException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + IndexShard shard = newStartedShard(p -> newShard(mock), true); + List refreshListeners = shard.getEngine().config().getInternalRefreshListener(); + assertTrue(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener)); + closeShards(shard); + } + + /** + * here we are passing null in place of SegmentReplicationCheckpointPublisher and testing on index shard if CheckpointRefreshListener is not added to the InternalRefreshListeners List + */ + public void testCheckpointRefreshListenerWithNull() throws IOException { + IndexShard shard = newStartedShard(p -> newShard(null), true); + List refreshListeners = shard.getEngine().config().getInternalRefreshListener(); + assertFalse(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener)); + closeShards(shard); + } + + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * current node id the shard is assigned to. 
+ * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint + */ + private IndexShard newShard(SegmentReplicationCheckpointPublisher checkpointPublisher) throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 0); + final ShardRouting shardRouting = TestShardRouting.newShardRouting( + shardId, + randomAlphaOfLength(10), + true, + ShardRoutingState.INITIALIZING, + RecoverySource.EmptyStoreRecoverySource.INSTANCE + ); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); + ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); + + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) + .put(Settings.EMPTY) + .build(); + IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName()) + .settings(indexSettings) + .primaryTerm(0, primaryTerm) + .putMapping("{ \"properties\": {} }") + .build(); + return newShard( + shardRouting, + shardPath, + metadata, + null, + null, + new InternalEngineFactory(), + new EngineConfigFactory(new IndexSettings(metadata, metadata.getSettings())), + () -> {}, + RetentionLeaseSyncer.EMPTY, + EMPTY_EVENT_LISTENER, + checkpointPublisher + ); + } + public void testIndexCheckOnStartup() throws Exception { final IndexShard indexShard = newStartedShard(true); diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 5f3d03f85f324..0989bf869f18e 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ 
b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -49,6 +49,7 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.test.OpenSearchSingleNodeTestCase; import java.util.Arrays; @@ -148,7 +149,12 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting = newRouting.moveToUnassigned(unassignedInfo) .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); - IndexShard shard = index.createShard(newRouting, s -> {}, RetentionLeaseSyncer.EMPTY); + IndexShard shard = index.createShard( + newRouting, + s -> {}, + RetentionLeaseSyncer.EMPTY, + SegmentReplicationCheckpointPublisher.EMPTY + ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); final DiscoveryNode localNode = new DiscoveryNode( diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 97cb1dc341b13..0619e3e3f62a2 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -59,6 +59,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import 
org.opensearch.test.OpenSearchTestCase; @@ -253,6 +254,7 @@ public MockIndexService indexService(Index index) { @Override public MockIndexShard createShard( final ShardRouting shardRouting, + final SegmentReplicationCheckpointPublisher checkpointPublisher, final PeerRecoveryTargetService recoveryTargetService, final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 7789054cfdc16..cd3fee60014a7 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -562,6 +563,7 @@ private IndicesClusterStateService createIndicesClusterStateService( indicesService, clusterService, threadPool, + SegmentReplicationCheckpointPublisher.EMPTY, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java new file mode 100644 index 0000000000000..074b5ff613b08 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The 
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.TransportReplicationAction; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.*; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; + +public class PublishCheckpointActionTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private CapturingTransport transport; + private ClusterService clusterService; + private TransportService transportService; + private ShardStateAction shardStateAction; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + transport = new CapturingTransport(); + clusterService = 
createClusterService(threadPool); + transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); + } + + @Override + public void tearDown() throws Exception { + try { + IOUtils.close(transportService, clusterService, transport); + } finally { + terminate(threadPool); + } + super.tearDown(); + } + + public void testPublishCheckpointActionOnPrimary() throws InterruptedException { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, clusterService.getClusterSettings()); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()) + ); + + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 111, 11, 1); + + final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); + + action.shardOperationOnPrimary(request, indexShard, ActionTestUtils.assertNoFailureListener(result -> { + // we should forward the request containing the current publish checkpoint to the replica + 
assertThat(result.replicaRequest(), sameInstance(request)); + })); + + } + + public void testPublishCheckpointActionOnReplica() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, clusterService.getClusterSettings()); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()) + ); + + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 111, 11, 1); + + final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); + + final PlainActionFuture listener = PlainActionFuture.newFuture(); + action.shardOperationOnReplica(request, indexShard, listener); + final TransportReplicationAction.ReplicaResult result = listener.actionGet(); + + // onNewCheckpoint should be called on shard with checkpoint request + verify(indexShard).onNewCheckpoint(request); + + // the result should indicate success + final AtomicBoolean success = new AtomicBoolean(); + result.runPostReplicaActions(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); + assertTrue(success.get()); + + } + +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index a896aab0f70c9..ab9a455399366 100644 --- 
a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -182,6 +182,7 @@ import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; @@ -1860,7 +1861,8 @@ public void onFailure(final Exception e) { shardStateAction, actionFilters ), - RetentionLeaseSyncer.EMPTY + RetentionLeaseSyncer.EMPTY, + SegmentReplicationCheckpointPublisher.EMPTY ); Map actions = new HashMap<>(); final SystemIndices systemIndices = new SystemIndices(emptyMap()); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 298fdcaea6465..371fa6d102304 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -94,6 +94,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; @@ -411,6 +412,47 @@ protected IndexShard newShard( ); } + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * current node id the shard is assigned to. 
+ * @param routing shard routing to use + * @param shardPath path to use for shard data + * @param indexMetadata indexMetadata for the shard, including any mapping + * @param storeProvider an optional custom store provider to use. If null a default file based store will be created + * @param indexReaderWrapper an optional wrapper to be used during search + * @param globalCheckpointSyncer callback for syncing global checkpoints + * @param indexEventListener index event listener + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard( + ShardRouting routing, + ShardPath shardPath, + IndexMetadata indexMetadata, + @Nullable CheckedFunction storeProvider, + @Nullable CheckedFunction indexReaderWrapper, + @Nullable EngineFactory engineFactory, + @Nullable EngineConfigFactory engineConfigFactory, + Runnable globalCheckpointSyncer, + RetentionLeaseSyncer retentionLeaseSyncer, + IndexEventListener indexEventListener, + IndexingOperationListener... listeners + ) throws IOException { + return newShard( + routing, + shardPath, + indexMetadata, + storeProvider, + indexReaderWrapper, + engineFactory, + engineConfigFactory, + globalCheckpointSyncer, + retentionLeaseSyncer, + indexEventListener, + SegmentReplicationCheckpointPublisher.EMPTY, + listeners + ); + } + /** * creates a new initializing shard. 
* @param routing shard routing to use @@ -420,6 +462,7 @@ protected IndexShard newShard( * @param indexReaderWrapper an optional wrapper to be used during search * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener + * @param checkpointPublisher segment Replication Checkpoint Publisher to publish checkpoint * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard( @@ -433,6 +476,7 @@ protected IndexShard newShard( Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, + SegmentReplicationCheckpointPublisher checkpointPublisher, IndexingOperationListener... listeners ) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); @@ -480,7 +524,8 @@ protected IndexShard newShard( Arrays.asList(listeners), globalCheckpointSyncer, retentionLeaseSyncer, - breakerService + breakerService, + checkpointPublisher ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); success = true; From 052e96055320537d8ae29a637c0e493b2d48eae9 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 24 May 2022 10:18:18 -0700 Subject: [PATCH 26/75] Add a new Engine implementation for replicas with segment replication enabled. (#3240) * Change fastForwardProcessedSeqNo method in LocalCheckpointTracker to persisted checkpoint. This change inverts fastForwardProcessedSeqNo to fastForwardPersistedSeqNo for use in Segment Replication. This is so that a Segrep Engine can match the logic of InternalEngine where the seqNo is incremented with each operation, but only persisted in the tracker on a flush. With Segment Replication we bump the processed number with each operation received index/delete/noOp, and invoke this method when we receive a new set of segments to bump the persisted seqNo. 
Signed-off-by: Marc Handalian * Extract Translog specific engine methods into an abstract class. This change extracts translog specific methods to an abstract engine class so that other engine implementations can reuse translog logic. Signed-off-by: Marc Handalian * Add a separate Engine implementation for replicas with segment replication enabled. This change adds a new engine intended to be used on replicas with segment replication enabled. This engine does not wire up an IndexWriter, but still writes all operations to a translog. The engine uses a new ReaderManager that refreshes from an externally provided SegmentInfos. Signed-off-by: Marc Handalian * Fix spotless checks. Signed-off-by: Marc Handalian * Fix :server:compileInternalClusterTestJava compilation. Signed-off-by: Marc Handalian * Fix failing test naming convention check. Signed-off-by: Marc Handalian * PR feedback. - Removed isReadOnlyReplica from overloaded constructor and added feature flag checks. - Updated log msg in NRTReplicationReaderManager - cleaned up store ref counting in NRTReplicationEngine. Signed-off-by: Marc Handalian * Fix spotless check. Signed-off-by: Marc Handalian * Remove TranslogAwareEngine and build translog in NRTReplicationEngine. Signed-off-by: Marc Handalian * Fix formatting Signed-off-by: Marc Handalian * Add missing translog methods to NRTEngine. Signed-off-by: Marc Handalian * Remove persistent seqNo check from fastForwardProcessedSeqNo. Signed-off-by: Marc Handalian * PR feedback. Signed-off-by: Marc Handalian * Add test specific to translog trimming. Signed-off-by: Marc Handalian * Javadoc check. Signed-off-by: Marc Handalian * Add failEngine calls to translog methods in NRTReplicationEngine. Roll xlog generation on replica when a new commit point is received. 
Signed-off-by: Marc Handalian --- .../org/opensearch/index/engine/Engine.java | 17 + .../opensearch/index/engine/EngineConfig.java | 72 +++ .../index/engine/EngineConfigFactory.java | 6 +- .../index/engine/InternalEngine.java | 29 +- .../index/engine/NRTReplicationEngine.java | 482 ++++++++++++++++++ .../engine/NRTReplicationEngineFactory.java | 25 + .../engine/NRTReplicationReaderManager.java | 92 ++++ .../index/engine/ReadOnlyEngine.java | 5 + .../index/seqno/LocalCheckpointTracker.java | 2 +- .../opensearch/index/shard/IndexShard.java | 3 +- .../opensearch/indices/IndicesService.java | 4 + .../engine/EngineConfigFactoryTests.java | 6 +- .../engine/NRTReplicationEngineTests.java | 239 +++++++++ .../seqno/LocalCheckpointTrackerTests.java | 45 +- .../index/shard/IndexShardTests.java | 28 +- .../index/engine/EngineTestCase.java | 22 +- 16 files changed, 1007 insertions(+), 70 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java create mode 100644 server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java create mode 100644 server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java create mode 100644 server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index 047d632c44392..c242d98b4b65c 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -169,6 +169,12 @@ public final EngineConfig config() { protected abstract SegmentInfos getLastCommittedSegmentInfos(); + /** + * Return the latest active SegmentInfos from the engine. 
+ * @return {@link SegmentInfos} + */ + protected abstract SegmentInfos getLatestSegmentInfos(); + public MergeStats getMergeStats() { return new MergeStats(); } @@ -176,6 +182,17 @@ public MergeStats getMergeStats() { /** returns the history uuid for the engine */ public abstract String getHistoryUUID(); + /** + * Reads the current stored history ID from commit data. + */ + String loadHistoryUUID(Map commitData) { + final String uuid = commitData.get(HISTORY_UUID_KEY); + if (uuid == null) { + throw new IllegalStateException("commit doesn't contain history uuid"); + } + return uuid; + } + /** Returns how many bytes we are currently moving from heap to disk */ public abstract long getWritingBytes(); diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 0ea4a96a72362..4ae6646ed14f0 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -97,6 +97,7 @@ public final class EngineConfig { private final CircuitBreakerService circuitBreakerService; private final LongSupplier globalCheckpointSupplier; private final Supplier retentionLeasesSupplier; + private final boolean isReadOnlyReplica; /** * A supplier of the outstanding retention leases. 
This is used during merged operations to determine which operations that have been @@ -228,6 +229,66 @@ public EngineConfig( LongSupplier primaryTermSupplier, TombstoneDocSupplier tombstoneDocSupplier ) { + this( + shardId, + threadPool, + indexSettings, + warmer, + store, + mergePolicy, + analyzer, + similarity, + codecService, + eventListener, + queryCache, + queryCachingPolicy, + translogConfig, + translogDeletionPolicyFactory, + flushMergesAfter, + externalRefreshListener, + internalRefreshListener, + indexSort, + circuitBreakerService, + globalCheckpointSupplier, + retentionLeasesSupplier, + primaryTermSupplier, + tombstoneDocSupplier, + false + ); + } + + /** + * Creates a new {@link org.opensearch.index.engine.EngineConfig} + */ + EngineConfig( + ShardId shardId, + ThreadPool threadPool, + IndexSettings indexSettings, + Engine.Warmer warmer, + Store store, + MergePolicy mergePolicy, + Analyzer analyzer, + Similarity similarity, + CodecService codecService, + Engine.EventListener eventListener, + QueryCache queryCache, + QueryCachingPolicy queryCachingPolicy, + TranslogConfig translogConfig, + TranslogDeletionPolicyFactory translogDeletionPolicyFactory, + TimeValue flushMergesAfter, + List externalRefreshListener, + List internalRefreshListener, + Sort indexSort, + CircuitBreakerService circuitBreakerService, + LongSupplier globalCheckpointSupplier, + Supplier retentionLeasesSupplier, + LongSupplier primaryTermSupplier, + TombstoneDocSupplier tombstoneDocSupplier, + boolean isReadOnlyReplica + ) { + if (isReadOnlyReplica && indexSettings.isSegRepEnabled() == false) { + throw new IllegalArgumentException("Shard can only be wired as a read only replica with Segment Replication enabled"); + } this.shardId = shardId; this.indexSettings = indexSettings; this.threadPool = threadPool; @@ -266,6 +327,7 @@ public EngineConfig( this.retentionLeasesSupplier = Objects.requireNonNull(retentionLeasesSupplier); this.primaryTermSupplier = primaryTermSupplier; 
this.tombstoneDocSupplier = tombstoneDocSupplier; + this.isReadOnlyReplica = isReadOnlyReplica; } /** @@ -460,6 +522,16 @@ public LongSupplier getPrimaryTermSupplier() { return primaryTermSupplier; } + /** + * Returns if this replica should be wired as a read only. + * This is used for Segment Replication where the engine implementation used is dependent on + * if the shard is a primary/replica. + * @return true if this engine should be wired as read only. + */ + public boolean isReadOnlyReplica() { + return indexSettings.isSegRepEnabled() && isReadOnlyReplica; + } + /** * A supplier supplies tombstone documents which will be used in soft-update methods. * The returned document consists only _uid, _seqno, _term and _version fields; other metadata fields are excluded. diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index afab57905a9a7..c8aec3570f8b5 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -146,7 +146,8 @@ public EngineConfig newEngineConfig( LongSupplier globalCheckpointSupplier, Supplier retentionLeasesSupplier, LongSupplier primaryTermSupplier, - EngineConfig.TombstoneDocSupplier tombstoneDocSupplier + EngineConfig.TombstoneDocSupplier tombstoneDocSupplier, + boolean isReadOnlyReplica ) { CodecService codecServiceToUse = codecService; if (codecService == null && this.codecServiceFactory != null) { @@ -176,7 +177,8 @@ public EngineConfig newEngineConfig( globalCheckpointSupplier, retentionLeasesSupplier, primaryTermSupplier, - tombstoneDocSupplier + tombstoneDocSupplier, + isReadOnlyReplica ); } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index eb91478b97adc..e60e650372ec4 100644 --- 
a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -49,6 +49,7 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.ShuffleForcedMergePolicy; import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; +import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.index.Term; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; import org.apache.lucene.search.BooleanClause; @@ -648,17 +649,6 @@ public long getWritingBytes() { return indexWriter.getFlushingBytes() + versionMap.getRefreshingBytes(); } - /** - * Reads the current stored history ID from the IW commit data. - */ - private String loadHistoryUUID(Map commitData) { - final String uuid = commitData.get(HISTORY_UUID_KEY); - if (uuid == null) { - throw new IllegalStateException("commit doesn't contain history uuid"); - } - return uuid; - } - private ExternalReaderManager createReaderManager(RefreshWarmerListener externalRefreshListener) throws EngineException { boolean success = false; OpenSearchReaderManager internalReaderManager = null; @@ -2298,6 +2288,23 @@ protected SegmentInfos getLastCommittedSegmentInfos() { return lastCommittedSegmentInfos; } + @Override + public SegmentInfos getLatestSegmentInfos() { + OpenSearchDirectoryReader reader = null; + try { + reader = internalReaderManager.acquire(); + return ((StandardDirectoryReader) reader.getDelegate()).getSegmentInfos(); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } finally { + try { + internalReaderManager.release(reader); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } + } + } + @Override protected final void writerSegmentStats(SegmentsStats stats) { stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed()); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java 
b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java new file mode 100644 index 0000000000000..106643198cc3b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -0,0 +1,482 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.search.ReferenceManager; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.seqno.LocalCheckpointTracker; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; +import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogConfig; +import org.opensearch.index.translog.TranslogDeletionPolicy; +import org.opensearch.index.translog.TranslogStats; +import org.opensearch.search.suggest.completion.CompletionStats; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.function.BiFunction; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; +import 
java.util.stream.Stream; + +/** + * This is an {@link Engine} implementation intended for replica shards when Segment Replication + * is enabled. This Engine does not create an IndexWriter, rather it refreshes a {@link NRTReplicationReaderManager} + * with new Segments when received from an external source. + * + * @opensearch.internal + */ +public class NRTReplicationEngine extends Engine { + + private volatile SegmentInfos lastCommittedSegmentInfos; + private final NRTReplicationReaderManager readerManager; + private final CompletionStatsCache completionStatsCache; + private final LocalCheckpointTracker localCheckpointTracker; + private final Translog translog; + + public NRTReplicationEngine(EngineConfig engineConfig) { + super(engineConfig); + store.incRef(); + NRTReplicationReaderManager readerManager = null; + try { + lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + readerManager = new NRTReplicationReaderManager(OpenSearchDirectoryReader.wrap(getDirectoryReader(), shardId)); + final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( + this.lastCommittedSegmentInfos.getUserData().entrySet() + ); + this.localCheckpointTracker = new LocalCheckpointTracker(commitInfo.maxSeqNo, commitInfo.localCheckpoint); + this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); + this.readerManager = readerManager; + this.readerManager.addListener(completionStatsCache); + this.translog = openTranslog( + engineConfig, + getTranslogDeletionPolicy(engineConfig), + engineConfig.getGlobalCheckpointSupplier(), + localCheckpointTracker::markSeqNoAsPersisted + ); + } catch (IOException e) { + IOUtils.closeWhileHandlingException(store::decRef, readerManager); + throw new EngineCreationFailureException(shardId, "failed to create engine", e); + } + } + + public synchronized void updateSegments(final SegmentInfos infos, long seqNo) throws IOException { + // Update the current infos reference on 
the Engine's reader. + readerManager.updateSegments(infos); + + // only update the persistedSeqNo and "lastCommitted" infos reference if the incoming segments have a higher + // generation. We can still refresh with incoming SegmentInfos that are not part of a commit point. + if (infos.getGeneration() > lastCommittedSegmentInfos.getGeneration()) { + this.lastCommittedSegmentInfos = infos; + rollTranslogGeneration(); + } + localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); + } + + @Override + public String getHistoryUUID() { + return loadHistoryUUID(lastCommittedSegmentInfos.userData); + } + + @Override + public long getWritingBytes() { + return 0; + } + + @Override + public CompletionStats completionStats(String... fieldNamePatterns) { + return completionStatsCache.get(fieldNamePatterns); + } + + @Override + public long getIndexThrottleTimeInMillis() { + return 0; + } + + @Override + public boolean isThrottled() { + return false; + } + + @Override + public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) throws EngineException { + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + translog.trimOperations(belowTerm, aboveSeqNo); + } catch (Exception e) { + try { + failEngine("translog operations trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to trim translog operations", e); + } + } + + @Override + public IndexResult index(Index index) throws IOException { + ensureOpen(); + IndexResult indexResult = new IndexResult(index.version(), index.primaryTerm(), index.seqNo(), false); + final Translog.Location location = translog.add(new Translog.Index(index, indexResult)); + indexResult.setTranslogLocation(location); + indexResult.setTook(System.nanoTime() - index.startTime()); + indexResult.freeze(); + localCheckpointTracker.advanceMaxSeqNo(index.seqNo()); + return indexResult; + } + + @Override + public DeleteResult delete(Delete delete) throws IOException 
{ + ensureOpen(); + DeleteResult deleteResult = new DeleteResult(delete.version(), delete.primaryTerm(), delete.seqNo(), true); + final Translog.Location location = translog.add(new Translog.Delete(delete, deleteResult)); + deleteResult.setTranslogLocation(location); + deleteResult.setTook(System.nanoTime() - delete.startTime()); + deleteResult.freeze(); + localCheckpointTracker.advanceMaxSeqNo(delete.seqNo()); + return deleteResult; + } + + @Override + public NoOpResult noOp(NoOp noOp) throws IOException { + ensureOpen(); + NoOpResult noOpResult = new NoOpResult(noOp.primaryTerm(), noOp.seqNo()); + final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); + noOpResult.setTranslogLocation(location); + noOpResult.setTook(System.nanoTime() - noOp.startTime()); + noOpResult.freeze(); + localCheckpointTracker.advanceMaxSeqNo(noOp.seqNo()); + return noOpResult; + } + + @Override + public GetResult get(Get get, BiFunction searcherFactory) throws EngineException { + return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); + } + + @Override + protected ReferenceManager getReferenceManager(SearcherScope scope) { + return readerManager; + } + + @Override + public boolean isTranslogSyncNeeded() { + return translog.syncNeeded(); + } + + @Override + public boolean ensureTranslogSynced(Stream locations) throws IOException { + boolean synced = translog.ensureSynced(locations); + if (synced) { + translog.trimUnreferencedReaders(); + } + return synced; + } + + @Override + public void syncTranslog() throws IOException { + translog.sync(); + translog.trimUnreferencedReaders(); + } + + @Override + public Closeable acquireHistoryRetentionLock() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public Translog.Snapshot newChangesSnapshot( + String source, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean accurateCount + ) throws IOException { + throw new 
UnsupportedOperationException("Not implemented"); + } + + @Override + public int countNumberOfHistoryOperations(String source, long fromSeqNo, long toSeqNumber) throws IOException { + return 0; + } + + @Override + public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) { + return false; + } + + @Override + public long getMinRetainedSeqNo() { + return localCheckpointTracker.getProcessedCheckpoint(); + } + + @Override + public TranslogStats getTranslogStats() { + return translog.stats(); + } + + @Override + public Translog.Location getTranslogLastWriteLocation() { + return translog.getLastWriteLocation(); + } + + @Override + public long getPersistedLocalCheckpoint() { + return localCheckpointTracker.getPersistedCheckpoint(); + } + + public long getProcessedLocalCheckpoint() { + return localCheckpointTracker.getProcessedCheckpoint(); + } + + @Override + public SeqNoStats getSeqNoStats(long globalCheckpoint) { + return localCheckpointTracker.getStats(globalCheckpoint); + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return translog.getLastSyncedGlobalCheckpoint(); + } + + @Override + public long getIndexBufferRAMBytesUsed() { + return 0; + } + + @Override + public List segments(boolean verbose) { + return Arrays.asList(getSegmentInfo(getLatestSegmentInfos(), verbose)); + } + + @Override + public void refresh(String source) throws EngineException {} + + @Override + public boolean maybeRefresh(String source) throws EngineException { + return false; + } + + @Override + public void writeIndexingBuffer() throws EngineException {} + + @Override + public boolean shouldPeriodicallyFlush() { + return false; + } + + @Override + public void flush(boolean force, boolean waitIfOngoing) throws EngineException {} + + @Override + public void trimUnreferencedTranslogFiles() throws EngineException { + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + translog.trimUnreferencedReaders(); + } catch (Exception e) { + try { + 
failEngine("translog trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to trim translog", e); + } + } + + @Override + public boolean shouldRollTranslogGeneration() { + return translog.shouldRollGeneration(); + } + + @Override + public void rollTranslogGeneration() throws EngineException { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + translog.rollGeneration(); + translog.trimUnreferencedReaders(); + } catch (Exception e) { + try { + failEngine("translog trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to roll translog", e); + } + } + + @Override + public void forceMerge( + boolean flush, + int maxNumSegments, + boolean onlyExpungeDeletes, + boolean upgrade, + boolean upgradeOnlyAncientSegments, + String forceMergeUUID + ) throws EngineException, IOException {} + + @Override + public GatedCloseable acquireLastIndexCommit(boolean flushFirst) throws EngineException { + try { + final IndexCommit indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, store.directory()); + return new GatedCloseable<>(indexCommit, () -> {}); + } catch (IOException e) { + throw new EngineException(shardId, "Unable to build latest IndexCommit", e); + } + } + + @Override + public GatedCloseable acquireSafeIndexCommit() throws EngineException { + return acquireLastIndexCommit(false); + } + + @Override + public SafeCommitInfo getSafeCommitInfo() { + return new SafeCommitInfo(localCheckpointTracker.getProcessedCheckpoint(), lastCommittedSegmentInfos.totalMaxDoc()); + } + + @Override + protected final void closeNoLock(String reason, CountDownLatch closedLatch) { + if (isClosed.compareAndSet(false, true)) { + assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() + : "Either the write lock must be held or the engine must be currently be failing itself"; + try { + 
IOUtils.close(readerManager, translog, store::decRef); + } catch (Exception e) { + logger.warn("failed to close engine", e); + } finally { + logger.debug("engine closed [{}]", reason); + closedLatch.countDown(); + } + } + } + + @Override + public void activateThrottling() {} + + @Override + public void deactivateThrottling() {} + + @Override + public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) throws IOException { + return 0; + } + + @Override + public int fillSeqNoGaps(long primaryTerm) throws IOException { + return 0; + } + + @Override + public Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException { + throw new UnsupportedOperationException("Read only replicas do not have an IndexWriter and cannot recover from a translog."); + } + + @Override + public void skipTranslogRecovery() { + // Do nothing. + } + + @Override + public void maybePruneDeletes() {} + + @Override + public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) {} + + @Override + public long getMaxSeqNoOfUpdatesOrDeletes() { + return localCheckpointTracker.getMaxSeqNo(); + } + + @Override + public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) {} + + public Translog getTranslog() { + return translog; + } + + @Override + public void onSettingsChanged(TimeValue translogRetentionAge, ByteSizeValue translogRetentionSize, long softDeletesRetentionOps) { + final TranslogDeletionPolicy translogDeletionPolicy = translog.getDeletionPolicy(); + translogDeletionPolicy.setRetentionAgeInMillis(translogRetentionAge.millis()); + translogDeletionPolicy.setRetentionSizeInBytes(translogRetentionSize.getBytes()); + } + + @Override + protected SegmentInfos getLastCommittedSegmentInfos() { + return lastCommittedSegmentInfos; + } + + @Override + protected SegmentInfos getLatestSegmentInfos() { + return readerManager.getSegmentInfos(); + } + + protected LocalCheckpointTracker 
getLocalCheckpointTracker() { + return localCheckpointTracker; + } + + private DirectoryReader getDirectoryReader() throws IOException { + // for segment replication: replicas should create the reader from store, we don't want an open IW on replicas. + return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(store.directory()), Lucene.SOFT_DELETES_FIELD); + } + + private Translog openTranslog( + EngineConfig engineConfig, + TranslogDeletionPolicy translogDeletionPolicy, + LongSupplier globalCheckpointSupplier, + LongConsumer persistedSequenceNumberConsumer + ) throws IOException { + final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); + final Map userData = lastCommittedSegmentInfos.getUserData(); + final String translogUUID = Objects.requireNonNull(userData.get(Translog.TRANSLOG_UUID_KEY)); + // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! + return new Translog( + translogConfig, + translogUUID, + translogDeletionPolicy, + globalCheckpointSupplier, + engineConfig.getPrimaryTermSupplier(), + persistedSequenceNumberConsumer + ); + } + + private TranslogDeletionPolicy getTranslogDeletionPolicy(EngineConfig engineConfig) { + TranslogDeletionPolicy customTranslogDeletionPolicy = null; + if (engineConfig.getCustomTranslogDeletionPolicyFactory() != null) { + customTranslogDeletionPolicy = engineConfig.getCustomTranslogDeletionPolicyFactory() + .create(engineConfig.getIndexSettings(), engineConfig.retentionLeasesSupplier()); + } + return Objects.requireNonNullElseGet( + customTranslogDeletionPolicy, + () -> new DefaultTranslogDeletionPolicy( + engineConfig.getIndexSettings().getTranslogRetentionSize().getBytes(), + engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis(), + engineConfig.getIndexSettings().getTranslogRetentionTotalFiles() + ) + ); + } + +} diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java 
b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java new file mode 100644 index 0000000000000..45fe3086ac3f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +/** + * Engine Factory implementation used with Segment Replication that wires up replica shards with an ${@link NRTReplicationEngine} + * and primary with an ${@link InternalEngine} + * + * @opensearch.internal + */ +public class NRTReplicationEngineFactory implements EngineFactory { + @Override + public Engine newReadWriteEngine(EngineConfig config) { + if (config.isReadOnlyReplica()) { + return new NRTReplicationEngine(config); + } + return new InternalEngine(config); + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java new file mode 100644 index 0000000000000..16e615672a26f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
/**
 * This is an extension of {@link OpenSearchReaderManager} for use with {@link NRTReplicationEngine}.
 * The manager holds a reference to the latest {@link SegmentInfos} object that is used to refresh a reader.
 *
 * @opensearch.internal
 */
public class NRTReplicationReaderManager extends OpenSearchReaderManager {

    private final static Logger logger = LogManager.getLogger(NRTReplicationReaderManager.class);
    // Latest infos received from the primary; each refresh rebuilds the reader over these.
    private volatile SegmentInfos currentInfos;

    /**
     * Creates and returns a new NRTReplicationReaderManager from the given
     * already-opened {@link OpenSearchDirectoryReader}, stealing
     * the incoming reference.
     *
     * @param reader the initial reader to manage and use for future reopens
     */
    NRTReplicationReaderManager(OpenSearchDirectoryReader reader) {
        super(reader);
        currentInfos = unwrapStandardReader(reader).getSegmentInfos();
    }

    /**
     * Reopens a reader over {@link #currentInfos}. Always returns a new reader (never null),
     * so every maybeRefresh() produces a fresh reference even if the infos are unchanged.
     */
    @Override
    protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException {
        Objects.requireNonNull(referenceToRefresh);
        // Collect the currently-open leaf readers so segments shared with the old reader are reused.
        final List<LeafReader> subs = new ArrayList<>();
        final StandardDirectoryReader standardDirectoryReader = unwrapStandardReader(referenceToRefresh);
        for (LeafReaderContext ctx : standardDirectoryReader.leaves()) {
            subs.add(ctx.reader());
        }
        DirectoryReader innerReader = StandardDirectoryReader.open(referenceToRefresh.directory(), currentInfos, subs, null);
        // Re-apply soft-deletes filtering on the rebuilt reader.
        final DirectoryReader softDeletesDirectoryReaderWrapper = new SoftDeletesDirectoryReaderWrapper(
            innerReader,
            Lucene.SOFT_DELETES_FIELD
        );
        logger.trace(
            () -> new ParameterizedMessage("updated to SegmentInfosVersion=" + currentInfos.getVersion() + " reader=" + innerReader)
        );
        return OpenSearchDirectoryReader.wrap(softDeletesDirectoryReaderWrapper, referenceToRefresh.shardId());
    }

    /**
     * Update this reader's segments and refresh.
     *
     * @param infos {@link SegmentInfos} infos
     * @throws IOException - When Refresh fails with an IOException.
     */
    public synchronized void updateSegments(SegmentInfos infos) throws IOException {
        currentInfos = infos;
        maybeRefresh();
    }

    public SegmentInfos getSegmentInfos() {
        return currentInfos;
    }

    // Peels the soft-deletes wrapper (when present) to reach the StandardDirectoryReader
    // that exposes getSegmentInfos().
    private StandardDirectoryReader unwrapStandardReader(OpenSearchDirectoryReader reader) {
        final DirectoryReader delegate = reader.getDelegate();
        if (delegate instanceof SoftDeletesDirectoryReaderWrapper) {
            return (StandardDirectoryReader) ((SoftDeletesDirectoryReaderWrapper) delegate).getDelegate();
        }
        return (StandardDirectoryReader) delegate;
    }
}
currentProcessedCheckpoint) { return; } processedCheckpoint.compareAndSet(currentProcessedCheckpoint, seqNo); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 60a3305370c2a..995a92e94aeb3 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3160,7 +3160,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { globalCheckpointSupplier, replicationTracker::getRetentionLeases, () -> getOperationPrimaryTerm(), - tombstoneDocSupplier() + tombstoneDocSupplier(), + indexSettings.isSegRepEnabled() && shardRouting.primary() == false ); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 5ce10069aaa89..79fd2893fb78c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -109,6 +109,7 @@ import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NoOpEngine; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.flush.FlushStats; @@ -764,6 +765,9 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) .collect(Collectors.toList()); if (engineFactories.isEmpty()) { + if (idxSettings.isSegRepEnabled()) { + return new NRTReplicationEngineFactory(); + } return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); diff --git 
a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java index 8030619500278..7ddd92ea7b36e 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java @@ -65,7 +65,8 @@ public void testCreateEngineConfigFromFactory() { null, () -> new RetentionLeases(0, 0, Collections.emptyList()), null, - null + null, + false ); assertNotNull(config.getCodec()); @@ -141,7 +142,8 @@ public void testCreateCodecServiceFromFactory() { null, () -> new RetentionLeases(0, 0, Collections.emptyList()), null, - null + null, + false ); assertNotNull(config.getCodec()); } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java new file mode 100644 index 0000000000000..6aa00bb9312dd --- /dev/null +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
/**
 * Unit tests for {@link NRTReplicationEngine}: construction from the last commit point,
 * translog write-through on replicas, reader refresh via updateSegments, and translog trimming.
 */
public class NRTReplicationEngineTests extends EngineTestCase {

    // A freshly built replica engine must expose the last committed infos as its latest infos.
    public void testCreateEngine() throws IOException {
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
        try (
            final Store nrtEngineStore = createStore();
            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
        ) {
            final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos();
            final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos();
            assertEquals(latestSegmentInfos.version, lastCommittedSegmentInfos.version);
            assertEquals(latestSegmentInfos.getGeneration(), lastCommittedSegmentInfos.getGeneration());
            assertEquals(latestSegmentInfos.getUserData(), lastCommittedSegmentInfos.getUserData());
            assertEquals(latestSegmentInfos.files(true), lastCommittedSegmentInfos.files(true));

            assertTrue(nrtEngine.segments(true).isEmpty());

            // acquireLastIndexCommit must reflect the same commit data/files.
            try (final GatedCloseable<IndexCommit> indexCommitGatedCloseable = nrtEngine.acquireLastIndexCommit(false)) {
                final IndexCommit indexCommit = indexCommitGatedCloseable.get();
                assertEquals(indexCommit.getUserData(), lastCommittedSegmentInfos.getUserData());
                assertTrue(indexCommit.getFileNames().containsAll(lastCommittedSegmentInfos.files(true)));
            }
        }
    }

    // Ops applied to the replica engine land in its translog and can rebuild an InternalEngine.
    public void testEngineWritesOpsToTranslog() throws Exception {
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);

        try (
            final Store nrtEngineStore = createStore();
            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
        ) {
            List<Engine.Operation> operations = generateHistoryOnReplica(
                between(1, 500),
                randomBoolean(),
                randomBoolean(),
                randomBoolean()
            );
            for (Engine.Operation op : operations) {
                applyOperation(engine, op);
                applyOperation(nrtEngine, op);
            }

            assertEquals(nrtEngine.getTranslogLastWriteLocation(), engine.getTranslogLastWriteLocation());
            assertEquals(nrtEngine.getLastSyncedGlobalCheckpoint(), engine.getLastSyncedGlobalCheckpoint());

            // we don't index into nrtEngine, so get the doc ids from the regular engine.
            final List<DocIdSeqNoAndSource> docs = getDocIds(engine, true);

            // recover a new engine from the nrtEngine's xlog.
            nrtEngine.syncTranslog();
            try (InternalEngine engine = new InternalEngine(nrtEngine.config())) {
                engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
                assertEquals(getDocIds(engine, true), docs);
            }
            assertEngineCleanedUp(nrtEngine, nrtEngine.getTranslog());
        }
    }

    // updateSegments must bring the replica's reader and checkpoints in line with the primary,
    // both for uncommitted (refresh-only) and committed (flush) SegmentInfos.
    public void testUpdateSegments() throws Exception {
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);

        try (
            final Store nrtEngineStore = createStore();
            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
        ) {
            // add docs to the primary engine.
            List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean())
                .stream()
                .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX))
                .collect(Collectors.toList());
            for (Engine.Operation op : operations) {
                applyOperation(engine, op);
                applyOperation(nrtEngine, op);
            }

            engine.refresh("test");

            nrtEngine.updateSegments(engine.getLatestSegmentInfos(), engine.getProcessedLocalCheckpoint());
            assertMatchingSegmentsAndCheckpoints(nrtEngine);

            // assert a doc from the operations exists.
            final ParsedDocument parsedDoc = createParsedDoc(operations.stream().findFirst().get().id(), null);
            try (Engine.GetResult getResult = engine.get(newGet(true, parsedDoc), engine::acquireSearcher)) {
                assertThat(getResult.exists(), equalTo(true));
                assertThat(getResult.docIdAndVersion(), notNullValue());
            }

            try (Engine.GetResult getResult = nrtEngine.get(newGet(true, parsedDoc), nrtEngine::acquireSearcher)) {
                assertThat(getResult.exists(), equalTo(true));
                assertThat(getResult.docIdAndVersion(), notNullValue());
            }

            // Flush the primary and update the NRTEngine with the latest committed infos.
            engine.flush();
            nrtEngine.syncTranslog(); // to advance persisted checkpoint

            Set<Long> seqNos = operations.stream().map(Engine.Operation::seqNo).collect(Collectors.toSet());

            try (Translog.Snapshot snapshot = nrtEngine.getTranslog().newSnapshot()) {
                assertThat(snapshot.totalOperations(), equalTo(operations.size()));
                assertThat(
                    TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()),
                    equalTo(seqNos)
                );
            }

            nrtEngine.updateSegments(engine.getLastCommittedSegmentInfos(), engine.getProcessedLocalCheckpoint());
            assertMatchingSegmentsAndCheckpoints(nrtEngine);

            // A committed infos (higher generation) must roll the replica's translog generation too.
            assertEquals(
                nrtEngine.getTranslog().getGeneration().translogFileGeneration,
                engine.getTranslog().getGeneration().translogFileGeneration
            );

            try (Translog.Snapshot snapshot = nrtEngine.getTranslog().newSnapshot()) {
                assertThat(snapshot.totalOperations(), equalTo(operations.size()));
                assertThat(
                    TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()),
                    equalTo(seqNos)
                );
            }

            // Ensure the same hit count between engines.
            int expectedDocCount;
            try (final Engine.Searcher test = engine.acquireSearcher("test")) {
                expectedDocCount = test.count(Queries.newMatchAllQuery());
                assertSearcherHits(nrtEngine, expectedDocCount);
            }
            assertEngineCleanedUp(nrtEngine, nrtEngine.getTranslog());
        }
    }

    // Trimming below the current term/above NO_OPS_PERFORMED empties the visible translog ops.
    public void testTrimTranslogOps() throws Exception {
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);

        try (
            final Store nrtEngineStore = createStore();
            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
        ) {
            List<Engine.Operation> operations = generateHistoryOnReplica(
                between(1, 100),
                randomBoolean(),
                randomBoolean(),
                randomBoolean()
            );
            applyOperations(nrtEngine, operations);
            Set<Long> seqNos = operations.stream().map(Engine.Operation::seqNo).collect(Collectors.toSet());
            try (Translog.Snapshot snapshot = nrtEngine.getTranslog().newSnapshot()) {
                assertThat(snapshot.totalOperations(), equalTo(operations.size()));
                assertThat(
                    TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()),
                    equalTo(seqNos)
                );
            }
            nrtEngine.rollTranslogGeneration();
            nrtEngine.trimOperationsFromTranslog(primaryTerm.get(), NO_OPS_PERFORMED);
            try (Translog.Snapshot snapshot = getTranslog(engine).newSnapshot()) {
                assertThat(snapshot.totalOperations(), equalTo(0));
                assertNull(snapshot.next());
            }
        }
    }

    // Asserts replica checkpoints, max seq no, segment files/user data, and segment list
    // all match the primary (test base class) engine.
    private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine) throws IOException {
        assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint());
        assertEquals(engine.getProcessedLocalCheckpoint(), nrtEngine.getProcessedLocalCheckpoint());
        assertEquals(engine.getLocalCheckpointTracker().getMaxSeqNo(), nrtEngine.getLocalCheckpointTracker().getMaxSeqNo());
        assertEquals(engine.getLatestSegmentInfos().files(true), nrtEngine.getLatestSegmentInfos().files(true));
        assertEquals(engine.getLatestSegmentInfos().getUserData(), nrtEngine.getLatestSegmentInfos().getUserData());
        assertEquals(engine.getLatestSegmentInfos().getVersion(), nrtEngine.getLatestSegmentInfos().getVersion());
        assertEquals(engine.segments(true), nrtEngine.segments(true));
    }

    // Asserts the given engine's searcher reports exactly {@code hits} total hits.
    private void assertSearcherHits(Engine engine, int hits) {
        try (final Engine.Searcher test = engine.acquireSearcher("test")) {
            MatcherAssert.assertThat(test, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(hits));
        }
    }

    // Builds a replica engine over a clean store with an empty index + empty translog,
    // mimicking a freshly-recovered replica shard.
    private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, Store store) throws IOException {
        Lucene.cleanLuceneIndex(store.directory());
        final Path translogDir = createTempDir();
        final EngineConfig replicaConfig = config(
            defaultSettings,
            store,
            translogDir,
            NoMergePolicy.INSTANCE,
            null,
            null,
            globalCheckpoint::get
        );
        if (Lucene.indexExists(store.directory()) == false) {
            store.createEmpty(replicaConfig.getIndexSettings().getIndexVersionCreated().luceneVersion);
            final String translogUuid = Translog.createEmptyTranslog(
                replicaConfig.getTranslogConfig().getTranslogPath(),
                SequenceNumbers.NO_OPS_PERFORMED,
                shardId,
                primaryTerm.get()
            );
            store.associateIndexWithNewTranslog(translogUuid);
        }
        return new NRTReplicationEngine(replicaConfig);
    }
}
// base case with no persistent checkpoint update long seqNo1; assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); seqNo1 = tracker.generateSeqNo(); assertThat(seqNo1, equalTo(0L)); tracker.fastForwardProcessedSeqNo(seqNo1); - assertThat(tracker.getProcessedCheckpoint(), equalTo(-1L)); - } + assertThat(tracker.getProcessedCheckpoint(), equalTo(seqNo1)); - public void testFastForwardProcessedPersistentUpdate() { - // base case with persistent checkpoint update - long seqNo1; - assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - seqNo1 = tracker.generateSeqNo(); - assertThat(seqNo1, equalTo(0L)); - - tracker.markSeqNoAsPersisted(seqNo1); - assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + // idempotent case tracker.fastForwardProcessedSeqNo(seqNo1); assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); assertThat(tracker.hasProcessed(0L), equalTo(true)); - assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); - // idempotent case - tracker.fastForwardProcessedSeqNo(seqNo1); + tracker.fastForwardProcessedSeqNo(-1); assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); assertThat(tracker.hasProcessed(0L), equalTo(true)); - assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); - - } - - public void testFastForwardProcessedPersistentUpdate2() { - long seqNo1, seqNo2; - assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - seqNo1 = tracker.generateSeqNo(); - seqNo2 = tracker.generateSeqNo(); - assertThat(seqNo1, equalTo(0L)); - assertThat(seqNo2, equalTo(1L)); - tracker.markSeqNoAsPersisted(seqNo1); - tracker.markSeqNoAsPersisted(seqNo2); - assertThat(tracker.getProcessedCheckpoint(), equalTo(-1L)); - assertThat(tracker.getPersistedCheckpoint(), equalTo(1L)); - - tracker.fastForwardProcessedSeqNo(seqNo2); - assertThat(tracker.getProcessedCheckpoint(), equalTo(1L)); - assertThat(tracker.hasProcessed(seqNo1), equalTo(true)); - 
assertThat(tracker.hasProcessed(seqNo2), equalTo(true)); - - tracker.fastForwardProcessedSeqNo(seqNo1); - assertThat(tracker.getProcessedCheckpoint(), equalTo(1L)); - assertThat(tracker.hasProcessed(between(0, 1)), equalTo(true)); - assertThat(tracker.hasProcessed(atLeast(2)), equalTo(false)); - assertThat(tracker.getMaxSeqNo(), equalTo(1L)); } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index bf9671964a210..49d0c089f072b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -101,6 +101,8 @@ import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.FieldDataStats; import org.opensearch.index.fielddata.IndexFieldData; @@ -136,6 +138,7 @@ import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; @@ -4167,14 +4170,14 @@ public void testSnapshotWhileResettingEngine() throws Exception { @Override public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException { - InternalEngine internalEngine = super.recoverFromTranslog(translogRecoveryRunner, recoverUpToSeqNo); + InternalEngine engine = 
super.recoverFromTranslog(translogRecoveryRunner, recoverUpToSeqNo); readyToSnapshotLatch.countDown(); try { snapshotDoneLatch.await(); } catch (InterruptedException e) { throw new AssertionError(e); } - return internalEngine; + return engine; } }); @@ -4447,6 +4450,27 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(SeqNoStats seqNoStats) { closeShards(readonlyShard); } + public void testReadOnlyReplicaEngineConfig() throws IOException { + Settings primarySettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + final IndexShard primaryShard = newStartedShard(false, primarySettings, new NRTReplicationEngineFactory()); + assertFalse(primaryShard.getEngine().config().isReadOnlyReplica()); + assertEquals(primaryShard.getEngine().getClass(), InternalEngine.class); + + Settings replicaSettings = Settings.builder() + .put(primarySettings) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final IndexShard replicaShard = newStartedShard(false, replicaSettings, new NRTReplicationEngineFactory()); + assertTrue(replicaShard.getEngine().config().isReadOnlyReplica()); + assertEquals(replicaShard.getEngine().getClass(), NRTReplicationEngine.class); + + closeShards(primaryShard, replicaShard); + } + public void testCloseShardWhileEngineIsWarming() throws Exception { CountDownLatch warmerStarted = new CountDownLatch(1); CountDownLatch warmerBlocking = new CountDownLatch(1); diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 2bce5a7c81794..66c697d83510b 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -328,24 +328,26 @@ public void tearDown() 
throws Exception { super.tearDown(); try { if (engine != null && engine.isClosed.get() == false) { - engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine); - assertNoInFlightDocuments(engine); - assertMaxSeqNoInCommitUserData(engine); - assertAtMostOneLuceneDocumentPerSequenceNumber(engine); + assertEngineCleanedUp(engine, engine.getTranslog()); } if (replicaEngine != null && replicaEngine.isClosed.get() == false) { - replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine); - assertNoInFlightDocuments(replicaEngine); - assertMaxSeqNoInCommitUserData(replicaEngine); - assertAtMostOneLuceneDocumentPerSequenceNumber(replicaEngine); + assertEngineCleanedUp(replicaEngine, replicaEngine.getTranslog()); } } finally { IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } } + protected void assertEngineCleanedUp(Engine engine, Translog translog) throws Exception { + if (engine.isClosed.get() == false) { + translog.getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine); + assertNoInFlightDocuments(engine); + assertMaxSeqNoInCommitUserData(engine); + assertAtMostOneLuceneDocumentPerSequenceNumber(engine); + } + } + protected static ParseContext.Document testDocumentWithTextField() { return testDocumentWithTextField("test"); } From 1d9e369417d73352c97ed8a776b62134f9699452 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 24 May 2022 23:00:18 -0700 Subject: [PATCH 27/75] Rename master to cluster_manager in the XContent Parser of ClusterHealthResponse (#3432) Signed-off-by: Tianli Feng --- .../cluster/health/ClusterHealthResponse.java | 5 ++- .../cluster/health/ClusterStateHealth.java | 24 +++++----- .../health/ClusterHealthResponsesTests.java | 44 +++++++++++++++++-- 3 files changed, 57 insertions(+), 16 deletions(-) 
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index e4ec75fb7045a..a67ef721879ce 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -71,6 +71,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo private static final String TIMED_OUT = "timed_out"; private static final String NUMBER_OF_NODES = "number_of_nodes"; private static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes"; + @Deprecated private static final String DISCOVERED_MASTER = "discovered_master"; private static final String DISCOVERED_CLUSTER_MANAGER = "discovered_cluster_manager"; private static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks"; @@ -95,6 +96,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo // ClusterStateHealth fields int numberOfNodes = (int) parsedObjects[i++]; int numberOfDataNodes = (int) parsedObjects[i++]; + boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]); boolean hasDiscoveredClusterManager = Boolean.TRUE.equals(parsedObjects[i++]); int activeShards = (int) parsedObjects[i++]; int relocatingShards = (int) parsedObjects[i++]; @@ -123,7 +125,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo unassignedShards, numberOfNodes, numberOfDataNodes, - hasDiscoveredClusterManager, + hasDiscoveredClusterManager || hasDiscoveredMaster, activeShardsPercent, status, indices @@ -157,6 +159,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES)); PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES)); 
PARSER.declareBoolean(optionalConstructorArg(), new ParseField(DISCOVERED_MASTER)); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField(DISCOVERED_CLUSTER_MANAGER)); PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); PARSER.declareInt(constructorArg(), new ParseField(RELOCATING_SHARDS)); PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS)); diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java index 4c8be0f2d73f0..f1fe680f80769 100644 --- a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java +++ b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java @@ -58,7 +58,7 @@ public final class ClusterStateHealth implements Iterable, W private final int numberOfNodes; private final int numberOfDataNodes; - private final boolean hasDiscoveredMaster; + private final boolean hasDiscoveredClusterManager; private final int activeShards; private final int relocatingShards; private final int activePrimaryShards; @@ -86,7 +86,7 @@ public ClusterStateHealth(final ClusterState clusterState) { public ClusterStateHealth(final ClusterState clusterState, final String[] concreteIndices) { numberOfNodes = clusterState.nodes().getSize(); numberOfDataNodes = clusterState.nodes().getDataNodes().size(); - hasDiscoveredMaster = clusterState.nodes().getMasterNodeId() != null; + hasDiscoveredClusterManager = clusterState.nodes().getMasterNodeId() != null; indices = new HashMap<>(); for (String index : concreteIndices) { IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index); @@ -155,9 +155,9 @@ public ClusterStateHealth(final StreamInput in) throws IOException { numberOfNodes = in.readVInt(); numberOfDataNodes = in.readVInt(); if (in.getVersion().onOrAfter(Version.V_1_0_0)) { - hasDiscoveredMaster = in.readBoolean(); + hasDiscoveredClusterManager = in.readBoolean(); } 
else { - hasDiscoveredMaster = true; + hasDiscoveredClusterManager = true; } status = ClusterHealthStatus.fromValue(in.readByte()); int size = in.readVInt(); @@ -180,7 +180,7 @@ public ClusterStateHealth( int unassignedShards, int numberOfNodes, int numberOfDataNodes, - boolean hasDiscoveredMaster, + boolean hasDiscoveredClusterManager, double activeShardsPercent, ClusterHealthStatus status, Map indices @@ -192,7 +192,7 @@ public ClusterStateHealth( this.unassignedShards = unassignedShards; this.numberOfNodes = numberOfNodes; this.numberOfDataNodes = numberOfDataNodes; - this.hasDiscoveredMaster = hasDiscoveredMaster; + this.hasDiscoveredClusterManager = hasDiscoveredClusterManager; this.activeShardsPercent = activeShardsPercent; this.status = status; this.indices = indices; @@ -239,7 +239,7 @@ public double getActiveShardsPercent() { } public boolean hasDiscoveredMaster() { - return hasDiscoveredMaster; + return hasDiscoveredClusterManager; } @Override @@ -257,7 +257,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeVInt(numberOfNodes); out.writeVInt(numberOfDataNodes); if (out.getVersion().onOrAfter(Version.V_1_0_0)) { - out.writeBoolean(hasDiscoveredMaster); + out.writeBoolean(hasDiscoveredClusterManager); } out.writeByte(status.value()); out.writeVInt(indices.size()); @@ -274,8 +274,8 @@ public String toString() { + numberOfNodes + ", numberOfDataNodes=" + numberOfDataNodes - + ", hasDiscoveredMaster=" - + hasDiscoveredMaster + + ", hasDiscoveredClusterManager=" + + hasDiscoveredClusterManager + ", activeShards=" + activeShards + ", relocatingShards=" @@ -302,7 +302,7 @@ public boolean equals(Object o) { ClusterStateHealth that = (ClusterStateHealth) o; return numberOfNodes == that.numberOfNodes && numberOfDataNodes == that.numberOfDataNodes - && hasDiscoveredMaster == that.hasDiscoveredMaster + && hasDiscoveredClusterManager == that.hasDiscoveredClusterManager && activeShards == that.activeShards && relocatingShards == 
that.relocatingShards && activePrimaryShards == that.activePrimaryShards @@ -318,7 +318,7 @@ public int hashCode() { return Objects.hash( numberOfNodes, numberOfDataNodes, - hasDiscoveredMaster, + hasDiscoveredClusterManager, activeShards, relocatingShards, activePrimaryShards, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 3db20fd3404a7..844dfe9c6c00f 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -157,13 +157,13 @@ ClusterHealthResponse maybeSerialize(ClusterHealthResponse clusterHealth) throws return clusterHealth; } - public void testParseFromXContentWithDiscoveredMasterField() throws IOException { + public void testParseFromXContentWithDiscoveredClusterManagerField() throws IOException { try ( XContentParser parser = JsonXContent.jsonXContent.createParser( NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, "{\"cluster_name\":\"535799904437:7-1-3-node\",\"status\":\"green\"," - + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":true," + + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_cluster_manager\":true," + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," @@ -179,7 +179,7 @@ public void testParseFromXContentWithDiscoveredMasterField() throws IOException } } - public void testParseFromXContentWithoutDiscoveredMasterField() throws IOException { + public void 
testParseFromXContentWithoutDiscoveredClusterManagerField() throws IOException { try ( XContentParser parser = JsonXContent.jsonXContent.createParser( NamedXContentRegistry.EMPTY, @@ -200,6 +200,44 @@ public void testParseFromXContentWithoutDiscoveredMasterField() throws IOExcepti } } + /** + * Validate the ClusterHealthResponse can be parsed from JsonXContent that contains the deprecated "discovered_master" field. + * As of 2.0, to support inclusive language, "discovered_master" field will be replaced by "discovered_cluster_manager". + */ + public void testParseFromXContentWithDeprecatedDiscoveredMasterField() throws IOException { + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{\"cluster_name\":\"opensearch-cluster\",\"status\":\"green\",\"timed_out\":false," + + "\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_cluster_manager\":true,\"discovered_master\":true," + + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," + + "\"active_shards_percent_as_number\":100}" + ) + ) { + ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); + assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + } + + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{\"cluster_name\":\"opensearch-cluster\",\"status\":\"green\"," + + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":true," + + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + + 
"\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," + + "\"active_shards_percent_as_number\":100}" + ) + ) { + ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); + assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + } + } + @Override protected ClusterHealthResponse doParseInstance(XContentParser parser) { return ClusterHealthResponse.fromXContent(parser); From 6251f27f8b56c43da642e4e2d565cfc08180e31f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 May 2022 10:20:52 -0700 Subject: [PATCH 28/75] Bump hadoop-minicluster in /test/fixtures/hdfs-fixture (#3359) Bumps hadoop-minicluster from 3.3.2 to 3.3.3. --- updated-dependencies: - dependency-name: org.apache.hadoop:hadoop-minicluster dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test/fixtures/hdfs-fixture/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 2ff444c03b123..0795cecaa36cc 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' dependencies { - api "org.apache.hadoop:hadoop-minicluster:3.3.2" + api "org.apache.hadoop:hadoop-minicluster:3.3.3" api "org.apache.commons:commons-compress:1.21" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" From d8a64e7ec69a8756fba403afb06771d498600446 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 May 2022 10:21:20 -0700 Subject: [PATCH 29/75] Bump avro from 1.10.2 to 1.11.0 in /plugins/repository-hdfs (#3358) * Bump avro from 1.10.2 to 1.11.0 in /plugins/repository-hdfs Bumps avro from 1.10.2 to 1.11.0. --- updated-dependencies: - dependency-name: org.apache.avro:avro dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 | 1 - plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index cf51daec2b740..41c38b0b4e558 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -63,7 +63,7 @@ dependencies { api "org.apache.hadoop:hadoop-hdfs:${versions.hadoop3}" api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.10.2' + api 'org.apache.avro:avro:1.11.0' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' diff --git a/plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 deleted file mode 100644 index eae1c5116ff0f..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a65aaa91c1aeceb3dd4859dbb9765d1c2063f5a2 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 new file mode 100644 index 0000000000000..9a0601879a1fc --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 @@ -0,0 +1 @@ +2b0c58e5b450d4f4931456952ad9520cae9c896c \ No newline at end of file From 12bf60fbd7f44258d66ce9a4420c8b554ef4bfb1 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 25 May 
2022 16:27:17 -0700 Subject: [PATCH 30/75] Fix testSetAdditionalRolesCanAddDeprecatedMasterRole() by removing the initial assertion (#3441) Signed-off-by: Tianli Feng --- .../java/org/opensearch/cluster/node/DiscoveryNodeTests.java | 3 --- 1 file changed, 3 deletions(-) diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 3a058a282be9c..1b7f698ae1f5c 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -179,9 +179,6 @@ public void testDiscoveryNodeIsRemoteClusterClientUnset() { // as a workaround for making the new CLUSTER_MANAGER_ROLE has got the same abbreviation 'm'. // The test validate this behavior. public void testSetAdditionalRolesCanAddDeprecatedMasterRole() { - // Validate MASTER_ROLE is not in DiscoveryNodeRole.BUILT_IN_ROLES - assertFalse(DiscoveryNode.getPossibleRoleNames().contains(DiscoveryNodeRole.MASTER_ROLE.roleName())); - DiscoveryNode.setAdditionalRoles(Collections.emptySet()); assertTrue(DiscoveryNode.getPossibleRoleNames().contains(DiscoveryNodeRole.MASTER_ROLE.roleName())); } From 9c1645771869930339222e60aa992cbdb538a2ce Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 25 May 2022 16:44:23 -0700 Subject: [PATCH 31/75] Replace internal usages of 'master' term in 'server/src/test' directory (#2520) * Replace the non-inclusive terminology "master" with "cluster manager" in code comments, internal variable/method/class names, in `server/src/test` directory. * Backwards compatibility is not impacted. 
* Add a new unit test `testDeprecatedMasterNodeFilter()` to validate using `master:true` or `master:false` can filter the node in [Cluster Stats](https://opensearch.org/docs/latest/opensearch/rest-api/cluster-stats/) API, after the `master` role is deprecated in PR https://github.com/opensearch-project/OpenSearch/pull/2424 Signed-off-by: Tianli Feng --- .../discovery/ClusterDisruptionIT.java | 2 +- .../discovery/ClusterManagerDisruptionIT.java | 10 +- .../discovery/DiscoveryDisruptionIT.java | 8 +- ...tAddVotingConfigExclusionsActionTests.java | 8 +- .../health/ClusterHealthResponsesTests.java | 4 +- .../node/tasks/CancellableTasksTests.java | 4 +- .../node/tasks/TaskManagerTestCase.java | 4 +- .../reroute/ClusterRerouteRequestTests.java | 2 +- .../TransportMultiSearchActionTests.java | 2 +- .../TransportBroadcastByNodeActionTests.java | 34 +-- .../TransportMasterNodeActionTests.java | 55 +++-- .../TransportMasterNodeActionUtils.java | 8 +- .../cluster/ClusterChangedEventTests.java | 37 ++-- .../opensearch/cluster/ClusterStateTests.java | 41 ++-- ...rnalClusterInfoServiceSchedulingTests.java | 58 ++--- .../action/shard/ShardStateActionTests.java | 61 +++--- .../ClusterBootstrapServiceTests.java | 6 +- .../ClusterFormationFailureHelperTests.java | 28 ++- .../coordination/CoordinationStateTests.java | 6 +- .../coordination/CoordinatorTests.java | 42 ++-- .../coordination/FollowersCheckerTests.java | 2 +- .../coordination/JoinTaskExecutorTests.java | 15 +- .../NoMasterBlockServiceTests.java | 18 +- .../cluster/coordination/NodeJoinTests.java | 111 +++++----- .../coordination/PublicationTests.java | 2 +- .../coordination/ReconfiguratorTests.java | 14 +- .../health/ClusterHealthAllocationTests.java | 7 +- .../health/ClusterStateHealthTests.java | 6 +- .../metadata/AutoExpandReplicasTests.java | 4 +- .../cluster/node/DiscoveryNodesTests.java | 58 +++-- .../routing/BatchedRerouteServiceTests.java | 6 +- .../routing/OperationRoutingTests.java | 6 +- 
.../allocation/FailedNodeRoutingTests.java | 2 +- .../allocation/InSyncAllocationIdTests.java | 6 +- .../decider/DiskThresholdDeciderTests.java | 10 +- ...storeInProgressAllocationDeciderTests.java | 6 +- .../service/ClusterApplierServiceTests.java | 18 +- .../cluster/service/MasterServiceTests.java | 105 ++++----- .../discovery/AbstractDisruptionTestCase.java | 34 +-- .../discovery/DiscoveryModuleTests.java | 6 +- ...shakingTransportAddressConnectorTests.java | 4 +- .../opensearch/discovery/PeerFinderTests.java | 52 ++--- .../opensearch/env/NodeEnvironmentTests.java | 26 +-- .../env/NodeRepurposeCommandTests.java | 75 ++++--- .../gateway/AsyncShardFetchTests.java | 2 +- .../gateway/GatewayServiceTests.java | 4 +- .../IncrementalClusterStateWriterTests.java | 36 +-- .../index/seqno/ReplicationTrackerTests.java | 12 +- .../indices/IndicesServiceTests.java | 2 +- .../indices/cluster/ClusterStateChanges.java | 7 +- ...ClusterStateServiceRandomUpdatesTests.java | 14 +- .../PersistentTasksClusterServiceTests.java | 14 +- .../cluster/RestNodesInfoActionTests.java | 4 +- .../InternalSnapshotsInfoServiceTests.java | 10 +- .../snapshots/SnapshotResiliencyTests.java | 207 +++++++++--------- .../snapshots/SnapshotsServiceTests.java | 8 +- .../MockEventuallyConsistentRepository.java | 2 +- .../transport/RemoteClusterServiceTests.java | 6 +- .../SniffConnectionStrategyTests.java | 20 +- 59 files changed, 736 insertions(+), 625 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java index 7a8b6b447a68d..915aef5cb1d25 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterDisruptionIT.java @@ -386,7 +386,7 @@ public void onFailure(Exception e) { ); if (isolatedNode.equals(nonClusterManagerNode)) { - 
assertNoMaster(nonClusterManagerNode); + assertNoClusterManager(nonClusterManagerNode); } else { ensureStableCluster(2, nonClusterManagerNode); } diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java index 4515e0828be2e..61f50ace17b62 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/ClusterManagerDisruptionIT.java @@ -91,7 +91,7 @@ public void testClusterManagerNodeGCs() throws Exception { logger.info("waiting for nodes to de-elect cluster-manager [{}]", oldClusterManagerNode); for (String node : oldNonClusterManagerNodesSet) { - assertDifferentMaster(node, oldClusterManagerNode); + assertDifferentClusterManager(node, oldClusterManagerNode); } logger.info("waiting for nodes to elect a new cluster-manager"); @@ -107,7 +107,7 @@ public void testClusterManagerNodeGCs() throws Exception { // make sure all nodes agree on cluster-manager String newClusterManager = internalCluster().getMasterName(); assertThat(newClusterManager, not(equalTo(oldClusterManagerNode))); - assertMaster(newClusterManager, nodes); + assertClusterManager(newClusterManager, nodes); } /** @@ -137,7 +137,7 @@ public void testIsolateClusterManagerAndVerifyClusterStateConsensus() throws Exc ensureStableCluster(2, nonIsolatedNode); // make sure isolated need picks up on things. - assertNoMaster(isolatedNode, TimeValue.timeValueSeconds(40)); + assertNoClusterManager(isolatedNode, TimeValue.timeValueSeconds(40)); // restore isolation networkDisruption.stopDisrupting(); @@ -227,7 +227,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. 
However // It may a take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_WRITES, TimeValue.timeValueSeconds(30)); logger.info("wait until elected cluster-manager has been removed and a new 2 node cluster was from (via [{}])", isolatedNode); ensureStableCluster(2, nonIsolatedNode); @@ -273,7 +273,7 @@ public void testVerifyApiBlocksDuringPartition() throws Exception { // continuously ping until network failures have been resolved. However // It may a take a bit before the node detects it has been cut off from the elected cluster-manager logger.info("waiting for isolated node [{}] to have no cluster-manager", isolatedNode); - assertNoMaster(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); + assertNoClusterManager(isolatedNode, NoMasterBlockService.NO_MASTER_BLOCK_ALL, TimeValue.timeValueSeconds(30)); // make sure we have stable cluster & cross partition recoveries are canceled by the removal of the missing node // the unresponsive partition causes recoveries to only time out after 15m (default) and these will cause diff --git a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java index a4667d62a878c..c6e4d95449d42 100644 --- a/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/discovery/DiscoveryDisruptionIT.java @@ -81,7 +81,7 @@ public void testClusterJoinDespiteOfPublishingIssues() throws Exception { ); nonClusterManagerTransportService.addFailToSendNoConnectRule(clusterManagerTranspotService); - 
assertNoMaster(nonClusterManagerNode); + assertNoClusterManager(nonClusterManagerNode); logger.info( "blocking cluster state publishing from cluster-manager [{}] to non cluster-manager [{}]", @@ -166,7 +166,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception { logger.info("--> forcing a complete election to make sure \"preferred\" cluster-manager is elected"); isolateAllNodes.startDisrupting(); for (String node : nodes) { - assertNoMaster(node); + assertNoClusterManager(node); } internalCluster().clearDisruptionScheme(); ensureStableCluster(3); @@ -194,7 +194,7 @@ public void testElectClusterManagerWithLatestVersion() throws Exception { logger.info("--> forcing a complete election again"); isolateAllNodes.startDisrupting(); for (String node : nodes) { - assertNoMaster(node); + assertNoClusterManager(node); } isolateAllNodes.stopDisrupting(); @@ -242,7 +242,7 @@ public void testNodeNotReachableFromClusterManager() throws Exception { ensureStableCluster(2, clusterManagerNode); logger.info("waiting for [{}] to have no cluster-manager", nonClusterManagerNode); - assertNoMaster(nonClusterManagerNode); + assertNoClusterManager(nonClusterManagerNode); logger.info("healing partition and checking cluster reforms"); clusterManagerTransportService.clearAllRules(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java index bff0689a153b3..dfd6d059cc3a8 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsActionTests.java @@ -252,7 +252,7 @@ public void testWithdrawsVotesFromNodesMatchingWildcard() throws InterruptedExce 
assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } - public void testWithdrawsVotesFromAllMasterEligibleNodes() throws InterruptedException { + public void testWithdrawsVotesFromAllClusterManagerEligibleNodes() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(2); clusterStateObserver.waitForNextChange(new AdjustConfigurationForExclusions(countDownLatch)); @@ -349,14 +349,14 @@ public void testReturnsErrorIfNoMatchingNodeDescriptions() throws InterruptedExc assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } - public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException { + public void testOnlyMatchesClusterManagerEligibleNodes() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); final SetOnce exceptionHolder = new SetOnce<>(); transportService.sendRequest( localNode, AddVotingConfigExclusionsAction.NAME, - makeRequestWithNodeDescriptions("_all", "master:false"), + makeRequestWithNodeDescriptions("_all", "cluster_manager:false"), expectError(e -> { exceptionHolder.set(e); countDownLatch.countDown(); @@ -368,7 +368,7 @@ public void testOnlyMatchesMasterEligibleNodes() throws InterruptedException { assertThat(rootCause, instanceOf(IllegalArgumentException.class)); assertThat( rootCause.getMessage(), - equalTo("add voting config exclusions request for [_all, master:false] matched no cluster-manager-eligible nodes") + equalTo("add voting config exclusions request for [_all, cluster_manager:false] matched no cluster-manager-eligible nodes") ); assertWarnings(AddVotingConfigExclusionsRequest.DEPRECATION_MESSAGE); } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 844dfe9c6c00f..b33f5c7bd5bc7 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -110,9 +110,9 @@ public void testClusterHealth() throws IOException { assertThat(clusterHealth.getActiveShardsPercent(), is(allOf(greaterThanOrEqualTo(0.0), lessThanOrEqualTo(100.0)))); } - public void testClusterHealthVerifyMasterNodeDiscovery() throws IOException { + public void testClusterHealthVerifyClusterManagerNodeDiscovery() throws IOException { DiscoveryNode localNode = new DiscoveryNode("node", OpenSearchTestCase.buildNewFakeTransportAddress(), Version.CURRENT); - // set the node information to verify master_node discovery in ClusterHealthResponse + // set the node information to verify cluster_manager_node discovery in ClusterHealthResponse ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(localNode.getId())) .build(); diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java index d3be6170526fc..5b2b4f361083b 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java @@ -474,12 +474,12 @@ public void onFailure(Exception e) { for (int i = 1; i < testNodes.length; i++) { discoveryNodes[i - 1] = testNodes[i].discoveryNode(); } - DiscoveryNode master = discoveryNodes[0]; + DiscoveryNode clusterManager = discoveryNodes[0]; for (int i = 1; i < testNodes.length; i++) { // Notify only nodes that should remain in the cluster setState( testNodes[i].clusterService, - 
ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), master, discoveryNodes) + ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), clusterManager, discoveryNodes) ); } if (randomBoolean()) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index c8411b31e0709..4383b21aa7e74 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -257,9 +257,9 @@ public static void connectNodes(TestNode... nodes) { for (int i = 0; i < nodes.length; i++) { discoveryNodes[i] = nodes[i].discoveryNode(); } - DiscoveryNode master = discoveryNodes[0]; + DiscoveryNode clusterManager = discoveryNodes[0]; for (TestNode node : nodes) { - setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), master, discoveryNodes)); + setState(node.clusterService, ClusterStateCreationUtils.state(node.discoveryNode(), clusterManager, discoveryNodes)); } for (TestNode nodeA : nodes) { for (TestNode nodeB : nodes) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index 6f62883ff436c..d48eb1619d36c 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -161,7 +161,7 @@ public void testEqualsAndHashCode() { assertEquals(request, copy); assertEquals(request.hashCode(), copy.hashCode()); - // Changing masterNodeTime makes requests not equal + // Changing clusterManagerNodeTimeout makes requests not equal 
copy.masterNodeTimeout(timeValueMillis(request.masterNodeTimeout().millis() + 1)); assertNotEquals(request, copy); assertNotEquals(request.hashCode(), copy.hashCode()); diff --git a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java index 09ab2438bd106..5fd5e7315e553 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportMultiSearchActionTests.java @@ -260,7 +260,7 @@ public void testDefaultMaxConcurrentSearches() { } builder.add( new DiscoveryNode( - "master", + "cluster_manager", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 2830a42dfae76..930fe4ad6049d 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -366,17 +366,17 @@ public void testOneRequestIsSentToEachNodeHoldingAShard() { } } - // simulate the master being removed from the cluster but before a new master is elected - // as such, the shards assigned to the master will still show up in the cluster state as assigned to a node but - // that node will not be in the local cluster state on any node that has detected the master as failing + // simulate the cluster-manager being removed from the cluster but before a new cluster-manager is elected + // as such, the shards assigned to the cluster-manager will still show up in the cluster state as assigned to a node but + // that node will not be in 
the local cluster state on any node that has detected the cluster-manager as failing // in this case, such a shard should be treated as unassigned - public void testRequestsAreNotSentToFailedMaster() { + public void testRequestsAreNotSentToFailedClusterManager() { Request request = new Request(new String[] { TEST_INDEX }); PlainActionFuture listener = new PlainActionFuture<>(); - DiscoveryNode masterNode = clusterService.state().nodes().getMasterNode(); + DiscoveryNode clusterManagerNode = clusterService.state().nodes().getMasterNode(); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); - builder.remove(masterNode.getId()); + builder.remove(clusterManagerNode.getId()); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); @@ -384,11 +384,11 @@ public void testRequestsAreNotSentToFailedMaster() { Map> capturedRequests = transport.getCapturedRequestsByTargetNodeAndClear(); - // the master should not be in the list of nodes that requests were sent to + // the cluster manager should not be in the list of nodes that requests were sent to ShardsIterator shardIt = clusterService.state().routingTable().allShards(new String[] { TEST_INDEX }); Set set = new HashSet<>(); for (ShardRouting shard : shardIt) { - if (!shard.currentNodeId().equals(masterNode.getId())) { + if (!shard.currentNodeId().equals(clusterManagerNode.getId())) { set.add(shard.currentNodeId()); } } @@ -399,7 +399,7 @@ public void testRequestsAreNotSentToFailedMaster() { // check requests were sent to the right nodes assertEquals(set, capturedRequests.keySet()); for (Map.Entry> entry : capturedRequests.entrySet()) { - // check one request was sent to each non-master node + // check one request was sent to each non-cluster-manager node assertEquals(1, entry.getValue().size()); } } @@ -456,13 +456,13 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept Request request = new Request(new String[] { TEST_INDEX 
}); PlainActionFuture listener = new PlainActionFuture<>(); - // simulate removing the master - final boolean simulateFailedMasterNode = rarely(); - DiscoveryNode failedMasterNode = null; - if (simulateFailedMasterNode) { - failedMasterNode = clusterService.state().nodes().getMasterNode(); + // simulate removing the cluster-manager + final boolean simulateFailedClusterManagerNode = rarely(); + DiscoveryNode failedClusterManagerNode = null; + if (simulateFailedClusterManagerNode) { + failedClusterManagerNode = clusterService.state().nodes().getMasterNode(); DiscoveryNodes.Builder builder = DiscoveryNodes.builder(clusterService.state().getNodes()); - builder.remove(failedMasterNode.getId()); + builder.remove(failedClusterManagerNode.getId()); builder.masterNodeId(null); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(builder)); @@ -511,8 +511,8 @@ public void testResultAggregation() throws ExecutionException, InterruptedExcept transport.handleResponse(requestId, nodeResponse); } } - if (simulateFailedMasterNode) { - totalShards += map.get(failedMasterNode.getId()).size(); + if (simulateFailedClusterManagerNode) { + totalShards += map.get(failedClusterManagerNode.getId()).size(); } Response response = listener.get(); diff --git a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java index 1dd44f3186657..512749346588e 100644 --- a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java @@ -240,7 +240,7 @@ protected ClusterBlockException checkBlock(Request request, ClusterState state) } public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException { - final boolean masterOperationFailure = randomBoolean(); + final boolean 
clusterManagerOperationFailure = randomBoolean(); Request request = new Request(); PlainActionFuture listener = new PlainActionFuture<>(); @@ -253,7 +253,7 @@ public void testLocalOperationWithoutBlocks() throws ExecutionException, Interru new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { - if (masterOperationFailure) { + if (clusterManagerOperationFailure) { listener.onFailure(exception); } else { listener.onResponse(response); @@ -262,7 +262,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A }.execute(request, listener); assertTrue(listener.isDone()); - if (masterOperationFailure) { + if (clusterManagerOperationFailure) { try { listener.get(); fail("Expected exception but returned proper result"); @@ -376,7 +376,7 @@ protected boolean localExecute(Request request) { listener.get(); } - public void testMasterNotAvailable() throws ExecutionException, InterruptedException { + public void testClusterManagerNotAvailable() throws ExecutionException, InterruptedException { Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0)); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -385,7 +385,7 @@ public void testMasterNotAvailable() throws ExecutionException, InterruptedExcep assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class); } - public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException { + public void testClusterManagerBecomesAvailable() throws ExecutionException, InterruptedException { Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -396,7 +396,7 @@ 
public void testMasterBecomesAvailable() throws ExecutionException, InterruptedE listener.get(); } - public void testDelegateToMaster() throws ExecutionException, InterruptedException { + public void testDelegateToClusterManager() throws ExecutionException, InterruptedException { Request request = new Request(); setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); @@ -415,15 +415,15 @@ public void testDelegateToMaster() throws ExecutionException, InterruptedExcepti assertThat(listener.get(), equalTo(response)); } - public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException { + public void testDelegateToFailingClusterManager() throws ExecutionException, InterruptedException { boolean failsWithConnectTransportException = randomBoolean(); - boolean rejoinSameMaster = failsWithConnectTransportException && randomBoolean(); + boolean rejoinSameClusterManager = failsWithConnectTransportException && randomBoolean(); Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0)); - DiscoveryNode masterNode = this.remoteNode; + DiscoveryNode clusterManagerNode = this.remoteNode; setState( clusterService, // use a random base version so it can go down when simulating a restart. - ClusterState.builder(ClusterStateCreationUtils.state(localNode, masterNode, allNodes)).version(randomIntBetween(0, 10)) + ClusterState.builder(ClusterStateCreationUtils.state(localNode, clusterManagerNode, allNodes)).version(randomIntBetween(0, 10)) ); PlainActionFuture listener = new PlainActionFuture<>(); @@ -436,14 +436,16 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); - if (rejoinSameMaster) { + if (rejoinSameClusterManager) { transport.handleRemoteError( capturedRequest.requestId, - randomBoolean() ? 
new ConnectTransportException(masterNode, "Fake error") : new NodeClosedException(masterNode) + randomBoolean() + ? new ConnectTransportException(clusterManagerNode, "Fake error") + : new NodeClosedException(clusterManagerNode) ); assertFalse(listener.isDone()); if (randomBoolean()) { - // simulate master node removal + // simulate cluster-manager node removal final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); nodesBuilder.masterNodeId(null); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); @@ -452,15 +454,19 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted // reset the same state to increment a version simulating a join of an existing node // simulating use being disconnected final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - nodesBuilder.masterNodeId(masterNode.getId()); + nodesBuilder.masterNodeId(clusterManagerNode.getId()); setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodesBuilder)); } else { - // simulate master restart followed by a state recovery - this will reset the cluster state version + // simulate cluster-manager restart followed by a state recovery - this will reset the cluster state version final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - nodesBuilder.remove(masterNode); - masterNode = new DiscoveryNode(masterNode.getId(), masterNode.getAddress(), masterNode.getVersion()); - nodesBuilder.add(masterNode); - nodesBuilder.masterNodeId(masterNode.getId()); + nodesBuilder.remove(clusterManagerNode); + clusterManagerNode = new DiscoveryNode( + clusterManagerNode.getId(), + clusterManagerNode.getAddress(), + clusterManagerNode.getVersion() + ); + nodesBuilder.add(clusterManagerNode); + nodesBuilder.masterNodeId(clusterManagerNode.getId()); final ClusterState.Builder builder = 
ClusterState.builder(clusterService.state()).nodes(nodesBuilder); setState(clusterService, builder.version(0)); } @@ -472,7 +478,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted assertThat(capturedRequest.request, equalTo(request)); assertThat(capturedRequest.action, equalTo("internal:testAction")); } else if (failsWithConnectTransportException) { - transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(masterNode, "Fake error")); + transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(clusterManagerNode, "Fake error")); assertFalse(listener.isDone()); setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes)); assertTrue(listener.isDone()); @@ -495,7 +501,7 @@ public void testDelegateToFailingMaster() throws ExecutionException, Interrupted } } - public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException { + public void testClusterManagerFailoverAfterStepDown() throws ExecutionException, InterruptedException { Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -506,7 +512,8 @@ public void testMasterFailoverAfterStepDown() throws ExecutionException, Interru new Action("internal:testAction", transportService, clusterService, threadPool) { @Override protected void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception { - // The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery + // The other node has become cluster-manager, simulate failures of this node while publishing cluster state through + // ZenDiscovery setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes)); Exception failure = randomBoolean() ? 
new FailedToCommitClusterStateException("Fake error") @@ -526,8 +533,8 @@ protected void masterOperation(Request request, ClusterState state, ActionListen assertThat(listener.get(), equalTo(response)); } - // Validate TransportMasterNodeAction.testDelegateToMaster() works correctly on node with the deprecated MASTER_ROLE. - public void testDelegateToMasterOnNodeWithDeprecatedMasterRole() throws ExecutionException, InterruptedException { + // Validate TransportMasterNodeAction.testDelegateToClusterManager() works correctly on node with the deprecated MASTER_ROLE. + public void testDelegateToClusterManagerOnNodeWithDeprecatedMasterRole() throws ExecutionException, InterruptedException { DiscoveryNode localNode = new DiscoveryNode( "local_node", buildNewFakeTransportAddress(), diff --git a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java index d1faeccc83ac4..391103eb5cebd 100644 --- a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java @@ -42,13 +42,13 @@ public class TransportMasterNodeActionUtils { * Allows to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is * a protected method. 
*/ - public static , Response extends ActionResponse> void runMasterOperation( - TransportMasterNodeAction masterNodeAction, + public static , Response extends ActionResponse> void runClusterManagerOperation( + TransportMasterNodeAction clusterManagerNodeAction, Request request, ClusterState clusterState, ActionListener actionListener ) throws Exception { - assert masterNodeAction.checkBlock(request, clusterState) == null; - masterNodeAction.masterOperation(request, clusterState, actionListener); + assert clusterManagerNodeAction.checkBlock(request, clusterState) == null; + clusterManagerNodeAction.masterOperation(request, clusterState, actionListener); } } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java index e0a12fc1d312b..16f21a48d7ab8 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterChangedEventTests.java @@ -107,19 +107,19 @@ public void testBasicProperties() { } /** - * Test whether the ClusterChangedEvent returns the correct value for whether the local node is master, + * Test whether the ClusterChangedEvent returns the correct value for whether the local node is cluster-manager, * based on what was set on the cluster state. 
*/ - public void testLocalNodeIsMaster() { + public void testLocalNodeIsClusterManager() { final int numNodesInCluster = 3; ClusterState previousState = createSimpleClusterState(); ClusterState newState = createState(numNodesInCluster, true, initialIndices); ClusterChangedEvent event = new ClusterChangedEvent("_na_", newState, previousState); - assertTrue("local node should be master", event.localNodeMaster()); + assertTrue("local node should be cluster-manager", event.localNodeMaster()); newState = createState(numNodesInCluster, false, initialIndices); event = new ClusterChangedEvent("_na_", newState, previousState); - assertFalse("local node should not be master", event.localNodeMaster()); + assertFalse("local node should not be cluster-manager", event.localNodeMaster()); } /** @@ -314,8 +314,8 @@ public void testChangedCustomMetadataSet() { assertTrue(changedCustomMetadataTypeSet.contains(customMetadata1.getWriteableName())); } - // Validate the above test case testLocalNodeIsMaster() passes when the deprecated 'master' role is assigned to the local node. - public void testLocalNodeIsMasterWithDeprecatedMasterRole() { + // Validate the above test case testLocalNodeIsClusterManager() passes when the deprecated 'master' role is assigned to the local node. 
+ public void testLocalNodeIsClusterManagerWithDeprecatedMasterRole() { final DiscoveryNodes.Builder builderLocalIsMaster = DiscoveryNodes.builder(); final DiscoveryNode node0 = newNode("node_0", Set.of(DiscoveryNodeRole.MASTER_ROLE)); final DiscoveryNode node1 = newNode("node_1", Set.of(DiscoveryNodeRole.DATA_ROLE)); @@ -390,18 +390,18 @@ private static ClusterState createSimpleClusterState() { } // Create a basic cluster state with a given set of indices - private static ClusterState createState(final int numNodes, final boolean isLocalMaster, final List indices) { + private static ClusterState createState(final int numNodes, final boolean isLocalClusterManager, final List indices) { final Metadata metadata = createMetadata(indices); return ClusterState.builder(TEST_CLUSTER_NAME) - .nodes(createDiscoveryNodes(numNodes, isLocalMaster)) + .nodes(createDiscoveryNodes(numNodes, isLocalClusterManager)) .metadata(metadata) .routingTable(createRoutingTable(1, metadata)) .build(); } // Create a non-initialized cluster state - private static ClusterState createNonInitializedState(final int numNodes, final boolean isLocalMaster) { - final ClusterState withoutBlock = createState(numNodes, isLocalMaster, Collections.emptyList()); + private static ClusterState createNonInitializedState(final int numNodes, final boolean isLocalClusterManager) { + final ClusterState withoutBlock = createState(numNodes, isLocalClusterManager, Collections.emptyList()); return ClusterState.builder(withoutBlock) .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK).build()) .build(); @@ -463,28 +463,29 @@ private static ClusterState nextState( } // Create the discovery nodes for a cluster state. For our testing purposes, we want - // the first to be master, the second to be master eligible, the third to be a data node, - // and the remainder can be any kinds of nodes (master eligible, data, or both). 
- private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalMaster) { + // the first to be cluster-manager, the second to be cluster-manager eligible, the third to be a data node, + // and the remainder can be any kinds of nodes (cluster-manager eligible, data, or both). + private static DiscoveryNodes createDiscoveryNodes(final int numNodes, final boolean isLocalClusterManager) { assert (numNodes >= 3) : "the initial cluster state for event change tests should have a minimum of 3 nodes " - + "so there are a minimum of 2 master nodes for testing master change events."; + + "so there are a minimum of 2 cluster-manager nodes for testing cluster-manager change events."; final DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - final int localNodeIndex = isLocalMaster ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not master + final int localNodeIndex = isLocalClusterManager ? 0 : randomIntBetween(1, numNodes - 1); // randomly assign the local node if not + // cluster-manager for (int i = 0; i < numNodes; i++) { final String nodeId = NODE_ID_PREFIX + i; Set roles = new HashSet<>(); if (i == 0) { - // the master node + // the cluster-manager node builder.masterNodeId(nodeId); roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else if (i == 1) { - // the alternate master node + // the alternate cluster-manager node roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else if (i == 2) { // we need at least one data node roles.add(DiscoveryNodeRole.DATA_ROLE); } else { - // remaining nodes can be anything (except for master) + // remaining nodes can be anything (except for cluster-manager) if (randomBoolean()) { roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 8904e4391a89f..3155954d020a4 100644 --- 
a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -86,31 +86,34 @@ public void testSupersedes() { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), version); final DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).add(node2).build(); ClusterName name = ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY); - ClusterState noMaster1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); - ClusterState noMaster2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); - ClusterState withMaster1a = ClusterState.builder(name) + ClusterState noClusterManager1 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); + ClusterState noClusterManager2 = ClusterState.builder(name).version(randomInt(5)).nodes(nodes).build(); + ClusterState withClusterManager1a = ClusterState.builder(name) .version(randomInt(5)) .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())) .build(); - ClusterState withMaster1b = ClusterState.builder(name) + ClusterState withClusterManager1b = ClusterState.builder(name) .version(randomInt(5)) .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node1.getId())) .build(); - ClusterState withMaster2 = ClusterState.builder(name) + ClusterState withClusterManager2 = ClusterState.builder(name) .version(randomInt(5)) .nodes(DiscoveryNodes.builder(nodes).masterNodeId(node2.getId())) .build(); // states with no cluster-manager should never supersede anything - assertFalse(noMaster1.supersedes(noMaster2)); - assertFalse(noMaster1.supersedes(withMaster1a)); + assertFalse(noClusterManager1.supersedes(noClusterManager2)); + assertFalse(noClusterManager1.supersedes(withClusterManager1a)); - // states should never supersede states from another master - assertFalse(withMaster1a.supersedes(withMaster2)); - 
assertFalse(withMaster1a.supersedes(noMaster1)); + // states should never supersede states from another cluster-manager + assertFalse(withClusterManager1a.supersedes(withClusterManager2)); + assertFalse(withClusterManager1a.supersedes(noClusterManager1)); - // state from the same master compare by version - assertThat(withMaster1a.supersedes(withMaster1b), equalTo(withMaster1a.version() > withMaster1b.version())); + // state from the same cluster-manager compare by version + assertThat( + withClusterManager1a.supersedes(withClusterManager1b), + equalTo(withClusterManager1a.version() > withClusterManager1b.version()) + ); } public void testBuilderRejectsNullCustom() { @@ -146,8 +149,8 @@ public void testToXContent() throws IOException { + " \"cluster_uuid\" : \"clusterUUID\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"stateUUID\",\n" - + " \"master_node\" : \"masterNodeId\",\n" - + " \"cluster_manager_node\" : \"masterNodeId\",\n" + + " \"master_node\" : \"clusterManagerNodeId\",\n" + + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n" + " \"blocks\" : {\n" + " \"global\" : {\n" + " \"1\" : {\n" @@ -352,8 +355,8 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " \"cluster_uuid\" : \"clusterUUID\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"stateUUID\",\n" - + " \"master_node\" : \"masterNodeId\",\n" - + " \"cluster_manager_node\" : \"masterNodeId\",\n" + + " \"master_node\" : \"clusterManagerNodeId\",\n" + + " \"cluster_manager_node\" : \"clusterManagerNodeId\",\n" + " \"blocks\" : {\n" + " \"global\" : {\n" + " \"1\" : {\n" @@ -551,8 +554,8 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " \"cluster_uuid\" : \"clusterUUID\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"stateUUID\",\n" - + " \"master_node\" : \"masterNodeId\",\n" - + " \"cluster_manager_node\" : \"masterNodeId\",\n" + + " \"master_node\" : \"clusterManagerNodeId\",\n" + + " \"cluster_manager_node\" 
: \"clusterManagerNodeId\",\n" + " \"blocks\" : {\n" + " \"global\" : {\n" + " \"1\" : {\n" @@ -868,7 +871,7 @@ private ClusterState buildClusterState() throws IOException { .stateUUID("stateUUID") .nodes( DiscoveryNodes.builder() - .masterNodeId("masterNodeId") + .masterNodeId("clusterManagerNodeId") .add(new DiscoveryNode("nodeId1", new TransportAddress(InetAddress.getByName("127.0.0.1"), 111), Version.CURRENT)) .build() ) diff --git a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java index c6279d0029009..251703a933525 100644 --- a/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java +++ b/server/src/test/java/org/opensearch/cluster/InternalClusterInfoServiceSchedulingTests.java @@ -68,8 +68,8 @@ public class InternalClusterInfoServiceSchedulingTests extends OpenSearchTestCas public void testScheduling() { final DiscoveryNode discoveryNode = new DiscoveryNode("test", buildNewFakeTransportAddress(), Version.CURRENT); - final DiscoveryNodes noMaster = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build(); - final DiscoveryNodes localMaster = DiscoveryNodes.builder(noMaster).masterNodeId(discoveryNode.getId()).build(); + final DiscoveryNodes noClusterManager = DiscoveryNodes.builder().add(discoveryNode).localNodeId(discoveryNode.getId()).build(); + final DiscoveryNodes localClusterManager = DiscoveryNodes.builder(noClusterManager).masterNodeId(discoveryNode.getId()).build(); final Settings.Builder settingsBuilder = Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), discoveryNode.getName()); if (randomBoolean()) { @@ -87,14 +87,14 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { } }; - final MasterService masterService = new FakeThreadPoolMasterService( + final MasterService clusterManagerService = new 
FakeThreadPoolMasterService( "test", - "masterService", + "clusterManagerService", threadPool, - r -> { fail("master service should not run any tasks"); } + r -> { fail("cluster-manager service should not run any tasks"); } ); - final ClusterService clusterService = new ClusterService(settings, clusterSettings, masterService, clusterApplierService); + final ClusterService clusterService = new ClusterService(settings, clusterSettings, clusterManagerService, clusterApplierService); final FakeClusterInfoServiceClient client = new FakeClusterInfoServiceClient(threadPool); final InternalClusterInfoService clusterInfoService = new InternalClusterInfoService(settings, clusterService, threadPool, client); @@ -102,34 +102,34 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { clusterInfoService.addListener(ignored -> {}); clusterService.setNodeConnectionsService(ClusterServiceUtils.createNoOpNodeConnectionsService()); - clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build()); - masterService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); - masterService.setClusterStateSupplier(clusterApplierService::state); + clusterApplierService.setInitialState(ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build()); + clusterManagerService.setClusterStatePublisher((clusterChangedEvent, publishListener, ackListener) -> fail("should not publish")); + clusterManagerService.setClusterStateSupplier(clusterApplierService::state); clusterService.start(); - final AtomicBoolean becameMaster1 = new AtomicBoolean(); + final AtomicBoolean becameClusterManager1 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "become master 1", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(), - setFlagOnSuccess(becameMaster1) + "become cluster-manager 1", + () -> ClusterState.builder(new 
ClusterName("cluster")).nodes(localClusterManager).build(), + setFlagOnSuccess(becameClusterManager1) ); - runUntilFlag(deterministicTaskQueue, becameMaster1); + runUntilFlag(deterministicTaskQueue, becameClusterManager1); - final AtomicBoolean failMaster1 = new AtomicBoolean(); + final AtomicBoolean failClusterManager1 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "fail master 1", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(), - setFlagOnSuccess(failMaster1) + "fail cluster-manager 1", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build(), + setFlagOnSuccess(failClusterManager1) ); - runUntilFlag(deterministicTaskQueue, failMaster1); + runUntilFlag(deterministicTaskQueue, failClusterManager1); - final AtomicBoolean becameMaster2 = new AtomicBoolean(); + final AtomicBoolean becameClusterManager2 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "become master 2", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(localMaster).build(), - setFlagOnSuccess(becameMaster2) + "become cluster-manager 2", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(localClusterManager).build(), + setFlagOnSuccess(becameClusterManager2) ); - runUntilFlag(deterministicTaskQueue, becameMaster2); + runUntilFlag(deterministicTaskQueue, becameClusterManager2); for (int i = 0; i < 3; i++) { final int initialRequestCount = client.requestCount; @@ -139,13 +139,13 @@ protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { assertThat(client.requestCount, equalTo(initialRequestCount + 2)); // should have run two client requests per interval } - final AtomicBoolean failMaster2 = new AtomicBoolean(); + final AtomicBoolean failClusterManager2 = new AtomicBoolean(); clusterApplierService.onNewClusterState( - "fail master 2", - () -> ClusterState.builder(new ClusterName("cluster")).nodes(noMaster).build(), - setFlagOnSuccess(failMaster2) + "fail 
cluster-manager 2", + () -> ClusterState.builder(new ClusterName("cluster")).nodes(noClusterManager).build(), + setFlagOnSuccess(failClusterManager2) ); - runUntilFlag(deterministicTaskQueue, failMaster2); + runUntilFlag(deterministicTaskQueue, failClusterManager2); runFor(deterministicTaskQueue, INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING.get(settings).millis()); deterministicTaskQueue.runAllRunnableTasks(); diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java index 744c833fa54e9..cf34af718c660 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java @@ -112,16 +112,16 @@ private static class TestShardStateAction extends ShardStateAction { super(clusterService, transportService, allocationService, rerouteService, THREAD_POOL); } - private Runnable onBeforeWaitForNewMasterAndRetry; + private Runnable onBeforeWaitForNewClusterManagerAndRetry; - public void setOnBeforeWaitForNewMasterAndRetry(Runnable onBeforeWaitForNewMasterAndRetry) { - this.onBeforeWaitForNewMasterAndRetry = onBeforeWaitForNewMasterAndRetry; + public void setOnBeforeWaitForNewClusterManagerAndRetry(Runnable onBeforeWaitForNewClusterManagerAndRetry) { + this.onBeforeWaitForNewClusterManagerAndRetry = onBeforeWaitForNewClusterManagerAndRetry; } - private Runnable onAfterWaitForNewMasterAndRetry; + private Runnable onAfterWaitForNewClusterManagerAndRetry; - public void setOnAfterWaitForNewMasterAndRetry(Runnable onAfterWaitForNewMasterAndRetry) { - this.onAfterWaitForNewMasterAndRetry = onAfterWaitForNewMasterAndRetry; + public void setOnAfterWaitFornewClusterManagerAndRetry(Runnable onAfterWaitFornewClusterManagerAndRetry) { + this.onAfterWaitForNewClusterManagerAndRetry = onAfterWaitFornewClusterManagerAndRetry; } @Override @@ -132,9 +132,9 
@@ protected void waitForNewClusterManagerAndRetry( ActionListener listener, Predicate changePredicate ) { - onBeforeWaitForNewMasterAndRetry.run(); + onBeforeWaitForNewClusterManagerAndRetry.run(); super.waitForNewClusterManagerAndRetry(actionName, observer, request, listener, changePredicate); - onAfterWaitForNewMasterAndRetry.run(); + onAfterWaitForNewClusterManagerAndRetry.run(); } } @@ -160,8 +160,8 @@ public void setUp() throws Exception { transportService.start(); transportService.acceptIncomingRequests(); shardStateAction = new TestShardStateAction(clusterService, transportService, null, null); - shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> {}); - shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> {}); + shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> {}); + shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> {}); } @Override @@ -196,7 +196,7 @@ public void testSuccess() throws InterruptedException { // for the right shard assertEquals(shardEntry.shardId, shardRouting.shardId()); assertEquals(shardEntry.allocationId, shardRouting.allocationId().getId()); - // sent to the master + // sent to the cluster-manager assertEquals(clusterService.state().nodes().getMasterNode().getId(), capturedRequests[0].node.getId()); transport.handleResponse(capturedRequests[0].requestId, TransportResponse.Empty.INSTANCE); @@ -205,20 +205,20 @@ public void testSuccess() throws InterruptedException { assertNull(listener.failure.get()); } - public void testNoMaster() throws InterruptedException { + public void testNoClusterManager() throws InterruptedException { final String index = "test"; setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); - DiscoveryNodes.Builder noMasterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - noMasterBuilder.masterNodeId(null); - setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noMasterBuilder)); + 
DiscoveryNodes.Builder noClusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); + noClusterManagerBuilder.masterNodeId(null); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(noClusterManagerBuilder)); CountDownLatch latch = new CountDownLatch(1); AtomicInteger retries = new AtomicInteger(); AtomicBoolean success = new AtomicBoolean(); - setUpMasterRetryVerification(1, retries, latch, requestId -> {}); + setUpClusterManagerRetryVerification(1, retries, latch, requestId -> {}); ShardRouting failedShard = getRandomShardRouting(index); shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ActionListener() { @@ -242,7 +242,7 @@ public void onFailure(Exception e) { assertTrue(success.get()); } - public void testMasterChannelException() throws InterruptedException { + public void testClusterManagerChannelException() throws InterruptedException { final String index = "test"; setState(clusterService, ClusterStateCreationUtils.stateWithActivePrimary(index, true, randomInt(5))); @@ -268,7 +268,7 @@ public void testMasterChannelException() throws InterruptedException { }; final int numberOfRetries = randomIntBetween(1, 256); - setUpMasterRetryVerification(numberOfRetries, retries, latch, retryLoop); + setUpClusterManagerRetryVerification(numberOfRetries, retries, latch, retryLoop); ShardRouting failedShard = getRandomShardRouting(index); shardStateAction.localShardFailed(failedShard, "test", getSimulatedFailure(), new ActionListener() { @@ -413,8 +413,8 @@ public void testRemoteShardFailedConcurrently() throws Exception { } Thread[] clientThreads = new Thread[between(1, 6)]; int iterationsPerThread = scaledRandomIntBetween(50, 500); - Phaser barrier = new Phaser(clientThreads.length + 2); // one for master thread, one for the main thread - Thread masterThread = new Thread(() -> { + Phaser barrier = new Phaser(clientThreads.length + 2); // one for cluster-manager thread, one for the main thread + 
Thread clusterManagerThread = new Thread(() -> { barrier.arriveAndAwaitAdvance(); while (shutdown.get() == false) { for (CapturingTransport.CapturedRequest request : transport.getCapturedRequestsAndClear()) { @@ -426,7 +426,7 @@ public void testRemoteShardFailedConcurrently() throws Exception { } } }); - masterThread.start(); + clusterManagerThread.start(); AtomicInteger notifiedResponses = new AtomicInteger(); for (int t = 0; t < clientThreads.length; t++) { @@ -463,7 +463,7 @@ public void onFailure(Exception e) { } assertBusy(() -> assertThat(notifiedResponses.get(), equalTo(clientThreads.length * iterationsPerThread))); shutdown.set(true); - masterThread.join(); + clusterManagerThread.join(); } public void testShardStarted() throws InterruptedException { @@ -496,14 +496,19 @@ private ShardRouting getRandomShardRouting(String index) { return shardRouting; } - private void setUpMasterRetryVerification(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) { - shardStateAction.setOnBeforeWaitForNewMasterAndRetry(() -> { - DiscoveryNodes.Builder masterBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); - masterBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId()); - setState(clusterService, ClusterState.builder(clusterService.state()).nodes(masterBuilder)); + private void setUpClusterManagerRetryVerification( + int numberOfRetries, + AtomicInteger retries, + CountDownLatch latch, + LongConsumer retryLoop + ) { + shardStateAction.setOnBeforeWaitForNewClusterManagerAndRetry(() -> { + DiscoveryNodes.Builder clusterManagerBuilder = DiscoveryNodes.builder(clusterService.state().nodes()); + clusterManagerBuilder.masterNodeId(clusterService.state().nodes().getMasterNodes().iterator().next().value.getId()); + setState(clusterService, ClusterState.builder(clusterService.state()).nodes(clusterManagerBuilder)); }); - shardStateAction.setOnAfterWaitForNewMasterAndRetry(() -> 
verifyRetry(numberOfRetries, retries, latch, retryLoop)); + shardStateAction.setOnAfterWaitFornewClusterManagerAndRetry(() -> verifyRetry(numberOfRetries, retries, latch, retryLoop)); } private void verifyRetry(int numberOfRetries, AtomicInteger retries, CountDownLatch latch, LongConsumer retryLoop) { diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java index 812bf9425968a..b2b7c167ec7c7 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -169,7 +169,7 @@ public void testDoesNothingByDefaultIfClusterManagerNodesConfigured() { testDoesNothingWithSettings(builder().putList(INITIAL_CLUSTER_MANAGER_NODES_SETTING.getKey())); } - public void testDoesNothingByDefaultOnMasterIneligibleNodes() { + public void testDoesNothingByDefaultOnClusterManagerIneligibleNodes() { localNode = new DiscoveryNode( "local", randomAlphaOfLength(10), @@ -401,7 +401,7 @@ public void testDoesNotBootstrapIfAlreadyBootstrapped() { deterministicTaskQueue.runAllTasks(); } - public void testDoesNotBootstrapsOnNonMasterNode() { + public void testDoesNotBootstrapsOnNonClusterManagerNode() { localNode = new DiscoveryNode( "local", randomAlphaOfLength(10), @@ -676,7 +676,7 @@ public void testFailBootstrapWithBothSingleNodeDiscoveryAndInitialClusterManager ); } - public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { + public void testFailBootstrapNonClusterManagerEligibleNodeWithSingleNodeDiscovery() { final Settings.Builder settings = Settings.builder() .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) .put(NODE_NAME_SETTING.getKey(), localNode.getName()) diff --git 
a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 4fb96145732a5..0a534c34b4f86 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -173,7 +173,7 @@ public void testScheduling() { assertThat(logLastFailedJoinAttemptWarningCount.get(), is(5L)); } - public void testDescriptionOnMasterIneligibleNodes() { + public void testDescriptionOnClusterManagerIneligibleNodes() { final DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) .version(12L) @@ -284,7 +284,7 @@ public void testDescriptionOnUnhealthyNodes() { is("this node is unhealthy: unhealthy-info") ); - final DiscoveryNode masterNode = new DiscoveryNode( + final DiscoveryNode clusterManagerNode = new DiscoveryNode( "local", buildNewFakeTransportAddress(), emptyMap(), @@ -293,7 +293,7 @@ public void testDescriptionOnUnhealthyNodes() { ); clusterState = ClusterState.builder(ClusterName.DEFAULT) .version(12L) - .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId())) + .nodes(DiscoveryNodes.builder().add(clusterManagerNode).localNodeId(clusterManagerNode.getId())) .build(); assertThat( @@ -851,9 +851,13 @@ public void testDescriptionAfterBootstrapping() { ) ); - final DiscoveryNode otherMasterNode = new DiscoveryNode("other-master", buildNewFakeTransportAddress(), Version.CURRENT); - final DiscoveryNode otherNonMasterNode = new DiscoveryNode( - "other-non-master", + final DiscoveryNode otherClusterManagerNode = new DiscoveryNode( + "other-cluster-manager", + buildNewFakeTransportAddress(), + Version.CURRENT + ); + final DiscoveryNode 
otherNonClusterManagerNode = new DiscoveryNode( + "other-non-cluster-manager", buildNewFakeTransportAddress(), emptyMap(), new HashSet<>( @@ -866,7 +870,13 @@ public void testDescriptionAfterBootstrapping() { String[] configNodeIds = new String[] { "n1", "n2" }; final ClusterState stateWithOtherNodes = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).add(otherMasterNode).add(otherNonMasterNode)) + .nodes( + DiscoveryNodes.builder() + .add(localNode) + .localNodeId(localNode.getId()) + .add(otherClusterManagerNode) + .add(otherNonClusterManagerNode) + ) .metadata( Metadata.builder() .coordinationMetadata( @@ -897,13 +907,13 @@ public void testDescriptionAfterBootstrapping() { + "discovery will continue using [] from hosts providers and [" + localNode + ", " - + otherMasterNode + + otherClusterManagerNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0", "cluster-manager not discovered or elected yet, an election requires two nodes with ids [n1, n2], " + "have discovered [] which is not a quorum; " + "discovery will continue using [] from hosts providers and [" - + otherMasterNode + + otherClusterManagerNode + ", " + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0" diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index 6ddbc909747f7..c4db0641717c6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -586,7 +586,7 @@ public void testHandlePublishRequestWithBadTerm() { ); } - // scenario when handling a publish request from a master that we already received a newer state from + // scenario when handling a publish request from a cluster-manager that 
we already received a newer state from public void testHandlePublishRequestWithSameTermButOlderOrSameVersion() { VotingConfiguration initialConfig = VotingConfiguration.of(node1); ClusterState state1 = clusterState(0L, 0L, node1, initialConfig, initialConfig, 42L); @@ -613,7 +613,7 @@ public void testHandlePublishRequestWithSameTermButOlderOrSameVersion() { ); } - // scenario when handling a publish request from a fresh master + // scenario when handling a publish request from a fresh cluster-manager public void testHandlePublishRequestWithTermHigherThanLastAcceptedTerm() { VotingConfiguration initialConfig = VotingConfiguration.of(node1); StartJoinRequest startJoinRequest1 = new StartJoinRequest(node1, randomLongBetween(1, 5)); @@ -845,7 +845,7 @@ public void testVoteCollection() { assertFalse( voteCollection.addVote( - new DiscoveryNode("master-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT) + new DiscoveryNode("cluster-manager-ineligible", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT) ) ); assertTrue(voteCollection.isEmpty()); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index f43d6ff4e6c02..44239fdc0883f 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -107,7 +107,7 @@ public class CoordinatorTests extends AbstractCoordinatorTestCase { /** - * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully + * This test was added to verify that state recovery is properly reset on a node after it has become cluster-manager and successfully * recovered a state (see {@link GatewayService}). 
The situation which triggers this with a decent likelihood is as follows: * 3 cluster-manager-eligible nodes (leader, follower1, follower2), the followers are shut down (leader remains), when followers come back * one of them becomes leader and publishes first state (with STATE_NOT_RECOVERED_BLOCK) to old leader, which accepts it. @@ -164,7 +164,7 @@ public void testCanUpdateClusterStateAfterStabilisation() { } } - public void testDoesNotElectNonMasterNode() { + public void testDoesNotElectNonClusterManagerNode() { try (Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY)) { cluster.runRandomly(); cluster.stabilise(); @@ -191,7 +191,7 @@ public void testUnhealthyNodesGetsRemoved() { cluster.clusterNodes.add(newNode1); cluster.clusterNodes.add(newNode2); cluster.stabilise( - // The first pinging discovers the master + // The first pinging discovers the cluster-manager defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) // One message delay to send a join + DEFAULT_DELAY_VARIABILITY @@ -627,7 +627,7 @@ public void testUnHealthyLeaderRemoved() { cluster.clusterNodes.add(newNode2); cluster.clusterNodes.add(newNode3); cluster.stabilise( - // The first pinging discovers the master + // The first pinging discovers the cluster-manager defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) // One message delay to send a join + DEFAULT_DELAY_VARIABILITY @@ -1096,7 +1096,7 @@ public void testIncompatibleDiffResendsFullState() { * does not notice the node disconnecting, it is important for the node not to be turned back into a follower but try * and join the leader again. 
*/ - public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { + public void testStayCandidateAfterReceivingFollowerCheckFromKnownClusterManager() { try (Cluster cluster = new Cluster(2, false, Settings.EMPTY)) { cluster.runRandomly(); cluster.stabilise(); @@ -1121,23 +1121,23 @@ public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { } } - public void testAppliesNoMasterBlockWritesByDefault() { - testAppliesNoMasterBlock(null, NO_MASTER_BLOCK_WRITES); + public void testAppliesNoClusterManagerBlockWritesByDefault() { + testAppliesNoClusterManagerBlock(null, NO_MASTER_BLOCK_WRITES); } - public void testAppliesNoMasterBlockWritesIfConfigured() { - testAppliesNoMasterBlock("write", NO_MASTER_BLOCK_WRITES); + public void testAppliesNoClusterManagerBlockWritesIfConfigured() { + testAppliesNoClusterManagerBlock("write", NO_MASTER_BLOCK_WRITES); } - public void testAppliesNoMasterBlockAllIfConfigured() { - testAppliesNoMasterBlock("all", NO_MASTER_BLOCK_ALL); + public void testAppliesNoClusterManagerBlockAllIfConfigured() { + testAppliesNoClusterManagerBlock("all", NO_MASTER_BLOCK_ALL); } - public void testAppliesNoMasterBlockMetadataWritesIfConfigured() { - testAppliesNoMasterBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES); + public void testAppliesNoClusterManagerBlockMetadataWritesIfConfigured() { + testAppliesNoClusterManagerBlock("metadata_write", NO_MASTER_BLOCK_METADATA_WRITES); } - private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock expectedBlock) { + private void testAppliesNoClusterManagerBlock(String noClusterManagerBlockSetting, ClusterBlock expectedBlock) { try (Cluster cluster = new Cluster(3)) { cluster.runRandomly(); cluster.stabilise(); @@ -1145,7 +1145,7 @@ private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock final ClusterNode leader = cluster.getAnyLeader(); leader.submitUpdateTask("update NO_CLUSTER_MANAGER_BLOCK_SETTING", cs -> { final Builder 
settingsBuilder = Settings.builder().put(cs.metadata().persistentSettings()); - settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noMasterBlockSetting); + settingsBuilder.put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), noClusterManagerBlockSetting); return ClusterState.builder(cs) .metadata(Metadata.builder(cs.metadata()).persistentSettings(settingsBuilder.build())) .build(); @@ -1175,12 +1175,12 @@ private void testAppliesNoMasterBlock(String noMasterBlockSetting, ClusterBlock } } - public void testNodeCannotJoinIfJoinValidationFailsOnMaster() { + public void testNodeCannotJoinIfJoinValidationFailsOnClusterManager() { try (Cluster cluster = new Cluster(randomIntBetween(1, 3))) { cluster.runRandomly(); cluster.stabilise(); - // check that if node join validation fails on master, the nodes can't join + // check that if node join validation fails on cluster-manager, the nodes can't join List addedNodes = cluster.addNodes(randomIntBetween(1, 2)); final Set validatedNodes = new HashSet<>(); cluster.getAnyLeader().extraJoinValidators.add((discoveryNode, clusterState) -> { @@ -1305,7 +1305,7 @@ public void testDiscoveryUsesNodesFromLastClusterState() { } } - public void testFollowerRemovedIfUnableToSendRequestsToMaster() { + public void testFollowerRemovedIfUnableToSendRequestsToClusterManager() { try (Cluster cluster = new Cluster(3)) { cluster.runRandomly(); cluster.stabilise(); @@ -1333,7 +1333,7 @@ public void testFollowerRemovedIfUnableToSendRequestsToMaster() { cluster.clearBlackholedConnections(); cluster.stabilise( - // time for the disconnected node to find the master again + // time for the disconnected node to find the cluster-manager again defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 // time for joining + 4 * DEFAULT_DELAY_VARIABILITY @@ -1679,7 +1679,7 @@ public String toString() { } } - public void testReconfiguresToExcludeMasterIneligibleNodesInVotingConfig() { + public void 
testReconfiguresToExcludeClusterManagerIneligibleNodesInVotingConfig() { try (Cluster cluster = new Cluster(3)) { cluster.runRandomly(); cluster.stabilise(); @@ -1698,7 +1698,7 @@ public void testReconfiguresToExcludeMasterIneligibleNodesInVotingConfig() { final boolean chosenNodeIsLeader = chosenNode == cluster.getAnyLeader(); final long termBeforeRestart = cluster.getAnyNode().coordinator.getCurrentTerm(); - logger.info("--> restarting [{}] as a master-ineligible node", chosenNode); + logger.info("--> restarting [{}] as a cluster-manager-ineligible node", chosenNode); chosenNode.close(); cluster.clusterNodes.replaceAll( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index 7e7193fbf02ef..d5947bf444d17 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -685,7 +685,7 @@ public String executor() { } } - public void testPreferMasterNodes() { + public void testPreferClusterManagerNodes() { List nodes = randomNodes(10); DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); nodes.forEach(dn -> discoNodesBuilder.add(dn)); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 6bd2d1e70033a..fec1bb025d235 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -161,8 +161,9 @@ public void testSuccess() { } public void testUpdatesNodeWithNewRoles() throws Exception { - // Node roles vary by version, and new roles are suppressed for BWC. 
This means we can receive a join from a node that's already - // in the cluster but with a different set of roles: the node didn't change roles, but the cluster state came via an older master. + // Node roles vary by version, and new roles are suppressed for BWC. + // This means we can receive a join from a node that's already in the cluster but with a different set of roles: + // the node didn't change roles, but the cluster state came via an older cluster-manager. // In this case we must properly process its join to ensure that the roles are correct. final AllocationService allocationService = mock(AllocationService.class); @@ -171,7 +172,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception { final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); - final DiscoveryNode masterNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode actualNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); final DiscoveryNode bwcNode = new DiscoveryNode( @@ -186,7 +187,13 @@ public void testUpdatesNodeWithNewRoles() throws Exception { actualNode.getVersion() ); final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).masterNodeId(masterNode.getId()).add(bwcNode)) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .masterNodeId(clusterManagerNode.getId()) + .add(bwcNode) + ) .build(); final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java 
b/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java index a637826951f87..a44026bbbf477 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NoMasterBlockServiceTests.java @@ -44,12 +44,12 @@ public class NoMasterBlockServiceTests extends OpenSearchTestCase { - private NoMasterBlockService noMasterBlockService; + private NoMasterBlockService noClusterManagerBlockService; private ClusterSettings clusterSettings; private void createService(Settings settings) { clusterSettings = new ClusterSettings(settings, BUILT_IN_CLUSTER_SETTINGS); - noMasterBlockService = new NoMasterBlockService(settings, clusterSettings); + noClusterManagerBlockService = new NoMasterBlockService(settings, clusterSettings); } private void assertDeprecatedWarningEmitted() { @@ -61,22 +61,22 @@ private void assertDeprecatedWarningEmitted() { public void testBlocksWritesByDefault() { createService(Settings.EMPTY); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); } public void testBlocksWritesIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); } public void testBlocksAllIfConfiguredBySetting() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); } public void testBlocksMetadataWritesIfConfiguredBySetting() { 
createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); } public void testRejectsInvalidSetting() { @@ -88,12 +88,12 @@ public void testRejectsInvalidSetting() { public void testSettingCanBeUpdated() { createService(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "all").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_ALL)); clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_WRITES)); clusterSettings.applySettings(Settings.builder().put(NO_CLUSTER_MANAGER_BLOCK_SETTING.getKey(), "metadata_write").build()); - assertThat(noMasterBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); + assertThat(noClusterManagerBlockService.getNoMasterBlock(), sameInstance(NO_MASTER_BLOCK_METADATA_WRITES)); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index 43c9c77f193dd..2cf8c2c13d3b6 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -98,7 +98,7 @@ public class NodeJoinTests extends OpenSearchTestCase { private static ThreadPool threadPool; - private MasterService masterService; + private MasterService clusterManagerService; private Coordinator 
coordinator; private DeterministicTaskQueue deterministicTaskQueue; private Transport transport; @@ -117,7 +117,7 @@ public static void afterClass() { @After public void tearDown() throws Exception { super.tearDown(); - masterService.close(); + clusterManagerService.close(); } private static ClusterState initialState(DiscoveryNode localNode, long term, long version, VotingConfiguration config) { @@ -138,61 +138,68 @@ private static ClusterState initialState(DiscoveryNode localNode, long term, lon .build(); } - private void setupFakeMasterServiceAndCoordinator(long term, ClusterState initialState, NodeHealthService nodeHealthService) { + private void setupFakeClusterManagerServiceAndCoordinator(long term, ClusterState initialState, NodeHealthService nodeHealthService) { deterministicTaskQueue = new DeterministicTaskQueue( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test").build(), random() ); final ThreadPool fakeThreadPool = deterministicTaskQueue.getThreadPool(); - FakeThreadPoolMasterService fakeMasterService = new FakeThreadPoolMasterService( + FakeThreadPoolMasterService fakeClusterManagerService = new FakeThreadPoolMasterService( "test_node", "test", fakeThreadPool, deterministicTaskQueue::scheduleNow ); - setupMasterServiceAndCoordinator(term, initialState, fakeMasterService, fakeThreadPool, Randomness.get(), nodeHealthService); - fakeMasterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + setupClusterManagerServiceAndCoordinator( + term, + initialState, + fakeClusterManagerService, + fakeThreadPool, + Randomness.get(), + nodeHealthService + ); + fakeClusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { coordinator.handlePublishRequest(new PublishRequest(event.state())); publishListener.onResponse(null); }); - fakeMasterService.start(); + fakeClusterManagerService.start(); } - private void setupRealMasterServiceAndCoordinator(long term, ClusterState initialState) { - MasterService 
masterService = new MasterService( + private void setupRealClusterManagerServiceAndCoordinator(long term, ClusterState initialState) { + MasterService clusterManagerService = new MasterService( Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "test_node").build(), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool ); AtomicReference clusterStateRef = new AtomicReference<>(initialState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - setupMasterServiceAndCoordinator( + setupClusterManagerServiceAndCoordinator( term, initialState, - masterService, + clusterManagerService, threadPool, new Random(Randomness.get().nextLong()), () -> new StatusInfo(HEALTHY, "healthy-info") ); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); } - private void setupMasterServiceAndCoordinator( + private void setupClusterManagerServiceAndCoordinator( long term, ClusterState initialState, - MasterService masterService, + MasterService clusterManagerService, ThreadPool threadPool, Random random, NodeHealthService nodeHealthService ) { - if (this.masterService != null || coordinator != null) { + if (this.clusterManagerService != null || coordinator != null) { throw new IllegalStateException("method setupMasterServiceAndCoordinator can only be called once"); } - this.masterService = masterService; + this.clusterManagerService = clusterManagerService; CapturingTransport capturingTransport = new CapturingTransport() { @Override protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode destination) { @@ -224,7 +231,7 @@ protected void onSendRequest(long requestId, 
String action, TransportRequest req transportService, writableRegistry(), OpenSearchAllocationTestCase.createAllocationService(Settings.EMPTY), - masterService, + clusterManagerService, () -> new InMemoryPersistedState(term, initialState), r -> emptyList(), new NoOpClusterApplier(), @@ -245,14 +252,14 @@ protected DiscoveryNode newNode(int i) { return newNode(i, randomBoolean()); } - protected DiscoveryNode newNode(int i, boolean master) { + protected DiscoveryNode newNode(int i, boolean clusterManager) { final Set roles; - if (master) { + if (clusterManager) { roles = singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } else { roles = Collections.emptySet(); } - final String prefix = master ? "master_" : "data_"; + final String prefix = clusterManager ? "cluster_manager_" : "data_"; return new DiscoveryNode(prefix + i, i + "", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); } @@ -323,7 +330,7 @@ public void testJoinWithHigherTermElectsLeader() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(randomFrom(node0, node1))), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -347,7 +354,7 @@ public void testJoinWithHigherTermButBetterStateGetsRejected() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node1)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -362,12 +369,12 @@ public void testJoinWithHigherTermButBetterStateGetsRejected() { assertFalse(isLocalNodeElectedMaster()); } - public void 
testJoinWithHigherTermButBetterStateStillElectsMasterThroughSelfJoin() { + public void testJoinWithHigherTermButBetterStateStillElectsClusterManagerThroughSelfJoin() { DiscoveryNode node0 = newNode(0, true); DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -384,7 +391,7 @@ public void testJoinElectedLeader() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -404,7 +411,7 @@ public void testJoinElectedLeaderWithHigherTerm() { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -426,7 +433,7 @@ public void testJoinAccumulation() { DiscoveryNode node2 = newNode(2, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node2)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -458,7 +465,7 @@ public void testJoinFollowerWithHigherTerm() throws Exception { DiscoveryNode node1 = newNode(1, true); long initialTerm = 
randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -481,7 +488,7 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { "knownNodeName" ); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, buildStateWithVotingConfigExclusion(initialNode, initialTerm, initialVersion, votingConfigExclusion), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -507,7 +514,7 @@ public void testJoinUpdateVotingConfigExclusion() throws Exception { ); assertTrue( - MasterServiceTests.discoveryState(masterService) + MasterServiceTests.discoveryState(clusterManagerService) .getVotingConfigExclusions() .stream() .anyMatch( @@ -583,7 +590,7 @@ public void testJoinFollowerFails() throws Exception { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -604,7 +611,7 @@ public void testBecomeFollowerFailsPendingJoin() throws Exception { DiscoveryNode node1 = newNode(1, true); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node1)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -626,27 +633,31 @@ public void testBecomeFollowerFailsPendingJoin() throws Exception { } public void testConcurrentJoining() { - List masterNodes = IntStream.rangeClosed(1, 
randomIntBetween(2, 5)) + List clusterManagerNodes = IntStream.rangeClosed(1, randomIntBetween(2, 5)) .mapToObj(nodeId -> newNode(nodeId, true)) .collect(Collectors.toList()); - List otherNodes = IntStream.rangeClosed(masterNodes.size() + 1, masterNodes.size() + 1 + randomIntBetween(0, 5)) - .mapToObj(nodeId -> newNode(nodeId, false)) - .collect(Collectors.toList()); - List allNodes = Stream.concat(masterNodes.stream(), otherNodes.stream()).collect(Collectors.toList()); + List otherNodes = IntStream.rangeClosed( + clusterManagerNodes.size() + 1, + clusterManagerNodes.size() + 1 + randomIntBetween(0, 5) + ).mapToObj(nodeId -> newNode(nodeId, false)).collect(Collectors.toList()); + List allNodes = Stream.concat(clusterManagerNodes.stream(), otherNodes.stream()).collect(Collectors.toList()); - DiscoveryNode localNode = masterNodes.get(0); + DiscoveryNode localNode = clusterManagerNodes.get(0); VotingConfiguration votingConfiguration = new VotingConfiguration( - randomValueOtherThan(singletonList(localNode), () -> randomSubsetOf(randomIntBetween(1, masterNodes.size()), masterNodes)) - .stream() - .map(DiscoveryNode::getId) - .collect(Collectors.toSet()) + randomValueOtherThan( + singletonList(localNode), + () -> randomSubsetOf(randomIntBetween(1, clusterManagerNodes.size()), clusterManagerNodes) + ).stream().map(DiscoveryNode::getId).collect(Collectors.toSet()) ); logger.info("Voting configuration: {}", votingConfiguration); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); - setupRealMasterServiceAndCoordinator(initialTerm, initialState(localNode, initialTerm, initialVersion, votingConfiguration)); + setupRealClusterManagerServiceAndCoordinator( + initialTerm, + initialState(localNode, initialTerm, initialVersion, votingConfiguration) + ); long newTerm = initialTerm + randomLongBetween(1, 10); // we need at least a quorum of voting nodes with a correct term and worse state @@ -735,10 +746,10 @@ public void 
testConcurrentJoining() { throw new RuntimeException(e); } - assertTrue(MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster()); + assertTrue(MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster()); for (DiscoveryNode successfulNode : successfulNodes) { assertTrue(successfulNode + " joined cluster", clusterStateHasNode(successfulNode)); - assertFalse(successfulNode + " voted for master", coordinator.missingJoinVoteFrom(successfulNode)); + assertFalse(successfulNode + " voted for cluster-manager", coordinator.missingJoinVoteFrom(successfulNode)); } } @@ -749,7 +760,7 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { DiscoveryNode node1 = new DiscoveryNode("master1", "1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT); long initialTerm = 1; long initialVersion = 1; - setupFakeMasterServiceAndCoordinator( + setupFakeClusterManagerServiceAndCoordinator( initialTerm, initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), () -> new StatusInfo(HEALTHY, "healthy-info") @@ -765,10 +776,10 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { } private boolean isLocalNodeElectedMaster() { - return MasterServiceTests.discoveryState(masterService).nodes().isLocalNodeElectedMaster(); + return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } private boolean clusterStateHasNode(DiscoveryNode node) { - return node.equals(MasterServiceTests.discoveryState(masterService).nodes().get(node.getId())); + return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } } diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java index 954e8ce79cdc8..3e86ec11ae7b3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java 
+++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java @@ -447,7 +447,7 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter ); } - public void testPublishingToMastersFirst() { + public void testPublishingToClusterManagersFirst() { VotingConfiguration singleNodeConfig = VotingConfiguration.of(n1); initializeCluster(singleNodeConfig); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java index 71d640e202f33..057455fefc4b3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ReconfiguratorTests.java @@ -223,14 +223,14 @@ private void check( boolean autoShrinkVotingConfiguration, VotingConfiguration expectedConfig ) { - final DiscoveryNode master = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get(); - check(liveNodes, retired, master.getId(), config, autoShrinkVotingConfiguration, expectedConfig); + final DiscoveryNode clusterManager = liveNodes.stream().sorted(Comparator.comparing(DiscoveryNode::getId)).findFirst().get(); + check(liveNodes, retired, clusterManager.getId(), config, autoShrinkVotingConfiguration, expectedConfig); } private void check( Set liveNodes, Set retired, - String masterId, + String clusterManagerId, VotingConfiguration config, boolean autoShrinkVotingConfiguration, VotingConfiguration expectedConfig @@ -239,14 +239,14 @@ private void check( Settings.builder().put(CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION.getKey(), autoShrinkVotingConfiguration).build() ); - final DiscoveryNode master = liveNodes.stream().filter(n -> n.getId().equals(masterId)).findFirst().get(); - final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, master, config); + final DiscoveryNode clusterManager = 
liveNodes.stream().filter(n -> n.getId().equals(clusterManagerId)).findFirst().get(); + final VotingConfiguration adaptedConfig = reconfigurator.reconfigure(liveNodes, retired, clusterManager, config); assertEquals( new ParameterizedMessage( - "[liveNodes={}, retired={}, master={}, config={}, autoShrinkVotingConfiguration={}]", + "[liveNodes={}, retired={}, clusterManager={}, config={}, autoShrinkVotingConfiguration={}]", liveNodes, retired, - master, + clusterManager, config, autoShrinkVotingConfiguration ).getFormattedMessage(), diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java index 2f05297146f8e..06e58672fa994 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterHealthAllocationTests.java @@ -85,10 +85,13 @@ public void testClusterHealth() { assertEquals(ClusterHealthStatus.GREEN, getClusterHealthStatus(clusterState)); } - private ClusterState addNode(ClusterState clusterState, String nodeName, boolean isMaster) { + private ClusterState addNode(ClusterState clusterState, String nodeName, boolean isClusterManager) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); nodeBuilder.add( - newNode(nodeName, Collections.singleton(isMaster ? DiscoveryNodeRole.CLUSTER_MANAGER_ROLE : DiscoveryNodeRole.DATA_ROLE)) + newNode( + nodeName, + Collections.singleton(isClusterManager ? 
DiscoveryNodeRole.CLUSTER_MANAGER_ROLE : DiscoveryNodeRole.DATA_ROLE) + ) ); return ClusterState.builder(clusterState).nodes(nodeBuilder).build(); } diff --git a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java index 9a6c458edeb11..bd856d5c41ace 100644 --- a/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java +++ b/server/src/test/java/org/opensearch/cluster/health/ClusterStateHealthTests.java @@ -157,11 +157,11 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte } }); - logger.info("--> submit task to restore master"); + logger.info("--> submit task to restore cluster-manager"); ClusterState currentState = clusterService.getClusterApplierService().state(); clusterService.getClusterApplierService() .onNewClusterState( - "restore master", + "restore cluster-manager", () -> ClusterState.builder(currentState) .nodes(DiscoveryNodes.builder(currentState.nodes()).masterNodeId(currentState.nodes().getLocalNodeId())) .build(), @@ -184,7 +184,7 @@ public void testClusterHealthWaitsForClusterStateApplication() throws Interrupte assertFalse(listener.isDone()); - logger.info("--> realising task to restore master"); + logger.info("--> realising task to restore cluster-manager"); applyLatch.countDown(); listener.get(); } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java index aafd507aef7cd..559dd86dce4b1 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/AutoExpandReplicasTests.java @@ -145,7 +145,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE try { List allNodes = new ArrayList<>(); - DiscoveryNode localNode = 
createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager allNodes.add(localNode); int numDataNodes = randomIntBetween(3, 5); List dataNodes = new ArrayList<>(numDataNodes); @@ -246,7 +246,7 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { VersionUtils.randomVersionBetween(random(), Version.V_1_0_0, Version.V_1_2_1), DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE - ); // local node is the master + ); // local node is the cluster-manager allNodes.add(oldNode); ClusterState state = ClusterStateCreationUtils.state(oldNode, oldNode, allNodes.toArray(new DiscoveryNode[0])); diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java index bc36a57fed125..80c7d8c9417fe 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java @@ -108,14 +108,14 @@ public void testAll() { assertThat(discoveryNodes.resolveNodes(new String[0]), arrayContainingInAnyOrder(allNodes)); assertThat(discoveryNodes.resolveNodes("_all"), arrayContainingInAnyOrder(allNodes)); - final String[] nonMasterNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) .map(n -> n.value) .filter(n -> n.isMasterNode() == false) .map(DiscoveryNode::getId) .toArray(String[]::new); - assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonMasterNodes)); + assertThat(discoveryNodes.resolveNodes("_all", "cluster_manager:false"), arrayContainingInAnyOrder(nonClusterManagerNodes)); - assertThat(discoveryNodes.resolveNodes("master:false", 
"_all"), arrayContainingInAnyOrder(allNodes)); + assertThat(discoveryNodes.resolveNodes("cluster_manager:false", "_all"), arrayContainingInAnyOrder(allNodes)); } public void testCoordinatorOnlyNodes() { @@ -135,7 +135,7 @@ public void testCoordinatorOnlyNodes() { assertThat(discoveryNodes.resolveNodes("coordinating_only:true"), arrayContainingInAnyOrder(coordinatorOnlyNodes)); assertThat( - discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "master:false"), + discoveryNodes.resolveNodes("_all", "data:false", "ingest:false", "cluster_manager:false"), arrayContainingInAnyOrder(coordinatorOnlyNodes) ); assertThat(discoveryNodes.resolveNodes("_all", "coordinating_only:false"), arrayContainingInAnyOrder(nonCoordinatorOnlyNodes)); @@ -175,7 +175,7 @@ public void testResolveNodesIds() { assertThat(resolvedNodesIds, equalTo(expectedNodesIds)); } - public void testMastersFirst() { + public void testClusterManagersFirst() { final List inputNodes = randomNodes(10); final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); inputNodes.forEach(discoBuilder::add); @@ -254,19 +254,19 @@ public void testDeltas() { nodesB.add(node); } - DiscoveryNode masterA = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesA); - DiscoveryNode masterB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB); + DiscoveryNode clusterManagerA = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesA); + DiscoveryNode clusterManagerB = randomBoolean() ? null : RandomPicks.randomFrom(random(), nodesB); DiscoveryNodes.Builder builderA = DiscoveryNodes.builder(); nodesA.stream().forEach(builderA::add); - final String masterAId = masterA == null ? null : masterA.getId(); - builderA.masterNodeId(masterAId); + final String clusterManagerAId = clusterManagerA == null ? 
null : clusterManagerA.getId(); + builderA.masterNodeId(clusterManagerAId); builderA.localNodeId(RandomPicks.randomFrom(random(), nodesA).getId()); DiscoveryNodes.Builder builderB = DiscoveryNodes.builder(); nodesB.stream().forEach(builderB::add); - final String masterBId = masterB == null ? null : masterB.getId(); - builderB.masterNodeId(masterBId); + final String clusterManagerBId = clusterManagerB == null ? null : clusterManagerB.getId(); + builderB.masterNodeId(clusterManagerBId); builderB.localNodeId(RandomPicks.randomFrom(random(), nodesB).getId()); final DiscoveryNodes discoNodesA = builderA.build(); @@ -276,18 +276,18 @@ public void testDeltas() { DiscoveryNodes.Delta delta = discoNodesB.delta(discoNodesA); - if (masterA == null) { + if (clusterManagerA == null) { assertThat(delta.previousClusterManagerNode(), nullValue()); } else { - assertThat(delta.previousClusterManagerNode().getId(), equalTo(masterAId)); + assertThat(delta.previousClusterManagerNode().getId(), equalTo(clusterManagerAId)); } - if (masterB == null) { + if (clusterManagerB == null) { assertThat(delta.newMasterNode(), nullValue()); } else { - assertThat(delta.newMasterNode().getId(), equalTo(masterBId)); + assertThat(delta.newMasterNode().getId(), equalTo(clusterManagerBId)); } - if (Objects.equals(masterAId, masterBId)) { + if (Objects.equals(clusterManagerAId, clusterManagerBId)) { assertFalse(delta.masterNodeChanged()); } else { assertTrue(delta.masterNodeChanged()); @@ -306,6 +306,32 @@ public void testDeltas() { assertThat(delta.removedNodes().size(), equalTo(removedNodes.size())); } + // Validate using the deprecated 'master' role in the node filter can get correct result. 
+ public void testDeprecatedMasterNodeFilter() { + final DiscoveryNodes discoveryNodes = buildDiscoveryNodes(); + + final String[] allNodes = StreamSupport.stream(discoveryNodes.spliterator(), false) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + + final String[] clusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + .map(n -> n.value) + .filter(n -> n.isMasterNode() == true) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + + final String[] nonClusterManagerNodes = StreamSupport.stream(discoveryNodes.getNodes().values().spliterator(), false) + .map(n -> n.value) + .filter(n -> n.isMasterNode() == false) + .map(DiscoveryNode::getId) + .toArray(String[]::new); + + assertThat(discoveryNodes.resolveNodes("cluster_manager:true"), arrayContainingInAnyOrder(clusterManagerNodes)); + assertThat(discoveryNodes.resolveNodes("master:true"), arrayContainingInAnyOrder(clusterManagerNodes)); + assertThat(discoveryNodes.resolveNodes("_all", "master:false"), arrayContainingInAnyOrder(nonClusterManagerNodes)); + assertThat(discoveryNodes.resolveNodes("master:false", "_all"), arrayContainingInAnyOrder(allNodes)); + } + private static AtomicInteger idGenerator = new AtomicInteger(); private static List randomNodes(final int numNodes) { diff --git a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java index db93aa39c2da7..2f6d34e1eb204 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/BatchedRerouteServiceTests.java @@ -101,7 +101,7 @@ public void testReroutesWhenRequested() throws InterruptedException { public void testBatchesReroutesTogetherAtPriorityOfHighestSubmittedReroute() throws BrokenBarrierException, InterruptedException { final CyclicBarrier cyclicBarrier = new CyclicBarrier(2); - 
clusterService.submitStateUpdateTask("block master service", new ClusterStateUpdateTask() { + clusterService.submitStateUpdateTask("block cluster-manager service", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { cyclicBarrier.await(); // notify test that we are blocked @@ -115,7 +115,7 @@ public void onFailure(String source, Exception e) { } }); - cyclicBarrier.await(); // wait for master thread to be blocked + cyclicBarrier.await(); // wait for cluster-manager thread to be blocked final AtomicBoolean rerouteExecuted = new AtomicBoolean(); final BatchedRerouteService batchedRerouteService = new BatchedRerouteService(clusterService, (s, r) -> { @@ -194,7 +194,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS actions.forEach(threadPool.generic()::execute); assertTrue(tasksSubmittedCountDown.await(10, TimeUnit.SECONDS)); - cyclicBarrier.await(); // allow master thread to continue; + cyclicBarrier.await(); // allow cluster-manager thread to continue; assertTrue(tasksCompletedCountDown.await(10, TimeUnit.SECONDS)); // wait for reroute to complete assertTrue(rerouteExecuted.get()); // see above for assertion that it's only called once } diff --git a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java index d42c3e80c60c9..8bf2b1626292a 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/OperationRoutingTests.java @@ -774,14 +774,14 @@ private DiscoveryNode[] setupNodes() { ); allNodes[i++] = node; } - DiscoveryNode master = new DiscoveryNode( - "master", + DiscoveryNode clusterManager = new DiscoveryNode( + "cluster-manager", buildNewFakeTransportAddress(), Collections.emptyMap(), Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), Version.CURRENT ); - 
allNodes[i] = master; + allNodes[i] = clusterManager; return allNodes; } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java index f60497b4108b7..b3d62ea9c6160 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/FailedNodeRoutingTests.java @@ -230,7 +230,7 @@ private static Version getNodeVersion(ShardRouting shardRouting, ClusterState st public ClusterState randomInitialClusterState() { List allNodes = new ArrayList<>(); - DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager allNodes.add(localNode); // at least two nodes that have the data role so that we can allocate shards allNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE)); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java index 243701e746ef5..3a1f4a586d519 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/InSyncAllocationIdTests.java @@ -170,7 +170,7 @@ public void testInSyncAllocationIdsUpdated() { /** * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica. - * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, the node of the replica was + * The primary instructs cluster-manager to fail replica before acknowledging write to client. 
In the meanwhile, the node of the replica was * removed from the cluster (disassociateDeadNodes). This means that the ShardRouting of the replica was failed, but it's allocation * id is still part of the in-sync set. We have to make sure that the failShard request from the primary removes the allocation id * from the in-sync set. @@ -204,8 +204,8 @@ public void testDeadNodesBeforeReplicaFailed() throws Exception { /** * Assume following scenario: indexing request is written to primary, but fails to be replicated to active replica. - * The primary instructs master to fail replica before acknowledging write to client. In the meanwhile, primary fails for an unrelated - * reason. Master now batches both requests to fail primary and replica. We have to make sure that only the allocation id of the primary + * The primary instructs cluster-manager to fail replica before acknowledging write to client. In the meanwhile, primary fails for an unrelated + * reason. Cluster-manager now batches both requests to fail primary and replica. We have to make sure that only the allocation id of the primary * is kept in the in-sync allocation set before we acknowledge request to client. Otherwise we would acknowledge a write that made it * into the primary but not the replica but the replica is still considered non-stale. 
*/ diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index cbf624cdad2ca..c3f54fa7580ac 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -1072,7 +1072,7 @@ public void testForSingleDataNode() { RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); - logger.info("--> adding one master node, one data node"); + logger.info("--> adding one cluster-manager node, one data node"); DiscoveryNode discoveryNode1 = new DiscoveryNode( "", "node1", @@ -1222,9 +1222,9 @@ public void testWatermarksEnabledForSingleDataNode() { .build(); RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); - DiscoveryNode masterNode = new DiscoveryNode( - "master", - "master", + DiscoveryNode clusterManagerNode = new DiscoveryNode( + "cluster-manager", + "cluster-manager", buildNewFakeTransportAddress(), emptyMap(), singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), @@ -1240,7 +1240,7 @@ public void testWatermarksEnabledForSingleDataNode() { ); DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder().add(dataNode); if (randomBoolean()) { - discoveryNodesBuilder.add(masterNode); + discoveryNodesBuilder.add(clusterManagerNode); } DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java index 5b05cb3afd83e..1a047b3ccd9da 100644 --- 
a/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/decider/RestoreInProgressAllocationDeciderTests.java @@ -199,9 +199,9 @@ private ClusterState createInitialClusterState() { RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); DiscoveryNodes discoveryNodes = DiscoveryNodes.builder() - .add(newNode("master", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) - .localNodeId("master") - .masterNodeId("master") + .add(newNode("cluster-manager", Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) + .localNodeId("cluster-manager") + .masterNodeId("cluster-manager") .build(); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java index 04b4044864dbd..b9b939f28e365 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterApplierServiceTests.java @@ -104,7 +104,7 @@ public void tearDown() throws Exception { super.tearDown(); } - private TimedClusterApplierService createTimedClusterService(boolean makeMaster) { + private TimedClusterApplierService createTimedClusterService(boolean makeClusterManager) { DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); TimedClusterApplierService timedClusterApplierService = new TimedClusterApplierService( Settings.builder().put("cluster.name", "ClusterApplierServiceTests").build(), @@ -118,7 +118,7 @@ private TimedClusterApplierService createTimedClusterService(boolean makeMaster) DiscoveryNodes.builder() .add(localNode) .localNodeId(localNode.getId()) - 
.masterNodeId(makeMaster ? localNode.getId() : null) + .masterNodeId(makeClusterManager ? localNode.getId() : null) ) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build() @@ -292,19 +292,19 @@ public void onFailure(String source, Exception e) { } } - public void testLocalNodeMasterListenerCallbacks() { + public void testLocalNodeClusterManagerListenerCallbacks() { TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false); - AtomicBoolean isMaster = new AtomicBoolean(); + AtomicBoolean isClusterManager = new AtomicBoolean(); timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() { @Override public void onClusterManager() { - isMaster.set(true); + isClusterManager.set(true); } @Override public void offClusterManager() { - isMaster.set(false); + isClusterManager.set(false); } }); @@ -313,7 +313,7 @@ public void offClusterManager() { DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build(); setState(timedClusterApplierService, state); - assertThat(isMaster.get(), is(true)); + assertThat(isClusterManager.get(), is(true)); nodes = state.nodes(); nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null); @@ -322,11 +322,11 @@ public void offClusterManager() { .nodes(nodesBuilder) .build(); setState(timedClusterApplierService, state); - assertThat(isMaster.get(), is(false)); + assertThat(isClusterManager.get(), is(false)); nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()); state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build(); setState(timedClusterApplierService, state); - assertThat(isMaster.get(), is(true)); + assertThat(isClusterManager.get(), is(true)); timedClusterApplierService.close(); } diff --git 
a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java index 845a5ee91052d..d5f7344c544b9 100644 --- a/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/MasterServiceTests.java @@ -121,9 +121,9 @@ public void randomizeCurrentTime() { relativeTimeInMillis = randomLongBetween(0L, 1L << 62); } - private MasterService createMasterService(boolean makeMaster) { + private MasterService createClusterManagerService(boolean makeClusterManager) { final DiscoveryNode localNode = new DiscoveryNode("node1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - final MasterService masterService = new MasterService( + final MasterService clusterManagerService = new MasterService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -133,26 +133,29 @@ private MasterService createMasterService(boolean makeMaster) { ); final ClusterState initialClusterState = ClusterState.builder(new ClusterName(MasterServiceTests.class.getSimpleName())) .nodes( - DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).masterNodeId(makeMaster ? localNode.getId() : null) + DiscoveryNodes.builder() + .add(localNode) + .localNodeId(localNode.getId()) + .masterNodeId(makeClusterManager ? 
localNode.getId() : null) ) .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); - return masterService; + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); + return clusterManagerService; } - public void testMasterAwareExecution() throws Exception { - final MasterService nonMaster = createMasterService(false); + public void testClusterManagerAwareExecution() throws Exception { + final MasterService nonClusterManager = createClusterManagerService(false); final boolean[] taskFailed = { false }; final CountDownLatch latch1 = new CountDownLatch(1); - nonMaster.submitStateUpdateTask("test", new ClusterStateUpdateTask() { + nonClusterManager.submitStateUpdateTask("test", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { latch1.countDown(); @@ -167,10 +170,10 @@ public void onFailure(String source, Exception e) { }); latch1.await(); - assertTrue("cluster state update task was executed on a non-master", taskFailed[0]); + assertTrue("cluster state update task was executed on a non-cluster-manager", taskFailed[0]); final CountDownLatch latch2 = new CountDownLatch(1); - nonMaster.submitStateUpdateTask("test", new LocalClusterUpdateTask() { + nonClusterManager.submitStateUpdateTask("test", new LocalClusterUpdateTask() { @Override public ClusterTasksResult execute(ClusterState currentState) { taskFailed[0] = false; @@ -185,13 +188,13 @@ public void onFailure(String source, Exception e) { } }); latch2.await(); - assertFalse("non-master cluster state update task was 
not executed", taskFailed[0]); + assertFalse("non-cluster-manager cluster state update task was not executed", taskFailed[0]); - nonMaster.close(); + nonClusterManager.close(); } public void testThreadContext() throws InterruptedException { - final MasterService master = createMasterService(true); + final MasterService clusterManager = createClusterManagerService(true); final CountDownLatch latch = new CountDownLatch(1); try (ThreadContext.StoredContext ignored = threadPool.getThreadContext().stashContext()) { @@ -203,9 +206,9 @@ public void testThreadContext() throws InterruptedException { threadPool.getThreadContext().putHeader(expectedHeaders); final TimeValue ackTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); - final TimeValue masterTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); + final TimeValue clusterManagerTimeout = randomBoolean() ? TimeValue.ZERO : TimeValue.timeValueMillis(randomInt(10000)); - master.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { + clusterManager.submitStateUpdateTask("test", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { assertTrue(threadPool.getThreadContext().isSystemContext()); @@ -249,7 +252,7 @@ public TimeValue ackTimeout() { @Override public TimeValue timeout() { - return masterTimeout; + return clusterManagerTimeout; } @Override @@ -277,7 +280,7 @@ public void onAckTimeout() { latch.await(); - master.close(); + clusterManager.close(); } /* @@ -289,8 +292,8 @@ public void testClusterStateTaskListenerThrowingExceptionIsOkay() throws Interru final CountDownLatch latch = new CountDownLatch(1); AtomicBoolean published = new AtomicBoolean(); - try (MasterService masterService = createMasterService(true)) { - masterService.submitStateUpdateTask( + try (MasterService clusterManagerService = createClusterManagerService(true)) { + 
clusterManagerService.submitStateUpdateTask( "testClusterStateTaskListenerThrowingExceptionIsOkay", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -418,8 +421,8 @@ public void testClusterStateUpdateLogging() throws Exception { ) ); - try (MasterService masterService = createMasterService(true)) { - masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + try (MasterService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(1).millis(); @@ -434,7 +437,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(2).millis(); @@ -449,7 +452,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onFailure(String source, Exception e) {} }); - masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += TimeValue.timeValueSeconds(3).millis(); @@ -466,7 +469,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -614,7 +617,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } }; - try 
(MasterService masterService = createMasterService(true)) { + try (MasterService clusterManagerService = createClusterManagerService(true)) { final ConcurrentMap submittedTasksPerThread = new ConcurrentHashMap<>(); CyclicBarrier barrier = new CyclicBarrier(1 + numberOfThreads); for (int i = 0; i < numberOfThreads; i++) { @@ -629,7 +632,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS submittedTasksPerThread.computeIfAbsent(threadName, key -> new AtomicInteger()).addAndGet(tasks.size()); final TaskExecutor executor = assignment.v1(); if (tasks.size() == 1) { - masterService.submitStateUpdateTask( + clusterManagerService.submitStateUpdateTask( threadName, tasks.stream().findFirst().get(), ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -639,7 +642,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } else { Map taskListeners = new HashMap<>(); tasks.forEach(t -> taskListeners.put(t, listener)); - masterService.submitStateUpdateTasks( + clusterManagerService.submitStateUpdateTasks( threadName, taskListeners, ClusterStateTaskConfig.build(randomFrom(Priority.values())), @@ -693,8 +696,8 @@ public void testBlockingCallInClusterStateTaskListenerFails() throws Interrupted final CountDownLatch latch = new CountDownLatch(1); final AtomicReference assertionRef = new AtomicReference<>(); - try (MasterService masterService = createMasterService(true)) { - masterService.submitStateUpdateTask( + try (MasterService clusterManagerService = createClusterManagerService(true)) { + clusterManagerService.submitStateUpdateTask( "testBlockingCallInClusterStateTaskListenerFails", new Object(), ClusterStateTaskConfig.build(Priority.NORMAL), @@ -785,7 +788,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { ); try ( - MasterService masterService = new MasterService( + MasterService clusterManagerService = new MasterService( Settings.builder() 
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -807,7 +810,7 @@ public void testLongClusterStateUpdateLogging() throws Exception { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference clusterStateRef = new AtomicReference<>(initialClusterState); - masterService.setClusterStatePublisher((event, publishListener, ackListener) -> { + clusterManagerService.setClusterStatePublisher((event, publishListener, ackListener) -> { if (event.source().contains("test5")) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( Settings.EMPTY @@ -822,12 +825,12 @@ public void testLongClusterStateUpdateLogging() throws Exception { clusterStateRef.set(event.state()); publishListener.onResponse(null); }); - masterService.setClusterStateSupplier(clusterStateRef::get); - masterService.start(); + clusterManagerService.setClusterStateSupplier(clusterStateRef::get); + clusterManagerService.start(); final CountDownLatch latch = new CountDownLatch(6); final CountDownLatch processedFirstTask = new CountDownLatch(1); - masterService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test1", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += randomLongBetween( @@ -850,7 +853,7 @@ public void onFailure(String source, Exception e) { }); processedFirstTask.await(); - masterService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test2", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( @@ -869,7 +872,7 @@ public void onFailure(String source, Exception e) { latch.countDown(); } }); - 
masterService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test3", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( @@ -888,7 +891,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test4", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { relativeTimeInMillis += MasterService.CLUSTER_MANAGER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING.get( @@ -907,7 +910,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test5", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -923,7 +926,7 @@ public void onFailure(String source, Exception e) { fail(); } }); - masterService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test6", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).incrementVersion().build(); @@ -941,7 +944,7 @@ public void onFailure(String source, Exception e) { }); // Additional update task to make sure all previous logging made it to the loggerName // We don't check logging for this on since there is no guarantee that it will occur before our check - masterService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { + clusterManagerService.submitStateUpdateTask("test7", new ClusterStateUpdateTask() { @Override public ClusterState 
execute(ClusterState currentState) { return currentState; @@ -968,7 +971,7 @@ public void testAcking() throws InterruptedException { final DiscoveryNode node2 = new DiscoveryNode("node2", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); final DiscoveryNode node3 = new DiscoveryNode("node3", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); try ( - MasterService masterService = new MasterService( + MasterService clusterManagerService = new MasterService( Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), MasterServiceTests.class.getSimpleName()) .put(Node.NODE_NAME_SETTING.getKey(), "test_node") @@ -983,9 +986,9 @@ public void testAcking() throws InterruptedException { .blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK) .build(); final AtomicReference publisherRef = new AtomicReference<>(); - masterService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); - masterService.setClusterStateSupplier(() -> initialClusterState); - masterService.start(); + clusterManagerService.setClusterStatePublisher((e, pl, al) -> publisherRef.get().publish(e, pl, al)); + clusterManagerService.setClusterStateSupplier(() -> initialClusterState); + clusterManagerService.start(); // check that we don't time out before even committing the cluster state { @@ -997,7 +1000,7 @@ public void testAcking() throws InterruptedException { ) ); - masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1052,7 +1055,7 @@ public void onAckTimeout() { ackListener.onNodeAck(node3, null); }); - masterService.submitStateUpdateTask("test2", new AckedClusterStateUpdateTask(null, null) { + clusterManagerService.submitStateUpdateTask("test2", new 
AckedClusterStateUpdateTask(null, null) { @Override public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState).build(); @@ -1096,10 +1099,10 @@ public void onAckTimeout() { } /** - * Returns the cluster state that the master service uses (and that is provided by the discovery layer) + * Returns the cluster state that the cluster-manager service uses (and that is provided by the discovery layer) */ - public static ClusterState discoveryState(MasterService masterService) { - return masterService.state(); + public static ClusterState discoveryState(MasterService clusterManagerService) { + return clusterManagerService.state(); } } diff --git a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java index e690770b3d0a5..307edc2f03075 100644 --- a/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java +++ b/server/src/test/java/org/opensearch/discovery/AbstractDisruptionTestCase.java @@ -155,15 +155,15 @@ ClusterState getNodeClusterState(String node) { return client(node).admin().cluster().prepareState().setLocal(true).get().getState(); } - void assertNoMaster(final String node) throws Exception { - assertNoMaster(node, null, TimeValue.timeValueSeconds(30)); + void assertNoClusterManager(final String node) throws Exception { + assertNoClusterManager(node, null, TimeValue.timeValueSeconds(30)); } - void assertNoMaster(final String node, TimeValue maxWaitTime) throws Exception { - assertNoMaster(node, null, maxWaitTime); + void assertNoClusterManager(final String node, TimeValue maxWaitTime) throws Exception { + assertNoClusterManager(node, null, maxWaitTime); } - void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws Exception { + void assertNoClusterManager(final String node, @Nullable final ClusterBlock expectedBlocks, TimeValue maxWaitTime) throws 
Exception { assertBusy(() -> { ClusterState state = getNodeClusterState(node); final DiscoveryNodes nodes = state.nodes(); @@ -179,26 +179,34 @@ void assertNoMaster(final String node, @Nullable final ClusterBlock expectedBloc }, maxWaitTime.getMillis(), TimeUnit.MILLISECONDS); } - void assertDifferentMaster(final String node, final String oldMasterNode) throws Exception { + void assertDifferentClusterManager(final String node, final String oldClusterManagerNode) throws Exception { assertBusy(() -> { ClusterState state = getNodeClusterState(node); - String masterNode = null; + String clusterManagerNode = null; if (state.nodes().getMasterNode() != null) { - masterNode = state.nodes().getMasterNode().getName(); + clusterManagerNode = state.nodes().getMasterNode().getName(); } - logger.trace("[{}] master is [{}]", node, state.nodes().getMasterNode()); - assertThat("node [" + node + "] still has [" + masterNode + "] as master", oldMasterNode, not(equalTo(masterNode))); + logger.trace("[{}] cluster-manager is [{}]", node, state.nodes().getMasterNode()); + assertThat( + "node [" + node + "] still has [" + clusterManagerNode + "] as cluster-manager", + oldClusterManagerNode, + not(equalTo(clusterManagerNode)) + ); }, 30, TimeUnit.SECONDS); } - void assertMaster(String masterNode, List nodes) throws Exception { + void assertClusterManager(String clusterManagerNode, List nodes) throws Exception { assertBusy(() -> { for (String node : nodes) { ClusterState state = getNodeClusterState(node); String failMsgSuffix = "cluster_state:\n" + state; assertThat("wrong node count on [" + node + "]. " + failMsgSuffix, state.nodes().getSize(), equalTo(nodes.size())); - String otherMasterNodeName = state.nodes().getMasterNode() != null ? state.nodes().getMasterNode().getName() : null; - assertThat("wrong master on node [" + node + "]. " + failMsgSuffix, otherMasterNodeName, equalTo(masterNode)); + String otherClusterManagerNodeName = state.nodes().getMasterNode() != null ? 
state.nodes().getMasterNode().getName() : null; + assertThat( + "wrong cluster-manager on node [" + node + "]. " + failMsgSuffix, + otherClusterManagerNodeName, + equalTo(clusterManagerNode) + ); } }); } diff --git a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java index d1e3f406b4933..efcefab6c9f8b 100644 --- a/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/opensearch/discovery/DiscoveryModuleTests.java @@ -70,7 +70,7 @@ public class DiscoveryModuleTests extends OpenSearchTestCase { private TransportService transportService; private NamedWriteableRegistry namedWriteableRegistry; - private MasterService masterService; + private MasterService clusterManagerService; private ClusterApplier clusterApplier; private ThreadPool threadPool; private ClusterSettings clusterSettings; @@ -93,7 +93,7 @@ public void setupDummyServices() { threadPool = mock(ThreadPool.class); when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); transportService = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null); - masterService = mock(MasterService.class); + clusterManagerService = mock(MasterService.class); namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); clusterApplier = mock(ClusterApplier.class); clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); @@ -112,7 +112,7 @@ private DiscoveryModule newModule(Settings settings, List plugi transportService, namedWriteableRegistry, null, - masterService, + clusterManagerService, clusterApplier, clusterSettings, plugins, diff --git a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java index 403d2e2122855..3a1c24806e266 100644 
--- a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -137,7 +137,7 @@ public void stopServices() { terminate(threadPool); } - public void testConnectsToMasterNode() throws InterruptedException { + public void testConnectsToClusterManagerNode() throws InterruptedException { final CountDownLatch completionLatch = new CountDownLatch(1); final SetOnce receivedNode = new SetOnce<>(); @@ -190,7 +190,7 @@ public void testLogsFullConnectionFailureAfterSuccessfulHandshake() throws Excep } } - public void testDoesNotConnectToNonMasterNode() throws InterruptedException { + public void testDoesNotConnectToNonClusterManagerNode() throws InterruptedException { remoteNode = new DiscoveryNode("remote-node", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); discoveryAddress = getDiscoveryAddress(); remoteClusterName = "local-cluster"; diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index d6cafb3421f7d..2f78e60631ec2 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -147,7 +147,7 @@ public void run() { listener.onResponse(discoveryNode); return; } else { - listener.onFailure(new OpenSearchException("non-master node " + discoveryNode)); + listener.onFailure(new OpenSearchException("non-cluster-manager node " + discoveryNode)); return; } } @@ -165,20 +165,20 @@ public String toString() { } class TestPeerFinder extends PeerFinder { - DiscoveryNode discoveredMasterNode; - OptionalLong discoveredMasterTerm = OptionalLong.empty(); + DiscoveryNode discoveredClusterManagerNode; + OptionalLong discoveredClusterManagerTerm = OptionalLong.empty(); TestPeerFinder(Settings settings, TransportService transportService,
TransportAddressConnector transportAddressConnector) { super(settings, transportService, transportAddressConnector, PeerFinderTests.this::resolveConfiguredHosts); } @Override - protected void onActiveClusterManagerFound(DiscoveryNode masterNode, long term) { + protected void onActiveClusterManagerFound(DiscoveryNode clusterManagerNode, long term) { assert holdsLock() == false : "PeerFinder lock held in error"; - assertThat(discoveredMasterNode, nullValue()); - assertFalse(discoveredMasterTerm.isPresent()); - discoveredMasterNode = masterNode; - discoveredMasterTerm = OptionalLong.of(term); + assertThat(discoveredClusterManagerNode, nullValue()); + assertFalse(discoveredClusterManagerTerm.isPresent()); + discoveredClusterManagerNode = clusterManagerNode; + discoveredClusterManagerTerm = OptionalLong.of(term); } @Override @@ -335,8 +335,8 @@ public void testDoesNotAddUnreachableNodesFromUnicastHostsList() { assertFoundPeers(); } - public void testDoesNotAddNonMasterEligibleNodesFromUnicastHostsList() { - final DiscoveryNode nonMasterNode = new DiscoveryNode( + public void testDoesNotAddNonClusterManagerEligibleNodesFromUnicastHostsList() { + final DiscoveryNode nonClusterManagerNode = new DiscoveryNode( "node-from-hosts-list", buildNewFakeTransportAddress(), emptyMap(), @@ -344,8 +344,8 @@ public void testDoesNotAddNonMasterEligibleNodesFromUnicastHostsList() { Version.CURRENT ); - providedAddresses.add(nonMasterNode.getAddress()); - transportAddressConnector.addReachableNode(nonMasterNode); + providedAddresses.add(nonClusterManagerNode.getAddress()); + transportAddressConnector.addReachableNode(nonClusterManagerNode); peerFinder.activate(lastAcceptedNodes); runAllRunnableTasks(); @@ -423,7 +423,7 @@ public void testAddsReachableNodesFromIncomingRequests() { assertFoundPeers(sourceNode, otherKnownNode); } - public void testDoesNotAddReachableNonMasterEligibleNodesFromIncomingRequests() { + public void 
testDoesNotAddReachableNonClusterManagerEligibleNodesFromIncomingRequests() { final DiscoveryNode sourceNode = new DiscoveryNode( "request-source", buildNewFakeTransportAddress(), @@ -494,7 +494,7 @@ public void testRespondsToRequestWhenActive() { } public void testDelegatesRequestHandlingWhenInactive() { - final DiscoveryNode masterNode = newDiscoveryNode("master-node"); + final DiscoveryNode clusterManagerNode = newDiscoveryNode("cluster-manager-node"); final DiscoveryNode sourceNode = newDiscoveryNode("request-source"); transportAddressConnector.addReachableNode(sourceNode); @@ -502,9 +502,9 @@ public void testDelegatesRequestHandlingWhenInactive() { final long term = randomNonNegativeLong(); peerFinder.setCurrentTerm(term); - peerFinder.deactivate(masterNode); + peerFinder.deactivate(clusterManagerNode); - final PeersResponse expectedResponse = new PeersResponse(Optional.of(masterNode), Collections.emptyList(), term); + final PeersResponse expectedResponse = new PeersResponse(Optional.of(clusterManagerNode), Collections.emptyList(), term); final PeersResponse peersResponse = peerFinder.handlePeersRequest(new PeersRequest(sourceNode, Collections.emptyList())); assertThat(peersResponse, equalTo(expectedResponse)); } @@ -590,7 +590,7 @@ public void testAddsReachablePeersFromResponse() { assertFoundPeers(otherNode, discoveredNode); } - public void testAddsReachableMasterFromResponse() { + public void testAddsReachableClusterManagerFromResponse() { final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list"); providedAddresses.add(otherNode.getAddress()); transportAddressConnector.addReachableNode(otherNode); @@ -599,21 +599,21 @@ public void testAddsReachableMasterFromResponse() { runAllRunnableTasks(); assertFoundPeers(otherNode); - final DiscoveryNode discoveredMaster = newDiscoveryNode("discovered-master"); + final DiscoveryNode discoveredClusterManager = newDiscoveryNode("discovered-cluster-manager"); respondToRequests(node -> { assertThat(node, 
is(otherNode)); - return new PeersResponse(Optional.of(discoveredMaster), emptyList(), randomNonNegativeLong()); + return new PeersResponse(Optional.of(discoveredClusterManager), emptyList(), randomNonNegativeLong()); }); - transportAddressConnector.addReachableNode(discoveredMaster); + transportAddressConnector.addReachableNode(discoveredClusterManager); runAllRunnableTasks(); - assertFoundPeers(otherNode, discoveredMaster); - assertThat(peerFinder.discoveredMasterNode, nullValue()); - assertFalse(peerFinder.discoveredMasterTerm.isPresent()); + assertFoundPeers(otherNode, discoveredClusterManager); + assertThat(peerFinder.discoveredClusterManagerNode, nullValue()); + assertFalse(peerFinder.discoveredClusterManagerTerm.isPresent()); } - public void testHandlesDiscoveryOfMasterFromResponseFromMaster() { + public void testHandlesDiscoveryOfClusterManagerFromResponseFromClusterManager() { final DiscoveryNode otherNode = newDiscoveryNode("node-from-hosts-list"); providedAddresses.add(otherNode.getAddress()); transportAddressConnector.addReachableNode(otherNode); @@ -631,8 +631,8 @@ public void testHandlesDiscoveryOfMasterFromResponseFromMaster() { runAllRunnableTasks(); assertFoundPeers(otherNode); - assertThat(peerFinder.discoveredMasterNode, is(otherNode)); - assertThat(peerFinder.discoveredMasterTerm, is(OptionalLong.of(term))); + assertThat(peerFinder.discoveredClusterManagerNode, is(otherNode)); + assertThat(peerFinder.discoveredClusterManagerTerm, is(OptionalLong.of(term))); } public void testOnlyRequestsPeersOncePerRoundButDoesRetryNextRound() { diff --git a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java index 6f07d0de1e31d..f9e1b8e30af41 100644 --- a/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/opensearch/env/NodeEnvironmentTests.java @@ -524,8 +524,8 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { 
Settings settings = buildEnvSettings(Settings.EMPTY); Index index = new Index("test", "testUUID"); - // build settings using same path.data as original but without data and master roles - Settings noDataNoMasterSettings = Settings.builder() + // build settings using same path.data as original but without data and cluster-manager roles + Settings noDataNoClusterManagerSettings = Settings.builder() .put(settings) .put( NodeRoles.removeRoles( @@ -535,8 +535,8 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { ) .build(); - // test that we can create data=false and master=false with no meta information - newNodeEnvironment(noDataNoMasterSettings).close(); + // test that we can create data=false and cluster_manager=false with no meta information + newNodeEnvironment(noDataNoClusterManagerSettings).close(); Path indexPath; try (NodeEnvironment env = newNodeEnvironment(settings)) { @@ -546,7 +546,7 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { indexPath = env.indexPaths(index)[0]; } - verifyFailsOnMetadata(noDataNoMasterSettings, indexPath); + verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath); // build settings using same path.data as original but without data role Settings noDataSettings = nonDataNode(settings); @@ -563,15 +563,15 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName); // assert that we get the stricter message on meta-data when both conditions fail - verifyFailsOnMetadata(noDataNoMasterSettings, indexPath); + verifyFailsOnMetadata(noDataNoClusterManagerSettings, indexPath); - // build settings using same path.data as original but without master role - Settings noMasterSettings = nonMasterNode(settings); + // build settings using same path.data as original but without cluster-manager role + Settings noClusterManagerSettings = nonMasterNode(settings); - // test that we can create master=false env 
regardless of data. - newNodeEnvironment(noMasterSettings).close(); + // test that we can create cluster_manager=false env regardless of data. + newNodeEnvironment(noClusterManagerSettings).close(); - // test that we can create data=true, master=true env. Also remove state dir to leave only shard data for following asserts + // test that we can create data=true, cluster_manager=true env. Also remove state dir to leave only shard data for following asserts try (NodeEnvironment env = newNodeEnvironment(settings)) { for (Path path : env.indexPaths(index)) { Files.delete(path.resolve(MetadataStateFormat.STATE_DIR_NAME)); @@ -580,7 +580,7 @@ public void testEnsureNoShardDataOrIndexMetadata() throws IOException { // assert that we fail on shard data even without the metadata dir. verifyFailsOnShardData(noDataSettings, indexPath, shardDataDirName); - verifyFailsOnShardData(noDataNoMasterSettings, indexPath, shardDataDirName); + verifyFailsOnShardData(noDataNoClusterManagerSettings, indexPath, shardDataDirName); } private void verifyFailsOnShardData(Settings settings, Path indexPath, String shardDataDirName) { @@ -597,7 +597,7 @@ private void verifyFailsOnShardData(Settings settings, Path indexPath, String sh private void verifyFailsOnMetadata(Settings settings, Path indexPath) { IllegalStateException ex = expectThrows( IllegalStateException.class, - "Must fail creating NodeEnvironment on a data path that has index metadata if node does not have data and master roles", + "Must fail creating NodeEnvironment on a data path that has index metadata if node does not have data and cluster-manager roles", () -> newNodeEnvironment(settings).close() ); diff --git a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java index 7a346d4cf9fc5..ffcbb3eed91f7 100644 --- a/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java +++ 
b/server/src/test/java/org/opensearch/env/NodeRepurposeCommandTests.java @@ -75,18 +75,18 @@ public class NodeRepurposeCommandTests extends OpenSearchTestCase { private static final Index INDEX = new Index("testIndex", "testUUID"); - private Settings dataMasterSettings; + private Settings dataClusterManagerSettings; private Environment environment; private Path[] nodePaths; - private Settings dataNoMasterSettings; - private Settings noDataNoMasterSettings; - private Settings noDataMasterSettings; + private Settings dataNoClusterManagerSettings; + private Settings noDataNoClusterManagerSettings; + private Settings noDataClusterManagerSettings; @Before public void createNodePaths() throws IOException { - dataMasterSettings = buildEnvSettings(Settings.EMPTY); - environment = TestEnvironment.newEnvironment(dataMasterSettings); - try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataMasterSettings, environment)) { + dataClusterManagerSettings = buildEnvSettings(Settings.EMPTY); + environment = TestEnvironment.newEnvironment(dataClusterManagerSettings); + try (NodeEnvironment nodeEnvironment = new NodeEnvironment(dataClusterManagerSettings, environment)) { nodePaths = nodeEnvironment.nodeDataPaths(); final String nodeId = randomAlphaOfLength(10); try ( @@ -95,36 +95,36 @@ public void createNodePaths() throws IOException { nodeId, xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, - new ClusterSettings(dataMasterSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), + new ClusterSettings(dataClusterManagerSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L ).createWriter() ) { writer.writeFullStateAndCommit(1L, ClusterState.EMPTY_STATE); } } - dataNoMasterSettings = nonMasterNode(dataMasterSettings); - noDataNoMasterSettings = removeRoles( - dataMasterSettings, + dataNoClusterManagerSettings = nonMasterNode(dataClusterManagerSettings); + noDataNoClusterManagerSettings = removeRoles( + dataClusterManagerSettings, Collections.unmodifiableSet(new 
HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE))) ); - noDataMasterSettings = masterNode(nonDataNode(dataMasterSettings)); + noDataClusterManagerSettings = masterNode(nonDataNode(dataClusterManagerSettings)); } public void testEarlyExitNoCleanup() throws Exception { - createIndexDataFiles(dataMasterSettings, randomInt(10), randomBoolean()); + createIndexDataFiles(dataClusterManagerSettings, randomInt(10), randomBoolean()); - verifyNoQuestions(dataMasterSettings, containsString(NO_CLEANUP)); - verifyNoQuestions(dataNoMasterSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataClusterManagerSettings, containsString(NO_CLEANUP)); + verifyNoQuestions(dataNoClusterManagerSettings, containsString(NO_CLEANUP)); } public void testNothingToCleanup() throws Exception { - verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); - verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - Environment environment = TestEnvironment.newEnvironment(noDataMasterSettings); + Environment environment = TestEnvironment.newEnvironment(noDataClusterManagerSettings); if (randomBoolean()) { - try (NodeEnvironment env = new NodeEnvironment(noDataMasterSettings, environment)) { + try (NodeEnvironment env = new NodeEnvironment(noDataClusterManagerSettings, environment)) { try ( PersistedClusterStateService.Writer writer = OpenSearchNodeCommand.createPersistedClusterStateService( Settings.EMPTY, @@ -136,19 +136,24 @@ public void testNothingToCleanup() throws Exception { } } - verifyNoQuestions(noDataNoMasterSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); - verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + 
verifyNoQuestions(noDataNoClusterManagerSettings, containsString(NO_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); - createIndexDataFiles(dataMasterSettings, 0, randomBoolean()); + createIndexDataFiles(dataClusterManagerSettings, 0, randomBoolean()); - verifyNoQuestions(noDataMasterSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); + verifyNoQuestions(noDataClusterManagerSettings, containsString(NO_SHARD_DATA_TO_CLEAN_UP_FOUND)); } public void testLocked() throws IOException { - try (NodeEnvironment env = new NodeEnvironment(dataMasterSettings, TestEnvironment.newEnvironment(dataMasterSettings))) { + try ( + NodeEnvironment env = new NodeEnvironment( + dataClusterManagerSettings, + TestEnvironment.newEnvironment(dataClusterManagerSettings) + ) + ) { assertThat( - expectThrows(OpenSearchException.class, () -> verifyNoQuestions(noDataNoMasterSettings, null)).getMessage(), + expectThrows(OpenSearchException.class, () -> verifyNoQuestions(noDataNoClusterManagerSettings, null)).getMessage(), containsString(NodeRepurposeCommand.FAILED_TO_OBTAIN_NODE_LOCK_MSG) ); } @@ -158,7 +163,7 @@ public void testCleanupAll() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); + createIndexDataFiles(dataClusterManagerSettings, shardCount, hasClusterState); String messageText = NodeRepurposeCommand.noClusterManagerMessage(1, environment.dataFiles().length * shardCount, 0); @@ -168,22 +173,22 @@ public void testCleanupAll() throws Exception { conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); - verifyUnchangedOnAbort(noDataNoMasterSettings, outputMatcher, verbose); + verifyUnchangedOnAbort(noDataNoClusterManagerSettings, outputMatcher, verbose); // verify test setup - 
expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoMasterSettings, environment).close()); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataNoClusterManagerSettings, environment).close()); - verifySuccess(noDataNoMasterSettings, outputMatcher, verbose); + verifySuccess(noDataNoClusterManagerSettings, outputMatcher, verbose); // verify cleaned. - new NodeEnvironment(noDataNoMasterSettings, environment).close(); + new NodeEnvironment(noDataNoClusterManagerSettings, environment).close(); } public void testCleanupShardData() throws Exception { int shardCount = randomIntBetween(1, 10); boolean verbose = randomBoolean(); boolean hasClusterState = randomBoolean(); - createIndexDataFiles(dataMasterSettings, shardCount, hasClusterState); + createIndexDataFiles(dataClusterManagerSettings, shardCount, hasClusterState); Matcher matcher = allOf( containsString(NodeRepurposeCommand.shardMessage(environment.dataFiles().length * shardCount, 1)), @@ -192,15 +197,15 @@ public void testCleanupShardData() throws Exception { conditionalNot(containsString("no name for uuid: testUUID"), verbose == false || hasClusterState) ); - verifyUnchangedOnAbort(noDataMasterSettings, matcher, verbose); + verifyUnchangedOnAbort(noDataClusterManagerSettings, matcher, verbose); // verify test setup - expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataMasterSettings, environment).close()); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(noDataClusterManagerSettings, environment).close()); - verifySuccess(noDataMasterSettings, matcher, verbose); + verifySuccess(noDataClusterManagerSettings, matcher, verbose); // verify clean. 
- new NodeEnvironment(noDataMasterSettings, environment).close(); + new NodeEnvironment(noDataClusterManagerSettings, environment).close(); } static void verifySuccess(Settings settings, Matcher outputMatcher, boolean verbose) throws Exception { diff --git a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java index 1af5a63e344d0..982c21a9e57ec 100644 --- a/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java +++ b/server/src/test/java/org/opensearch/gateway/AsyncShardFetchTests.java @@ -433,7 +433,7 @@ public void run() { try { entry = simulations.get(nodeId); if (entry == null) { - // we are simulating a master node switch, wait for it to not be null + // we are simulating a cluster-manager node switch, wait for it to not be null assertBusy(() -> assertTrue(simulations.containsKey(nodeId))); } assert entry != null; diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index 63792968b1c59..51ba096a86ae0 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -129,13 +129,13 @@ public void testRecoverStateUpdateTask() throws Exception { GatewayService service = createService(Settings.builder()); ClusterStateUpdateTask clusterStateUpdateTask = service.new RecoverStateUpdateTask(); String nodeId = randomAlphaOfLength(10); - DiscoveryNode masterNode = DiscoveryNode.createLocal( + DiscoveryNode clusterManagerNode = DiscoveryNode.createLocal( settings(Version.CURRENT).put(masterNode()).build(), new TransportAddress(TransportAddress.META_ADDRESS, 9300), nodeId ); ClusterState stateWithBlock = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) + 
.nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(clusterManagerNode).build()) .blocks(ClusterBlocks.builder().addGlobalBlock(STATE_NOT_RECOVERED_BLOCK).build()) .build(); diff --git a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java index 1907abbfcaabd..00ca35207620d 100644 --- a/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java +++ b/server/src/test/java/org/opensearch/gateway/IncrementalClusterStateWriterTests.java @@ -86,7 +86,7 @@ public class IncrementalClusterStateWriterTests extends OpenSearchAllocationTestCase { - private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata, boolean masterEligible) { + private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) { Metadata metadata = Metadata.builder().put(indexMetadata, false).build(); RoutingTable routingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); @@ -94,11 +94,11 @@ private ClusterState clusterStateWithUnassignedIndex(IndexMetadata indexMetadata return ClusterState.builder(org.opensearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metadata(metadata) .routingTable(routingTable) - .nodes(generateDiscoveryNodes(masterEligible)) + .nodes(generateDiscoveryNodes(clusterManagerEligible)) .build(); } - private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, boolean masterEligible) { + private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) { AllocationService strategy = createAllocationService( Settings.builder() .put("cluster.routing.allocation.node_concurrent_recoveries", 100) @@ -108,7 +108,7 @@ private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, .build() ); - ClusterState oldClusterState = 
clusterStateWithUnassignedIndex(indexMetadata, masterEligible); + ClusterState oldClusterState = clusterStateWithUnassignedIndex(indexMetadata, clusterManagerEligible); RoutingTable routingTable = strategy.reroute(oldClusterState, "reroute").routingTable(); Metadata metadataNewClusterState = Metadata.builder().put(oldClusterState.metadata().index("test"), false).build(); @@ -120,8 +120,8 @@ private ClusterState clusterStateWithAssignedIndex(IndexMetadata indexMetadata, .build(); } - private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata indexMetadata, boolean masterEligible) { - ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, masterEligible); + private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata indexMetadata, boolean clusterManagerEligible) { + ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible); Metadata metadataNewClusterState = Metadata.builder() .put( @@ -142,8 +142,12 @@ private ClusterState clusterStateWithNonReplicatedClosedIndex(IndexMetadata inde .build(); } - private ClusterState clusterStateWithReplicatedClosedIndex(IndexMetadata indexMetadata, boolean masterEligible, boolean assigned) { - ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, masterEligible); + private ClusterState clusterStateWithReplicatedClosedIndex( + IndexMetadata indexMetadata, + boolean clusterManagerEligible, + boolean assigned + ) { + ClusterState oldClusterState = clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible); Metadata metadataNewClusterState = Metadata.builder() .put( @@ -178,20 +182,20 @@ private ClusterState clusterStateWithReplicatedClosedIndex(IndexMetadata indexMe .build(); } - private DiscoveryNodes.Builder generateDiscoveryNodes(boolean masterEligible) { + private DiscoveryNodes.Builder generateDiscoveryNodes(boolean clusterManagerEligible) { Set dataOnlyRoles = 
Collections.singleton(DiscoveryNodeRole.DATA_ROLE); return DiscoveryNodes.builder() - .add(newNode("node1", masterEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles)) - .add(newNode("master_node", CLUSTER_MANAGER_DATA_ROLES)) + .add(newNode("node1", clusterManagerEligible ? CLUSTER_MANAGER_DATA_ROLES : dataOnlyRoles)) + .add(newNode("cluster_manager_node", CLUSTER_MANAGER_DATA_ROLES)) .localNodeId("node1") - .masterNodeId(masterEligible ? "node1" : "master_node"); + .masterNodeId(clusterManagerEligible ? "node1" : "cluster_manager_node"); } private IndexMetadata createIndexMetadata(String name) { return IndexMetadata.builder(name).settings(settings(Version.CURRENT)).numberOfShards(5).numberOfReplicas(2).build(); } - public void testGetRelevantIndicesWithUnassignedShardsOnMasterEligibleNode() { + public void testGetRelevantIndicesWithUnassignedShardsOnClusterManagerEligibleNode() { IndexMetadata indexMetadata = createIndexMetadata("test"); Set indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithUnassignedIndex(indexMetadata, true)); assertThat(indices.size(), equalTo(0)); @@ -205,8 +209,10 @@ public void testGetRelevantIndicesWithUnassignedShardsOnDataOnlyNode() { public void testGetRelevantIndicesWithAssignedShards() { IndexMetadata indexMetadata = createIndexMetadata("test"); - boolean masterEligible = randomBoolean(); - Set indices = IncrementalClusterStateWriter.getRelevantIndices(clusterStateWithAssignedIndex(indexMetadata, masterEligible)); + boolean clusterManagerEligible = randomBoolean(); + Set indices = IncrementalClusterStateWriter.getRelevantIndices( + clusterStateWithAssignedIndex(indexMetadata, clusterManagerEligible) + ); assertThat(indices.size(), equalTo(1)); } diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 8fe8a13de9910..8fd8449108333 100644 --- 
a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -160,7 +160,7 @@ public void testGlobalCheckpointUpdate() { // now insert an unknown active/insync id , the checkpoint shouldn't change but a refresh should be requested. final AllocationId extraId = AllocationId.newInitializing(); - // first check that adding it without the master blessing doesn't change anything. + // first check that adding it without the cluster-manager blessing doesn't change anything. updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); assertNull(tracker.checkpoints.get(extraId.getId())); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); @@ -292,7 +292,7 @@ public void testMissingInSyncIdsPreventAdvance() { assertThat(updatedGlobalCheckpoint.get(), not(equalTo(UNASSIGNED_SEQ_NO))); } - public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { + public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManager() { final Map active = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); @@ -313,7 +313,7 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByMaster() { assertThat(tracker.getGlobalCheckpoint(), not(equalTo(UNASSIGNED_SEQ_NO))); } - public void testInSyncIdsAreRemovedIfNotValidatedByMaster() { + public void testInSyncIdsAreRemovedIfNotValidatedByClusterManager() { final long initialClusterStateVersion = randomNonNegativeLong(); final Map activeToStay = randomAllocationsWithLocalCheckpoints(1, 5); final Map initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5); @@ -421,7 +421,7 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { assertTrue(complete.get()); 
assertTrue(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId()).inSync); } else { - // master changes its mind and cancels the allocation + // cluster-manager changes its mind and cancels the allocation tracker.updateFromMaster( clusterStateVersion + 1, Collections.singleton(inSyncAllocationId.getId()), @@ -492,7 +492,7 @@ public void testWaitForAllocationIdToBeInSyncCanBeInterrupted() throws BrokenBar thread.join(); } - public void testUpdateAllocationIdsFromMaster() throws Exception { + public void testUpdateAllocationIdsFromClusterManager() throws Exception { final long initialClusterStateVersion = randomNonNegativeLong(); final int numberOfActiveAllocationsIds = randomIntBetween(2, 16); final int numberOfInitializingIds = randomIntBetween(2, 16); @@ -645,7 +645,7 @@ public void testUpdateAllocationIdsFromMaster() throws Exception { assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); /* - * The new in-sync allocation ID is in the in-sync set now yet the master does not know this; the allocation ID should still be in + * The new in-sync allocation ID is in the in-sync set now yet the cluster-manager does not know this; the allocation ID should still be in * the in-sync set even if we receive a cluster state update that does not reflect this. 
* */ diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index da984084321e1..e481384c3d6f3 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -438,7 +438,7 @@ public void testDanglingIndicesWithLaterVersion() throws Exception { final ClusterService clusterService = getInstanceFromNode(ClusterService.class); final ClusterState originalState = clusterService.state(); - // import an index with minor version incremented by one over cluster master version, it should be ignored + // import an index with minor version incremented by one over cluster cluster-manager version, it should be ignored final LocalAllocateDangledIndices dangling = getInstanceFromNode(LocalAllocateDangledIndices.class); final Settings idxSettingsLater = Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.fromId(Version.CURRENT.id + 10000)) diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index a7d9ba0bf3d4b..8139ceec4611f 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -470,7 +470,12 @@ private , Response extends ActionResp ) { return executeClusterStateUpdateTask(clusterState, () -> { try { - TransportMasterNodeActionUtils.runMasterOperation(masterNodeAction, request, clusterState, new PlainActionFuture<>()); + TransportMasterNodeActionUtils.runClusterManagerOperation( + masterNodeAction, + request, + clusterState, + new PlainActionFuture<>() + ); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java 
b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index cd3fee60014a7..d38d31f3ef43b 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -134,7 +134,7 @@ public void testRandomClusterStateUpdates() { } } - // apply cluster state to nodes (incl. master) + // apply cluster state to nodes (incl. cluster-manager) for (DiscoveryNode node : state.nodes()) { IndicesClusterStateService indicesClusterStateService = clusterStateServiceMap.get(node); ClusterState localState = adaptClusterStateToLocalNode(state, node); @@ -328,7 +328,7 @@ public ClusterState randomInitialClusterState( Supplier indicesServiceSupplier ) { List allNodes = new ArrayList<>(); - DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the master + DiscoveryNode localNode = createNode(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); // local node is the cluster-manager allNodes.add(localNode); // at least two nodes that have the data role so that we can allocate shards allNodes.add(createNode(DiscoveryNodeRole.DATA_ROLE)); @@ -368,20 +368,20 @@ public ClusterState randomlyUpdateClusterState( Map clusterStateServiceMap, Supplier indicesServiceSupplier ) { - // randomly remove no_master blocks + // randomly remove no_cluster_manager blocks if (randomBoolean() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) { state = ClusterState.builder(state) .blocks(ClusterBlocks.builder().blocks(state.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .build(); } - // randomly add no_master blocks + // randomly add no_cluster_manager blocks if (rarely() && state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID) == false) { ClusterBlock block = randomBoolean() ? 
NoMasterBlockService.NO_MASTER_BLOCK_ALL : NoMasterBlockService.NO_MASTER_BLOCK_WRITES; state = ClusterState.builder(state).blocks(ClusterBlocks.builder().blocks(state.blocks()).addGlobalBlock(block)).build(); } - // if no_master block is in place, make no other cluster state changes + // if no_cluster_manager block is in place, make no other cluster state changes if (state.blocks().hasGlobalBlockWithId(NoMasterBlockService.NO_MASTER_BLOCK_ID)) { return state; } @@ -481,7 +481,7 @@ public ClusterState randomlyUpdateClusterState( state = cluster.applyFailedShards(state, failedShards); state = cluster.applyStartedShards(state, startedShards); - // randomly add and remove nodes (except current master) + // randomly add and remove nodes (except current cluster-manager) if (rarely()) { if (randomBoolean()) { // add node @@ -506,7 +506,7 @@ public ClusterState randomlyUpdateClusterState( } } - // TODO: go masterless? + // TODO: go cluster-managerless? return state; } diff --git a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java index bb8f0405ecf7e..40ffa2eeb0aff 100644 --- a/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/persistent/PersistentTasksClusterServiceTests.java @@ -494,7 +494,7 @@ public void testPeriodicRecheck() throws Exception { }); } - public void testPeriodicRecheckOffMaster() { + public void testPeriodicRecheckOffClusterManager() { ClusterState initialState = initialState(); ClusterState.Builder builder = ClusterState.builder(initialState); PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( @@ -528,20 +528,20 @@ public void testPeriodicRecheckOffMaster() { assertThat(tasksInProgress.tasks().size(), equalTo(1)); } - // The rechecker should recheck indefinitely on the master node as the + // The rechecker should 
recheck indefinitely on the cluster-manager node as the // task can never be assigned while nonClusterStateCondition = false assertTrue(service.getPeriodicRechecker().isScheduled()); - // Now simulate the node ceasing to be the master + // Now simulate the node ceasing to be the cluster-manager builder = ClusterState.builder(clusterState); nodes = DiscoveryNodes.builder(clusterState.nodes()); - nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_master_node")); - nodes.masterNodeId("a_new_master_node"); + nodes.add(DiscoveryNode.createLocal(Settings.EMPTY, buildNewFakeTransportAddress(), "a_new_cluster_manager_node")); + nodes.masterNodeId("a_new_cluster_manager_node"); ClusterState nonMasterClusterState = builder.nodes(nodes).build(); event = new ClusterChangedEvent("test", nonMasterClusterState, clusterState); service.clusterChanged(event); - // The service should have cancelled the rechecker on learning it is no longer running on the master node + // The service should have cancelled the rechecker on learning it is no longer running on the cluster-manager node assertFalse(service.getPeriodicRechecker().isScheduled()); } @@ -796,7 +796,7 @@ private ClusterState insignificantChange(ClusterState clusterState) { } } if (randomBoolean()) { - // remove a node that doesn't have any tasks assigned to it and it's not the master node + // remove a node that doesn't have any tasks assigned to it and it's not the cluster-manager node for (DiscoveryNode node : clusterState.nodes()) { if (hasTasksAssignedTo(tasks, node.getId()) == false && "this_node".equals(node.getId()) == false) { logger.info("removed unassigned node {}", node.getId()); diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java index 4d35098309b0d..fb633f9fa4a9c 100644 --- 
a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestNodesInfoActionTests.java @@ -54,11 +54,11 @@ public class RestNodesInfoActionTests extends OpenSearchTestCase { public void testDuplicatedFiltersAreNotRemoved() { Map params = new HashMap<>(); - params.put("nodeId", "_all,master:false,_all"); + params.put("nodeId", "_all,cluster_manager:false,_all"); RestRequest restRequest = buildRestRequest(params); NodesInfoRequest actual = RestNodesInfoAction.prepareRequest(restRequest); - assertArrayEquals(new String[] { "_all", "master:false", "_all" }, actual.nodesIds()); + assertArrayEquals(new String[] { "_all", "cluster_manager:false", "_all" }, actual.nodesIds()); } public void testOnlyMetrics() { diff --git a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java index 6c789ae6d98cd..f0e283e6dde6d 100644 --- a/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/InternalSnapshotsInfoServiceTests.java @@ -290,7 +290,7 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In assertThat("Expecting all snapshot shard size fetches to execute a Reroute", reroutes.get(), equalTo(maxShardsToCreate)); } - public void testNoLongerMaster() throws Exception { + public void testNoLongerClusterManager() throws Exception { final InternalSnapshotsInfoService snapshotsInfoService = new InternalSnapshotsInfoService( Settings.EMPTY, clusterService, @@ -310,18 +310,18 @@ public IndexShardSnapshotStatus getShardSnapshotStatus(SnapshotId snapshotId, In final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int nbShards = randomIntBetween(1, 5); applyClusterState( - "restore-indices-when-master-" + indexName, + 
"restore-indices-when-cluster-manager-" + indexName, clusterState -> addUnassignedShards(clusterState, indexName, nbShards) ); } - applyClusterState("demote-current-master", this::demoteMasterNode); + applyClusterState("demote-current-cluster-manager", this::demoteClusterManagerNode); for (int i = 0; i < randomIntBetween(1, 10); i++) { final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); final int nbShards = randomIntBetween(1, 5); applyClusterState( - "restore-indices-when-no-longer-master-" + indexName, + "restore-indices-when-no-longer-cluster-manager-" + indexName, clusterState -> addUnassignedShards(clusterState, indexName, nbShards) ); } @@ -484,7 +484,7 @@ private ClusterState addUnassignedShards(final ClusterState currentState, String .build(); } - private ClusterState demoteMasterNode(final ClusterState currentState) { + private ClusterState demoteClusterManagerNode(final ClusterState currentState) { final DiscoveryNode node = new DiscoveryNode( "other", OpenSearchTestCase.buildNewFakeTransportAddress(), diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index ab9a455399366..68a6af25a7c82 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -306,7 +306,7 @@ public void verifyReposThenStopServices() { blobStoreContext.forceConsistent(); } BlobStoreTestUtil.assertConsistency( - (BlobStoreRepository) testClusterNodes.randomMasterNodeSafe().repositoriesService.repository("repo"), + (BlobStoreRepository) testClusterNodes.randomClusterManagerNodeSafe().repositoriesService.repository("repo"), Runnable::run ); } finally { @@ -323,7 +323,7 @@ public void testSuccessfulSnapshotAndRestore() { final int shards = randomIntBetween(1, 10); final int documents = randomIntBetween(0, 100); - final 
TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -389,9 +389,9 @@ public void testSuccessfulSnapshotAndRestore() { assertNotNull(createSnapshotResponseListener.result()); assertNotNull(restoreSnapshotResponseListener.result()); assertTrue(documentCountVerified.get()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -404,8 +404,8 @@ public void testSuccessfulSnapshotAndRestore() { public void testSnapshotWithNodeDisconnects() { final int dataNodes = randomIntBetween(2, 10); - final int masterNodes = randomFrom(1, 3, 5); - setupTestCluster(masterNodes, dataNodes); + final int clusterManagerNodes = randomFrom(1, 3, 5); + setupTestCluster(clusterManagerNodes, dataNodes); String repoName = "repo"; String snapshotName = "snapshot"; @@ -422,7 +422,7 @@ public void testSnapshotWithNodeDisconnects() { if (randomBoolean()) { scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); } - testClusterNodes.randomMasterNodeSafe().client.admin() + testClusterNodes.randomClusterManagerNodeSafe().client.admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) .setPartial(partial) @@ -435,12 +435,12 @@ public void testSnapshotWithNodeDisconnects() { for (int i = 0; i < 
randomIntBetween(0, dataNodes); ++i) { scheduleNow(this::disconnectOrRestartDataNode); } - // Only disconnect master if we have more than a single master and can simulate a failover - final boolean disconnectedMaster = randomBoolean() && masterNodes > 1; - if (disconnectedMaster) { - scheduleNow(this::disconnectOrRestartMasterNode); + // Only disconnect cluster-manager if we have more than a single cluster-manager and can simulate a failover + final boolean disconnectedClusterManager = randomBoolean() && clusterManagerNodes > 1; + if (disconnectedClusterManager) { + scheduleNow(this::disconnectOrRestartClusterManagerNode); } - if (disconnectedMaster || randomBoolean()) { + if (disconnectedClusterManager || randomBoolean()) { scheduleSoon(() -> testClusterNodes.clearNetworkDisruptions()); } else if (randomBoolean()) { scheduleNow(() -> testClusterNodes.clearNetworkDisruptions()); @@ -456,22 +456,22 @@ public void testSnapshotWithNodeDisconnects() { } }); - runUntil(() -> testClusterNodes.randomMasterNode().map(master -> { + runUntil(() -> testClusterNodes.randomClusterManagerNode().map(clusterManager -> { if (snapshotNeverStarted.get()) { return true; } - final SnapshotsInProgress snapshotsInProgress = master.clusterService.state().custom(SnapshotsInProgress.TYPE); + final SnapshotsInProgress snapshotsInProgress = clusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE); return snapshotsInProgress != null && snapshotsInProgress.entries().isEmpty(); }).orElse(false), TimeUnit.MINUTES.toMillis(1L)); clearDisruptionsAndAwaitSync(); - final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode() - .orElseThrow(() -> new AssertionError("expected to find at least one active master node")); - SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state() + final TestClusterNodes.TestClusterNode randomClusterManager = testClusterNodes.randomClusterManagerNode() + .orElseThrow(() -> new AssertionError("expected to 
find at least one active cluster-manager node")); + SnapshotsInProgress finalSnapshotsInProgress = randomClusterManager.clusterService.state() .custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY); assertThat(finalSnapshotsInProgress.entries(), empty()); - final Repository repository = randomMaster.repositoriesService.repository(repoName); + final Repository repository = randomClusterManager.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); if (snapshotNeverStarted.get()) { assertThat(snapshotIds, empty()); @@ -480,10 +480,10 @@ public void testSnapshotWithNodeDisconnects() { } } - public void testSnapshotDeleteWithMasterFailover() { + public void testSnapshotDeleteWithClusterManagerFailover() { final int dataNodes = randomIntBetween(2, 10); - final int masterNodes = randomFrom(3, 5); - setupTestCluster(masterNodes, dataNodes); + final int clusterManagerNodes = randomFrom(3, 5); + setupTestCluster(clusterManagerNodes, dataNodes); String repoName = "repo"; String snapshotName = "snapshot"; @@ -494,7 +494,7 @@ public void testSnapshotDeleteWithMasterFailover() { final StepListener createSnapshotResponseStepListener = new StepListener<>(); continueOrDie( createRepoAndIndex(repoName, index, shards), - createIndexResponse -> testClusterNodes.randomMasterNodeSafe().client.admin() + createIndexResponse -> testClusterNodes.randomClusterManagerNodeSafe().client.admin() .cluster() .prepareCreateSnapshot(repoName, snapshotName) .setWaitForCompletion(waitForSnapshot) @@ -503,7 +503,7 @@ public void testSnapshotDeleteWithMasterFailover() { final AtomicBoolean snapshotDeleteResponded = new AtomicBoolean(false); continueOrDie(createSnapshotResponseStepListener, createSnapshotResponse -> { - scheduleNow(this::disconnectOrRestartMasterNode); + scheduleNow(this::disconnectOrRestartClusterManagerNode); testClusterNodes.randomDataNodeSafe().client.admin() .cluster() .prepareDeleteSnapshot(repoName, snapshotName) 
@@ -511,10 +511,10 @@ public void testSnapshotDeleteWithMasterFailover() { }); runUntil( - () -> testClusterNodes.randomMasterNode() + () -> testClusterNodes.randomClusterManagerNode() .map( - master -> snapshotDeleteResponded.get() - && master.clusterService.state() + clusterManager -> snapshotDeleteResponded.get() + && clusterManager.clusterService.state() .custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY) .getEntries() .isEmpty() @@ -525,11 +525,11 @@ public void testSnapshotDeleteWithMasterFailover() { clearDisruptionsAndAwaitSync(); - final TestClusterNodes.TestClusterNode randomMaster = testClusterNodes.randomMasterNode() - .orElseThrow(() -> new AssertionError("expected to find at least one active master node")); - SnapshotsInProgress finalSnapshotsInProgress = randomMaster.clusterService.state().custom(SnapshotsInProgress.TYPE); + final TestClusterNodes.TestClusterNode randomClusterManager = testClusterNodes.randomClusterManagerNode() + .orElseThrow(() -> new AssertionError("expected to find at least one active cluster-manager node")); + SnapshotsInProgress finalSnapshotsInProgress = randomClusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE); assertThat(finalSnapshotsInProgress.entries(), empty()); - final Repository repository = randomMaster.repositoriesService.repository(repoName); + final Repository repository = randomClusterManager.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(0)); } @@ -542,7 +542,7 @@ public void testConcurrentSnapshotCreateAndDelete() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -558,12 +558,12 @@ public void 
testConcurrentSnapshotCreateAndDelete() { final StepListener deleteSnapshotStepListener = new StepListener<>(); - masterNode.clusterService.addListener(new ClusterStateListener() { + clusterManagerNode.clusterService.addListener(new ClusterStateListener() { @Override public void clusterChanged(ClusterChangedEvent event) { if (event.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty() == false) { client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).execute(deleteSnapshotStepListener); - masterNode.clusterService.removeListener(this); + clusterManagerNode.clusterService.removeListener(this); } } }); @@ -587,9 +587,9 @@ public void clusterChanged(ClusterChangedEvent event) { assertNotNull(createSnapshotResponseStepListener.result()); assertNotNull(createAnotherSnapshotResponseStepListener.result()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -608,7 +608,7 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -659,9 +659,9 @@ public void testConcurrentSnapshotCreateAndDeleteOther() { 
deterministicTaskQueue.runAllRunnableTasks(); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); // We end up with two snapshots no matter if the delete worked out or not assertThat(snapshotIds, hasSize(2)); @@ -683,7 +683,7 @@ public void testBulkSnapshotDeleteWithAbort() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -722,9 +722,9 @@ public void testBulkSnapshotDeleteWithAbort() { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); // No snapshots should be left in the repository assertThat(snapshotIds, empty()); @@ -738,7 +738,7 @@ public void 
testConcurrentSnapshotRestoreAndDeleteOther() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -812,7 +812,7 @@ public void testConcurrentSnapshotRestoreAndDeleteOther() { assertThat(deleteSnapshotStepListener.result().isAcknowledged(), is(true)); assertThat(restoreSnapshotResponseListener.result().getRestoreInfo().failedShards(), is(0)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, contains(createOtherSnapshotResponseStepListener.result().getSnapshotInfo().snapshotId())); @@ -850,7 +850,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { String snapshotName = "snapshot"; final String index = "test"; - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -859,7 +859,7 @@ public void testConcurrentSnapshotDeleteAndDeleteIndex() throws IOException { final SetOnce firstIndex = new SetOnce<>(); continueOrDie(createRepoAndIndex(repoName, index, 1), createIndexResponse -> { - firstIndex.set(masterNode.clusterService.state().metadata().index(index).getIndex()); + firstIndex.set(clusterManagerNode.clusterService.state().metadata().index(index).getIndex()); // create a few more indices to make it more likely that the subsequent index delete operation happens before snapshot // finalization final GroupedActionListener listener = new 
GroupedActionListener<>(createIndicesListener, indices); @@ -907,9 +907,9 @@ public void onFailure(Exception e) { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); final RepositoryData repositoryData = getRepositoryData(repository); Collection snapshotIds = repositoryData.getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -944,7 +944,7 @@ public void testConcurrentDeletes() { final String index = "test"; final int shards = randomIntBetween(1, 10); - TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -990,9 +990,10 @@ public void testConcurrentDeletes() { deterministicTaskQueue.runAllRunnableTasks(); - SnapshotDeletionsInProgress deletionsInProgress = masterNode.clusterService.state().custom(SnapshotDeletionsInProgress.TYPE); + SnapshotDeletionsInProgress deletionsInProgress = clusterManagerNode.clusterService.state() + .custom(SnapshotDeletionsInProgress.TYPE); assertFalse(deletionsInProgress.hasDeletionsInProgress()); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); final RepositoryData repositoryData = getRepositoryData(repository); Collection snapshotIds = repositoryData.getSnapshotIds(); // We end up with no 
snapshots since at least one of the deletes worked out @@ -1003,12 +1004,12 @@ public void testConcurrentDeletes() { } /** - * Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently + * Simulates concurrent restarts of data and cluster-manager nodes as well as relocating a primary shard, while starting and subsequently * deleting a snapshot. */ public void testSnapshotPrimaryRelocations() { - final int masterNodeCount = randomFrom(1, 3, 5); - setupTestCluster(masterNodeCount, randomIntBetween(2, 5)); + final int clusterManagerNodeCount = randomFrom(1, 3, 5); + setupTestCluster(clusterManagerNodeCount, randomIntBetween(2, 5)); String repoName = "repo"; String snapshotName = "snapshot"; @@ -1016,11 +1017,11 @@ public void testSnapshotPrimaryRelocations() { final int shards = randomIntBetween(1, 5); - final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); final AtomicBoolean createdSnapshot = new AtomicBoolean(); - final AdminClient masterAdminClient = masterNode.client.admin(); + final AdminClient clusterManagerAdminClient = clusterManagerNode.client.admin(); final StepListener clusterStateResponseStepListener = new StepListener<>(); @@ -1038,15 +1039,15 @@ public void testSnapshotPrimaryRelocations() { @Override public void run() { final StepListener updatedClusterStateResponseStepListener = new StepListener<>(); - masterAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener); + clusterManagerAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener); continueOrDie(updatedClusterStateResponseStepListener, updatedClusterState -> { final ShardRouting shardRouting = updatedClusterState.getState() .routingTable() 
.shardRoutingTable(shardToRelocate.shardId()) .primaryShard(); if (shardRouting.unassigned() && shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.NODE_LEFT) { - if (masterNodeCount > 1) { - scheduleNow(() -> testClusterNodes.stopNode(masterNode)); + if (clusterManagerNodeCount > 1) { + scheduleNow(() -> testClusterNodes.stopNode(clusterManagerNode)); } testClusterNodes.randomDataNodeSafe().client.admin() .cluster() @@ -1058,7 +1059,7 @@ public void run() { .deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName), noopListener()); })); scheduleNow( - () -> testClusterNodes.randomMasterNodeSafe().client.admin() + () -> testClusterNodes.randomClusterManagerNodeSafe().client.admin() .cluster() .reroute( new ClusterRerouteRequest().add( @@ -1080,11 +1081,11 @@ public void run() { }); }); - runUntil(() -> testClusterNodes.randomMasterNode().map(master -> { + runUntil(() -> testClusterNodes.randomClusterManagerNode().map(clusterManager -> { if (createdSnapshot.get() == false) { return false; } - return master.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty(); + return clusterManager.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty(); }).orElse(false), TimeUnit.MINUTES.toMillis(1L)); clearDisruptionsAndAwaitSync(); @@ -1096,7 +1097,7 @@ public void run() { .entries(), empty() ); - final Repository repository = testClusterNodes.randomMasterNodeSafe().repositoriesService.repository(repoName); + final Repository repository = testClusterNodes.randomClusterManagerNodeSafe().repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0))); } @@ -1110,7 +1111,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { final int shards = randomIntBetween(1, 10); final int documents = randomIntBetween(2, 100); - 
TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -1171,7 +1172,7 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { "Documents were restored but the restored index mapping was older than some documents and misses some of their fields", (int) hitCount, lessThanOrEqualTo( - ((Map) masterNode.clusterService.state() + ((Map) clusterManagerNode.clusterService.state() .metadata() .index(restoredIndex) .mapping() @@ -1186,9 +1187,9 @@ public void testSuccessfulSnapshotWithConcurrentDynamicMappingUpdates() { assertNotNull(createSnapshotResponseStepListener.result()); assertNotNull(restoreSnapshotResponseStepListener.result()); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(1)); @@ -1210,7 +1211,7 @@ public void testRunConcurrentSnapshots() { final int shards = randomIntBetween(1, 10); final int documents = randomIntBetween(1, 100); - final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster( + final TestClusterNodes.TestClusterNode clusterManagerNode = testClusterNodes.currentClusterManager( testClusterNodes.nodes.values().iterator().next().clusterService.state() ); @@ -1253,9 +1254,9 @@ public void testRunConcurrentSnapshots() { }); 
runUntil(() -> doneIndexing.get() && doneSnapshotting.get(), TimeUnit.MINUTES.toMillis(5L)); - SnapshotsInProgress finalSnapshotsInProgress = masterNode.clusterService.state().custom(SnapshotsInProgress.TYPE); + SnapshotsInProgress finalSnapshotsInProgress = clusterManagerNode.clusterService.state().custom(SnapshotsInProgress.TYPE); assertFalse(finalSnapshotsInProgress.entries().stream().anyMatch(entry -> entry.state().completed() == false)); - final Repository repository = masterNode.repositoriesService.repository(repoName); + final Repository repository = clusterManagerNode.repositoriesService.repository(repoName); Collection snapshotIds = getRepositoryData(repository).getSnapshotIds(); assertThat(snapshotIds, hasSize(snapshotNames.size())); @@ -1314,12 +1315,12 @@ private void disconnectOrRestartDataNode() { } } - private void disconnectOrRestartMasterNode() { - testClusterNodes.randomMasterNode().ifPresent(masterNode -> { + private void disconnectOrRestartClusterManagerNode() { + testClusterNodes.randomClusterManagerNode().ifPresent(clusterManagerNode -> { if (randomBoolean()) { - testClusterNodes.disconnectNode(masterNode); + testClusterNodes.disconnectNode(clusterManagerNode); } else { - masterNode.restart(); + clusterManagerNode.restart(); } }); } @@ -1374,12 +1375,15 @@ private void stabilize() { .stream() .map(node -> node.clusterService.state()) .collect(Collectors.toList()); - final Set masterNodeIds = clusterStates.stream() + final Set clusterManagerNodeIds = clusterStates.stream() .map(clusterState -> clusterState.nodes().getMasterNodeId()) .collect(Collectors.toSet()); final Set terms = clusterStates.stream().map(ClusterState::term).collect(Collectors.toSet()); final List versions = clusterStates.stream().map(ClusterState::version).distinct().collect(Collectors.toList()); - return versions.size() == 1 && masterNodeIds.size() == 1 && masterNodeIds.contains(null) == false && terms.size() == 1; + return versions.size() == 1 + && 
clusterManagerNodeIds.size() == 1 + && clusterManagerNodeIds.contains(null) == false + && terms.size() == 1; }, TimeUnit.MINUTES.toMillis(1L)); } @@ -1395,8 +1399,8 @@ private void runUntil(Supplier fulfilled, long timeout) { fail("Condition wasn't fulfilled."); } - private void setupTestCluster(int masterNodes, int dataNodes) { - testClusterNodes = new TestClusterNodes(masterNodes, dataNodes); + private void setupTestCluster(int clusterManagerNodes, int dataNodes) { + testClusterNodes = new TestClusterNodes(clusterManagerNodes, dataNodes); startCluster(); } @@ -1472,11 +1476,11 @@ private final class TestClusterNodes { */ private final Set disconnectedNodes = new HashSet<>(); - TestClusterNodes(int masterNodes, int dataNodes) { - for (int i = 0; i < masterNodes; ++i) { + TestClusterNodes(int clusterManagerNodes, int dataNodes) { + for (int i = 0; i < clusterManagerNodes; ++i) { nodes.computeIfAbsent("node" + i, nodeName -> { try { - return newMasterNode(nodeName); + return newClusterManagerNode(nodeName); } catch (IOException e) { throw new AssertionError(e); } @@ -1501,7 +1505,7 @@ public TestClusterNode nodeById(final String nodeId) { .orElseThrow(() -> new AssertionError("Could not find node by id [" + nodeId + ']')); } - private TestClusterNode newMasterNode(String nodeName) throws IOException { + private TestClusterNode newClusterManagerNode(String nodeName) throws IOException { return newNode(nodeName, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); } @@ -1522,19 +1526,21 @@ private TestClusterNode newNode(String nodeName, DiscoveryNodeRole role) throws ); } - public TestClusterNode randomMasterNodeSafe() { - return randomMasterNode().orElseThrow(() -> new AssertionError("Expected to find at least one connected master node")); + public TestClusterNode randomClusterManagerNodeSafe() { + return randomClusterManagerNode().orElseThrow( + () -> new AssertionError("Expected to find at least one connected cluster-manager node") + ); } - public Optional randomMasterNode() 
{ + public Optional randomClusterManagerNode() { // Select from sorted list of data-nodes here to not have deterministic behaviour - final List masterNodes = testClusterNodes.nodes.values() + final List clusterManagerNodes = testClusterNodes.nodes.values() .stream() .filter(n -> n.node.isMasterNode()) .filter(n -> disconnectedNodes.contains(n.node.getName()) == false) .sorted(Comparator.comparing(n -> n.node.getName())) .collect(Collectors.toList()); - return masterNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(masterNodes)); + return clusterManagerNodes.isEmpty() ? Optional.empty() : Optional.of(randomFrom(clusterManagerNodes)); } public void stopNode(TestClusterNode node) { @@ -1596,15 +1602,15 @@ public DiscoveryNodes discoveryNodes() { } /** - * Returns the {@link TestClusterNode} for the master node in the given {@link ClusterState}. + * Returns the {@link TestClusterNode} for the cluster-manager node in the given {@link ClusterState}. * @param state ClusterState - * @return Master Node + * @return Cluster Manager Node */ - public TestClusterNode currentMaster(ClusterState state) { - TestClusterNode master = nodes.get(state.nodes().getMasterNode().getName()); - assertNotNull(master); - assertTrue(master.node.isMasterNode()); - return master; + public TestClusterNode currentClusterManager(ClusterState state) { + TestClusterNode clusterManager = nodes.get(state.nodes().getMasterNode().getName()); + assertNotNull(clusterManager); + assertTrue(clusterManager.node.isMasterNode()); + return clusterManager; } private final class TestClusterNode { @@ -1636,7 +1642,7 @@ private final class TestClusterNode { private final DiscoveryNode node; - private final MasterService masterService; + private final MasterService clusterManagerService; private final AllocationService allocationService; @@ -1656,13 +1662,18 @@ private final class TestClusterNode { this.node = node; final Environment environment = createEnvironment(node.getName()); threadPool = 
deterministicTaskQueue.getThreadPool(runnable -> CoordinatorTests.onNodeLog(node, runnable)); - masterService = new FakeThreadPoolMasterService(node.getName(), "test", threadPool, deterministicTaskQueue::scheduleNow); + clusterManagerService = new FakeThreadPoolMasterService( + node.getName(), + "test", + threadPool, + deterministicTaskQueue::scheduleNow + ); final Settings settings = environment.settings(); final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); clusterService = new ClusterService( settings, clusterSettings, - masterService, + clusterManagerService, new ClusterApplierService(node.getName(), settings, clusterSettings, threadPool) { @Override protected PrioritizedOpenSearchThreadPoolExecutor createThreadPoolExecutor() { @@ -2192,7 +2203,7 @@ public void start(ClusterState initialState) { transportService, namedWriteableRegistry, allocationService, - masterService, + clusterManagerService, () -> persistedState, hostsResolver -> nodes.values() .stream() @@ -2206,7 +2217,7 @@ public void start(ClusterState initialState) { ElectionStrategy.DEFAULT_INSTANCE, () -> new StatusInfo(HEALTHY, "healthy-info") ); - masterService.setClusterStatePublisher(coordinator); + clusterManagerService.setClusterStatePublisher(coordinator); coordinator.start(); clusterService.getClusterApplierService().setNodeConnectionsService(nodeConnectionsService); nodeConnectionsService.start(); diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java index 16fc7467d099b..7f96d4842e37d 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotsServiceTests.java @@ -371,11 +371,11 @@ public void testCompletedCloneStartsNextClone() throws Exception { final String indexName1 = "index-1"; final IndexId indexId1 = indexId(indexName1); final 
RepositoryShardId shardId1 = new RepositoryShardId(indexId1, 0); - final String masterNodeId = uuid(); + final String clusterManagerNodeId = uuid(); final SnapshotsInProgress.Entry cloneSingleShard = cloneEntry( targetSnapshot, sourceSnapshot.getSnapshotId(), - clonesMap(shardId1, initShardStatus(masterNodeId)) + clonesMap(shardId1, initShardStatus(clusterManagerNodeId)) ); final Snapshot queuedTargetSnapshot = snapshot(repoName, "test-snapshot"); @@ -388,11 +388,11 @@ public void testCompletedCloneStartsNextClone() throws Exception { assertThat(cloneSingleShard.state(), is(SnapshotsInProgress.State.STARTED)); final ClusterState stateWithUnassignedRoutingShard = stateWithSnapshots( - ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoveryNodes(masterNodeId)).build(), + ClusterState.builder(ClusterState.EMPTY_STATE).nodes(discoveryNodes(clusterManagerNodeId)).build(), cloneSingleShard, queuedClone ); - final SnapshotsService.ShardSnapshotUpdate completeShardClone = successUpdate(targetSnapshot, shardId1, masterNodeId); + final SnapshotsService.ShardSnapshotUpdate completeShardClone = successUpdate(targetSnapshot, shardId1, clusterManagerNodeId); final ClusterState updatedClusterState = applyUpdates(stateWithUnassignedRoutingShard, completeShardClone); final SnapshotsInProgress snapshotsInProgress = updatedClusterState.custom(SnapshotsInProgress.TYPE); diff --git a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java index 6bfd96b328d75..8d43db15053f1 100644 --- a/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java +++ b/server/src/test/java/org/opensearch/snapshots/mockstore/MockEventuallyConsistentRepository.java @@ -373,7 +373,7 @@ public void writeBlob(String blobName, InputStream inputStream, long blobSize, b new BytesArray(data) ); // If the existing snapshotInfo differs 
only in the timestamps it stores, then the overwrite is not - // a problem and could be the result of a correctly handled master failover. + // a problem and could be the result of a correctly handled cluster-manager failover. final SnapshotInfo existingInfo = SNAPSHOT_FORMAT.deserialize( blobName, namedXContentRegistry, diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java index 0092763b4ba20..9bb8b79377939 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterServiceTests.java @@ -553,11 +553,11 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { final Settings settings = Settings.EMPTY; final List knownNodes = new CopyOnWriteArrayList<>(); final Settings data = nonMasterNode(); - final Settings dedicatedMaster = clusterManagerOnlyNode(); + final Settings dedicatedClusterManager = clusterManagerOnlyNode(); try ( - MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedMaster); + MockTransportService c1N1 = startTransport("cluster_1_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager); MockTransportService c1N2 = startTransport("cluster_1_node_2", knownNodes, Version.CURRENT, data); - MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedMaster); + MockTransportService c2N1 = startTransport("cluster_2_node_1", knownNodes, Version.CURRENT, dedicatedClusterManager); MockTransportService c2N2 = startTransport("cluster_2_node_2", knownNodes, Version.CURRENT, data) ) { final DiscoveryNode c1N1Node = c1N1.getLocalDiscoNode(); diff --git a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java index 
1714f154036a5..409bc327cb095 100644 --- a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java @@ -743,24 +743,24 @@ public void testGetNodePredicateNodeRoles() { assertTrue(nodePredicate.test(all)); } { - DiscoveryNode dataMaster = new DiscoveryNode( + DiscoveryNode dataClusterManager = new DiscoveryNode( "id", address, Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNodeRole.DATA_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)), Version.CURRENT ); - assertTrue(nodePredicate.test(dataMaster)); + assertTrue(nodePredicate.test(dataClusterManager)); } { - DiscoveryNode dedicatedMaster = new DiscoveryNode( + DiscoveryNode dedicatedClusterManager = new DiscoveryNode( "id", address, Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)), Version.CURRENT ); - assertFalse(nodePredicate.test(dedicatedMaster)); + assertFalse(nodePredicate.test(dedicatedClusterManager)); } { DiscoveryNode dedicatedIngest = new DiscoveryNode( @@ -773,14 +773,14 @@ public void testGetNodePredicateNodeRoles() { assertTrue(nodePredicate.test(dedicatedIngest)); } { - DiscoveryNode masterIngest = new DiscoveryNode( + DiscoveryNode clusterManagerIngest = new DiscoveryNode( "id", address, Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNodeRole.INGEST_ROLE, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)), Version.CURRENT ); - assertTrue(nodePredicate.test(masterIngest)); + assertTrue(nodePredicate.test(clusterManagerIngest)); } { DiscoveryNode dedicatedData = new DiscoveryNode( @@ -855,14 +855,14 @@ public void testGetNodePredicatesCombination() { TransportAddress address = new TransportAddress(TransportAddress.META_ADDRESS, 0); Settings settings = Settings.builder().put("cluster.remote.node.attr", "gateway").build(); Predicate nodePredicate = SniffConnectionStrategy.getNodePredicate(settings); - Set dedicatedMasterRoles = new 
HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); + Set dedicatedClusterManagerRoles = new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)); Set allRoles = DiscoveryNodeRole.BUILT_IN_ROLES; { DiscoveryNode node = new DiscoveryNode( "id", address, Collections.singletonMap("gateway", "true"), - dedicatedMasterRoles, + dedicatedClusterManagerRoles, Version.CURRENT ); assertFalse(nodePredicate.test(node)); @@ -872,7 +872,7 @@ public void testGetNodePredicatesCombination() { "id", address, Collections.singletonMap("gateway", "false"), - dedicatedMasterRoles, + dedicatedClusterManagerRoles, Version.CURRENT ); assertFalse(nodePredicate.test(node)); @@ -882,7 +882,7 @@ public void testGetNodePredicatesCombination() { "id", address, Collections.singletonMap("gateway", "false"), - dedicatedMasterRoles, + dedicatedClusterManagerRoles, Version.CURRENT ); assertFalse(nodePredicate.test(node)); From decf4c29cd7c69cb1a439facb0f00991ebef32a2 Mon Sep 17 00:00:00 2001 From: Ankit Jain Date: Wed, 25 May 2022 22:09:25 -0700 Subject: [PATCH 32/75] Removing unused method from TransportSearchAction (#3437) * Removing unused method from TransportSearchAction Signed-off-by: Ankit Jain --- .../action/search/TransportSearchAction.java | 76 ------------------- 1 file changed, 76 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index 1ca477942cdf6..ebb0f21d6fe16 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -65,7 +65,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; import 
org.opensearch.index.Index; import org.opensearch.index.query.Rewriteable; @@ -298,81 +297,6 @@ void executeOnShardTarget( ); } - public void executeRequest( - Task task, - SearchRequest searchRequest, - String actionName, - boolean includeSearchContext, - SinglePhaseSearchAction phaseSearchAction, - ActionListener listener - ) { - executeRequest(task, searchRequest, new SearchAsyncActionProvider() { - @Override - public AbstractSearchAsyncAction asyncSearchAction( - SearchTask task, - SearchRequest searchRequest, - Executor executor, - GroupShardsIterator shardsIts, - SearchTimeProvider timeProvider, - BiFunction connectionLookup, - ClusterState clusterState, - Map aliasFilter, - Map concreteIndexBoosts, - Map> indexRoutings, - ActionListener listener, - boolean preFilter, - ThreadPool threadPool, - SearchResponse.Clusters clusters - ) { - return new AbstractSearchAsyncAction( - actionName, - logger, - searchTransportService, - connectionLookup, - aliasFilter, - concreteIndexBoosts, - indexRoutings, - executor, - searchRequest, - listener, - shardsIts, - timeProvider, - clusterState, - task, - new ArraySearchPhaseResults<>(shardsIts.size()), - searchRequest.getMaxConcurrentShardRequests(), - clusters - ) { - @Override - protected void executePhaseOnShard( - SearchShardIterator shardIt, - SearchShardTarget shard, - SearchActionListener listener - ) { - final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); - phaseSearchAction.executeOnShardTarget(task, shard, connection, listener); - } - - @Override - protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { - return new SearchPhase(getName()) { - @Override - public void run() { - final AtomicArray atomicArray = results.getAtomicArray(); - sendSearchResponse(InternalSearchResponse.empty(), atomicArray); - } - }; - } - - @Override - boolean buildPointInTimeFromSearchResults() { - return includeSearchContext; - } - }; - } - }, listener); - 
} - private void executeRequest( Task task, SearchRequest searchRequest, From 1535c67d44e9282c360974fc0cfd0a54d14736a5 Mon Sep 17 00:00:00 2001 From: vpehkone <101240162+vpehkone@users.noreply.github.com> Date: Thu, 26 May 2022 08:05:52 -0700 Subject: [PATCH 33/75] Set term vector flags to false for ._index_prefix field (#1901). (#3119) * Set term vector flags to false for ._index_prefix field (#1901). Signed-off-by: Vesa Pehkonen * Replaced the FieldType copy ctor with ctor for the prefix field and replaced setting the field type parameters with setIndexOptions(). (#1901) Signed-off-by: Vesa Pehkonen * Added tests for term vectors. (#1901) Signed-off-by: Vesa Pehkonen * Fixed code formatting error. Signed-off-by: Vesa Pehkonen Co-authored-by: sdp --- .../mapper/SearchAsYouTypeFieldMapper.java | 4 +-- .../SearchAsYouTypeFieldMapperTests.java | 27 ++++++++++++++----- 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java index 68b887c4c4a43..1b6aad0bda32a 100644 --- a/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -205,8 +205,8 @@ public SearchAsYouTypeFieldMapper build(Mapper.BuilderContext context) { ft.setIndexAnalyzer(analyzers.getIndexAnalyzer()); // set up the prefix field - FieldType prefixft = new FieldType(fieldType); - prefixft.setStoreTermVectors(false); + FieldType prefixft = new FieldType(); + prefixft.setIndexOptions(fieldType.indexOptions()); prefixft.setOmitNorms(true); prefixft.setStored(false); final String fullName = buildFullName(context); diff --git a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java 
b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java index 786791314692d..7c4b8956d9e3c 100644 --- a/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/opensearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -352,15 +352,30 @@ public void testIndex() throws IOException { } public void testTermVectors() throws IOException { - DocumentMapper mapper = createDocumentMapper(fieldMapping(b -> b.field("type", "search_as_you_type").field("term_vector", "yes"))); + String[] termVectors = { + "yes", + "with_positions", + "with_offsets", + "with_positions_offsets", + "with_positions_payloads", + "with_positions_offsets_payloads" }; + + for (String termVector : termVectors) { + DocumentMapper mapper = createDocumentMapper( + fieldMapping(b -> b.field("type", "search_as_you_type").field("term_vector", termVector)) + ); - assertTrue(getRootFieldMapper(mapper, "field").fieldType().fieldType.storeTermVectors()); + assertTrue(getRootFieldMapper(mapper, "field").fieldType().fieldType.storeTermVectors()); - Stream.of(getShingleFieldMapper(mapper, "field._2gram"), getShingleFieldMapper(mapper, "field._3gram")) - .forEach(m -> assertTrue("for " + m.name(), m.fieldType.storeTermVectors())); + Stream.of(getShingleFieldMapper(mapper, "field._2gram"), getShingleFieldMapper(mapper, "field._3gram")) + .forEach(m -> assertTrue("for " + m.name(), m.fieldType.storeTermVectors())); - PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(mapper, "field._index_prefix"); - assertFalse(prefixFieldMapper.fieldType.storeTermVectors()); + PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(mapper, "field._index_prefix"); + assertFalse(prefixFieldMapper.fieldType.storeTermVectors()); + assertFalse(prefixFieldMapper.fieldType.storeTermVectorOffsets()); + assertFalse(prefixFieldMapper.fieldType.storeTermVectorPositions()); + 
assertFalse(prefixFieldMapper.fieldType.storeTermVectorPayloads()); + } } public void testNorms() throws IOException { From 3dd712123766c7b104b0e83fab74fb802678ee20 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Fri, 27 May 2022 08:01:50 -0400 Subject: [PATCH 34/75] [BUG] Fixing org.opensearch.monitor.os.OsProbeTests > testLogWarnCpuMessageOnlyOnes when cgroups are available but cgroup stats is not (#3448) Signed-off-by: Andriy Redko --- .../java/org/opensearch/monitor/os/OsProbeTests.java | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java index 505dce8879bdd..575ab02bd6f07 100644 --- a/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java +++ b/server/src/test/java/org/opensearch/monitor/os/OsProbeTests.java @@ -32,6 +32,8 @@ package org.opensearch.monitor.os; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.both; @@ -296,8 +298,12 @@ List readSysFsCgroupCpuAcctCpuStat(String controlGroup) throws IOExcepti } }; - assumeThat("CGroups are not available", noCpuStatsOsProbe.areCgroupStatsAvailable(), is(true)); - noCpuStatsOsProbe.osStats(); + assumeThat("CGroups are available", noCpuStatsOsProbe.areCgroupStatsAvailable(), is(true)); + OsStats osStats = noCpuStatsOsProbe.osStats(); + + // Depending on CGroups v1/v2, the cgroup stats may not be available + assumeThat("CGroup is available", osStats.getCgroup(), is(not(nullValue()))); + // no nr_throttled and throttled_time verify(logger, times(2)).warn(anyString()); reset(logger); From 31c5be5dbfbc3656dfa3d7f3d11fe9f21993321a Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 27 May 2022 12:25:12 -0700 Subject: [PATCH 35/75] [Segment Replication] Add 
SegmentReplicationTargetService to orchestrate replication events. (#3439) * Add SegmentReplicationTargetService to orchestrate replication events. This change introduces boilerplate classes for Segment Replication and a target service to orchestrate replication events. It also includes two refactors of peer recovery components for reuse. 1. Rename RecoveryFileChunkRequest to FileChunkRequest and extract code to handle throttling into ReplicationTarget. 2. Extracts a component to execute retryable requests over the transport layer. Signed-off-by: Marc Handalian * Code cleanup. Signed-off-by: Marc Handalian * Make SegmentReplicationTargetService component final so that it can not be extended by plugins. Signed-off-by: Marc Handalian --- .../index/store/CorruptedFileIT.java | 8 +- .../org/opensearch/recovery/RelocationIT.java | 4 +- .../recovery/TruncatedRecoveryIT.java | 4 +- ...hunkRequest.java => FileChunkRequest.java} | 8 +- .../recovery/PeerRecoveryTargetService.java | 119 ++++-------- .../indices/recovery/RecoveryState.java | 1 + .../indices/recovery/RecoveryTarget.java | 3 +- .../recovery/RemoteRecoveryTargetHandler.java | 124 +++---------- .../recovery/RetryableTransportClient.java | 139 ++++++++++++++ .../replication/CheckpointInfoResponse.java | 79 ++++++++ .../replication/GetSegmentFilesResponse.java | 40 +++++ .../replication/SegmentReplicationSource.java | 50 ++++++ .../SegmentReplicationSourceFactory.java | 41 +++++ .../replication/SegmentReplicationState.java | 84 +++++++++ .../replication/SegmentReplicationTarget.java | 115 ++++++++++++ .../SegmentReplicationTargetService.java | 170 ++++++++++++++++++ .../common/ReplicationCollection.java | 2 +- .../replication/common/ReplicationState.java | 2 + .../replication/common/ReplicationTarget.java | 96 +++++++++- .../PeerRecoveryTargetServiceTests.java | 8 +- .../SegmentReplicationTargetServiceTests.java | 127 +++++++++++++ 21 files changed, 1017 insertions(+), 207 deletions(-) rename 
server/src/main/java/org/opensearch/indices/recovery/{RecoveryFileChunkRequest.java => FileChunkRequest.java} (95%) create mode 100644 server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java index 3a5e21fc8ef65..ee2067c591cef 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/store/CorruptedFileIT.java @@ -77,7 +77,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFileChunkRequest; +import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.monitor.fs.FsInfo; import org.opensearch.plugins.Plugin; import org.opensearch.snapshots.SnapshotState; @@ -397,7 +397,7 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() 
throws ExecutionExc internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + FileChunkRequest req = (FileChunkRequest) request; byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes; int i = randomIntBetween(0, req.content().length() - 1); array[i] = (byte) ~array[i]; // flip one byte in the content @@ -474,11 +474,11 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, Interrupte internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + FileChunkRequest req = (FileChunkRequest) request; if (truncate && req.length() > 1) { BytesRef bytesRef = req.content().toBytesRef(); BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1); - request = new RecoveryFileChunkRequest( + request = new FileChunkRequest( req.recoveryId(), req.requestSeqNo(), req.shardId(), diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java index 06475f1e7ac9d..1f16cc0363686 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/RelocationIT.java @@ -67,7 +67,7 @@ import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFileChunkRequest; +import org.opensearch.indices.recovery.FileChunkRequest; import 
org.opensearch.plugins.Plugin; import org.opensearch.search.SearchHit; import org.opensearch.search.SearchHits; @@ -809,7 +809,7 @@ public void sendRequest( TransportRequestOptions options ) throws IOException { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest chunkRequest = (RecoveryFileChunkRequest) request; + FileChunkRequest chunkRequest = (FileChunkRequest) request; if (chunkRequest.name().startsWith(IndexFileNames.SEGMENTS)) { // corrupting the segments_N files in order to make sure future recovery re-send files logger.debug("corrupting [{}] to {}. file name: [{}]", action, connection.getNode(), chunkRequest.name()); diff --git a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java index 1708454faf7b3..b5d7bd476059d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/recovery/TruncatedRecoveryIT.java @@ -43,7 +43,7 @@ import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.index.query.QueryBuilders; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFileChunkRequest; +import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchIntegTestCase; @@ -146,7 +146,7 @@ public void testCancelRecoveryAndResume() throws Exception { internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { - RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; + FileChunkRequest req = (FileChunkRequest) request; logger.info("file chunk [{}] lastChunk: {}", 
req, req.lastChunk()); if ((req.name().endsWith("cfs") || req.name().endsWith("fdt")) && req.lastChunk() && truncate.get()) { latch.countDown(); diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFileChunkRequest.java b/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java similarity index 95% rename from server/src/main/java/org/opensearch/indices/recovery/RecoveryFileChunkRequest.java rename to server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java index 886de8d56645c..3594495224481 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFileChunkRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/FileChunkRequest.java @@ -43,11 +43,11 @@ import java.io.IOException; /** - * Request for a recovery file chunk + * Request containing a file chunk. * * @opensearch.internal */ -public final class RecoveryFileChunkRequest extends RecoveryTransportRequest { +public final class FileChunkRequest extends RecoveryTransportRequest { private final boolean lastChunk; private final long recoveryId; private final ShardId shardId; @@ -58,7 +58,7 @@ public final class RecoveryFileChunkRequest extends RecoveryTransportRequest { private final int totalTranslogOps; - public RecoveryFileChunkRequest(StreamInput in) throws IOException { + public FileChunkRequest(StreamInput in) throws IOException { super(in); recoveryId = in.readLong(); shardId = new ShardId(in); @@ -75,7 +75,7 @@ public RecoveryFileChunkRequest(StreamInput in) throws IOException { sourceThrottleTimeInNanos = in.readLong(); } - public RecoveryFileChunkRequest( + public FileChunkRequest( long recoveryId, final long requestSeqNo, ShardId shardId, diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index e13022afa81ba..85141556657f3 100644 --- 
a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -36,20 +36,17 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.RateLimiter; -import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; -import org.opensearch.action.support.ChannelActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; @@ -60,7 +57,6 @@ import org.opensearch.index.IndexNotFoundException; import org.opensearch.index.engine.RecoveryEngineException; import org.opensearch.index.mapper.MapperException; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexEventListener; import org.opensearch.index.shard.IndexShard; @@ -71,7 +67,6 @@ import org.opensearch.index.translog.TranslogCorruptedException; import org.opensearch.indices.replication.common.ReplicationCollection; import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; -import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; import 
org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; @@ -148,7 +143,7 @@ public PeerRecoveryTargetService( transportService.registerRequestHandler( Actions.FILE_CHUNK, ThreadPool.Names.GENERIC, - RecoveryFileChunkRequest::new, + FileChunkRequest::new, new FileChunkTransportRequestHandler() ); transportService.registerRequestHandler( @@ -354,12 +349,13 @@ class PrepareForTranslogOperationsRequestHandler implements TransportRequestHand @Override public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.PREPARE_TRANSLOG, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.PREPARE_TRANSLOG, request); if (listener == null) { return; } - recoveryRef.get().prepareForTranslogOperations(request.totalTranslogOps(), listener); + recoveryTarget.prepareForTranslogOperations(request.totalTranslogOps(), listener); } } } @@ -369,12 +365,13 @@ class FinalizeRecoveryRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FINALIZE, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.FINALIZE, request); if (listener == null) { return; } - recoveryRef.get().finalizeRecovery(request.globalCheckpoint(), request.trimAboveSeqNo(), listener); + recoveryTarget.finalizeRecovery(request.globalCheckpoint(), request.trimAboveSeqNo(), listener); } } } @@ -399,8 +396,7 @@ public void messageReceived(final RecoveryTranslogOperationsRequest request, fin throws 
IOException { try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); - final ActionListener listener = createOrFinishListener( - recoveryRef, + final ActionListener listener = recoveryTarget.createOrFinishListener( channel, Actions.TRANSLOG_OPS, request, @@ -484,20 +480,20 @@ class FilesInfoRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILES_INFO, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.FILES_INFO, request); if (listener == null) { return; } - recoveryRef.get() - .receiveFileInfo( - request.phase1FileNames, - request.phase1FileSizes, - request.phase1ExistingFileNames, - request.phase1ExistingFileSizes, - request.totalTranslogOps, - listener - ); + recoveryTarget.receiveFileInfo( + request.phase1FileNames, + request.phase1FileSizes, + request.phase1ExistingFileNames, + request.phase1ExistingFileSizes, + request.totalTranslogOps, + listener + ); } } } @@ -507,90 +503,37 @@ class CleanFilesRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.CLEAN_FILES, request); + final RecoveryTarget recoveryTarget = recoveryRef.get(); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.CLEAN_FILES, request); if (listener == null) { return; } - recoveryRef.get() - .cleanFiles(request.totalTranslogOps(), request.getGlobalCheckpoint(), request.sourceMetaSnapshot(), listener); + recoveryTarget.cleanFiles( + request.totalTranslogOps(), + request.getGlobalCheckpoint(), + 
request.sourceMetaSnapshot(), + listener + ); } } } - class FileChunkTransportRequestHandler implements TransportRequestHandler { + class FileChunkTransportRequestHandler implements TransportRequestHandler { // How many bytes we've copied since we last called RateLimiter.pause final AtomicLong bytesSinceLastPause = new AtomicLong(); @Override - public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel, Task task) throws Exception { + public void messageReceived(final FileChunkRequest request, TransportChannel channel, Task task) throws Exception { try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); - final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILE_CHUNK, request); - if (listener == null) { - return; - } - - final ReplicationLuceneIndex indexState = recoveryTarget.state().getIndex(); - if (request.sourceThrottleTimeInNanos() != ReplicationLuceneIndex.UNKNOWN) { - indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); - } - - RateLimiter rateLimiter = recoverySettings.rateLimiter(); - if (rateLimiter != null) { - long bytes = bytesSinceLastPause.addAndGet(request.content().length()); - if (bytes > rateLimiter.getMinPauseCheckBytes()) { - // Time to pause - bytesSinceLastPause.addAndGet(-bytes); - long throttleTimeInNanos = rateLimiter.pause(bytes); - indexState.addTargetThrottling(throttleTimeInNanos); - recoveryTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); - } - } - recoveryTarget.writeFileChunk( - request.metadata(), - request.position(), - request.content(), - request.lastChunk(), - request.totalTranslogOps(), - listener - ); + final ActionListener listener = recoveryTarget.createOrFinishListener(channel, Actions.FILE_CHUNK, request); + recoveryTarget.handleFileChunk(request, recoveryTarget, bytesSinceLastPause, 
recoverySettings.rateLimiter(), listener); } } } - private ActionListener createOrFinishListener( - final ReplicationRef recoveryRef, - final TransportChannel channel, - final String action, - final RecoveryTransportRequest request - ) { - return createOrFinishListener(recoveryRef, channel, action, request, nullVal -> TransportResponse.Empty.INSTANCE); - } - - private ActionListener createOrFinishListener( - final ReplicationRef recoveryRef, - final TransportChannel channel, - final String action, - final RecoveryTransportRequest request, - final CheckedFunction responseFn - ) { - final RecoveryTarget recoveryTarget = recoveryRef.get(); - final ActionListener channelListener = new ChannelActionListener<>(channel, action, request); - final ActionListener voidListener = ActionListener.map(channelListener, responseFn); - - final long requestSeqNo = request.requestSeqNo(); - final ActionListener listener; - if (requestSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { - listener = recoveryTarget.markRequestReceivedAndCreateListener(requestSeqNo, voidListener); - } else { - listener = voidListener; - } - - return listener; - } - class RecoveryRunner extends AbstractRunnable { final long recoveryId; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index a3c7adb755145..57208ab029bf4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -260,6 +260,7 @@ public Translog getTranslog() { return translog; } + @Override public ReplicationTimer getTimer() { return timer; } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index 92897ab19ad64..1735bb015c90c 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ 
b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; @@ -141,7 +142,7 @@ public String description() { } @Override - public void notifyListener(Exception e, boolean sendShardFailure) { + public void notifyListener(OpenSearchException e, boolean sendShardFailure) { listener.onFailure(state(), new RecoveryFailedException(state(), e.getMessage(), e), sendShardFailure); } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index fd6de6322bb0a..ab6466feb11f8 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -35,38 +35,24 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; -import org.opensearch.action.ActionListenerResponseHandler; -import org.opensearch.action.support.RetryableAction; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.breaker.CircuitBreakingException; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.CancellableThreads; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import 
org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.EmptyTransportResponseHandler; -import org.opensearch.transport.RemoteTransportException; -import org.opensearch.transport.SendRequestTransportException; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -80,12 +66,10 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private static final Logger logger = LogManager.getLogger(RemoteRecoveryTargetHandler.class); private final TransportService transportService; - private final ThreadPool threadPool; private final long recoveryId; private final ShardId shardId; private final DiscoveryNode targetNode; private final RecoverySettings recoverySettings; - private final Map> onGoingRetryableActions = ConcurrentCollections.newConcurrentMap(); private final TransportRequestOptions translogOpsRequestOptions; private final TransportRequestOptions fileChunkRequestOptions; @@ -94,8 +78,7 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler { private final AtomicLong requestSeqNoGenerator = new AtomicLong(0); private final Consumer onSourceThrottle; - private final boolean retriesSupported; - private volatile boolean isCancelled = false; + private final RetryableTransportClient retryableTransportClient; public 
RemoteRecoveryTargetHandler( long recoveryId, @@ -106,7 +89,15 @@ public RemoteRecoveryTargetHandler( Consumer onSourceThrottle ) { this.transportService = transportService; - this.threadPool = transportService.getThreadPool(); + // It is safe to pass the retry timeout value here because RemoteRecoveryTargetHandler + // created per recovery. Any change to RecoverySettings will be applied on the next + // recovery. + this.retryableTransportClient = new RetryableTransportClient( + transportService, + targetNode, + recoverySettings.internalActionRetryTimeout(), + logger + ); this.recoveryId = recoveryId; this.shardId = shardId; this.targetNode = targetNode; @@ -120,7 +111,6 @@ public RemoteRecoveryTargetHandler( .withType(TransportRequestOptions.Type.RECOVERY) .withTimeout(recoverySettings.internalActionTimeout()) .build(); - this.retriesSupported = targetNode.getVersion().onOrAfter(LegacyESVersion.V_7_9_0); } public DiscoveryNode targetNode() { @@ -137,12 +127,9 @@ public void prepareForTranslogOperations(int totalTranslogOps, ActionListener reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -156,12 +143,9 @@ public void finalizeRecovery(final long globalCheckpoint, final long trimAboveSe globalCheckpoint, trimAboveSeqNo ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionLongTimeout()) - .build(); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ 
-200,7 +184,7 @@ public void indexTranslogOperations( ); final Writeable.Reader reader = RecoveryTranslogOperationsResponse::new; final ActionListener responseListener = ActionListener.map(listener, r -> r.localCheckpoint); - executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, translogOpsRequestOptions, responseListener, reader); } @Override @@ -224,12 +208,9 @@ public void receiveFileInfo( phase1ExistingFileSizes, totalTranslogOps ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionTimeout()) - .build(); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -249,12 +230,9 @@ public void cleanFiles( totalTranslogOps, globalCheckpoint ); - final TransportRequestOptions options = TransportRequestOptions.builder() - .withTimeout(recoverySettings.internalActionTimeout()) - .build(); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; final ActionListener responseListener = ActionListener.map(listener, r -> null); - executeRetryableAction(action, request, options, responseListener, reader); + retryableTransportClient.executeRetryableAction(action, request, responseListener, reader); } @Override @@ -294,7 +272,7 @@ public void writeFileChunk( * see how many translog ops we accumulate while copying files across the network. A future optimization * would be in to restart file copy again (new deltas) if we have too many translog ops are piling up. 
*/ - final RecoveryFileChunkRequest request = new RecoveryFileChunkRequest( + final FileChunkRequest request = new FileChunkRequest( recoveryId, requestSeqNo, shardId, @@ -306,71 +284,17 @@ public void writeFileChunk( throttleTimeInNanos ); final Writeable.Reader reader = in -> TransportResponse.Empty.INSTANCE; - executeRetryableAction(action, request, fileChunkRequestOptions, ActionListener.map(listener, r -> null), reader); + retryableTransportClient.executeRetryableAction( + action, + request, + fileChunkRequestOptions, + ActionListener.map(listener, r -> null), + reader + ); } @Override public void cancel() { - isCancelled = true; - if (onGoingRetryableActions.isEmpty()) { - return; - } - final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("recovery was cancelled"); - // Dispatch to generic as cancellation calls can come on the cluster state applier thread - threadPool.generic().execute(() -> { - for (RetryableAction action : onGoingRetryableActions.values()) { - action.cancel(exception); - } - onGoingRetryableActions.clear(); - }); - } - - private void executeRetryableAction( - String action, - RecoveryTransportRequest request, - TransportRequestOptions options, - ActionListener actionListener, - Writeable.Reader reader - ) { - final Object key = new Object(); - final ActionListener removeListener = ActionListener.runBefore(actionListener, () -> onGoingRetryableActions.remove(key)); - final TimeValue initialDelay = TimeValue.timeValueMillis(200); - final TimeValue timeout = recoverySettings.internalActionRetryTimeout(); - final RetryableAction retryableAction = new RetryableAction(logger, threadPool, initialDelay, timeout, removeListener) { - - @Override - public void tryAction(ActionListener listener) { - transportService.sendRequest( - targetNode, - action, - request, - options, - new ActionListenerResponseHandler<>(listener, reader, ThreadPool.Names.GENERIC) - ); - } - - @Override - public boolean shouldRetry(Exception e) { 
- return retriesSupported && retryableException(e); - } - }; - onGoingRetryableActions.put(key, retryableAction); - retryableAction.run(); - if (isCancelled) { - retryableAction.cancel(new CancellableThreads.ExecutionCancelledException("recovery was cancelled")); - } - } - - private static boolean retryableException(Exception e) { - if (e instanceof ConnectTransportException) { - return true; - } else if (e instanceof SendRequestTransportException) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - return cause instanceof ConnectTransportException; - } else if (e instanceof RemoteTransportException) { - final Throwable cause = ExceptionsHelper.unwrapCause(e); - return cause instanceof CircuitBreakingException || cause instanceof OpenSearchRejectedExecutionException; - } - return false; + retryableTransportClient.cancel(); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java new file mode 100644 index 0000000000000..bc10cc80b7fdc --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.recovery; + +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.LegacyESVersion; +import org.opensearch.action.ActionListener; +import org.opensearch.action.ActionListenerResponseHandler; +import org.opensearch.action.support.RetryableAction; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.RemoteTransportException; +import org.opensearch.transport.SendRequestTransportException; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestOptions; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportService; + +import java.util.Map; + +/** + * Client that implements retry functionality for transport layer requests. 
+ * + * @opensearch.internal + */ +public final class RetryableTransportClient { + + private final ThreadPool threadPool; + private final Map> onGoingRetryableActions = ConcurrentCollections.newConcurrentMap(); + private volatile boolean isCancelled = false; + private final TransportService transportService; + private final TimeValue retryTimeout; + private final DiscoveryNode targetNode; + + private final Logger logger; + + public RetryableTransportClient(TransportService transportService, DiscoveryNode targetNode, TimeValue retryTimeout, Logger logger) { + this.threadPool = transportService.getThreadPool(); + this.transportService = transportService; + this.retryTimeout = retryTimeout; + this.targetNode = targetNode; + this.logger = logger; + } + + /** + * Execute a retryable action. + * @param action {@link String} Action Name. + * @param request {@link TransportRequest} Transport request to execute. + * @param actionListener {@link ActionListener} Listener to complete + * @param reader {@link Writeable.Reader} Reader to read the response stream. + * @param {@link TransportResponse} type. 
+ */ + public void executeRetryableAction( + String action, + TransportRequest request, + ActionListener actionListener, + Writeable.Reader reader + ) { + final TransportRequestOptions options = TransportRequestOptions.builder().withTimeout(retryTimeout).build(); + executeRetryableAction(action, request, options, actionListener, reader); + } + + void executeRetryableAction( + String action, + TransportRequest request, + TransportRequestOptions options, + ActionListener actionListener, + Writeable.Reader reader + ) { + final Object key = new Object(); + final ActionListener removeListener = ActionListener.runBefore(actionListener, () -> onGoingRetryableActions.remove(key)); + final TimeValue initialDelay = TimeValue.timeValueMillis(200); + final RetryableAction retryableAction = new RetryableAction(logger, threadPool, initialDelay, retryTimeout, removeListener) { + + @Override + public void tryAction(ActionListener listener) { + transportService.sendRequest( + targetNode, + action, + request, + options, + new ActionListenerResponseHandler<>(listener, reader, ThreadPool.Names.GENERIC) + ); + } + + @Override + public boolean shouldRetry(Exception e) { + return targetNode.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) && retryableException(e); + } + }; + onGoingRetryableActions.put(key, retryableAction); + retryableAction.run(); + if (isCancelled) { + retryableAction.cancel(new CancellableThreads.ExecutionCancelledException("retryable action was cancelled")); + } + } + + public void cancel() { + isCancelled = true; + if (onGoingRetryableActions.isEmpty()) { + return; + } + final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); + // Dispatch to generic as cancellation calls can come on the cluster state applier thread + threadPool.generic().execute(() -> { + for (RetryableAction action : onGoingRetryableActions.values()) { + action.cancel(exception); + } + onGoingRetryableActions.clear(); + }); + } + + 
private static boolean retryableException(Exception e) { + if (e instanceof ConnectTransportException) { + return true; + } else if (e instanceof SendRequestTransportException) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + return cause instanceof ConnectTransportException; + } else if (e instanceof RemoteTransportException) { + final Throwable cause = ExceptionsHelper.unwrapCause(e); + return cause instanceof CircuitBreakingException || cause instanceof OpenSearchRejectedExecutionException; + } + return false; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java new file mode 100644 index 0000000000000..a73a3b54184da --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Set; + +/** + * Response returned from a {@link SegmentReplicationSource} that includes the file metadata, and SegmentInfos + * associated with a particular {@link ReplicationCheckpoint}. The {@link SegmentReplicationSource} may determine that + * the requested {@link ReplicationCheckpoint} is behind and return a different {@link ReplicationCheckpoint} in this response. 
+ * + * @opensearch.internal + */ +public class CheckpointInfoResponse extends TransportResponse { + + private final ReplicationCheckpoint checkpoint; + private final Store.MetadataSnapshot snapshot; + private final byte[] infosBytes; + // pendingDeleteFiles are segments that have been merged away in the latest in memory SegmentInfos + // but are still referenced by the latest commit point (Segments_N). + private final Set pendingDeleteFiles; + + public CheckpointInfoResponse( + final ReplicationCheckpoint checkpoint, + final Store.MetadataSnapshot snapshot, + final byte[] infosBytes, + final Set additionalFiles + ) { + this.checkpoint = checkpoint; + this.snapshot = snapshot; + this.infosBytes = infosBytes; + this.pendingDeleteFiles = additionalFiles; + } + + public CheckpointInfoResponse(StreamInput in) throws IOException { + this.checkpoint = new ReplicationCheckpoint(in); + this.snapshot = new Store.MetadataSnapshot(in); + this.infosBytes = in.readByteArray(); + this.pendingDeleteFiles = in.readSet(StoreFileMetadata::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + checkpoint.writeTo(out); + snapshot.writeTo(out); + out.writeByteArray(infosBytes); + out.writeCollection(pendingDeleteFiles); + } + + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } + + public Store.MetadataSnapshot getSnapshot() { + return snapshot; + } + + public byte[] getInfosBytes() { + return infosBytes; + } + + public Set getPendingDeleteFiles() { + return pendingDeleteFiles; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java new file mode 100644 index 0000000000000..6dc7e293b2c31 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require 
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.List; + +/** + * Response from a {@link SegmentReplicationSource} indicating that a replication event has completed. + * + * @opensearch.internal + */ +public class GetSegmentFilesResponse extends TransportResponse { + + List files; + + public GetSegmentFilesResponse(List files) { + this.files = files; + } + + public GetSegmentFilesResponse(StreamInput out) throws IOException { + out.readList(StoreFileMetadata::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(files); + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java new file mode 100644 index 0000000000000..8628a266ea7d0 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSource.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.action.ActionListener; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; + +import java.util.List; + +/** + * Represents the source of a replication event. 
+ * + * @opensearch.internal + */ +public interface SegmentReplicationSource { + + /** + * Get Metadata for a ReplicationCheckpoint. + * + * @param replicationId {@link long} - ID of the replication event. + * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. + * @param listener {@link ActionListener} listener that completes with a {@link CheckpointInfoResponse}. + */ + void getCheckpointMetadata(long replicationId, ReplicationCheckpoint checkpoint, ActionListener listener); + + /** + * Fetch the requested segment files. Passes a listener that completes when files are stored locally. + * + * @param replicationId {@link long} - ID of the replication event. + * @param checkpoint {@link ReplicationCheckpoint} Checkpoint to fetch metadata for. + * @param filesToFetch {@link List} List of files to fetch. + * @param store {@link Store} Reference to the local store. + * @param listener {@link ActionListener} Listener that completes with the list of files copied. + */ + void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ); +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java new file mode 100644 index 0000000000000..3ca31503f176d --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -0,0 +1,41 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.transport.TransportService; + +/** + * Factory to build {@link SegmentReplicationSource} used by {@link SegmentReplicationTargetService}. + * + * @opensearch.internal + */ +public class SegmentReplicationSourceFactory { + + private TransportService transportService; + private RecoverySettings recoverySettings; + private ClusterService clusterService; + + public SegmentReplicationSourceFactory( + TransportService transportService, + RecoverySettings recoverySettings, + ClusterService clusterService + ) { + this.transportService = transportService; + this.recoverySettings = recoverySettings; + this.clusterService = clusterService; + } + + public SegmentReplicationSource get(IndexShard shard) { + // TODO: Default to an implementation that uses the primary shard. + return null; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java new file mode 100644 index 0000000000000..b01016d2a1e62 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationState.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationTimer; + +/** + * ReplicationState implementation to track Segment Replication events. 
+ * + * @opensearch.internal + */ +public class SegmentReplicationState implements ReplicationState { + + /** + * The stage of the recovery state + * + * @opensearch.internal + */ + public enum Stage { + DONE((byte) 0), + + INIT((byte) 1); + + private static final Stage[] STAGES = new Stage[Stage.values().length]; + + static { + for (Stage stage : Stage.values()) { + assert stage.id() < STAGES.length && stage.id() >= 0; + STAGES[stage.id] = stage; + } + } + + private final byte id; + + Stage(byte id) { + this.id = id; + } + + public byte id() { + return id; + } + + public static Stage fromId(byte id) { + if (id < 0 || id >= STAGES.length) { + throw new IllegalArgumentException("No mapping for id [" + id + "]"); + } + return STAGES[id]; + } + } + + public SegmentReplicationState() { + this.stage = Stage.INIT; + } + + private Stage stage; + + @Override + public ReplicationLuceneIndex getIndex() { + // TODO + return null; + } + + @Override + public ReplicationTimer getTimer() { + // TODO + return null; + } + + public Stage getStage() { + return stage; + } + + public void setStage(Stage stage) { + this.stage = stage; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java new file mode 100644 index 0000000000000..7933ea5f0344b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -0,0 +1,115 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationTarget; + +import java.io.IOException; + +/** + * Represents the target of a replication event. + * + * @opensearch.internal + */ +public class SegmentReplicationTarget extends ReplicationTarget { + + private final ReplicationCheckpoint checkpoint; + private final SegmentReplicationSource source; + private final SegmentReplicationState state; + + public SegmentReplicationTarget( + ReplicationCheckpoint checkpoint, + IndexShard indexShard, + SegmentReplicationSource source, + SegmentReplicationTargetService.SegmentReplicationListener listener + ) { + super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); + this.checkpoint = checkpoint; + this.source = source; + this.state = new SegmentReplicationState(); + } + + @Override + protected void closeInternal() { + // TODO + } + + @Override + protected String getPrefix() { + // TODO + return null; + } + + @Override + protected void onDone() { + this.state.setStage(SegmentReplicationState.Stage.DONE); + } + + @Override + protected void onCancel(String reason) { + // TODO + } + + @Override + public ReplicationState state() { + return state; + } + + @Override + public ReplicationTarget retryCopy() { + // TODO + return null; + } + + @Override + public String description() { + // TODO + return null; + } + + @Override + public void notifyListener(OpenSearchException e, boolean 
sendShardFailure) { + listener.onFailure(state(), e, sendShardFailure); + } + + @Override + public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOException { + // TODO + return false; + } + + @Override + public void writeFileChunk( + StoreFileMetadata metadata, + long position, + BytesReference content, + boolean lastChunk, + int totalTranslogOps, + ActionListener listener + ) { + // TODO + } + + /** + * Start the Replication event. + * @param listener {@link ActionListener} listener. + */ + public void startReplication(ActionListener listener) { + // TODO + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java new file mode 100644 index 0000000000000..1c6053a72a4c5 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -0,0 +1,170 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.transport.TransportService; + +import java.util.concurrent.atomic.AtomicLong; + +/** + * Service class that orchestrates replication events on replicas. 
+ * + * @opensearch.internal + */ +public final class SegmentReplicationTargetService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(SegmentReplicationTargetService.class); + + private final ThreadPool threadPool; + private final RecoverySettings recoverySettings; + + private final ReplicationCollection onGoingReplications; + + private final SegmentReplicationSourceFactory sourceFactory; + + /** + * The internal actions + * + * @opensearch.internal + */ + public static class Actions { + public static final String FILE_CHUNK = "internal:index/shard/replication/file_chunk"; + } + + public SegmentReplicationTargetService( + final ThreadPool threadPool, + final RecoverySettings recoverySettings, + final TransportService transportService, + final SegmentReplicationSourceFactory sourceFactory + ) { + this.threadPool = threadPool; + this.recoverySettings = recoverySettings; + this.onGoingReplications = new ReplicationCollection<>(logger, threadPool); + this.sourceFactory = sourceFactory; + + transportService.registerRequestHandler( + Actions.FILE_CHUNK, + ThreadPool.Names.GENERIC, + FileChunkRequest::new, + new FileChunkTransportRequestHandler() + ); + } + + @Override + public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { + if (indexShard != null) { + onGoingReplications.cancelForShard(shardId, "shard closed"); + } + } + + public void startReplication( + final ReplicationCheckpoint checkpoint, + final IndexShard indexShard, + final SegmentReplicationListener listener + ) { + startReplication(new SegmentReplicationTarget(checkpoint, indexShard, sourceFactory.get(indexShard), listener)); + } + + public void startReplication(final SegmentReplicationTarget target) { + final long replicationId = onGoingReplications.start(target, recoverySettings.activityTimeout()); + logger.trace(() -> new ParameterizedMessage("Starting replication {}", replicationId)); + 
threadPool.generic().execute(new ReplicationRunner(replicationId)); + } + + /** + * Listener that runs on changes in Replication state + * + * @opensearch.internal + */ + public interface SegmentReplicationListener extends ReplicationListener { + + @Override + default void onDone(ReplicationState state) { + onReplicationDone((SegmentReplicationState) state); + } + + @Override + default void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + onReplicationFailure((SegmentReplicationState) state, e, sendShardFailure); + } + + void onReplicationDone(SegmentReplicationState state); + + void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure); + } + + /** + * Runnable implementation to trigger a replication event. + */ + private class ReplicationRunner implements Runnable { + + final long replicationId; + + public ReplicationRunner(long replicationId) { + this.replicationId = replicationId; + } + + @Override + public void run() { + start(replicationId); + } + } + + private void start(final long replicationId) { + try (ReplicationRef replicationRef = onGoingReplications.get(replicationId)) { + replicationRef.get().startReplication(new ActionListener<>() { + @Override + public void onResponse(Void o) { + onGoingReplications.markAsDone(replicationId); + } + + @Override + public void onFailure(Exception e) { + onGoingReplications.fail(replicationId, new OpenSearchException("Segment Replication failed", e), true); + } + }); + } + } + + private class FileChunkTransportRequestHandler implements TransportRequestHandler { + + // How many bytes we've copied since we last called RateLimiter.pause + final AtomicLong bytesSinceLastPause = new AtomicLong(); + + @Override + public void messageReceived(final FileChunkRequest request, TransportChannel channel, Task task) throws Exception { + try (ReplicationRef ref = onGoingReplications.getSafe(request.recoveryId(), request.shardId())) { + final 
SegmentReplicationTarget target = ref.get(); + final ActionListener listener = target.createOrFinishListener(channel, Actions.FILE_CHUNK, request); + target.handleFileChunk(request, target, bytesSinceLastPause, recoverySettings.rateLimiter(), listener); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java index 609825eb5227b..b8295f0685a7f 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -133,7 +133,7 @@ public T reset(final long id, final TimeValue activityTimeout) { } catch (Exception e) { // fail shard to be safe assert oldTarget != null; - oldTarget.notifyListener(e, true); + oldTarget.notifyListener(new OpenSearchException("Unable to reset target", e), true); return null; } } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java index 7942fa8938dd0..029fcb6a3b690 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java @@ -14,5 +14,7 @@ * @opensearch.internal */ public interface ReplicationState { + ReplicationLuceneIndex getIndex(); + ReplicationTimer getTimer(); } diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 0192270907fd2..f8dc07f122c02 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -9,14 +9,25 @@ package 
org.opensearch.indices.replication.common; import org.apache.logging.log4j.Logger; +import org.apache.lucene.store.RateLimiter; import org.opensearch.ExceptionsHelper; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ChannelActionListener; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.Nullable; +import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.logging.Loggers; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.recovery.RecoveryTransportRequest; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; @@ -64,7 +75,7 @@ public CancellableThreads cancellableThreads() { return cancellableThreads; } - public abstract void notifyListener(Exception e, boolean sendShardFailure); + public abstract void notifyListener(OpenSearchException e, boolean sendShardFailure); public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIndex stateIndex, ReplicationListener listener) { super(name); @@ -98,6 +109,7 @@ public void setLastAccessTime() { lastAccessTime = System.nanoTime(); } + @Nullable public ActionListener markRequestReceivedAndCreateListener(long requestSeqNo, ActionListener listener) { return requestTracker.markReceivedAndCreateListener(requestSeqNo, listener); } @@ -172,4 +184,86 @@ protected void ensureRefCount() { } } + @Nullable + public ActionListener createOrFinishListener( + final TransportChannel channel, + final String action, + final 
RecoveryTransportRequest request + ) { + return createOrFinishListener(channel, action, request, nullVal -> TransportResponse.Empty.INSTANCE); + } + + @Nullable + public ActionListener createOrFinishListener( + final TransportChannel channel, + final String action, + final RecoveryTransportRequest request, + final CheckedFunction responseFn + ) { + final ActionListener channelListener = new ChannelActionListener<>(channel, action, request); + final ActionListener voidListener = ActionListener.map(channelListener, responseFn); + + final long requestSeqNo = request.requestSeqNo(); + final ActionListener listener; + if (requestSeqNo != SequenceNumbers.UNASSIGNED_SEQ_NO) { + listener = markRequestReceivedAndCreateListener(requestSeqNo, voidListener); + } else { + listener = voidListener; + } + + return listener; + } + + /** + * Handle a FileChunkRequest for a {@link ReplicationTarget}. + * + * @param request {@link FileChunkRequest} Request containing the file chunk. + * @param bytesSinceLastPause {@link AtomicLong} Bytes since the last pause. + * @param rateLimiter {@link RateLimiter} Rate limiter. + * @param listener {@link ActionListener} listener that completes when the chunk has been written. + * @throws IOException When there is an issue pausing the rate limiter. 
+ */ + public void handleFileChunk( + final FileChunkRequest request, + final ReplicationTarget replicationTarget, + final AtomicLong bytesSinceLastPause, + final RateLimiter rateLimiter, + final ActionListener listener + ) throws IOException { + + if (listener == null) { + return; + } + final ReplicationLuceneIndex indexState = replicationTarget.state().getIndex(); + if (request.sourceThrottleTimeInNanos() != ReplicationLuceneIndex.UNKNOWN) { + indexState.addSourceThrottling(request.sourceThrottleTimeInNanos()); + } + if (rateLimiter != null) { + long bytes = bytesSinceLastPause.addAndGet(request.content().length()); + if (bytes > rateLimiter.getMinPauseCheckBytes()) { + // Time to pause + bytesSinceLastPause.addAndGet(-bytes); + long throttleTimeInNanos = rateLimiter.pause(bytes); + indexState.addTargetThrottling(throttleTimeInNanos); + replicationTarget.indexShard().recoveryStats().addThrottleTime(throttleTimeInNanos); + } + } + writeFileChunk( + request.metadata(), + request.position(), + request.content(), + request.lastChunk(), + request.totalTranslogOps(), + listener + ); + } + + public abstract void writeFileChunk( + StoreFileMetadata metadata, + long position, + BytesReference content, + boolean lastChunk, + int totalTranslogOps, + ActionListener listener + ); } diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index e54f06937cad3..bda2a910d922e 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -105,7 +105,7 @@ public void testWriteFileChunksConcurrently() throws Exception { receiveFileInfoFuture ); receiveFileInfoFuture.actionGet(); - List requests = new ArrayList<>(); + List requests = new ArrayList<>(); long seqNo = 0; for (StoreFileMetadata md : mdFiles) { try 
(IndexInput in = sourceShard.store().directory().openInput(md.name(), IOContext.READONCE)) { @@ -115,7 +115,7 @@ public void testWriteFileChunksConcurrently() throws Exception { byte[] buffer = new byte[length]; in.readBytes(buffer, 0, length); requests.add( - new RecoveryFileChunkRequest( + new FileChunkRequest( 0, seqNo++, sourceShard.shardId(), @@ -132,7 +132,7 @@ public void testWriteFileChunksConcurrently() throws Exception { } } Randomness.shuffle(requests); - BlockingQueue queue = new ArrayBlockingQueue<>(requests.size()); + BlockingQueue queue = new ArrayBlockingQueue<>(requests.size()); queue.addAll(requests); Thread[] senders = new Thread[between(1, 4)]; CyclicBarrier barrier = new CyclicBarrier(senders.length); @@ -140,7 +140,7 @@ public void testWriteFileChunksConcurrently() throws Exception { senders[i] = new Thread(() -> { try { barrier.await(); - RecoveryFileChunkRequest r; + FileChunkRequest r; while ((r = queue.poll()) != null) { recoveryTarget.writeFileChunk( r.metadata(), diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java new file mode 100644 index 0000000000000..aa17dec5767da --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -0,0 +1,127 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.junit.Assert; +import org.mockito.Mockito; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportService; + +import java.io.IOException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { + + private IndexShard indexShard; + private ReplicationCheckpoint checkpoint; + private SegmentReplicationSource replicationSource; + private SegmentReplicationTargetService sut; + + @Override + public void setUp() throws Exception { + super.setUp(); + final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); + final TransportService transportService = mock(TransportService.class); + indexShard = newShard(false, settings); + checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 0L, 0L, 0L, 0L); + SegmentReplicationSourceFactory replicationSourceFactory = mock(SegmentReplicationSourceFactory.class); + replicationSource = mock(SegmentReplicationSource.class); + when(replicationSourceFactory.get(indexShard)).thenReturn(replicationSource); + + sut = new SegmentReplicationTargetService(threadPool, recoverySettings, transportService, replicationSourceFactory); + } + + @Override + public void tearDown() 
throws Exception { + closeShards(indexShard); + super.tearDown(); + } + + public void testTargetReturnsSuccess_listenerCompletes() throws IOException { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + replicationSource, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + Assert.fail(); + } + } + ); + final SegmentReplicationTarget spy = Mockito.spy(target); + doAnswer(invocation -> { + final ActionListener listener = invocation.getArgument(0); + listener.onResponse(null); + return null; + }).when(spy).startReplication(any()); + sut.startReplication(spy); + closeShards(indexShard); + } + + public void testTargetThrowsException() throws IOException { + final OpenSearchException expectedError = new OpenSearchException("Fail"); + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + replicationSource, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + Assert.fail(); + } + + @Override + public void onReplicationFailure(SegmentReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertEquals(SegmentReplicationState.Stage.INIT, state.getStage()); + assertEquals(expectedError, e.getCause()); + assertTrue(sendShardFailure); + } + } + ); + final SegmentReplicationTarget spy = Mockito.spy(target); + doAnswer(invocation -> { + final ActionListener listener = invocation.getArgument(0); + listener.onFailure(expectedError); + return null; + }).when(spy).startReplication(any()); + sut.startReplication(spy); + closeShards(indexShard); + } + + public void 
testBeforeIndexShardClosed_CancelsOngoingReplications() throws IOException { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + checkpoint, + indexShard, + replicationSource, + mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + ); + final SegmentReplicationTarget spy = Mockito.spy(target); + sut.startReplication(spy); + sut.beforeIndexShardClosed(indexShard.shardId(), indexShard, Settings.EMPTY); + Mockito.verify(spy, times(1)).cancel(any()); + closeShards(indexShard); + } +} From 1e99e0386ebfe4ae5e2277ec80dc60b3fc4d310d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 May 2022 11:03:41 -0500 Subject: [PATCH 36/75] Bump azure-core-http-netty from 1.11.9 to 1.12.0 in /plugins/repository-azure (#3474) Bumps [azure-core-http-netty](https://github.com/Azure/azure-sdk-for-java) from 1.11.9 to 1.12.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-core-http-netty_1.11.9...azure-core_1.12.0) --- updated-dependencies: - dependency-name: com.azure:azure-core-http-netty dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-core-http-netty-1.11.9.jar.sha1 | 1 - .../licenses/azure-core-http-netty-1.12.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index eb5fc1650a1b4..dd2ad78ebed04 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -46,7 +46,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.27.0' api 'com.azure:azure-storage-common:12.15.0' - api 'com.azure:azure-core-http-netty:1.11.9' + api 'com.azure:azure-core-http-netty:1.12.0' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" api "io.netty:netty-codec-http2:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 deleted file mode 100644 index 936a02dfba4d7..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-http-netty-1.11.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d1f34b3e60db038f3913007a2706a820383dc26 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 new file mode 100644 index 0000000000000..1b5d162c004de --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-http-netty-1.12.0.jar.sha1 @@ -0,0 +1 @@ +e4381e4e2801ee190ae76b61dbd992e94b40272e \ No newline at end of file From 5cefd89651cd982800e38b76add9945f55bcb50c Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 30 May 2022 14:25:49 -0400 Subject: [PATCH 37/75] Update to Apache Lucene 9.2 (#3477) 
Signed-off-by: Andriy Redko --- buildSrc/version.properties | 2 +- .../licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 | 1 + .../lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 | 1 + .../lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 | 1 + .../lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-stempel-9.2.0.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - .../licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 | 1 + .../lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-analysis-common-9.2.0.jar.sha1 | 1 + .../lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 | 1 + server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-core-9.2.0.jar.sha1 | 1 + server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-grouping-9.2.0.jar.sha1 | 1 + .../licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-highlighter-9.2.0.jar.sha1 | 1 + server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-join-9.2.0.jar.sha1 | 1 + server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-memory-9.2.0.jar.sha1 | 1 + 
server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-misc-9.2.0.jar.sha1 | 1 + server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-queries-9.2.0.jar.sha1 | 1 + .../licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-queryparser-9.2.0.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.2.0.jar.sha1 | 1 + .../lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-spatial3d-9.2.0.jar.sha1 | 1 + server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 - server/licenses/lucene-suggest-9.2.0.jar.sha1 | 1 + 45 files changed, 23 insertions(+), 23 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 
100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-core-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-join-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.2.0.jar.sha1 delete 
mode 100644 server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.2.0.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.2.0.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 7a8a9531ebda8..625c540737065 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.2.0-snapshot-ba8c3a8 +lucene = 9.2.0 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 17c82a8e9df7d..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe5e4cf94d26bbe1d982808f34fa132bba5565e3 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..50ceb1672cc45 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 @@ -0,0 +1 @@ +12e8ba1ca93695819d0251a16584880deac58ae0 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 1c9c809722104..0000000000000 --- 
a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9fc73c790c037e817635fcc30ea0891e6acd2fac \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..ae82ce9134db8 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 @@ -0,0 +1 @@ +832f62c39c8c2a77097e2d2d4438bd1642f11f29 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 0538fc53b8a60..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -058ffd84388f9ffcf0bfdd7f43a6e832836a2927 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..97615d33b942e --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 @@ -0,0 +1 @@ +f3314a95b461d30e048a932f81ff3c5808dd145f \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b06795ab2c8a1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb166f35ba04a7687b3073afb9972f6669ac722e \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 new file 
mode 100644 index 0000000000000..38f65996f2395 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 @@ -0,0 +1 @@ +e4fd55524bf85aa7d1ec86f8680faa7b07d95fb4 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index daaa895551c70..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -abf9eb24601ec11ce5b61e4753b41444a869b29d \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..4a0a4a561bb44 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 @@ -0,0 +1 @@ +85c59dcdd7ac761b7f384475aa687a0ae0afaab2 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 4bd203700bf5e..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b310130fe9e0f31ce4218cda921309b1143f3541 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..e5fb4a89d6fc3 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 @@ -0,0 +1 @@ +8bfdb8ff2824a585be6d91d80a52a6d4d15c35e8 \ No newline at end of file diff --git 
a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index ac27d25f7a100..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad8783255cdcb6e7ab23a505123716ad979f3484 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..de10c0dfc9ef6 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 @@ -0,0 +1 @@ +a050c43f529572590d8dd5a5bc9f7b64119795b4 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b8abf33514782..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -75f8fbb94a303d04c5dc2b25436300a463003dd6 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..8e2b8f32c035a --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 @@ -0,0 +1 @@ +d2a148922ee01da3f653e931cb572d6dfec1ba3b \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 3384d5fc221e2..0000000000000 --- a/server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 
+++ /dev/null @@ -1 +0,0 @@ -b2aa0739c95f1f715f407087dbcf96c5c21f4cc7 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..316a74de5f2d8 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 @@ -0,0 +1 @@ +da636dedae3155ef186b5eaa543093da069ddab1 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b0304427bafd7..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4a2f89c03e98e0fc211bba2c090047a007eb442 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..991b99eadd4c3 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 @@ -0,0 +1 @@ +97f362ff458b03850b3e0fb45a6cc2773ddbfbfa \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index d9d21a557db60..0000000000000 --- a/server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -06df30c240dfb970002c104d44370ae58b7cb60a \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0.jar.sha1 b/server/licenses/lucene-core-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..7f05fa3210bf3 --- /dev/null +++ b/server/licenses/lucene-core-9.2.0.jar.sha1 @@ -0,0 +1 @@ +da43e5472e43db68b8c74f05e63d900ecedc1631 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 
index 26260af3f5c20..0000000000000 --- a/server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1991e0f0f71c3c99ba726fcfa372f7ba7c75bcf0 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0.jar.sha1 b/server/licenses/lucene-grouping-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..06e446118ebfc --- /dev/null +++ b/server/licenses/lucene-grouping-9.2.0.jar.sha1 @@ -0,0 +1 @@ +b1ea8b82a036cbff93a9c849cbf574c6730a7b13 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 7010bcbd1a3c6..0000000000000 --- a/server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cac793b5cfbccf5c310d51bc78cf97ce3befceac \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..0729c42c4d129 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.2.0.jar.sha1 @@ -0,0 +1 @@ +c447cad35d879bd656f8a0aeb3114c08e25ca1b1 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 2af846e454951..0000000000000 --- a/server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -79344146c032fda532def9771de589c4798117e5 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0.jar.sha1 b/server/licenses/lucene-join-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..b401ef5c0d88c --- /dev/null +++ b/server/licenses/lucene-join-9.2.0.jar.sha1 @@ -0,0 +1 @@ +4652557ef1d68b0046f0bb28762ede953f6367ef \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 
b/server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 579b1eaadf13f..0000000000000 --- a/server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1d5f1c88786bcdfc50466f963ef07cbd9c6c7827 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0.jar.sha1 b/server/licenses/lucene-memory-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..dd9a95000a6cd --- /dev/null +++ b/server/licenses/lucene-memory-9.2.0.jar.sha1 @@ -0,0 +1 @@ +6c9aa37760c11c033f154170c15c2b1961b7a886 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index b5bea36607367..0000000000000 --- a/server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -adba90f90cf6815eeb9009c1a42d7c86f916d9da \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0.jar.sha1 b/server/licenses/lucene-misc-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..df82a6bd926c4 --- /dev/null +++ b/server/licenses/lucene-misc-9.2.0.jar.sha1 @@ -0,0 +1 @@ +c51ef9a5894dfb4548bbf80d1a271cfe8e86cbf6 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 970dee25c8a9b..0000000000000 --- a/server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d0f38091eee45a118173c9201677ebafa9ed9e89 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0.jar.sha1 b/server/licenses/lucene-queries-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..bdd9109cbd324 --- /dev/null +++ b/server/licenses/lucene-queries-9.2.0.jar.sha1 @@ -0,0 +1 @@ +fcb32402e0cba93454675cb631d59264968b32a4 \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index a8372fad8c3b4..0000000000000 --- a/server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05e2ca9fc81e8b73f746c5ec40321d6d90e3bcdd \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.2.0.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..17ff055324cc2 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.2.0.jar.sha1 @@ -0,0 +1 @@ +7bbcadf643c6bed8a15d789c71cd89a8c9dddf31 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 6199acd87d7c3..0000000000000 --- a/server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4ebbf7fd05e2889624b4dd9afb3f7b22aad94f3 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..026075cb7165f --- /dev/null +++ b/server/licenses/lucene-sandbox-9.2.0.jar.sha1 @@ -0,0 +1 @@ +aedb9a641278845f81cb004d6bc557eb43f69a57 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 0a6932502bced..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -08836d9dee5a2e9e92b538023285de3d620abd4b \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..e01ea9ef7c16f --- /dev/null +++ 
b/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 @@ -0,0 +1 @@ +47e15ef3815554c73cff7163c70115ea1f18818f \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index e587e445c7770..0000000000000 --- a/server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d492d0c7b4bb76c3de7cfc1b4fe224ef9e9e7056 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..dd06925902b0b --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 @@ -0,0 +1 @@ +22308d4eaab8bf8a2b16cfc9eff97bfc2fb5a508 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 b/server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 deleted file mode 100644 index 5b722bf4274d1..0000000000000 --- a/server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71b5b0cfb5b5809c4a86e947b1f4d9202d6f1b75 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.2.0.jar.sha1 b/server/licenses/lucene-suggest-9.2.0.jar.sha1 new file mode 100644 index 0000000000000..1d53225793a33 --- /dev/null +++ b/server/licenses/lucene-suggest-9.2.0.jar.sha1 @@ -0,0 +1 @@ +608e3851216dc1d8d85f9389c71241f2b395f1ea \ No newline at end of file From 072b7591ad8e6c0def2b84661b066c09f2a6c313 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 May 2022 21:30:08 -0700 Subject: [PATCH 38/75] Bump protobuf-java from 3.20.1 to 3.21.1 in /plugins/repository-hdfs (#3472) Signed-off-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 | 1 - 
plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 41c38b0b4e558..15980abcb16e3 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -67,7 +67,7 @@ dependencies { api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.20.1' + api 'com.google.protobuf:protobuf-java:3.21.1' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 deleted file mode 100644 index 1ebc9838b7bea..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.20.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5472700cd39a46060efbd35e29cb36b3fb89517b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 new file mode 100644 index 0000000000000..2336816611bfe --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.1.jar.sha1 @@ -0,0 +1 @@ +2e396173a5b6ab549d790eba21c1d125bfe92912 \ No newline at end of file From a061df61b3352ffc923dee400f00eb031797d7e6 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 31 May 2022 10:57:10 -0500 Subject: [PATCH 39/75] [Upgrade] Lucene-9.3.0-snapshot-823df23 (#3478) Upgrades to latest snapshot of lucene 9.3.0. 
Signed-off-by: Nicholas Walter Knize --- buildSrc/version.properties | 2 +- .../lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 | 1 - .../licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 | 1 - .../lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 | 1 - .../lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-stempel-9.2.0.jar.sha1 | 1 - .../lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 | 1 + .../licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-analysis-common-9.2.0.jar.sha1 | 1 - .../lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 | 1 - .../lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-core-9.2.0.jar.sha1 | 1 - server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-grouping-9.2.0.jar.sha1 | 1 - server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-highlighter-9.2.0.jar.sha1 | 1 - .../licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-join-9.2.0.jar.sha1 | 1 - server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-memory-9.2.0.jar.sha1 | 1 - server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-misc-9.2.0.jar.sha1 | 
1 - server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-queries-9.2.0.jar.sha1 | 1 - server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-queryparser-9.2.0.jar.sha1 | 1 - .../licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.2.0.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 | 1 - .../lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-spatial3d-9.2.0.jar.sha1 | 1 - .../licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/licenses/lucene-suggest-9.2.0.jar.sha1 | 1 - server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 | 1 + server/src/main/java/org/opensearch/Version.java | 2 +- 46 files changed, 24 insertions(+), 24 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 create mode 100644 
plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.2.0.jar.sha1 create mode 100644 
server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.2.0.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 625c540737065..fe2cfe6a63ee6 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.2.0 +lucene = 9.3.0-snapshot-823df23 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 deleted file mode 100644 index 50ceb1672cc45..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -12e8ba1ca93695819d0251a16584880deac58ae0 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..540a48bf7415f --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +610ec9bb8001a2d2ea88e3384eb516017504139e \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 deleted file mode 100644 index 
ae82ce9134db8..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -832f62c39c8c2a77097e2d2d4438bd1642f11f29 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..7bc128d4562fa --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +43f2ea45a2d12b4c75c7ac11b85ec736c73bc07f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 deleted file mode 100644 index 97615d33b942e..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f3314a95b461d30e048a932f81ff3c5808dd145f \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..bad2a0bdcfa2a --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +fb46807684a5b0e28a02b2a1ea3d528e4c25aa05 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 deleted file mode 100644 index 38f65996f2395..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e4fd55524bf85aa7d1ec86f8680faa7b07d95fb4 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 
b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..b2c62bcbbade1 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +be94b15085b6390ed64a8e8a4f5afbcb2d4d5181 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 deleted file mode 100644 index 4a0a4a561bb44..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85c59dcdd7ac761b7f384475aa687a0ae0afaab2 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..c7f8fd797c589 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +3a6f705a7df2007f5583215420da0725f844ac4f \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 deleted file mode 100644 index e5fb4a89d6fc3..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bfdb8ff2824a585be6d91d80a52a6d4d15c35e8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..28424c2dd1c7a --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +ea9931a34288fa6cbd894e244a101e86926ebfb8 
\ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 deleted file mode 100644 index de10c0dfc9ef6..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a050c43f529572590d8dd5a5bc9f7b64119795b4 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..d7c4b20a29db2 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +c339ce0a3b02d92a804081f5ff44b99f7a468caf \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 deleted file mode 100644 index 8e2b8f32c035a..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d2a148922ee01da3f653e931cb572d6dfec1ba3b \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..b4a9090408165 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +a8faa5faa38ab8f545e12cf3dd914e934a2f2bfe \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 b/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 deleted file mode 100644 index 316a74de5f2d8..0000000000000 --- a/server/licenses/lucene-analysis-common-9.2.0.jar.sha1 +++ 
/dev/null @@ -1 +0,0 @@ -da636dedae3155ef186b5eaa543093da069ddab1 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..ab4abfd7d6a49 --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +8dbb5828e79780989a8758b7cbb5a1aacac0004f \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 b/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 deleted file mode 100644 index 991b99eadd4c3..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -97f362ff458b03850b3e0fb45a6cc2773ddbfbfa \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..8ff6a25c9547e --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +68ebd183f1e9edde9f2f37c60f784e4f03555eec \ No newline at end of file diff --git a/server/licenses/lucene-core-9.2.0.jar.sha1 b/server/licenses/lucene-core-9.2.0.jar.sha1 deleted file mode 100644 index 7f05fa3210bf3..0000000000000 --- a/server/licenses/lucene-core-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da43e5472e43db68b8c74f05e63d900ecedc1631 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..2ec15eb0012c5 --- /dev/null +++ b/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +ea3cb640597d93168765174207542c6765c1fe15 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.2.0.jar.sha1 b/server/licenses/lucene-grouping-9.2.0.jar.sha1 deleted file 
mode 100644 index 06e446118ebfc..0000000000000 --- a/server/licenses/lucene-grouping-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b1ea8b82a036cbff93a9c849cbf574c6730a7b13 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..7b6c561ddeedf --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +ab2bcdbade5976e127c7e9393bf7a7e25a957d9a \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.2.0.jar.sha1 b/server/licenses/lucene-highlighter-9.2.0.jar.sha1 deleted file mode 100644 index 0729c42c4d129..0000000000000 --- a/server/licenses/lucene-highlighter-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c447cad35d879bd656f8a0aeb3114c08e25ca1b1 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..b2aa53fcdfb83 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +31ce6ff9188dea49dc4b4d082b498332cc7b86e7 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.2.0.jar.sha1 b/server/licenses/lucene-join-9.2.0.jar.sha1 deleted file mode 100644 index b401ef5c0d88c..0000000000000 --- a/server/licenses/lucene-join-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4652557ef1d68b0046f0bb28762ede953f6367ef \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..7918597d46763 --- /dev/null +++ b/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +c387884f0bc00fb1c064754a69e1e81dff12c755 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.2.0.jar.sha1 
b/server/licenses/lucene-memory-9.2.0.jar.sha1 deleted file mode 100644 index dd9a95000a6cd..0000000000000 --- a/server/licenses/lucene-memory-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c9aa37760c11c033f154170c15c2b1961b7a886 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..a87d3de9e2310 --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +e278a2cfe1500b76da770aa29ecd487fea5f8dc3 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.2.0.jar.sha1 b/server/licenses/lucene-misc-9.2.0.jar.sha1 deleted file mode 100644 index df82a6bd926c4..0000000000000 --- a/server/licenses/lucene-misc-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c51ef9a5894dfb4548bbf80d1a271cfe8e86cbf6 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..18a165097d2be --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +77933cdffbcd0f56888a50fd1d9fb39cf6148f1a \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.2.0.jar.sha1 b/server/licenses/lucene-queries-9.2.0.jar.sha1 deleted file mode 100644 index bdd9109cbd324..0000000000000 --- a/server/licenses/lucene-queries-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fcb32402e0cba93454675cb631d59264968b32a4 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..4d148f3a840c8 --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +8d521efa3a111e2feab1a7f07a0cc944bbdcddf4 \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-9.2.0.jar.sha1 b/server/licenses/lucene-queryparser-9.2.0.jar.sha1 deleted file mode 100644 index 17ff055324cc2..0000000000000 --- a/server/licenses/lucene-queryparser-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7bbcadf643c6bed8a15d789c71cd89a8c9dddf31 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..c6e913767696a --- /dev/null +++ b/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +30d6f8f757a007248804ed5db624a125ada24154 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.2.0.jar.sha1 b/server/licenses/lucene-sandbox-9.2.0.jar.sha1 deleted file mode 100644 index 026075cb7165f..0000000000000 --- a/server/licenses/lucene-sandbox-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -aedb9a641278845f81cb004d6bc557eb43f69a57 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..22b7769ee3b4d --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +8dd68761fade2dc4d2ea0d9d476a5172cfd22cd2 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 b/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 deleted file mode 100644 index e01ea9ef7c16f..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -47e15ef3815554c73cff7163c70115ea1f18818f \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..22d9211a3b623 --- /dev/null +++ 
b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +044ac03b461aaae4568f64948f783e87dae85a8b \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 b/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 deleted file mode 100644 index dd06925902b0b..0000000000000 --- a/server/licenses/lucene-spatial3d-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -22308d4eaab8bf8a2b16cfc9eff97bfc2fb5a508 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..66998393ed970 --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +53a02ec5b0eabe7fdf97fea1b19eeca5a6cf1122 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.2.0.jar.sha1 b/server/licenses/lucene-suggest-9.2.0.jar.sha1 deleted file mode 100644 index 1d53225793a33..0000000000000 --- a/server/licenses/lucene-suggest-9.2.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -608e3851216dc1d8d85f9389c71241f2b395f1ea \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 new file mode 100644 index 0000000000000..e5aca63b21732 --- /dev/null +++ b/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 @@ -0,0 +1 @@ +a57b91ee1c6f3f666dcac697ce6a7de9bd5abba7 \ No newline at end of file diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index e309af54eac6e..a69c1f3c3bcb1 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -89,7 +89,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static 
final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); - public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; public static Version readVersion(StreamInput in) throws IOException { From f080f260297c53c35617602c83cbbbb7e11296de Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Thu, 2 Jun 2022 09:29:32 -0700 Subject: [PATCH 40/75] Filter out invalid URI and HTTP method in the error message of no handler found for a REST request (#3459) Filter out invalid URI and HTTP method of a error message, which shown when there is no handler found for a REST request sent by user, so that HTML special characters <>&"' will not shown in the error message. The error message is return as mine-type `application/json`, which can't contain active (script) content, so it's not a vulnerability. Besides, no browsers are going to render as html when the mine-type is that. While the common security scanners will raise a false-positive alarm for having HTML tags in the response without escaping the HTML special characters, so the solution only aims to satisfy the code security scanners. 
Signed-off-by: Tianli Feng --- .../java/org/opensearch/rest/RestController.java | 14 ++++++++++++-- .../org/opensearch/rest/RestControllerTests.java | 13 +++++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/rest/RestController.java b/server/src/main/java/org/opensearch/rest/RestController.java index b576f8b83e5a0..78bebcb9a0af1 100644 --- a/server/src/main/java/org/opensearch/rest/RestController.java +++ b/server/src/main/java/org/opensearch/rest/RestController.java @@ -56,6 +56,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; +import java.net.URI; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -447,7 +448,9 @@ private void handleUnsupportedHttpMethod( msg.append("Incorrect HTTP method for uri [").append(uri); msg.append("] and method [").append(method).append("]"); } else { - msg.append(exception.getMessage()); + // Not using the error message directly from 'exception.getMessage()' to avoid unescaped HTML special characters, + // in case false-positive cross site scripting vulnerability is detected by common security scanners. + msg.append("Unexpected HTTP method"); } if (validMethodSet.isEmpty() == false) { msg.append(", allowed: ").append(validMethodSet); @@ -488,7 +491,14 @@ private void handleBadRequest(String uri, RestRequest.Method method, RestChannel try (XContentBuilder builder = channel.newErrorBuilder()) { builder.startObject(); { - builder.field("error", "no handler found for uri [" + uri + "] and method [" + method + "]"); + try { + // Validate input URI to filter out HTML special characters in the error message, + // in case false-positive cross site scripting vulnerability is detected by common security scanners. 
+ uri = new URI(uri).getPath(); + builder.field("error", "no handler found for uri [" + uri + "] and method [" + method + "]"); + } catch (Exception e) { + builder.field("error", "invalid uri has been requested"); + } } builder.endObject(); channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder)); diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index 6004613c0ed17..bd4c7c9a4f824 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -553,6 +553,15 @@ public void testFaviconWithWrongHttpMethod() { assertThat(channel.getRestResponse().getHeaders().get("Allow"), hasItem(equalTo(RestRequest.Method.GET.toString()))); } + public void testHandleBadRequestWithHtmlSpecialCharsInUri() { + final FakeRestRequest fakeRestRequest = new FakeRestRequest.Builder(NamedXContentRegistry.EMPTY).withPath( + "/" + ).build(); + final AssertingChannel channel = new AssertingChannel(fakeRestRequest, true, RestStatus.BAD_REQUEST); + restController.dispatchRequest(fakeRestRequest, channel, client.threadPool().getThreadContext()); + assertThat(channel.getRestResponse().content().utf8ToString(), containsString("invalid uri has been requested")); + } + public void testDispatchUnsupportedHttpMethod() { final boolean hasContent = randomBoolean(); final RestRequest request = RestRequest.request(xContentRegistry(), new HttpRequest() { @@ -623,6 +632,10 @@ public Exception getInboundException() { assertTrue(channel.getSendResponseCalled()); assertThat(channel.getRestResponse().getHeaders().containsKey("Allow"), equalTo(true)); assertThat(channel.getRestResponse().getHeaders().get("Allow"), hasItem(equalTo(RestRequest.Method.GET.toString()))); + assertThat( + channel.getRestResponse().content().utf8ToString(), + equalTo("{\"error\":\"Unexpected HTTP method, allowed: [GET]\",\"status\":405}") + ); } 
private static final class TestHttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport { From 9de83e7ca62b6fc36561252b670c3e671e64a1d4 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Thu, 2 Jun 2022 12:56:18 -0400 Subject: [PATCH 41/75] Support use of IRSA for repository-s3 plugin credentials (#3475) * Support use of IRSA for repository-s3 plugin credentials Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko * Address code review comments Signed-off-by: Andriy Redko --- plugins/repository-s3/build.gradle | 1 + .../aws-java-sdk-sts-1.11.749.jar.sha1 | 1 + .../repositories/s3/AmazonS3Reference.java | 26 +++- .../s3/AmazonS3WithCredentials.java | 39 ++++++ .../repositories/s3/S3ClientSettings.java | 94 ++++++++++++- .../repositories/s3/S3RepositoryPlugin.java | 5 +- .../opensearch/repositories/s3/S3Service.java | 125 ++++++++++++++++-- .../s3/AwsS3ServiceImplTests.java | 105 +++++++++++++++ .../s3/RepositoryCredentialsTests.java | 7 +- .../s3/S3ClientSettingsTests.java | 49 ++++++- .../repositories/s3/S3ServiceTests.java | 29 +++- 11 files changed, 464 insertions(+), 17 deletions(-) create mode 100644 plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 create mode 100644 plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 33448b0039ce2..54a2593f4c6f4 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -51,6 +51,7 @@ versions << [ dependencies { api "com.amazonaws:aws-java-sdk-s3:${versions.aws}" api "com.amazonaws:aws-java-sdk-core:${versions.aws}" + api "com.amazonaws:aws-java-sdk-sts:${versions.aws}" api "com.amazonaws:jmespath-java:${versions.aws}" api "org.apache.httpcomponents:httpclient:${versions.httpclient}" api "org.apache.httpcomponents:httpcore:${versions.httpcore}" diff --git 
a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 new file mode 100644 index 0000000000000..29c9a93542058 --- /dev/null +++ b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 @@ -0,0 +1 @@ +724bd22c0ff41c496469e18f9bea12bdfb2f7540 \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java index 62e415705a011..6f14cd850ccf6 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3Reference.java @@ -32,17 +32,39 @@ package org.opensearch.repositories.s3; +import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3Client; + +import org.opensearch.common.Nullable; import org.opensearch.common.concurrent.RefCountedReleasable; +import java.io.Closeable; +import java.io.IOException; + /** * Handles the shutdown of the wrapped {@link AmazonS3Client} using reference * counting. 
*/ public class AmazonS3Reference extends RefCountedReleasable { - AmazonS3Reference(AmazonS3 client) { - super("AWS_S3_CLIENT", client, client::shutdown); + this(client, null); + } + + AmazonS3Reference(AmazonS3WithCredentials client) { + this(client.client(), client.credentials()); + } + + AmazonS3Reference(AmazonS3 client, @Nullable AWSCredentialsProvider credentials) { + super("AWS_S3_CLIENT", client, () -> { + client.shutdown(); + if (credentials instanceof Closeable) { + try { + ((Closeable) credentials).close(); + } catch (IOException e) { + /* Do nothing here */ + } + } + }); } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java new file mode 100644 index 0000000000000..5622be5546cb1 --- /dev/null +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/AmazonS3WithCredentials.java @@ -0,0 +1,39 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.repositories.s3; + +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.services.s3.AmazonS3; + +import org.opensearch.common.Nullable; + +/** + * The holder of the AmazonS3 and AWSCredentialsProvider + */ +final class AmazonS3WithCredentials { + private final AmazonS3 client; + private final AWSCredentialsProvider credentials; + + private AmazonS3WithCredentials(final AmazonS3 client, @Nullable final AWSCredentialsProvider credentials) { + this.client = client; + this.credentials = credentials; + } + + AmazonS3 client() { + return client; + } + + AWSCredentialsProvider credentials() { + return credentials; + } + + static AmazonS3WithCredentials create(final AmazonS3 client, @Nullable final AWSCredentialsProvider credentials) { + return new AmazonS3WithCredentials(client, credentials); + } +} diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java index e02c7cae89378..1f9af5314f30d 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3ClientSettings.java @@ -67,6 +67,29 @@ final class S3ClientSettings { /** Placeholder client name for normalizing client settings in the repository settings. */ private static final String PLACEHOLDER_CLIENT = "placeholder"; + // Properties to support using IAM Roles for Service Accounts (IRSA) + + /** The identity token file for connecting to s3. */ + static final Setting.AffixSetting IDENTITY_TOKEN_FILE_SETTING = Setting.affixKeySetting( + PREFIX, + "identity_token_file", + key -> SecureSetting.simpleString(key, Property.NodeScope) + ); + + /** The role ARN (Amazon Resource Name) for connecting to s3. 
*/ + static final Setting.AffixSetting ROLE_ARN_SETTING = Setting.affixKeySetting( + PREFIX, + "role_arn", + key -> SecureSetting.secureString(key, null) + ); + + /** The role session name for connecting to s3. */ + static final Setting.AffixSetting ROLE_SESSION_NAME_SETTING = Setting.affixKeySetting( + PREFIX, + "role_session_name", + key -> SecureSetting.secureString(key, null) + ); + /** The access key (ie login id) for connecting to s3. */ static final Setting.AffixSetting ACCESS_KEY_SETTING = Setting.affixKeySetting( PREFIX, @@ -189,6 +212,9 @@ final class S3ClientSettings { /** Credentials to authenticate with s3. */ final S3BasicCredentials credentials; + /** Credentials to authenticate with s3 using IAM Roles for Service Accounts (IRSA). */ + final IrsaCredentials irsaCredentials; + /** The s3 endpoint the client should talk to, or empty string to use the default. */ final String endpoint; @@ -221,6 +247,7 @@ final class S3ClientSettings { private S3ClientSettings( S3BasicCredentials credentials, + IrsaCredentials irsaCredentials, String endpoint, Protocol protocol, int readTimeoutMillis, @@ -233,6 +260,7 @@ private S3ClientSettings( ProxySettings proxySettings ) { this.credentials = credentials; + this.irsaCredentials = irsaCredentials; this.endpoint = endpoint; this.protocol = protocol; this.readTimeoutMillis = readTimeoutMillis; @@ -301,6 +329,7 @@ S3ClientSettings refine(Settings repositorySettings) { validateInetAddressFor(newProxyHost); return new S3ClientSettings( newCredentials, + irsaCredentials, newEndpoint, newProtocol, newReadTimeoutMillis, @@ -396,12 +425,27 @@ private static S3BasicCredentials loadCredentials(Settings settings, String clie } } + private static IrsaCredentials loadIrsaCredentials(Settings settings, String clientName) { + String identityTokenFile = getConfigValue(settings, clientName, IDENTITY_TOKEN_FILE_SETTING); + try ( + SecureString roleArn = getConfigValue(settings, clientName, ROLE_ARN_SETTING); + SecureString 
roleSessionName = getConfigValue(settings, clientName, ROLE_SESSION_NAME_SETTING) + ) { + if (identityTokenFile.length() != 0 || roleArn.length() != 0 || roleSessionName.length() != 0) { + return new IrsaCredentials(identityTokenFile.toString(), roleArn.toString(), roleSessionName.toString()); + } + + return null; + } + } + // pkg private for tests /** Parse settings for a single client. */ static S3ClientSettings getClientSettings(final Settings settings, final String clientName) { final Protocol awsProtocol = getConfigValue(settings, clientName, PROTOCOL_SETTING); return new S3ClientSettings( S3ClientSettings.loadCredentials(settings, clientName), + S3ClientSettings.loadIrsaCredentials(settings, clientName), getConfigValue(settings, clientName, ENDPOINT_SETTING), awsProtocol, Math.toIntExact(getConfigValue(settings, clientName, READ_TIMEOUT_SETTING).millis()), @@ -482,7 +526,8 @@ public boolean equals(final Object o) { && proxySettings.equals(that.proxySettings) && Objects.equals(disableChunkedEncoding, that.disableChunkedEncoding) && Objects.equals(region, that.region) - && Objects.equals(signerOverride, that.signerOverride); + && Objects.equals(signerOverride, that.signerOverride) + && Objects.equals(irsaCredentials, that.irsaCredentials); } @Override @@ -512,4 +557,51 @@ private static T getRepoSettingOrDefault(Setting.AffixSetting setting, Se } return defaultValue; } + + /** + * Class to store IAM Roles for Service Accounts (IRSA) credentials + * See please: https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + */ + static class IrsaCredentials { + private final String identityTokenFile; + private final String roleArn; + private final String roleSessionName; + + IrsaCredentials(String identityTokenFile, String roleArn, String roleSessionName) { + this.identityTokenFile = Strings.isNullOrEmpty(identityTokenFile) ? null : identityTokenFile; + this.roleArn = Strings.isNullOrEmpty(roleArn) ? 
null : roleArn; + this.roleSessionName = Strings.isNullOrEmpty(roleSessionName) ? "s3-sdk-java-" + System.currentTimeMillis() : roleSessionName; + } + + public String getIdentityTokenFile() { + return identityTokenFile; + } + + public String getRoleArn() { + return roleArn; + } + + public String getRoleSessionName() { + return roleSessionName; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final IrsaCredentials that = (IrsaCredentials) o; + return Objects.equals(identityTokenFile, that.identityTokenFile) + && Objects.equals(roleArn, that.roleArn) + && Objects.equals(roleSessionName, that.roleSessionName); + } + + @Override + public int hashCode() { + return Objects.hash(identityTokenFile, roleArn, roleSessionName); + } + } } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java index 679243b28cfc7..e1ea31dc53d1e 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RepositoryPlugin.java @@ -132,7 +132,10 @@ public List> getSettings() { S3Repository.ACCESS_KEY_SETTING, S3Repository.SECRET_KEY_SETTING, S3ClientSettings.SIGNER_OVERRIDE, - S3ClientSettings.REGION + S3ClientSettings.REGION, + S3ClientSettings.ROLE_ARN_SETTING, + S3ClientSettings.IDENTITY_TOKEN_FILE_SETTING, + S3ClientSettings.ROLE_SESSION_NAME_SETTING ); } diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 3ce19378ac05c..6919549874445 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ 
b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -35,8 +35,11 @@ import com.amazonaws.ClientConfiguration; import com.amazonaws.auth.AWSCredentials; import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSSessionCredentialsProvider; import com.amazonaws.auth.AWSStaticCredentialsProvider; import com.amazonaws.auth.EC2ContainerCredentialsProviderWrapper; +import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider; +import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.http.SystemPropertyTlsKeyManagersProvider; @@ -45,6 +48,8 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import com.amazonaws.services.s3.internal.Constants; +import com.amazonaws.services.securitytoken.AWSSecurityTokenService; +import com.amazonaws.services.securitytoken.AWSSecurityTokenServiceClientBuilder; import org.apache.http.conn.ssl.DefaultHostnameVerifier; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; @@ -52,9 +57,11 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.settings.Settings; +import org.opensearch.repositories.s3.S3ClientSettings.IrsaCredentials; import javax.net.ssl.SSLContext; import java.io.Closeable; @@ -67,6 +74,9 @@ import java.security.SecureRandom; import java.util.Map; +import static com.amazonaws.SDKGlobalConfiguration.AWS_ROLE_ARN_ENV_VAR; +import static com.amazonaws.SDKGlobalConfiguration.AWS_ROLE_SESSION_NAME_ENV_VAR; +import static com.amazonaws.SDKGlobalConfiguration.AWS_WEB_IDENTITY_ENV_VAR; import static java.util.Collections.emptyMap; 
class S3Service implements Closeable { @@ -163,9 +173,11 @@ S3ClientSettings settings(RepositoryMetadata repositoryMetadata) { } // proxy for testing - AmazonS3 buildClient(final S3ClientSettings clientSettings) { + AmazonS3WithCredentials buildClient(final S3ClientSettings clientSettings) { final AmazonS3ClientBuilder builder = AmazonS3ClientBuilder.standard(); - builder.withCredentials(buildCredentials(logger, clientSettings)); + + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + builder.withCredentials(credentials); builder.withClientConfiguration(buildConfiguration(clientSettings)); String endpoint = Strings.hasLength(clientSettings.endpoint) ? clientSettings.endpoint : Constants.S3_HOSTNAME; @@ -192,7 +204,8 @@ AmazonS3 buildClient(final S3ClientSettings clientSettings) { if (clientSettings.disableChunkedEncoding) { builder.disableChunkedEncoding(); } - return SocketAccess.doPrivileged(builder::build); + final AmazonS3 client = SocketAccess.doPrivileged(builder::build); + return AmazonS3WithCredentials.create(client, credentials); } // pkg private for tests @@ -258,24 +271,83 @@ public Socket createSocket(final HttpContext ctx) throws IOException { // pkg private for tests static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings clientSettings) { - final S3BasicCredentials credentials = clientSettings.credentials; - if (credentials == null) { + final S3BasicCredentials basicCredentials = clientSettings.credentials; + final IrsaCredentials irsaCredentials = buildFromEnviroment(clientSettings.irsaCredentials); + + // If IAM Roles for Service Accounts (IRSA) credentials are configured, start with them first + if (irsaCredentials != null) { + logger.debug("Using IRSA credentials"); + + AWSSecurityTokenService securityTokenService = null; + final String region = Strings.hasLength(clientSettings.region) ? 
clientSettings.region : null; + if (region != null || basicCredentials != null) { + securityTokenService = SocketAccess.doPrivileged( + () -> AWSSecurityTokenServiceClientBuilder.standard() + .withCredentials((basicCredentials != null) ? new AWSStaticCredentialsProvider(basicCredentials) : null) + .withRegion(region) + .build() + ); + } + + if (irsaCredentials.getIdentityTokenFile() == null) { + return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>( + securityTokenService, + new STSAssumeRoleSessionCredentialsProvider.Builder(irsaCredentials.getRoleArn(), irsaCredentials.getRoleSessionName()) + .withStsClient(securityTokenService) + .build() + ); + } else { + return new PrivilegedSTSAssumeRoleSessionCredentialsProvider<>( + securityTokenService, + new STSAssumeRoleWithWebIdentitySessionCredentialsProvider.Builder( + irsaCredentials.getRoleArn(), + irsaCredentials.getRoleSessionName(), + irsaCredentials.getIdentityTokenFile() + ).withStsClient(securityTokenService).build() + ); + } + } else if (basicCredentials != null) { + logger.debug("Using basic key/secret credentials"); + return new AWSStaticCredentialsProvider(basicCredentials); + } else { logger.debug("Using instance profile credentials"); return new PrivilegedInstanceProfileCredentialsProvider(); - } else { - logger.debug("Using basic key/secret credentials"); - return new AWSStaticCredentialsProvider(credentials); } } + private static IrsaCredentials buildFromEnviroment(IrsaCredentials defaults) { + if (defaults == null) { + return null; + } + + String webIdentityTokenFile = defaults.getIdentityTokenFile(); + if (webIdentityTokenFile == null) { + webIdentityTokenFile = System.getenv(AWS_WEB_IDENTITY_ENV_VAR); + } + + String roleArn = defaults.getRoleArn(); + if (roleArn == null) { + roleArn = System.getenv(AWS_ROLE_ARN_ENV_VAR); + } + + String roleSessionName = defaults.getRoleSessionName(); + if (roleSessionName == null) { + roleSessionName = System.getenv(AWS_ROLE_SESSION_NAME_ENV_VAR); + } + + 
return new IrsaCredentials(webIdentityTokenFile, roleArn, roleSessionName); + } + private synchronized void releaseCachedClients() { // the clients will shutdown when they will not be used anymore for (final AmazonS3Reference clientReference : clientsCache.values()) { clientReference.decRef(); } + // clear previously cached clients, they will be build lazily clientsCache = emptyMap(); derivedClientSettings = emptyMap(); + // shutdown IdleConnectionReaper background thread // it will be restarted on new client usage IdleConnectionReaper.shutdown(); @@ -300,6 +372,43 @@ public void refresh() { } } + static class PrivilegedSTSAssumeRoleSessionCredentialsProvider

+ implements + AWSCredentialsProvider, + Closeable { + private final P credentials; + private final AWSSecurityTokenService securityTokenService; + + private PrivilegedSTSAssumeRoleSessionCredentialsProvider( + @Nullable final AWSSecurityTokenService securityTokenService, + final P credentials + ) { + this.securityTokenService = securityTokenService; + this.credentials = credentials; + } + + @Override + public AWSCredentials getCredentials() { + return SocketAccess.doPrivileged(credentials::getCredentials); + } + + @Override + public void refresh() { + SocketAccess.doPrivilegedVoid(credentials::refresh); + } + + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedIOException(() -> { + credentials.close(); + if (securityTokenService != null) { + securityTokenService.shutdown(); + } + return null; + }); + }; + } + @Override public void close() { releaseCachedClients(); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java index 38d9ebf337731..76bd5d303e5fb 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/AwsS3ServiceImplTests.java @@ -36,11 +36,16 @@ import com.amazonaws.Protocol; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.auth.AWSStaticCredentialsProvider; +import com.amazonaws.http.IdleConnectionReaper; + +import org.junit.AfterClass; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; +import java.io.Closeable; import java.io.IOException; +import java.util.HashMap; import java.util.Locale; import java.util.Map; @@ -51,6 +56,11 @@ import static org.opensearch.repositories.s3.S3ClientSettings.PROXY_TYPE_SETTING; public class 
AwsS3ServiceImplTests extends OpenSearchTestCase { + @AfterClass + public static void shutdownIdleConnectionReaper() { + // created by default STS client + IdleConnectionReaper.shutdown(); + } public void testAWSCredentialsDefaultToInstanceProviders() { final String inexistentClientName = randomAlphaOfLength(8).toLowerCase(Locale.ROOT); @@ -86,6 +96,101 @@ public void testAWSCredentialsFromKeystore() { assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); } + public void testCredentialsAndIrsaWithIdentityTokenFileCredentialsFromKeystore() throws IOException { + final Map plainSettings = new HashMap<>(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".role_arn", clientName + "_role_arn"); + + // Use static AWS credentials for tests + secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key"); + secureSettings.setString("s3.client." + clientName + ".secret_key", clientName + "_aws_secret_key"); + + // Use explicit region setting + plainSettings.put("s3.client." + clientName + ".region", "us-east1"); + plainSettings.put("s3.client." 
+ clientName + ".identity_token_file", clientName + "_identity_token_file"); + } + final Settings settings = Settings.builder().loadFromMap(plainSettings).setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedSTSAssumeRoleSessionCredentialsProvider.class)); + ((Closeable) credentialsProvider).close(); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); + } + + public void testCredentialsAndIrsaCredentialsFromKeystore() throws IOException { + final Map plainSettings = new HashMap<>(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".role_arn", clientName + "_role_arn"); + secureSettings.setString("s3.client." + clientName + ".role_session_name", clientName + "_role_session_name"); + + // Use static AWS credentials for tests + secureSettings.setString("s3.client." + clientName + ".access_key", clientName + "_aws_access_key"); + secureSettings.setString("s3.client." 
+ clientName + ".secret_key", clientName + "_aws_secret_key"); + + // Use explicit region setting + plainSettings.put("s3.client." + clientName + ".region", "us-east1"); + } + final Settings settings = Settings.builder().loadFromMap(plainSettings).setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedSTSAssumeRoleSessionCredentialsProvider.class)); + ((Closeable) credentialsProvider).close(); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); + } + + public void testIrsaCredentialsFromKeystore() throws IOException { + final Map plainSettings = new HashMap<>(); + final MockSecureSettings secureSettings = new MockSecureSettings(); + final String clientNamePrefix = "some_client_name_"; + final int clientsCount = randomIntBetween(0, 4); + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + secureSettings.setString("s3.client." + clientName + ".role_arn", clientName + "_role_arn"); + secureSettings.setString("s3.client." 
+ clientName + ".role_session_name", clientName + "_role_session_name"); + } + final Settings settings = Settings.builder().loadFromMap(plainSettings).setSecureSettings(secureSettings).build(); + final Map allClientsSettings = S3ClientSettings.load(settings); + // no less, no more + assertThat(allClientsSettings.size(), is(clientsCount + 1)); // including default + for (int i = 0; i < clientsCount; i++) { + final String clientName = clientNamePrefix + i; + final S3ClientSettings someClientSettings = allClientsSettings.get(clientName); + final AWSCredentialsProvider credentialsProvider = S3Service.buildCredentials(logger, someClientSettings); + assertThat(credentialsProvider, instanceOf(S3Service.PrivilegedSTSAssumeRoleSessionCredentialsProvider.class)); + ((Closeable) credentialsProvider).close(); + } + // test default exists and is an Instance provider + final S3ClientSettings defaultClientSettings = allClientsSettings.get("default"); + final AWSCredentialsProvider defaultCredentialsProvider = S3Service.buildCredentials(logger, defaultClientSettings); + assertThat(defaultCredentialsProvider, instanceOf(S3Service.PrivilegedInstanceProfileCredentialsProvider.class)); + } + public void testSetDefaultCredential() { final MockSecureSettings secureSettings = new MockSecureSettings(); final String awsAccessKey = randomAlphaOfLength(8); diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java index 9c359d67db88b..a30b36cdd659c 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/RepositoryCredentialsTests.java @@ -317,9 +317,10 @@ public static final class ProxyS3Service extends S3Service { private static final Logger logger = LogManager.getLogger(ProxyS3Service.class); @Override - 
AmazonS3 buildClient(final S3ClientSettings clientSettings) { - final AmazonS3 client = super.buildClient(clientSettings); - return new ClientAndCredentials(client, buildCredentials(logger, clientSettings)); + AmazonS3WithCredentials buildClient(final S3ClientSettings clientSettings) { + final AmazonS3WithCredentials client = super.buildClient(clientSettings); + final AWSCredentialsProvider credentials = buildCredentials(logger, clientSettings); + return AmazonS3WithCredentials.create(new ClientAndCredentials(client.client(), credentials), credentials); } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java index 462ed5377ff9a..a86ed3af17476 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ClientSettingsTests.java @@ -45,6 +45,7 @@ import java.util.Locale; import java.util.Map; +import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.is; @@ -118,6 +119,52 @@ public void testRejectionOfLoneSessionToken() { assertThat(e.getMessage(), is("Missing access key and secret key for s3 client [default]")); } + public void testIrsaCredentialsTypeWithIdentityTokenFile() { + final Map settings = S3ClientSettings.load( + Settings.builder().put("s3.client.default.identity_token_file", "file").build() + ); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getIdentityTokenFile(), is("file")); + assertThat(credentials.getRoleArn(), is(nullValue())); + assertThat(credentials.getRoleSessionName(), startsWith("s3-sdk-java-")); + } + + public void 
testIrsaCredentialsTypeRoleArn() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getRoleArn(), is("role")); + assertThat(credentials.getRoleSessionName(), startsWith("s3-sdk-java-")); + } + + public void testIrsaCredentialsTypeWithRoleArnAndRoleSessionName() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + secureSettings.setString("s3.client.default.role_session_name", "session"); + final Map settings = S3ClientSettings.load(Settings.builder().setSecureSettings(secureSettings).build()); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getRoleArn(), is("role")); + assertThat(credentials.getRoleSessionName(), is("session")); + } + + public void testIrsaCredentialsTypeWithRoleArnAndRoleSessionNameAndIdentityTokeFile() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + secureSettings.setString("s3.client.default.role_session_name", "session"); + final Map settings = S3ClientSettings.load( + Settings.builder().setSecureSettings(secureSettings).put("s3.client.default.identity_token_file", "file").build() + ); + final S3ClientSettings defaultSettings = settings.get("default"); + final S3ClientSettings.IrsaCredentials credentials = defaultSettings.irsaCredentials; + assertThat(credentials.getIdentityTokenFile(), is("file")); + assertThat(credentials.getRoleArn(), is("role")); + 
assertThat(credentials.getRoleSessionName(), is("session")); + } + public void testCredentialsTypeWithAccessKeyAndSecretKey() { final MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("s3.client.default.access_key", "access_key"); @@ -199,7 +246,7 @@ public void testRegionCanBeSet() { assertThat(settings.get("default").region, is("")); assertThat(settings.get("other").region, is(region)); try (S3Service s3Service = new S3Service()) { - AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")); + AmazonS3Client other = (AmazonS3Client) s3Service.buildClient(settings.get("other")).client(); assertThat(other.getSignerRegionOverride(), is(region)); } } diff --git a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java index cb0e76e272b4e..71e42907ab997 100644 --- a/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java +++ b/plugins/repository-s3/src/test/java/org/opensearch/repositories/s3/S3ServiceTests.java @@ -32,10 +32,12 @@ package org.opensearch.repositories.s3; import org.opensearch.cluster.metadata.RepositoryMetadata; - +import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; +import java.util.Map; + public class S3ServiceTests extends OpenSearchTestCase { public void testCachedClientsAreReleased() { @@ -56,4 +58,29 @@ public void testCachedClientsAreReleased() { final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); assertNotSame(clientSettings, clientSettingsReloaded); } + + public void testCachedClientsWithCredentialsAreReleased() { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.role_arn", "role"); + final Map defaults = S3ClientSettings.load( + 
Settings.builder().setSecureSettings(secureSettings).put("s3.client.default.identity_token_file", "file").build() + ); + final S3Service s3Service = new S3Service(); + s3Service.refreshAndClearCache(defaults); + final Settings settings = Settings.builder().put("endpoint", "http://first").put("region", "us-east-2").build(); + final RepositoryMetadata metadata1 = new RepositoryMetadata("first", "s3", settings); + final RepositoryMetadata metadata2 = new RepositoryMetadata("second", "s3", settings); + final S3ClientSettings clientSettings = s3Service.settings(metadata2); + final S3ClientSettings otherClientSettings = s3Service.settings(metadata2); + assertSame(clientSettings, otherClientSettings); + final AmazonS3Reference reference = s3Service.client(metadata1); + reference.close(); + s3Service.close(); + final AmazonS3Reference referenceReloaded = s3Service.client(metadata1); + assertNotSame(referenceReloaded, reference); + referenceReloaded.close(); + s3Service.close(); + final S3ClientSettings clientSettingsReloaded = s3Service.settings(metadata1); + assertNotSame(clientSettings, clientSettingsReloaded); + } } From c626043639b4281d97b26214c7b923f2463ac608 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Jun 2022 13:06:50 -0700 Subject: [PATCH 42/75] Bump google-auth-library-oauth2-http from 0.20.0 to 1.7.0 in /plugins/repository-gcs (#3473) * Bump google-auth-library-oauth2-http in /plugins/repository-gcs Bumps google-auth-library-oauth2-http from 0.20.0 to 1.7.0. --- updated-dependencies: - dependency-name: com.google.auth:google-auth-library-oauth2-http dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Use variable to define the version of dependency google-auth-library-java Signed-off-by: Tianli Feng Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Tianli Feng --- plugins/repository-gcs/build.gradle | 8 ++++++-- .../google-auth-library-credentials-0.20.0.jar.sha1 | 1 - .../google-auth-library-credentials-1.7.0.jar.sha1 | 1 + .../google-auth-library-oauth2-http-0.20.0.jar.sha1 | 1 - .../google-auth-library-oauth2-http-1.7.0.jar.sha1 | 1 + 5 files changed, 8 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 72964f9444026..92ddc69c89f47 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -49,6 +49,10 @@ opensearchplugin { classname 'org.opensearch.repositories.gcs.GoogleCloudStoragePlugin' } +versions << [ + 'google_auth': '1.7.0' +] + dependencies { api 'com.google.cloud:google-cloud-storage:1.113.1' api 'com.google.cloud:google-cloud-core:2.5.10' @@ -67,8 +71,8 @@ dependencies { api 'com.google.api.grpc:proto-google-common-protos:2.8.0' api 'com.google.api.grpc:proto-google-iam-v1:0.12.0' api 'com.google.cloud:google-cloud-core-http:1.93.3' - api 'com.google.auth:google-auth-library-credentials:0.20.0' - api 'com.google.auth:google-auth-library-oauth2-http:0.20.0' + api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" + api 
"com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" api 'com.google.oauth-client:google-oauth-client:1.33.1' api 'com.google.api-client:google-api-client:1.34.0' api 'com.google.http-client:google-http-client-appengine:1.41.8' diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 deleted file mode 100644 index 14cc742737eed..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -87a91a373e64ba5c3cdf8cc5cf54b189dd1492f8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 new file mode 100644 index 0000000000000..f2e9a4f7283bf --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-1.7.0.jar.sha1 @@ -0,0 +1 @@ +b29af5a9ea94e9e7f86bded11e39f5afda5b17e8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 deleted file mode 100644 index 7911c34780cbe..0000000000000 --- a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.20.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f33d4d6c91a68826816606a2208990eea93fcb2a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 new file mode 100644 index 0000000000000..738645d6b8c7b --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-1.7.0.jar.sha1 @@ -0,0 +1 @@ +985d183303dbd4b7ceb348056e41e59677f6f74f \ No newline at end of file From e0c1cf6da20ff03202372426466864c1bc9005c6 Mon Sep 17 00:00:00 2001 From: Kartik 
Ganesh Date: Fri, 3 Jun 2022 10:53:58 -0700 Subject: [PATCH 43/75] [Segment Replication] Added source-side classes for orchestrating replication events (#3470) This change expands on the existing SegmentReplicationSource interface and its corresponding Factory class by introducing an implementation where the replication source is a primary shard (PrimaryShardReplicationSource). These code paths execute on the target. The primary shard implementation creates the requests to be send to the source/primary shard. Correspondingly, this change also defines two request classes for the GET_CHECKPOINT_INFO and GET_SEGMENT_FILES requests as well as an abstract superclass. A CopyState class has been introduced that captures point-in-time, file-level details from an IndexShard. This implementation mirrors Lucene's NRT CopyState implementation. Finally, a service class has been introduce for segment replication that runs on the source side (SegmentReplicationSourceService) which handles these two types of incoming requests. This includes private handler classes that house the logic to respond to these requests, with some functionality stubbed for now. The service class also uses a simple map to cache CopyState objects that would be needed by replication targets. Unit tests have been added/updated for all new functionality. 
Signed-off-by: Kartik Ganesh --- .../org/opensearch/index/engine/Engine.java | 21 +++ .../index/engine/InternalEngine.java | 17 ++ .../index/engine/NRTReplicationEngine.java | 1 + .../index/engine/ReadOnlyEngine.java | 7 + .../opensearch/index/shard/IndexShard.java | 18 ++ .../org/opensearch/index/store/Store.java | 97 +++++++---- .../replication/CheckpointInfoRequest.java | 54 ++++++ .../replication/GetSegmentFilesRequest.java | 60 +++++++ .../PrimaryShardReplicationSource.java | 90 ++++++++++ .../SegmentReplicationSourceFactory.java | 17 +- .../SegmentReplicationSourceService.java | 160 +++++++++++++++++ .../indices/replication/common/CopyState.java | 103 +++++++++++ .../SegmentReplicationTransportRequest.java | 49 ++++++ .../index/engine/EngineConfigTests.java | 108 ++++++++++++ .../index/engine/InternalEngineTests.java | 31 ++++ .../index/engine/ReadOnlyEngineTests.java | 3 + .../opensearch/index/store/StoreTests.java | 29 +++- .../PeerRecoveryTargetServiceTests.java | 2 +- .../recovery/RecoverySourceHandlerTests.java | 8 +- .../PrimaryShardReplicationSourceTests.java | 139 +++++++++++++++ .../SegmentReplicationSourceServiceTests.java | 161 ++++++++++++++++++ .../replication/common/CopyStateTests.java | 80 +++++++++ 22 files changed, 1204 insertions(+), 51 deletions(-) create mode 100644 server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/CopyState.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java create mode 100644 
server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index c242d98b4b65c..4829148322b31 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -175,6 +175,21 @@ public final EngineConfig config() { */ protected abstract SegmentInfos getLatestSegmentInfos(); + /** + * In contrast to {@link #getLatestSegmentInfos()}, which returns a {@link SegmentInfos} + * object directly, this method returns a {@link GatedCloseable} reference to the same object. + * This allows the engine to include a clean-up {@link org.opensearch.common.CheckedRunnable} + * which is run when the reference is closed. The default implementation of the clean-up + * procedure is a no-op. + * + * @return {@link GatedCloseable} - A wrapper around a {@link SegmentInfos} instance that + * must be closed for segment files to be deleted. + */ + public GatedCloseable getSegmentInfosSnapshot() { + // default implementation + return new GatedCloseable<>(getLatestSegmentInfos(), () -> {}); + } + public MergeStats getMergeStats() { return new MergeStats(); } @@ -846,6 +861,12 @@ public final CommitStats commitStats() { */ public abstract long getPersistedLocalCheckpoint(); + /** + * @return the latest checkpoint that has been processed but not necessarily persisted. 
+ * Also see {@link #getPersistedLocalCheckpoint()} + */ + public abstract long getProcessedLocalCheckpoint(); + /** * @return a {@link SeqNoStats} object, using local state and the supplied global checkpoint */ diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index e60e650372ec4..b63a39ebb1222 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -2305,6 +2305,22 @@ public SegmentInfos getLatestSegmentInfos() { } } + /** + * Fetch the latest {@link SegmentInfos} object via {@link #getLatestSegmentInfos()} + * but also increment the ref-count to ensure that these segment files are retained + * until the reference is closed. On close, the ref-count is decremented. + */ + @Override + public GatedCloseable getSegmentInfosSnapshot() { + final SegmentInfos segmentInfos = getLatestSegmentInfos(); + try { + indexWriter.incRefDeleter(segmentInfos); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } + return new GatedCloseable<>(segmentInfos, () -> indexWriter.decRefDeleter(segmentInfos)); + } + @Override protected final void writerSegmentStats(SegmentsStats stats) { stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed()); @@ -2724,6 +2740,7 @@ public long getLastSyncedGlobalCheckpoint() { return getTranslog().getLastSyncedGlobalCheckpoint(); } + @Override public long getProcessedLocalCheckpoint() { return localCheckpointTracker.getProcessedCheckpoint(); } diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index 106643198cc3b..e4f4bbbba8f16 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ 
-248,6 +248,7 @@ public long getPersistedLocalCheckpoint() { return localCheckpointTracker.getPersistedCheckpoint(); } + @Override public long getProcessedLocalCheckpoint() { return localCheckpointTracker.getProcessedCheckpoint(); } diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 23a86d8da5599..6262a9269c01c 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -374,6 +374,13 @@ public long getPersistedLocalCheckpoint() { return seqNoStats.getLocalCheckpoint(); } + @Override + public long getProcessedLocalCheckpoint() { + // the read-only engine does not process checkpoints, so its + // processed checkpoint is identical to its persisted one. + return getPersistedLocalCheckpoint(); + } + @Override public SeqNoStats getSeqNoStats(long globalCheckpoint) { return new SeqNoStats(seqNoStats.getMaxSeqNo(), seqNoStats.getLocalCheckpoint(), globalCheckpoint); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 995a92e94aeb3..5d11c34ca205c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -2638,6 +2638,14 @@ public long getLocalCheckpoint() { return getEngine().getPersistedLocalCheckpoint(); } + /** + * Fetch the latest checkpoint that has been processed but not necessarily persisted. + * Also see {@link #getLocalCheckpoint()}. + */ + public long getProcessedLocalCheckpoint() { + return getEngine().getProcessedLocalCheckpoint(); + } + /** * Returns the global checkpoint for the shard. 
* @@ -4005,4 +4013,14 @@ public void verifyShardBeforeIndexClosing() throws IllegalStateException { RetentionLeaseSyncer getRetentionLeaseSyncer() { return retentionLeaseSyncer; } + + /** + * Fetch the latest SegmentInfos held by the shard's underlying Engine, wrapped + * by a a {@link GatedCloseable} to ensure files are not deleted/merged away. + * + * @throws EngineException - When segment infos cannot be safely retrieved + */ + public GatedCloseable getSegmentInfosSnapshot() { + return getEngine().getSegmentInfosSnapshot(); + } } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 65c47f66b7654..f818456c3a2c8 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -274,6 +274,13 @@ public MetadataSnapshot getMetadata(IndexCommit commit) throws IOException { return getMetadata(commit, false); } + /** + * Convenience wrapper around the {@link #getMetadata(IndexCommit)} method for null input. + */ + public MetadataSnapshot getMetadata() throws IOException { + return getMetadata(null, false); + } + /** * Returns a new MetadataSnapshot for the given commit. If the given commit is null * the latest commit point is used. @@ -315,6 +322,16 @@ public MetadataSnapshot getMetadata(IndexCommit commit, boolean lockDirectory) t } } + /** + * Returns a new {@link MetadataSnapshot} for the given {@link SegmentInfos} object. + * In contrast to {@link #getMetadata(IndexCommit)}, this method is useful for scenarios + * where we need to construct a MetadataSnapshot from an in-memory SegmentInfos object that + * may not have a IndexCommit associated with it, such as with segment replication. 
+ */ + public MetadataSnapshot getMetadata(SegmentInfos segmentInfos) throws IOException { + return new MetadataSnapshot(segmentInfos, directory, logger); + } + /** * Renames all the given files from the key of the map to the * value of the map. All successfully renamed files are removed from the map in-place. @@ -477,7 +494,7 @@ public static MetadataSnapshot readMetadataSnapshot( Directory dir = new NIOFSDirectory(indexLocation) ) { failIfCorrupted(dir); - return new MetadataSnapshot(null, dir, logger); + return new MetadataSnapshot((IndexCommit) null, dir, logger); } catch (IndexNotFoundException ex) { // that's fine - happens all the time no need to log } catch (FileNotFoundException | NoSuchFileException ex) { @@ -682,7 +699,7 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetadata) thr } } directory.syncMetaData(); - final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null); + final Store.MetadataSnapshot metadataOrEmpty = getMetadata(); verifyAfterCleanup(sourceMetadata, metadataOrEmpty); } finally { metadataLock.writeLock().unlock(); @@ -822,7 +839,14 @@ public MetadataSnapshot(Map metadata, Map builder = new HashMap<>(); - Map commitUserDataBuilder = new HashMap<>(); try { final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory); - numDocs = Lucene.getNumDocs(segmentCommitInfos); - commitUserDataBuilder.putAll(segmentCommitInfos.getUserData()); - // we don't know which version was used to write so we take the max version. - Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); - for (SegmentCommitInfo info : segmentCommitInfos) { - final Version version = info.info.getVersion(); - if (version == null) { - // version is written since 3.1+: we should have already hit IndexFormatTooOld. 
- throw new IllegalArgumentException("expected valid version value: " + info.info.toString()); - } - if (version.onOrAfter(maxVersion)) { - maxVersion = version; - } - for (String file : info.files()) { - checksumFromLuceneFile( - directory, - file, - builder, - logger, - version, - SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)) - ); - } - } - if (maxVersion == null) { - maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; - } - final String segmentsFile = segmentCommitInfos.getSegmentsFileName(); - checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); + return loadMetadata(segmentCommitInfos, directory, logger); } catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we either know the index is corrupted or it's just not there throw ex; @@ -949,6 +942,40 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg } throw ex; } + } + + static LoadedMetadata loadMetadata(SegmentInfos segmentInfos, Directory directory, Logger logger) throws IOException { + long numDocs = Lucene.getNumDocs(segmentInfos); + Map commitUserDataBuilder = new HashMap<>(); + commitUserDataBuilder.putAll(segmentInfos.getUserData()); + Map builder = new HashMap<>(); + // we don't know which version was used to write so we take the max version. + Version maxVersion = segmentInfos.getMinSegmentLuceneVersion(); + for (SegmentCommitInfo info : segmentInfos) { + final Version version = info.info.getVersion(); + if (version == null) { + // version is written since 3.1+: we should have already hit IndexFormatTooOld. 
+ throw new IllegalArgumentException("expected valid version value: " + info.info.toString()); + } + if (version.onOrAfter(maxVersion)) { + maxVersion = version; + } + for (String file : info.files()) { + checksumFromLuceneFile( + directory, + file, + builder, + logger, + version, + SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)) + ); + } + } + if (maxVersion == null) { + maxVersion = org.opensearch.Version.CURRENT.minimumIndexCompatibilityVersion().luceneVersion; + } + final String segmentsFile = segmentInfos.getSegmentsFileName(); + checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true); return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs); } diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java new file mode 100644 index 0000000000000..188a4c1e40fa7 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoRequest.java @@ -0,0 +1,54 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.SegmentReplicationTransportRequest; + +import java.io.IOException; + +/** + * Request object for fetching segment metadata for a {@link ReplicationCheckpoint} from + * a {@link SegmentReplicationSource}. This object is created by the target node and sent + * to the source node. 
+ * + * @opensearch.internal + */ +public class CheckpointInfoRequest extends SegmentReplicationTransportRequest { + + private final ReplicationCheckpoint checkpoint; + + public CheckpointInfoRequest(StreamInput in) throws IOException { + super(in); + checkpoint = new ReplicationCheckpoint(in); + } + + public CheckpointInfoRequest( + long replicationId, + String targetAllocationId, + DiscoveryNode targetNode, + ReplicationCheckpoint checkpoint + ) { + super(replicationId, targetAllocationId, targetNode); + this.checkpoint = checkpoint; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + checkpoint.writeTo(out); + } + + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java new file mode 100644 index 0000000000000..21749d3fe7d8a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesRequest.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.SegmentReplicationTransportRequest; + +import java.io.IOException; +import java.util.List; + +/** + * Request object for fetching a list of segment files metadata from a {@link SegmentReplicationSource}. + * This object is created by the target node and sent to the source node. 
+ * + * @opensearch.internal + */ +public class GetSegmentFilesRequest extends SegmentReplicationTransportRequest { + + private final List filesToFetch; + private final ReplicationCheckpoint checkpoint; + + public GetSegmentFilesRequest(StreamInput in) throws IOException { + super(in); + this.filesToFetch = in.readList(StoreFileMetadata::new); + this.checkpoint = new ReplicationCheckpoint(in); + } + + public GetSegmentFilesRequest( + long replicationId, + String targetAllocationId, + DiscoveryNode targetNode, + List filesToFetch, + ReplicationCheckpoint checkpoint + ) { + super(replicationId, targetAllocationId, targetNode); + this.filesToFetch = filesToFetch; + this.checkpoint = checkpoint; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(filesToFetch); + checkpoint.writeTo(out); + } + + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java new file mode 100644 index 0000000000000..08dc0b97b31d5 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/PrimaryShardReplicationSource.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.recovery.RetryableTransportClient; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.transport.TransportService; + +import java.util.List; + +import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO; +import static org.opensearch.indices.replication.SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES; + +/** + * Implementation of a {@link SegmentReplicationSource} where the source is a primary node. + * This code executes on the target node. 
+ * + * @opensearch.internal + */ +public class PrimaryShardReplicationSource implements SegmentReplicationSource { + + private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class); + + private final RetryableTransportClient transportClient; + private final DiscoveryNode targetNode; + private final String targetAllocationId; + + public PrimaryShardReplicationSource( + DiscoveryNode targetNode, + String targetAllocationId, + TransportService transportService, + RecoverySettings recoverySettings, + DiscoveryNode sourceNode + ) { + this.targetAllocationId = targetAllocationId; + this.transportClient = new RetryableTransportClient( + transportService, + sourceNode, + recoverySettings.internalActionRetryTimeout(), + logger + ); + this.targetNode = targetNode; + } + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + final Writeable.Reader reader = CheckpointInfoResponse::new; + final ActionListener responseListener = ActionListener.map(listener, r -> r); + final CheckpointInfoRequest request = new CheckpointInfoRequest(replicationId, targetAllocationId, targetNode, checkpoint); + transportClient.executeRetryableAction(GET_CHECKPOINT_INFO, request, responseListener, reader); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + Store store, + ActionListener listener + ) { + final Writeable.Reader reader = GetSegmentFilesResponse::new; + final ActionListener responseListener = ActionListener.map(listener, r -> r); + final GetSegmentFilesRequest request = new GetSegmentFilesRequest( + replicationId, + targetAllocationId, + targetNode, + filesToFetch, + checkpoint + ); + transportClient.executeRetryableAction(GET_SEGMENT_FILES, request, responseListener, reader); + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java 
b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java index 3ca31503f176d..afbb80d263805 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceFactory.java @@ -8,8 +8,11 @@ package org.opensearch.indices.replication; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.service.ClusterService; import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.transport.TransportService; @@ -35,7 +38,17 @@ public SegmentReplicationSourceFactory( } public SegmentReplicationSource get(IndexShard shard) { - // TODO: Default to an implementation that uses the primary shard. - return null; + return new PrimaryShardReplicationSource( + clusterService.localNode(), + shard.routingEntry().allocationId().getId(), + transportService, + recoverySettings, + getPrimaryNode(shard.shardId()) + ); + } + + private DiscoveryNode getPrimaryNode(ShardId shardId) { + ShardRouting primaryShard = clusterService.state().routingTable().shardRoutingTable(shardId).primaryShard(); + return clusterService.state().nodes().get(primaryShard.currentNodeId()); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java new file mode 100644 index 0000000000000..9f70120dedd6c --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -0,0 +1,160 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyState; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportRequestHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +/** + * Service class that handles segment replication requests from replica shards. + * Typically, the "source" is a primary shard. This code executes on the source node. + * + * @opensearch.internal + */ +public class SegmentReplicationSourceService { + + private static final Logger logger = LogManager.getLogger(SegmentReplicationSourceService.class); + + /** + * Internal actions used by the segment replication source service on the primary shard + * + * @opensearch.internal + */ + public static class Actions { + public static final String GET_CHECKPOINT_INFO = "internal:index/shard/replication/get_checkpoint_info"; + public static final String GET_SEGMENT_FILES = "internal:index/shard/replication/get_segment_files"; + } + + private final Map copyStateMap; + private final TransportService transportService; + private final IndicesService indicesService; + + // TODO mark this as injected and bind in Node + public SegmentReplicationSourceService(TransportService transportService, IndicesService indicesService) { + copyStateMap = Collections.synchronizedMap(new HashMap<>()); + this.transportService = transportService; + this.indicesService = indicesService; + + 
transportService.registerRequestHandler( + Actions.GET_CHECKPOINT_INFO, + ThreadPool.Names.GENERIC, + CheckpointInfoRequest::new, + new CheckpointInfoRequestHandler() + ); + transportService.registerRequestHandler( + Actions.GET_SEGMENT_FILES, + ThreadPool.Names.GENERIC, + GetSegmentFilesRequest::new, + new GetSegmentFilesRequestHandler() + ); + } + + private class CheckpointInfoRequestHandler implements TransportRequestHandler { + @Override + public void messageReceived(CheckpointInfoRequest request, TransportChannel channel, Task task) throws Exception { + final ReplicationCheckpoint checkpoint = request.getCheckpoint(); + logger.trace("Received request for checkpoint {}", checkpoint); + final CopyState copyState = getCachedCopyState(checkpoint); + channel.sendResponse( + new CheckpointInfoResponse( + copyState.getCheckpoint(), + copyState.getMetadataSnapshot(), + copyState.getInfosBytes(), + copyState.getPendingDeleteFiles() + ) + ); + } + } + + class GetSegmentFilesRequestHandler implements TransportRequestHandler { + @Override + public void messageReceived(GetSegmentFilesRequest request, TransportChannel channel, Task task) throws Exception { + if (isInCopyStateMap(request.getCheckpoint())) { + // TODO send files + } else { + // Return an empty list of files + channel.sendResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + } + } + + /** + * Operations on the {@link #copyStateMap} member. + */ + + /** + * A synchronized method that checks {@link #copyStateMap} for the given {@link ReplicationCheckpoint} key + * and returns the cached value if one is present. If the key is not present, a {@link CopyState} + * object is constructed and stored in the map before being returned. 
+ */ + private synchronized CopyState getCachedCopyState(ReplicationCheckpoint checkpoint) throws IOException { + if (isInCopyStateMap(checkpoint)) { + final CopyState copyState = fetchFromCopyStateMap(checkpoint); + copyState.incRef(); + return copyState; + } else { + // From the checkpoint's shard ID, fetch the IndexShard + ShardId shardId = checkpoint.getShardId(); + final IndexService indexService = indicesService.indexService(shardId.getIndex()); + final IndexShard indexShard = indexService.getShard(shardId.id()); + // build the CopyState object and cache it before returning + final CopyState copyState = new CopyState(indexShard); + + /** + * Use the checkpoint from the request as the key in the map, rather than + * the checkpoint from the created CopyState. This maximizes cache hits + * if replication targets make a request with an older checkpoint. + * Replication targets are expected to fetch the checkpoint in the response + * CopyState to bring themselves up to date. + */ + addToCopyStateMap(checkpoint, copyState); + return copyState; + } + } + + /** + * Adds the input {@link CopyState} object to {@link #copyStateMap}. + * The key is the CopyState's {@link ReplicationCheckpoint} object. + */ + private void addToCopyStateMap(ReplicationCheckpoint checkpoint, CopyState copyState) { + copyStateMap.putIfAbsent(checkpoint, copyState); + } + + /** + * Given a {@link ReplicationCheckpoint}, return the corresponding + * {@link CopyState} object, if any, from {@link #copyStateMap}. + */ + private CopyState fetchFromCopyStateMap(ReplicationCheckpoint replicationCheckpoint) { + return copyStateMap.get(replicationCheckpoint); + } + + /** + * Checks if the {@link #copyStateMap} has the input {@link ReplicationCheckpoint} + * as a key by invoking {@link Map#containsKey(Object)}. 
+ */ + private boolean isInCopyStateMap(ReplicationCheckpoint replicationCheckpoint) { + return copyStateMap.containsKey(replicationCheckpoint); + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java new file mode 100644 index 0000000000000..250df3481435a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/CopyState.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.store.ByteBuffersDataOutput; +import org.apache.lucene.store.ByteBuffersIndexOutput; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.HashSet; +import java.util.Set; + +/** + * An Opensearch-specific version of Lucene's CopyState class that + * holds incRef'd file level details for one point-in-time segment infos. 
+ * + * @opensearch.internal + */ +public class CopyState extends AbstractRefCounted { + + private final GatedCloseable segmentInfosRef; + private final ReplicationCheckpoint replicationCheckpoint; + private final Store.MetadataSnapshot metadataSnapshot; + private final HashSet pendingDeleteFiles; + private final byte[] infosBytes; + private GatedCloseable commitRef; + + public CopyState(IndexShard shard) throws IOException { + super("CopyState-" + shard.shardId()); + this.segmentInfosRef = shard.getSegmentInfosSnapshot(); + SegmentInfos segmentInfos = this.segmentInfosRef.get(); + this.metadataSnapshot = shard.store().getMetadata(segmentInfos); + this.replicationCheckpoint = new ReplicationCheckpoint( + shard.shardId(), + shard.getOperationPrimaryTerm(), + segmentInfos.getGeneration(), + shard.getProcessedLocalCheckpoint(), + segmentInfos.getVersion() + ); + + // Send files that are merged away in the latest SegmentInfos but not in the latest on disk Segments_N. + // This ensures that the store on replicas is in sync with the store on primaries. + this.commitRef = shard.acquireLastIndexCommit(false); + Store.MetadataSnapshot metadata = shard.store().getMetadata(this.commitRef.get()); + final Store.RecoveryDiff diff = metadata.recoveryDiff(this.metadataSnapshot); + this.pendingDeleteFiles = new HashSet<>(diff.missing); + if (this.pendingDeleteFiles.isEmpty()) { + // If there are no additional files we can release the last commit immediately. 
+ this.commitRef.close(); + this.commitRef = null; + } + + ByteBuffersDataOutput buffer = new ByteBuffersDataOutput(); + // resource description and name are not used, but resource description cannot be null + try (ByteBuffersIndexOutput indexOutput = new ByteBuffersIndexOutput(buffer, "", null)) { + segmentInfos.write(indexOutput); + } + this.infosBytes = buffer.toArrayCopy(); + } + + @Override + protected void closeInternal() { + try { + segmentInfosRef.close(); + // commitRef may be null if there were no pending delete files + if (commitRef != null) { + commitRef.close(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public ReplicationCheckpoint getCheckpoint() { + return replicationCheckpoint; + } + + public Store.MetadataSnapshot getMetadataSnapshot() { + return metadataSnapshot; + } + + public byte[] getInfosBytes() { + return infosBytes; + } + + public Set getPendingDeleteFiles() { + return pendingDeleteFiles; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java new file mode 100644 index 0000000000000..db8206d131c13 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/SegmentReplicationTransportRequest.java @@ -0,0 +1,49 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; + +/** + * Abstract base class for transport-layer requests related to segment replication. 
+ * + * @opensearch.internal + */ +public abstract class SegmentReplicationTransportRequest extends TransportRequest { + + private final long replicationId; + private final String targetAllocationId; + private final DiscoveryNode targetNode; + + protected SegmentReplicationTransportRequest(long replicationId, String targetAllocationId, DiscoveryNode targetNode) { + this.replicationId = replicationId; + this.targetAllocationId = targetAllocationId; + this.targetNode = targetNode; + } + + protected SegmentReplicationTransportRequest(StreamInput in) throws IOException { + super(in); + this.replicationId = in.readLong(); + this.targetAllocationId = in.readString(); + this.targetNode = new DiscoveryNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(this.replicationId); + out.writeString(this.targetAllocationId); + targetNode.writeTo(out); + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java new file mode 100644 index 0000000000000..1c6d06e9bcc08 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigTests.java @@ -0,0 +1,108 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.engine; + +import org.opensearch.Version; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.seqno.RetentionLeases; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; + +public class EngineConfigTests extends OpenSearchTestCase { + + private IndexSettings defaultIndexSettings; + + @Override + public void setUp() throws Exception { + super.setUp(); + final IndexMetadata defaultIndexMetadata = IndexMetadata.builder("test") + .settings(settings(Version.CURRENT)) + .numberOfShards(1) + .numberOfReplicas(1) + .build(); + defaultIndexSettings = IndexSettingsModule.newIndexSettings("test", defaultIndexMetadata.getSettings()); + } + + public void testEngineConfig_DefaultValueForReadOnlyEngine() { + EngineConfig config = new EngineConfig( + null, + null, + defaultIndexSettings, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + () -> RetentionLeases.EMPTY, + null, + null + ); + assertFalse(config.isReadOnlyReplica()); + } + + public void testEngineConfig_ReadOnlyEngineWithSegRepDisabled() { + expectThrows(IllegalArgumentException.class, () -> createReadOnlyEngine(defaultIndexSettings)); + } + + public void testEngineConfig_ReadOnlyEngineWithSegRepEnabled() { + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "test", + Settings.builder() + .put(defaultIndexSettings.getSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build() + ); + EngineConfig engineConfig = createReadOnlyEngine(indexSettings); + assertTrue(engineConfig.isReadOnlyReplica()); + } + + private EngineConfig createReadOnlyEngine(IndexSettings indexSettings) { + return new EngineConfig( + null, + 
null, + indexSettings, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + () -> RetentionLeases.EMPTY, + null, + null, + true + ); + } +} diff --git a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java index cbae55a047a1e..b14ad15070118 100644 --- a/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/InternalEngineTests.java @@ -211,7 +211,9 @@ import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.opensearch.index.engine.Engine.Operation.Origin.LOCAL_RESET; @@ -7384,4 +7386,33 @@ public void testMaxDocsOnReplica() throws Exception { restoreIndexWriterMaxDocs(); } } + + public void testGetSegmentInfosSnapshot() throws IOException { + IOUtils.close(store, engine); + Store store = createStore(); + InternalEngine engine = spy(createEngine(store, createTempDir())); + GatedCloseable segmentInfosSnapshot = engine.getSegmentInfosSnapshot(); + assertNotNull(segmentInfosSnapshot); + assertNotNull(segmentInfosSnapshot.get()); + verify(engine, times(1)).getLatestSegmentInfos(); + store.close(); + engine.close(); + } + + public void testGetProcessedLocalCheckpoint() throws IOException { + final long expectedLocalCheckpoint = 1L; + IOUtils.close(store, engine); + // set up mock + final LocalCheckpointTracker mockCheckpointTracker = mock(LocalCheckpointTracker.class); + when(mockCheckpointTracker.getProcessedCheckpoint()).thenReturn(expectedLocalCheckpoint); + + Store store = createStore(); + InternalEngine engine = 
createEngine(store, createTempDir(), (a, b) -> mockCheckpointTracker); + + long actualLocalCheckpoint = engine.getProcessedLocalCheckpoint(); + assertEquals(expectedLocalCheckpoint, actualLocalCheckpoint); + verify(mockCheckpointTracker, atLeastOnce()).getProcessedCheckpoint(); + store.close(); + engine.close(); + } } diff --git a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java index 2106c5e1067fb..da0db02ac402e 100644 --- a/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/ReadOnlyEngineTests.java @@ -107,6 +107,7 @@ public void testReadOnlyEngine() throws Exception { lastSeqNoStats = engine.getSeqNoStats(globalCheckpoint.get()); lastDocIds = getDocIds(engine, true); assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getProcessedLocalCheckpoint(), equalTo(readOnlyEngine.getPersistedLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); for (int i = 0; i < numDocs; i++) { @@ -131,6 +132,7 @@ public void testReadOnlyEngine() throws Exception { IOUtils.close(external, internal); // the locked down engine should still point to the previous commit assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getProcessedLocalCheckpoint(), equalTo(readOnlyEngine.getPersistedLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); try (Engine.GetResult getResult = readOnlyEngine.get(get, readOnlyEngine::acquireSearcher)) { @@ -142,6 +144,7 @@ public 
void testReadOnlyEngine() throws Exception { recoveringEngine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); // the locked down engine should still point to the previous commit assertThat(readOnlyEngine.getPersistedLocalCheckpoint(), equalTo(lastSeqNoStats.getLocalCheckpoint())); + assertThat(readOnlyEngine.getProcessedLocalCheckpoint(), equalTo(readOnlyEngine.getPersistedLocalCheckpoint())); assertThat(readOnlyEngine.getSeqNoStats(globalCheckpoint.get()).getMaxSeqNo(), equalTo(lastSeqNoStats.getMaxSeqNo())); assertThat(getDocIds(readOnlyEngine, false), equalTo(lastDocIds)); } diff --git a/server/src/test/java/org/opensearch/index/store/StoreTests.java b/server/src/test/java/org/opensearch/index/store/StoreTests.java index fdec86e7912fd..d99bde4764adf 100644 --- a/server/src/test/java/org/opensearch/index/store/StoreTests.java +++ b/server/src/test/java/org/opensearch/index/store/StoreTests.java @@ -364,14 +364,14 @@ public void testNewChecksums() throws IOException { Store.MetadataSnapshot metadata; // check before we committed try { - store.getMetadata(null); + store.getMetadata(); fail("no index present - expected exception"); } catch (IndexNotFoundException ex) { // expected } writer.commit(); writer.close(); - metadata = store.getMetadata(null); + metadata = store.getMetadata(); assertThat(metadata.asMap().isEmpty(), is(false)); for (StoreFileMetadata meta : metadata) { try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) { @@ -552,7 +552,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { } writer.commit(); writer.close(); - first = store.getMetadata(null); + first = store.getMetadata(); assertDeleteContent(store, store.directory()); store.close(); } @@ -581,7 +581,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { } writer.commit(); writer.close(); - second = store.getMetadata(null); + second = store.getMetadata(); } Store.RecoveryDiff diff = 
first.recoveryDiff(second); assertThat(first.size(), equalTo(second.size())); @@ -610,7 +610,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { writer.deleteDocuments(new Term("id", Integer.toString(random().nextInt(numDocs)))); writer.commit(); writer.close(); - Store.MetadataSnapshot metadata = store.getMetadata(null); + Store.MetadataSnapshot metadata = store.getMetadata(); StoreFileMetadata delFile = null; for (StoreFileMetadata md : metadata) { if (md.name().endsWith(".liv")) { @@ -645,7 +645,7 @@ public void testRecoveryDiff() throws IOException, InterruptedException { writer.addDocument(docs.get(0)); writer.close(); - Store.MetadataSnapshot newCommitMetadata = store.getMetadata(null); + Store.MetadataSnapshot newCommitMetadata = store.getMetadata(); Store.RecoveryDiff newCommitDiff = newCommitMetadata.recoveryDiff(metadata); if (delFile != null) { assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetadata.size() - 5)); // segments_N, del file, cfs, cfe, si for the @@ -710,7 +710,7 @@ public void testCleanupFromSnapshot() throws IOException { writer.addDocument(doc); } - Store.MetadataSnapshot firstMeta = store.getMetadata(null); + Store.MetadataSnapshot firstMeta = store.getMetadata(); if (random().nextBoolean()) { for (int i = 0; i < docs; i++) { @@ -731,7 +731,7 @@ public void testCleanupFromSnapshot() throws IOException { writer.commit(); writer.close(); - Store.MetadataSnapshot secondMeta = store.getMetadata(null); + Store.MetadataSnapshot secondMeta = store.getMetadata(); if (randomBoolean()) { store.cleanupAndVerify("test", firstMeta); @@ -1000,7 +1000,7 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException { try { if (randomBoolean()) { - store.getMetadata(null); + store.getMetadata(); } else { store.readLastCommittedSegmentsInfo(); } @@ -1138,4 +1138,15 @@ public void testGetPendingFiles() throws IOException { } } } + + public void testGetMetadataWithSegmentInfos() throws IOException { + 
final ShardId shardId = new ShardId("index", "_na_", 1); + Store store = new Store(shardId, INDEX_SETTINGS, new NIOFSDirectory(createTempDir()), new DummyShardLock(shardId)); + store.createEmpty(Version.LATEST); + SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + Store.MetadataSnapshot metadataSnapshot = store.getMetadata(segmentInfos); + // loose check for equality + assertEquals(segmentInfos.getSegmentsFileName(), metadataSnapshot.getSegmentsFile().name()); + store.close(); + } } diff --git a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java index bda2a910d922e..d85b2f1e22979 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/PeerRecoveryTargetServiceTests.java @@ -85,7 +85,7 @@ public void testWriteFileChunksConcurrently() throws Exception { indexDoc(sourceShard, "_doc", Integer.toString(i)); } sourceShard.flush(new FlushRequest()); - Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(null); + Store.MetadataSnapshot sourceSnapshot = sourceShard.store().getMetadata(); List mdFiles = new ArrayList<>(); for (StoreFileMetadata md : sourceSnapshot) { mdFiles.add(md); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java index 1739f546150d9..fc5c429d74b16 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoverySourceHandlerTests.java @@ -189,7 +189,7 @@ public void testSendFiles() throws Throwable { writer.commit(); writer.close(); - Store.MetadataSnapshot metadata = store.getMetadata(null); + Store.MetadataSnapshot metadata = store.getMetadata(); 
ReplicationLuceneIndex luceneIndex = new ReplicationLuceneIndex(); List metas = new ArrayList<>(); for (StoreFileMetadata md : metadata) { @@ -226,7 +226,7 @@ public void writeFileChunk( PlainActionFuture sendFilesFuture = new PlainActionFuture<>(); handler.sendFiles(store, metas.toArray(new StoreFileMetadata[0]), () -> 0, sendFilesFuture); sendFilesFuture.actionGet(); - Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(null); + Store.MetadataSnapshot targetStoreMetadata = targetStore.getMetadata(); Store.RecoveryDiff recoveryDiff = targetStoreMetadata.recoveryDiff(metadata); assertEquals(metas.size(), recoveryDiff.identical.size()); assertEquals(0, recoveryDiff.different.size()); @@ -512,7 +512,7 @@ public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable { writer.close(); ReplicationLuceneIndex luceneIndex = new ReplicationLuceneIndex(); - Store.MetadataSnapshot metadata = store.getMetadata(null); + Store.MetadataSnapshot metadata = store.getMetadata(); List metas = new ArrayList<>(); for (StoreFileMetadata md : metadata) { metas.add(md); @@ -594,7 +594,7 @@ public void testHandleExceptionOnSendFiles() throws Throwable { writer.commit(); writer.close(); - Store.MetadataSnapshot metadata = store.getMetadata(null); + Store.MetadataSnapshot metadata = store.getMetadata(); List metas = new ArrayList<>(); for (StoreFileMetadata md : metadata) { metas.add(md); diff --git a/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java new file mode 100644 index 0000000000000..6bce74be569c3 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/PrimaryShardReplicationSourceTests.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open 
source license. + */ + +package org.opensearch.indices.replication; + +import org.apache.lucene.util.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.test.ClusterServiceUtils; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.transport.TransportService; + +import java.util.Arrays; +import java.util.Collections; + +import static org.mockito.Mockito.mock; + +public class PrimaryShardReplicationSourceTests extends IndexShardTestCase { + + private static final long PRIMARY_TERM = 1L; + private static final long SEGMENTS_GEN = 2L; + private static final long SEQ_NO = 3L; + private static final long VERSION = 4L; + private static final long REPLICATION_ID = 123L; + + private CapturingTransport transport; + private ClusterService clusterService; + private TransportService transportService; + private PrimaryShardReplicationSource replicationSource; + private IndexShard indexShard; + private DiscoveryNode sourceNode; + + @Override + public void setUp() throws Exception { + super.setUp(); + final Settings settings = Settings.builder().put("node.name", SegmentReplicationTargetServiceTests.class.getSimpleName()).build(); + final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + final RecoverySettings recoverySettings = new 
RecoverySettings(settings, clusterSettings); + transport = new CapturingTransport(); + sourceNode = newDiscoveryNode("sourceNode"); + final DiscoveryNode localNode = newDiscoveryNode("localNode"); + clusterService = ClusterServiceUtils.createClusterService(threadPool, localNode); + transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + + indexShard = newStartedShard(true); + + replicationSource = new PrimaryShardReplicationSource( + localNode, + indexShard.routingEntry().allocationId().toString(), + transportService, + recoverySettings, + sourceNode + ); + } + + @Override + public void tearDown() throws Exception { + IOUtils.close(transportService, clusterService, transport); + closeShards(indexShard); + super.tearDown(); + } + + public void testGetCheckpointMetadata() { + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, mock(ActionListener.class)); + CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); + assertEquals(1, requestList.length); + CapturingTransport.CapturedRequest capturedRequest = requestList[0]; + assertEquals(SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO, capturedRequest.action); + assertEquals(sourceNode, capturedRequest.node); + assertTrue(capturedRequest.request instanceof CheckpointInfoRequest); + } + + public void testGetSegmentFiles() { + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( + indexShard.shardId(), + PRIMARY_TERM, + SEGMENTS_GEN, + SEQ_NO, + VERSION + ); + StoreFileMetadata testMetadata = new StoreFileMetadata("testFile", 1L, "checksum", 
Version.LATEST); + replicationSource.getSegmentFiles( + REPLICATION_ID, + checkpoint, + Arrays.asList(testMetadata), + mock(Store.class), + mock(ActionListener.class) + ); + CapturingTransport.CapturedRequest[] requestList = transport.getCapturedRequestsAndClear(); + assertEquals(1, requestList.length); + CapturingTransport.CapturedRequest capturedRequest = requestList[0]; + assertEquals(SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES, capturedRequest.action); + assertEquals(sourceNode, capturedRequest.node); + assertTrue(capturedRequest.request instanceof GetSegmentFilesRequest); + } + + private DiscoveryNode newDiscoveryNode(String nodeName) { + return new DiscoveryNode( + nodeName, + randomAlphaOfLength(10), + buildNewFakeTransportAddress(), + Collections.emptyMap(), + Collections.singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + org.opensearch.Version.CURRENT + ); + } +} diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java new file mode 100644 index 0000000000000..67c867d360e70 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.CopyStateTests; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SegmentReplicationSourceServiceTests extends OpenSearchTestCase { + + private ShardId testShardId; + private ReplicationCheckpoint testCheckpoint; + private IndicesService mockIndicesService; + private IndexService mockIndexService; + private IndexShard mockIndexShard; + private TestThreadPool testThreadPool; + private CapturingTransport transport; + private TransportService transportService; + private DiscoveryNode localNode; + private SegmentReplicationSourceService segmentReplicationSourceService; + + @Override + public void setUp() throws Exception { + super.setUp(); + // setup mocks + mockIndexShard = CopyStateTests.createMockIndexShard(); + testShardId = mockIndexShard.shardId(); + mockIndicesService = mock(IndicesService.class); + mockIndexService = mock(IndexService.class); + 
when(mockIndicesService.indexService(testShardId.getIndex())).thenReturn(mockIndexService); + when(mockIndexService.getShard(testShardId.id())).thenReturn(mockIndexShard); + + // This mirrors the creation of the ReplicationCheckpoint inside CopyState + testCheckpoint = new ReplicationCheckpoint( + testShardId, + mockIndexShard.getOperationPrimaryTerm(), + 0L, + mockIndexShard.getProcessedLocalCheckpoint(), + 0L + ); + testThreadPool = new TestThreadPool("test", Settings.EMPTY); + transport = new CapturingTransport(); + localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); + transportService = transport.createTransportService( + Settings.EMPTY, + testThreadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> localNode, + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + segmentReplicationSourceService = new SegmentReplicationSourceService(transportService, mockIndicesService); + } + + @Override + public void tearDown() throws Exception { + ThreadPool.terminate(testThreadPool, 30, TimeUnit.SECONDS); + testThreadPool = null; + super.tearDown(); + } + + public void testGetSegmentFiles_EmptyResponse() { + final GetSegmentFilesRequest request = new GetSegmentFilesRequest( + 1, + "allocationId", + localNode, + Collections.emptyList(), + testCheckpoint + ); + transportService.sendRequest( + localNode, + SegmentReplicationSourceService.Actions.GET_SEGMENT_FILES, + request, + new TransportResponseHandler() { + @Override + public void handleResponse(GetSegmentFilesResponse response) { + assertEquals(0, response.files.size()); + } + + @Override + public void handleException(TransportException e) { + fail("unexpected exception: " + e); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public GetSegmentFilesResponse read(StreamInput in) throws IOException { + return new GetSegmentFilesResponse(in); + } + } + ); + } + + 
public void testCheckpointInfo() { + final CheckpointInfoRequest request = new CheckpointInfoRequest(1L, "testAllocationId", localNode, testCheckpoint); + transportService.sendRequest( + localNode, + SegmentReplicationSourceService.Actions.GET_CHECKPOINT_INFO, + request, + new TransportResponseHandler() { + @Override + public void handleResponse(CheckpointInfoResponse response) { + assertEquals(testCheckpoint, response.getCheckpoint()); + assertNotNull(response.getInfosBytes()); + // CopyStateTests sets up one pending delete file and one committed segments file + assertEquals(1, response.getPendingDeleteFiles().size()); + assertEquals(1, response.getSnapshot().size()); + } + + @Override + public void handleException(TransportException e) { + fail("unexpected exception: " + e); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public CheckpointInfoResponse read(StreamInput in) throws IOException { + return new CheckpointInfoResponse(in); + } + } + ); + } + +} diff --git a/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java new file mode 100644 index 0000000000000..afa38afb0cf2f --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/common/CopyStateTests.java @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication.common; + +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexFileNames; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.util.Version; +import org.opensearch.common.collect.Map; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardTestCase; +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; + +import java.io.IOException; +import java.util.Set; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class CopyStateTests extends IndexShardTestCase { + + private static final long EXPECTED_LONG_VALUE = 1L; + private static final ShardId TEST_SHARD_ID = new ShardId("testIndex", "testUUID", 0); + private static final StoreFileMetadata SEGMENTS_FILE = new StoreFileMetadata(IndexFileNames.SEGMENTS, 1L, "0", Version.LATEST); + private static final StoreFileMetadata PENDING_DELETE_FILE = new StoreFileMetadata("pendingDelete.del", 1L, "1", Version.LATEST); + + private static final Store.MetadataSnapshot COMMIT_SNAPSHOT = new Store.MetadataSnapshot( + Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE, PENDING_DELETE_FILE.name(), PENDING_DELETE_FILE), + null, + 0 + ); + + private static final Store.MetadataSnapshot SI_SNAPSHOT = new Store.MetadataSnapshot( + Map.of(SEGMENTS_FILE.name(), SEGMENTS_FILE), + null, + 0 + ); + + public void testCopyStateCreation() throws IOException { + CopyState copyState = new CopyState(createMockIndexShard()); + ReplicationCheckpoint checkpoint = copyState.getCheckpoint(); + assertEquals(TEST_SHARD_ID, checkpoint.getShardId()); + // version was never set so this should be zero + assertEquals(0, checkpoint.getSegmentInfosVersion()); + 
assertEquals(EXPECTED_LONG_VALUE, checkpoint.getPrimaryTerm()); + + Set pendingDeleteFiles = copyState.getPendingDeleteFiles(); + assertEquals(1, pendingDeleteFiles.size()); + assertTrue(pendingDeleteFiles.contains(PENDING_DELETE_FILE)); + } + + public static IndexShard createMockIndexShard() throws IOException { + IndexShard mockShard = mock(IndexShard.class); + when(mockShard.shardId()).thenReturn(TEST_SHARD_ID); + when(mockShard.getOperationPrimaryTerm()).thenReturn(EXPECTED_LONG_VALUE); + when(mockShard.getProcessedLocalCheckpoint()).thenReturn(EXPECTED_LONG_VALUE); + + Store mockStore = mock(Store.class); + when(mockShard.store()).thenReturn(mockStore); + + SegmentInfos testSegmentInfos = new SegmentInfos(Version.LATEST.major); + when(mockShard.getSegmentInfosSnapshot()).thenReturn(new GatedCloseable<>(testSegmentInfos, () -> {})); + when(mockStore.getMetadata(testSegmentInfos)).thenReturn(SI_SNAPSHOT); + + IndexCommit mockIndexCommit = mock(IndexCommit.class); + when(mockShard.acquireLastIndexCommit(false)).thenReturn(new GatedCloseable<>(mockIndexCommit, () -> {})); + when(mockStore.getMetadata(mockIndexCommit)).thenReturn(COMMIT_SNAPSHOT); + return mockShard; + } +} From bd5a004f01d8f946c17485c350d7725f144fc9e6 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Fri, 3 Jun 2022 10:54:40 -0700 Subject: [PATCH 44/75] [Dependency upgrade] google-oauth-client to 1.33.3 (#3500) Signed-off-by: Suraj Singh --- plugins/repository-gcs/build.gradle | 2 +- .../repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 | 1 - .../repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 92ddc69c89f47..0e1c2125f5d81 100644 --- 
a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -73,7 +73,7 @@ dependencies { api 'com.google.cloud:google-cloud-core-http:1.93.3' api "com.google.auth:google-auth-library-credentials:${versions.google_auth}" api "com.google.auth:google-auth-library-oauth2-http:${versions.google_auth}" - api 'com.google.oauth-client:google-oauth-client:1.33.1' + api 'com.google.oauth-client:google-oauth-client:1.33.3' api 'com.google.api-client:google-api-client:1.34.0' api 'com.google.http-client:google-http-client-appengine:1.41.8' api 'com.google.http-client:google-http-client-jackson2:1.35.0' diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 deleted file mode 100644 index 3897a85310ec6..0000000000000 --- a/plugins/repository-gcs/licenses/google-oauth-client-1.33.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -0a431f1a677c5f89507591ab47a7ccdb0b18b6f7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 new file mode 100644 index 0000000000000..f2afaa1bc2dba --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-1.33.3.jar.sha1 @@ -0,0 +1 @@ +9d445a8649b0de731922b9a3ebf1552b5403611d \ No newline at end of file From 0b4a4f4812d30e88583c879206f1418aaa6afc59 Mon Sep 17 00:00:00 2001 From: Cole White <42356806+shdubsh@users.noreply.github.com> Date: Fri, 3 Jun 2022 12:24:46 -0600 Subject: [PATCH 45/75] move bash flag to set statement (#3494) Passing bash with flags to the first argument of /usr/bin/env requires its own flag to interpret it correctly. Rather than use `env -S` to split the argument, have the script `set -e` to enable the same behavior explicitly in preinst and postinst scripts. Also set `-o pipefail` for consistency. 
Closes: #3492 Signed-off-by: Cole White --- buildSrc/src/main/resources/deb/postinst.ftl | 3 ++- buildSrc/src/main/resources/deb/preinst.ftl | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/resources/deb/postinst.ftl b/buildSrc/src/main/resources/deb/postinst.ftl index 605f620e16444..1fe98263a0fdf 100644 --- a/buildSrc/src/main/resources/deb/postinst.ftl +++ b/buildSrc/src/main/resources/deb/postinst.ftl @@ -1,2 +1,3 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e -o pipefail <% commands.each {command -> %><%= command %><% } %> diff --git a/buildSrc/src/main/resources/deb/preinst.ftl b/buildSrc/src/main/resources/deb/preinst.ftl index 605f620e16444..1fe98263a0fdf 100644 --- a/buildSrc/src/main/resources/deb/preinst.ftl +++ b/buildSrc/src/main/resources/deb/preinst.ftl @@ -1,2 +1,3 @@ -#!/usr/bin/env bash -e +#!/usr/bin/env bash +set -e -o pipefail <% commands.each {command -> %><%= command %><% } %> From 01c01ef445313a99a99358d4f9f6ced93494fe87 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 6 Jun 2022 13:36:34 -0400 Subject: [PATCH 46/75] Support use of IRSA for repository-s3 plugin credentials: added YAML Rest test case (#3499) Signed-off-by: Andriy Redko --- plugins/repository-s3/build.gradle | 69 ++++- .../opensearch/repositories/s3/S3Service.java | 27 +- .../60_repository_eks_credentials.yml | 268 ++++++++++++++++++ test/fixtures/s3-fixture/Dockerfile.eks | 25 ++ test/fixtures/s3-fixture/docker-compose.yml | 17 ++ .../java/fixture/s3/S3HttpFixtureWithEKS.java | 103 +++++++ 6 files changed, 496 insertions(+), 13 deletions(-) create mode 100644 plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml create mode 100644 test/fixtures/s3-fixture/Dockerfile.eks create mode 100644 test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index 
54a2593f4c6f4..ff6e2148fab37 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -131,6 +131,9 @@ String s3EC2BasePath = System.getenv("amazon_s3_base_path_ec2") String s3ECSBucket = System.getenv("amazon_s3_bucket_ecs") String s3ECSBasePath = System.getenv("amazon_s3_base_path_ecs") +String s3EKSBucket = System.getenv("amazon_s3_bucket_eks") +String s3EKSBasePath = System.getenv("amazon_s3_base_path_eks") + boolean s3DisableChunkedEncoding = (new Random(Long.parseUnsignedLong(BuildParams.testSeed.tokenize(':').get(0), 16))).nextBoolean() // If all these variables are missing then we are testing against the internal fixture instead, which has the following @@ -160,13 +163,15 @@ if (!s3TemporaryAccessKey && !s3TemporarySecretKey && !s3TemporaryBucket && !s3T throw new IllegalArgumentException("not all options specified to run against external S3 service as temporary credentials are present") } -if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { +if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath && !s3EKSBucket && !s3EKSBasePath) { s3EC2Bucket = 'ec2_bucket' s3EC2BasePath = 'ec2_base_path' s3ECSBucket = 'ecs_bucket' s3ECSBasePath = 'ecs_base_path' -} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath) { - throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") + s3EKSBucket = 'eks_bucket' + s3EKSBasePath = 'eks_base_path' +} else if (!s3EC2Bucket || !s3EC2BasePath || !s3ECSBucket || !s3ECSBasePath || !s3EKSBucket || !s3EKSBasePath) { + throw new IllegalArgumentException("not all options specified to run EC2/ECS/EKS tests are present") } processYamlRestTestResources { @@ -179,7 +184,9 @@ processYamlRestTestResources { 'ec2_base_path': s3EC2BasePath, 'ecs_bucket': s3ECSBucket, 'ecs_base_path': s3ECSBasePath, - 'disable_chunked_encoding': s3DisableChunkedEncoding, + 'eks_bucket': s3EKSBucket, + 'eks_base_path': s3EKSBasePath, + 
'disable_chunked_encoding': s3DisableChunkedEncoding ] inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) @@ -198,7 +205,8 @@ yamlRestTest { [ 'repository_s3/30_repository_temporary_credentials/*', 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' + 'repository_s3/50_repository_ecs_credentials/*', + 'repository_s3/60_repository_eks_credentials/*' ] ).join(",") } @@ -215,6 +223,7 @@ testClusters.yamlRestTest { testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture') testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-session-token') testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-ec2') + testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-eks') normalization { runtimeClasspath { @@ -223,12 +232,21 @@ testClusters.yamlRestTest { } } + keystore 's3.client.integration_test_eks.role_arn', "arn:aws:iam::000000000000:role/test" + keystore 's3.client.integration_test_eks.role_session_name', "s3-test" + keystore 's3.client.integration_test_eks.access_key', "access_key" + keystore 's3.client.integration_test_eks.secret_key', "secret_key" + setting 's3.client.integration_test_permanent.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture', '80')}" }, IGNORE_VALUE setting 's3.client.integration_test_temporary.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-session-token', '80')}" }, IGNORE_VALUE setting 's3.client.integration_test_ec2.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE + setting 's3.client.integration_test_eks.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}" }, IGNORE_VALUE + setting 's3.client.integration_test_eks.region', { "us-east-2" }, IGNORE_VALUE // to redirect InstanceProfileCredentialsProvider to custom auth point systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "${-> fixtureAddress('s3-fixture', 
's3-fixture-with-ec2', '80')}" }, IGNORE_VALUE + // to redirect AWSSecurityTokenServiceClient to custom auth point + systemProperty "com.amazonaws.sdk.stsEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}/eks_credentials_endpoint" }, IGNORE_VALUE } else { println "Using an external service to test the repository-s3 plugin" } @@ -250,7 +268,8 @@ if (useFixture) { systemProperty 'tests.rest.denylist', [ 'repository_s3/30_repository_temporary_credentials/*', 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' + 'repository_s3/50_repository_ecs_credentials/*', + 'repository_s3/60_repository_eks_credentials/*' ].join(",") } check.dependsOn(yamlRestTestMinio) @@ -277,7 +296,8 @@ if (useFixture) { 'repository_s3/10_basic/*', 'repository_s3/20_repository_permanent_credentials/*', 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*' + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/60_repository_eks_credentials/*' ].join(",") } check.dependsOn(yamlRestTestECS) @@ -289,6 +309,41 @@ if (useFixture) { } } +// EKS +if (useFixture) { + testFixtures.useFixture(':test:fixtures:s3-fixture', 's3-fixture-with-eks') + task yamlRestTestEKS(type: RestIntegTestTask.class) { + description = "Runs tests using the EKS repository." 
+ dependsOn('bundlePlugin') + SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); + SourceSet yamlRestTestSourceSet = sourceSets.getByName(YamlRestTestPlugin.SOURCE_SET_NAME) + setTestClassesDirs(yamlRestTestSourceSet.getOutput().getClassesDirs()) + setClasspath(yamlRestTestSourceSet.getRuntimeClasspath()) + systemProperty 'tests.rest.denylist', [ + 'repository_s3/10_basic/*', + 'repository_s3/20_repository_permanent_credentials/*', + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/50_repository_ecs_credentials/*' + ].join(",") + } + check.dependsOn(yamlRestTestEKS) + + testClusters.yamlRestTestEKS { + keystore 's3.client.integration_test_eks.role_arn', "arn:aws:iam::000000000000:role/test" + keystore 's3.client.integration_test_eks.role_session_name', "s3-test" + keystore 's3.client.integration_test_eks.access_key', "access_key" + keystore 's3.client.integration_test_eks.secret_key', "secret_key" + + setting 's3.client.integration_test_eks.endpoint', { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}" }, IGNORE_VALUE + setting 's3.client.integration_test_eks.region', { "us-east-2" }, IGNORE_VALUE + plugin tasks.bundlePlugin.archiveFile + + // to redirect AWSSecurityTokenServiceClient to custom auth point + systemProperty "com.amazonaws.sdk.stsEndpointOverride", { "${-> fixtureAddress('s3-fixture', 's3-fixture-with-eks', '80')}/eks_credentials_endpoint" }, IGNORE_VALUE + } +} + // 3rd Party Tests TaskProvider s3ThirdPartyTest = tasks.register("s3ThirdPartyTest", Test) { SourceSetContainer sourceSets = project.getExtensions().getByType(SourceSetContainer.class); diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java index 6919549874445..18bb62944dede 100644 --- 
a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3Service.java @@ -41,6 +41,7 @@ import com.amazonaws.auth.STSAssumeRoleSessionCredentialsProvider; import com.amazonaws.auth.STSAssumeRoleWithWebIdentitySessionCredentialsProvider; import com.amazonaws.client.builder.AwsClientBuilder; +import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration; import com.amazonaws.http.IdleConnectionReaper; import com.amazonaws.http.SystemPropertyTlsKeyManagersProvider; import com.amazonaws.http.conn.ssl.SdkTLSSocketFactory; @@ -82,6 +83,8 @@ class S3Service implements Closeable { private static final Logger logger = LogManager.getLogger(S3Service.class); + private static final String STS_ENDPOINT_OVERRIDE_SYSTEM_PROPERTY = "com.amazonaws.sdk.stsEndpointOverride"; + private volatile Map clientsCache = emptyMap(); /** @@ -280,13 +283,25 @@ static AWSCredentialsProvider buildCredentials(Logger logger, S3ClientSettings c AWSSecurityTokenService securityTokenService = null; final String region = Strings.hasLength(clientSettings.region) ? clientSettings.region : null; + if (region != null || basicCredentials != null) { - securityTokenService = SocketAccess.doPrivileged( - () -> AWSSecurityTokenServiceClientBuilder.standard() - .withCredentials((basicCredentials != null) ? 
new AWSStaticCredentialsProvider(basicCredentials) : null) - .withRegion(region) - .build() - ); + securityTokenService = SocketAccess.doPrivileged(() -> { + AWSSecurityTokenServiceClientBuilder builder = AWSSecurityTokenServiceClientBuilder.standard(); + + // Use similar approach to override STS endpoint as SDKGlobalConfiguration.EC2_METADATA_SERVICE_OVERRIDE_SYSTEM_PROPERTY + final String stsEndpoint = System.getProperty(STS_ENDPOINT_OVERRIDE_SYSTEM_PROPERTY); + if (region != null && stsEndpoint != null) { + builder = builder.withEndpointConfiguration(new EndpointConfiguration(stsEndpoint, region)); + } else { + builder = builder.withRegion(region); + } + + if (basicCredentials != null) { + builder = builder.withCredentials(new AWSStaticCredentialsProvider(basicCredentials)); + } + + return builder.build(); + }); } if (irsaCredentials.getIdentityTokenFile() == null) { diff --git a/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml new file mode 100644 index 0000000000000..15f2c9612a2ba --- /dev/null +++ b/plugins/repository-s3/src/yamlRestTest/resources/rest-api-spec/test/repository_s3/60_repository_eks_credentials.yml @@ -0,0 +1,268 @@ +# Integration tests for repository-s3 + +--- +setup: + + # Register repository with eks credentials + - do: + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + bucket: ${eks_bucket} + client: integration_test_eks + base_path: "${eks_base_path}" + canned_acl: private + storage_class: standard + disable_chunked_encoding: ${disable_chunked_encoding} + +--- +"Snapshot and Restore with repository-s3 using eks credentials": + + # Get repository + - do: + snapshot.get_repository: + repository: repository_eks + + - match: { repository_eks.settings.bucket : ${eks_bucket} } + - match: { repository_eks.settings.client : 
"integration_test_eks" } + - match: { repository_eks.settings.base_path : "${eks_base_path}" } + - match: { repository_eks.settings.canned_acl : "private" } + - match: { repository_eks.settings.storage_class : "standard" } + - is_false: repository_eks.settings.access_key + - is_false: repository_eks.settings.secret_key + - is_false: repository_eks.settings.session_token + - is_false: repository_eks.settings.role_arn + - is_false: repository_eks.settings.role_session_name + - is_false: repository_eks.settings.identity_token_file + + # Index documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _id: 1 + - snapshot: one + - index: + _index: docs + _id: 2 + - snapshot: one + - index: + _index: docs + _id: 3 + - snapshot: one + + - do: + count: + index: docs + + - match: {count: 3} + + # Create a first snapshot + - do: + snapshot.create: + repository: repository_eks + snapshot: snapshot-one + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-one } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.include_global_state: true } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.status: + repository: repository_eks + snapshot: snapshot-one + + - is_true: snapshots + - match: { snapshots.0.snapshot: snapshot-one } + - match: { snapshots.0.state : SUCCESS } + + # Index more documents + - do: + bulk: + refresh: true + body: + - index: + _index: docs + _id: 4 + - snapshot: two + - index: + _index: docs + _id: 5 + - snapshot: two + - index: + _index: docs + _id: 6 + - snapshot: two + - index: + _index: docs + _id: 7 + - snapshot: two + + - do: + count: + index: docs + + - match: {count: 7} + + # Create a second snapshot + - do: + snapshot.create: + repository: repository_eks + snapshot: snapshot-two + wait_for_completion: true + + - match: { snapshot.snapshot: snapshot-two } + - match: { snapshot.state : SUCCESS } + - match: { snapshot.shards.failed : 0 } + + - do: + snapshot.get: + repository: repository_eks + 
snapshot: snapshot-one,snapshot-two + + - is_true: snapshots + - match: { snapshots.0.state : SUCCESS } + - match: { snapshots.1.state : SUCCESS } + + # Delete the index + - do: + indices.delete: + index: docs + + # Restore the second snapshot + - do: + snapshot.restore: + repository: repository_eks + snapshot: snapshot-two + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 7} + + # Delete the index again + - do: + indices.delete: + index: docs + + # Restore the first snapshot + - do: + snapshot.restore: + repository: repository_eks + snapshot: snapshot-one + wait_for_completion: true + + - do: + count: + index: docs + + - match: {count: 3} + + # Remove the snapshots + - do: + snapshot.delete: + repository: repository_eks + snapshot: snapshot-two + + - do: + snapshot.delete: + repository: repository_eks + snapshot: snapshot-one + +--- +"Register a repository with a non existing bucket": + + - do: + catch: /repository_verification_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test_eks + +--- +"Register a repository with a non existing client": + + - do: + catch: /illegal_argument_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + bucket: repository_eks + client: unknown + +--- +"Register a read-only repository with a non existing bucket": + +- do: + catch: /repository_verification_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + readonly: true + bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE + client: integration_test_eks + +--- +"Register a read-only repository with a non existing client": + +- do: + catch: /illegal_argument_exception/ + snapshot.create_repository: + repository: repository_eks + body: + type: s3 + settings: + readonly: true + bucket: repository_eks + client: unknown 
+ +--- +"Get a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.get: + repository: repository_eks + snapshot: missing + +--- +"Delete a non existing snapshot": + + - do: + catch: /snapshot_missing_exception/ + snapshot.delete: + repository: repository_eks + snapshot: missing + +--- +"Restore a non existing snapshot": + + - do: + catch: /snapshot_restore_exception/ + snapshot.restore: + repository: repository_eks + snapshot: missing + wait_for_completion: true + +--- +teardown: + + # Remove our repository + - do: + snapshot.delete_repository: + repository: repository_eks diff --git a/test/fixtures/s3-fixture/Dockerfile.eks b/test/fixtures/s3-fixture/Dockerfile.eks new file mode 100644 index 0000000000000..d03960472a6a8 --- /dev/null +++ b/test/fixtures/s3-fixture/Dockerfile.eks @@ -0,0 +1,25 @@ +FROM ubuntu:18.04 + +RUN apt-get update -qqy +RUN apt-get install -qqy openjdk-11-jre-headless + +ARG fixtureClass +ARG port +ARG bucket +ARG basePath +ARG accessKey +ARG roleArn +ARG roleSessionName + +ENV S3_FIXTURE_CLASS=${fixtureClass} +ENV S3_FIXTURE_PORT=${port} +ENV S3_FIXTURE_BUCKET=${bucket} +ENV S3_FIXTURE_BASE_PATH=${basePath} +ENV S3_FIXTURE_ACCESS_KEY=${accessKey} +ENV S3_FIXTURE_ROLE_ARN=${roleArn} +ENV S3_FIXTURE_ROLE_SESSION_NAME=${roleSessionName} + +ENTRYPOINT exec java -classpath "/fixture/shared/*" \ + $S3_FIXTURE_CLASS 0.0.0.0 "$S3_FIXTURE_PORT" "$S3_FIXTURE_BUCKET" "$S3_FIXTURE_BASE_PATH" "$S3_FIXTURE_ACCESS_KEY" "$S3_FIXTURE_ROLE_ARN" "$S3_FIXTURE_ROLE_SESSION_NAME" + +EXPOSE $port diff --git a/test/fixtures/s3-fixture/docker-compose.yml b/test/fixtures/s3-fixture/docker-compose.yml index 22d101f41c318..d2b44f13c9530 100644 --- a/test/fixtures/s3-fixture/docker-compose.yml +++ b/test/fixtures/s3-fixture/docker-compose.yml @@ -92,3 +92,20 @@ services: - ./testfixtures_shared/shared:/fixture/shared ports: - "80" + + s3-fixture-with-eks: + build: + context: . 
+ args: + fixtureClass: fixture.s3.S3HttpFixtureWithEKS + port: 80 + bucket: "eks_bucket" + basePath: "eks_base_path" + accessKey: "eks_access_key" + roleArn: "eks_role_arn" + roleSessionName: "eks_role_session_name" + dockerfile: Dockerfile.eks + volumes: + - ./testfixtures_shared/shared:/fixture/shared + ports: + - "80" diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java new file mode 100644 index 0000000000000..b26c82a3cb7d4 --- /dev/null +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpFixtureWithEKS.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package fixture.s3; + +import com.sun.net.httpserver.HttpHandler; +import org.opensearch.rest.RestStatus; + +import java.nio.charset.StandardCharsets; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.Objects; + +public class S3HttpFixtureWithEKS extends S3HttpFixture { + + private S3HttpFixtureWithEKS(final String[] args) throws Exception { + super(args); + } + + @Override + protected HttpHandler createHandler(final String[] args) { + final String accessKey = Objects.requireNonNull(args[4]); + final String eksRoleArn = Objects.requireNonNull(args[5]); + final HttpHandler delegate = super.createHandler(args); + + return exchange -> { + // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html + if ("POST".equals(exchange.getRequestMethod()) && exchange.getRequestURI().getPath().startsWith("/eks_credentials_endpoint")) { + final byte[] response = buildCredentialResponse(eksRoleArn, accessKey).getBytes(StandardCharsets.UTF_8); + exchange.getResponseHeaders().add("Content-Type", "application/xml"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), response.length); + exchange.getResponseBody().write(response); + exchange.close(); + return; + } + + delegate.handle(exchange); + }; + } + + protected String buildCredentialResponse(final String roleArn, final String accessKey) { + // See please: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + return "\n" + + " \n" + + " amzn1.account.AF6RHO7KZU5XRVQJGXK6HB56KR2A\n" + + " client.5498841531868486423.1548@apps.example.com\n" + + " \n" + + " " + roleArn + "\n" + + " AROACLKWSDQRAOEXAMPLE:s3\n" + + " \n" + + " \n" + + " AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IEXAMPLE\n" + + " wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY\n" + + " " + LocalDateTime.now().plusMonths(1).atZone(ZoneId.of("UTC")).format(DateTimeFormatter.ISO_OFFSET_DATE_TIME) + "\n" + + " " + accessKey + "\n" + + " \n" + 
+ " SourceIdentityValue\n" + + " www.amazon.com\n" + + " \n" + + " \n" + + " ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE\n" + + " \n" + + ""; + } + + public static void main(final String[] args) throws Exception { + if (args == null || args.length < 6) { + throw new IllegalArgumentException("S3HttpFixtureWithEKS expects 6 arguments " + + "[address, port, bucket, base path, role arn, role session name]"); + } + final S3HttpFixtureWithEKS fixture = new S3HttpFixtureWithEKS(args); + fixture.start(); + } +} From e8d7cab5698c96e2309398a49bd22d9238e07bb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 13:40:04 -0400 Subject: [PATCH 47/75] Bump azure-storage-common from 12.15.0 to 12.16.0 in /plugins/repository-azure (#3517) * Bump azure-storage-common in /plugins/repository-azure Bumps [azure-storage-common](https://github.com/Azure/azure-sdk-for-java) from 12.15.0 to 12.16.0. - [Release notes](https://github.com/Azure/azure-sdk-for-java/releases) - [Commits](https://github.com/Azure/azure-sdk-for-java/compare/azure-storage-blob_12.15.0...azure-storage-blob_12.16.0) --- updated-dependencies: - dependency-name: com.azure:azure-storage-common dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-azure/build.gradle | 2 +- .../licenses/azure-storage-common-12.15.0.jar.sha1 | 1 - .../licenses/azure-storage-common-12.16.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 create mode 100644 plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index dd2ad78ebed04..227d7d1b68977 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -45,7 +45,7 @@ opensearchplugin { dependencies { api 'com.azure:azure-core:1.27.0' - api 'com.azure:azure-storage-common:12.15.0' + api 'com.azure:azure-storage-common:12.16.0' api 'com.azure:azure-core-http-netty:1.12.0' api "io.netty:netty-codec-dns:${versions.netty}" api "io.netty:netty-codec-socks:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 deleted file mode 100644 index 1f3adfc161c7f..0000000000000 --- a/plugins/repository-azure/licenses/azure-storage-common-12.15.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4d63ce8bbd20379c5e5262b1204ceac7b31a7743 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 b/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 new file mode 100644 index 0000000000000..ebf328aa69ee8 --- /dev/null +++ b/plugins/repository-azure/licenses/azure-storage-common-12.16.0.jar.sha1 @@ -0,0 +1 @@ +9f652b89a30269bdff6644468632726d4ba4fbd1 \ No newline at end of file From 1b5f99320d612ca4c6e8a92b31d1a7e507e0161e Mon Sep 17 00:00:00 2001 From: 
"dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jun 2022 13:54:56 -0400 Subject: [PATCH 48/75] Bump google-oauth-client from 1.33.3 to 1.34.0 in /plugins/discovery-gce (#3516) * Bump google-oauth-client from 1.33.3 to 1.34.0 in /plugins/discovery-gce Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.33.3 to 1.34.0. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.33.3...v1.34.0) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-gce/build.gradle | 2 +- .../discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 | 1 - .../discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index beae0d84685a4..983a2907e4e67 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.33.3" + api "com.google.oauth-client:google-oauth-client:1.34.0" api 
"com.google.http-client:google-http-client:${versions.google}" api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 deleted file mode 100644 index f2afaa1bc2dba..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.33.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9d445a8649b0de731922b9a3ebf1552b5403611d \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 new file mode 100644 index 0000000000000..57c5c16b34deb --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 @@ -0,0 +1 @@ +a0dc471bd498c62280120037a42d410c0e36f5d6 \ No newline at end of file From 74eda2ab989a62a8879b9cb224802bc2b24cf8b8 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Mon, 6 Jun 2022 13:41:56 -0700 Subject: [PATCH 49/75] Fix the support of RestClient Node Sniffer for version 2.x and update tests (#3487) Fix the support of RestClient Node Sniffer for OpenSearch 2.x, and update unit tests for OpenSearch. The current code contains the logic to be compatible with Elasticsearch 2.x version, which is conflict with OpenSearch 2.x, so removed that part of legacy code. * Update the script create_test_nodes_info.bash to dump the response of Nodes Info API GET _nodes/http for OpenSearch 1.0 and 2.0 version, which used for unit test. * Remove the support of Elasticsearch version 2.x for the Sniffer * Update unit test to validate the Sniffer compatible with OpenSearch 1.x and 2.x * Update the API response parser to meet the array notation (in ES 6.1 and above) for the node attributes setting. 
It will result the value of `node.attr` setting will not be parsed as array in the Sniffer, when using the Sniffer on cluster in Elasticsearch 6.0 and above. * Replace "master" node role with "cluster_manager" in unit test Signed-off-by: Tianli Feng --- .../client/sniff/OpenSearchNodesSniffer.java | 70 +----- .../OpenSearchNodesSnifferParseTests.java | 92 +++----- .../sniff/OpenSearchNodesSnifferTests.java | 16 +- ..._nodes_http.json => 1.0.0_nodes_http.json} | 79 ++++--- .../src/test/resources/2.0.0_nodes_http.json | 144 +++++++----- .../src/test/resources/5.0.0_nodes_http.json | 217 ------------------ .../src/test/resources/6.0.0_nodes_http.json | 217 ------------------ .../resources/create_test_nodes_info.bash | 56 +++-- 8 files changed, 191 insertions(+), 700 deletions(-) rename client/sniffer/src/test/resources/{7.3.0_nodes_http.json => 1.0.0_nodes_http.json} (77%) delete mode 100644 client/sniffer/src/test/resources/5.0.0_nodes_http.json delete mode 100644 client/sniffer/src/test/resources/6.0.0_nodes_http.json diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index 2829439627dbc..c1a0fcf9a8acf 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -49,6 +49,7 @@ import java.io.InputStream; import java.net.URI; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -241,74 +242,23 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th } Map> realAttributes = new HashMap<>(protoAttributes.size()); - List keys = new ArrayList<>(protoAttributes.keySet()); - for (String key : keys) { - if (key.endsWith(".0")) { - String realKey = key.substring(0, key.length() - 2); - List values = new 
ArrayList<>(); - int i = 0; - while (true) { - String value = protoAttributes.remove(realKey + "." + i); - if (value == null) { - break; - } - values.add(value); - i++; - } - realAttributes.put(realKey, unmodifiableList(values)); - } - } for (Map.Entry entry : protoAttributes.entrySet()) { - realAttributes.put(entry.getKey(), singletonList(entry.getValue())); - } - - if (version.startsWith("2.")) { - /* - * 2.x doesn't send roles, instead we try to read them from - * attributes. - */ - boolean clientAttribute = v2RoleAttributeValue(realAttributes, "client", false); - Boolean masterAttribute = v2RoleAttributeValue(realAttributes, "master", null); - Boolean dataAttribute = v2RoleAttributeValue(realAttributes, "data", null); - if ((masterAttribute == null && false == clientAttribute) || masterAttribute) { - roles.add("master"); + if (entry.getValue().startsWith("[")) { + // Convert string array to list + String value = entry.getValue(); + String[] values = value.substring(1, value.length() - 1).split(", "); + realAttributes.put(entry.getKey(), unmodifiableList(Arrays.asList(values))); + } else { + realAttributes.put(entry.getKey(), singletonList(entry.getValue())); } - if ((dataAttribute == null && false == clientAttribute) || dataAttribute) { - roles.add("data"); - } - } else { - assert sawRoles : "didn't see roles for [" + nodeId + "]"; } + + assert sawRoles : "didn't see roles for [" + nodeId + "]"; assert boundHosts.contains(publishedHost) : "[" + nodeId + "] doesn't make sense! publishedHost should be in boundHosts"; logger.trace("adding node [" + nodeId + "]"); return new Node(publishedHost, boundHosts, name, version, new Roles(roles), unmodifiableMap(realAttributes)); } - /** - * Returns {@code defaultValue} if the attribute didn't come back, - * {@code true} or {@code false} if it did come back as - * either of those, or throws an IOException if the attribute - * came back in a strange way. 
- */ - private static Boolean v2RoleAttributeValue(Map> attributes, String name, Boolean defaultValue) - throws IOException { - List valueList = attributes.remove(name); - if (valueList == null) { - return defaultValue; - } - if (valueList.size() != 1) { - throw new IOException("expected only a single attribute value for [" + name + "] but got " + valueList); - } - switch (valueList.get(0)) { - case "true": - return true; - case "false": - return false; - default: - throw new IOException("expected [" + name + "] to be either [true] or [false] but was [" + valueList.get(0) + "]"); - } - } - /** * The supported host schemes. */ diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java index a9ff47eab5366..58b60ac13dee8 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java @@ -45,8 +45,8 @@ import java.io.IOException; import java.io.InputStream; import java.util.Arrays; -import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -85,59 +85,31 @@ private void checkFile(String file, Node... 
expected) throws IOException { } } - public void test2x() throws IOException { - checkFile( - "2.0.0_nodes_http.json", - node(9200, "m1", "2.0.0", true, false, false), - node(9201, "m2", "2.0.0", true, true, false), - node(9202, "m3", "2.0.0", true, false, false), - node(9203, "d1", "2.0.0", false, true, false), - node(9204, "d2", "2.0.0", false, true, false), - node(9205, "d3", "2.0.0", false, true, false), - node(9206, "c1", "2.0.0", false, false, false), - node(9207, "c2", "2.0.0", false, false, false) - ); - } - - public void test5x() throws IOException { - checkFile( - "5.0.0_nodes_http.json", - node(9200, "m1", "5.0.0", true, false, true), - node(9201, "m2", "5.0.0", true, true, true), - node(9202, "m3", "5.0.0", true, false, true), - node(9203, "d1", "5.0.0", false, true, true), - node(9204, "d2", "5.0.0", false, true, true), - node(9205, "d3", "5.0.0", false, true, true), - node(9206, "c1", "5.0.0", false, false, true), - node(9207, "c2", "5.0.0", false, false, true) - ); - } - - public void test6x() throws IOException { + public void test1x() throws IOException { checkFile( - "6.0.0_nodes_http.json", - node(9200, "m1", "6.0.0", true, false, true), - node(9201, "m2", "6.0.0", true, true, true), - node(9202, "m3", "6.0.0", true, false, true), - node(9203, "d1", "6.0.0", false, true, true), - node(9204, "d2", "6.0.0", false, true, true), - node(9205, "d3", "6.0.0", false, true, true), - node(9206, "c1", "6.0.0", false, false, true), - node(9207, "c2", "6.0.0", false, false, true) + "1.0.0_nodes_http.json", + node(9200, "m1", "1.0.0", "master", "ingest"), + node(9201, "m2", "1.0.0", "master", "data", "ingest"), + node(9202, "m3", "1.0.0", "master", "ingest"), + node(9203, "d1", "1.0.0", "data", "ingest"), + node(9204, "d2", "1.0.0", "data", "ingest"), + node(9205, "d3", "1.0.0", "data", "ingest"), + node(9206, "c1", "1.0.0", "ingest"), + node(9207, "c2", "1.0.0", "ingest") ); } - public void test7x() throws IOException { + public void test2x() throws 
IOException { checkFile( - "7.3.0_nodes_http.json", - node(9200, "m1", "7.3.0", "master", "ingest"), - node(9201, "m2", "7.3.0", "master", "data", "ingest"), - node(9202, "m3", "7.3.0", "master", "ingest"), - node(9203, "d1", "7.3.0", "data", "ingest", "ml"), - node(9204, "d2", "7.3.0", "data", "ingest"), - node(9205, "d3", "7.3.0", "data", "ingest"), - node(9206, "c1", "7.3.0", "ingest"), - node(9207, "c2", "7.3.0", "ingest") + "2.0.0_nodes_http.json", + node(9200, "m1", "2.0.0", "cluster_manager", "ingest"), + node(9201, "m2", "2.0.0", "cluster_manager", "data", "ingest"), + node(9202, "m3", "2.0.0", "cluster_manager", "ingest"), + node(9203, "d1", "2.0.0", "data", "ingest"), + node(9204, "d2", "2.0.0", "data", "ingest"), + node(9205, "d3", "2.0.0", "data", "ingest"), + node(9206, "c1", "2.0.0", "ingest"), + node(9207, "c2", "2.0.0", "ingest") ); } @@ -163,20 +135,6 @@ public void testParsingPublishAddressWithES7Format() throws IOException { assertEquals("http", nodes.get(0).getHost().getSchemeName()); } - private Node node(int port, String name, String version, boolean master, boolean data, boolean ingest) { - final Set roles = new TreeSet<>(); - if (master) { - roles.add("master"); - } - if (data) { - roles.add("data"); - } - if (ingest) { - roles.add("ingest"); - } - return node(port, name, version, roles); - } - private Node node(int port, String name, String version, String... roles) { return node(port, name, version, new TreeSet<>(Arrays.asList(roles))); } @@ -184,11 +142,15 @@ private Node node(int port, String name, String version, String... 
roles) { private Node node(int port, String name, String version, Set roles) { HttpHost host = new HttpHost("127.0.0.1", port); Set boundHosts = new HashSet<>(2); - boundHosts.add(host); boundHosts.add(new HttpHost("[::1]", port)); - Map> attributes = new HashMap<>(); + boundHosts.add(host); + Map> attributes = new LinkedHashMap<>(); // LinkedHashMap to preserve insertion order attributes.put("dummy", singletonList("everyone_has_me")); attributes.put("number", singletonList(name.substring(1))); + if (!version.startsWith("1.0") && !version.startsWith("1.1")) { + // Shard Indexing Pressure feature is added in version 1.2.0 + attributes.put("shard_indexing_pressure_enabled", singletonList(Boolean.TRUE.toString())); + } attributes.put("array", Arrays.asList(name.substring(0, 1), name.substring(1))); return new Node(host, boundHosts, name, version, new Roles(new TreeSet<>(roles)), attributes); } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 993844524c2d1..8cc6f5f006861 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -234,7 +234,7 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc final Set nodeRoles = new TreeSet<>(); if (randomBoolean()) { - nodeRoles.add("master"); + nodeRoles.add("cluster_manager"); } if (randomBoolean()) { nodeRoles.add("data"); @@ -283,12 +283,12 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc generator.writeEndObject(); } - List roles = Arrays.asList(new String[] { "master", "data", "ingest" }); + List roles = Arrays.asList(new String[] { "cluster_manager", "data", "ingest" }); Collections.shuffle(roles, getRandom()); generator.writeArrayFieldStart("roles"); for (String role : 
roles) { - if ("master".equals(role) && node.getRoles().isMasterEligible()) { - generator.writeString("master"); + if ("cluster_manager".equals(role) && node.getRoles().isMasterEligible()) { + generator.writeString("cluster_manager"); } if ("data".equals(role) && node.getRoles().isData()) { generator.writeString("data"); @@ -307,13 +307,7 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc if (numAttributes > 0) { generator.writeObjectFieldStart("attributes"); for (Map.Entry> entry : attributes.entrySet()) { - if (entry.getValue().size() == 1) { - generator.writeStringField(entry.getKey(), entry.getValue().get(0)); - } else { - for (int v = 0; v < entry.getValue().size(); v++) { - generator.writeStringField(entry.getKey() + "." + v, entry.getValue().get(v)); - } - } + generator.writeStringField(entry.getKey(), entry.getValue().toString()); } generator.writeEndObject(); } diff --git a/client/sniffer/src/test/resources/7.3.0_nodes_http.json b/client/sniffer/src/test/resources/1.0.0_nodes_http.json similarity index 77% rename from client/sniffer/src/test/resources/7.3.0_nodes_http.json rename to client/sniffer/src/test/resources/1.0.0_nodes_http.json index 9e85511fadb62..5557f0c7955c2 100644 --- a/client/sniffer/src/test/resources/7.3.0_nodes_http.json +++ b/client/sniffer/src/test/resources/1.0.0_nodes_http.json @@ -11,17 +11,17 @@ "transport_address": "127.0.0.1:9300", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ - "master", - "ingest" + "ingest", + "master" ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "m", - "array.1": "1" + "array": "[m, 1]" }, "http": { "bound_address": [ @@ -37,18 +37,18 @@ "transport_address": "127.0.0.1:9301", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + 
"build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ - "master", "data", - "ingest" + "ingest", + "master" ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "m", - "array.1": "2" + "array": "[m, 2]" }, "http": { "bound_address": [ @@ -64,17 +64,17 @@ "transport_address": "127.0.0.1:9302", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ - "master", - "ingest" + "ingest", + "master" ], "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "m", - "array.1": "3" + "array": "[m, 3]" }, "http": { "bound_address": [ @@ -90,18 +90,17 @@ "transport_address": "127.0.0.1:9303", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "data", - "ingest", - "ml" + "ingest" ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "d", - "array.1": "1" + "array": "[d, 1]" }, "http": { "bound_address": [ @@ -117,8 +116,9 @@ "transport_address": "127.0.0.1:9304", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "data", "ingest" @@ -126,8 +126,7 @@ "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "d", - "array.1": "2" + "array": "[d, 2]" }, "http": { "bound_address": [ @@ -143,8 +142,9 @@ "transport_address": "127.0.0.1:9305", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "data", "ingest" @@ -152,8 +152,7 @@ "attributes": { "dummy": "everyone_has_me", 
"number": "3", - "array.0": "d", - "array.1": "3" + "array": "[d, 3]" }, "http": { "bound_address": [ @@ -169,16 +168,16 @@ "transport_address": "127.0.0.1:9306", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "ingest" ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "c", - "array.1": "1" + "array": "[c, 1]" }, "http": { "bound_address": [ @@ -194,16 +193,16 @@ "transport_address": "127.0.0.1:9307", "host": "127.0.0.1", "ip": "127.0.0.1", - "version": "7.3.0", - "build_hash": "8f0685b", + "version": "1.0.0", + "build_type": "tar", + "build_hash": "34550c5b17124ddc59458ef774f6b43a086522e3", "roles": [ "ingest" ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "c", - "array.1": "2" + "array": "[c, 2]" }, "http": { "bound_address": [ diff --git a/client/sniffer/src/test/resources/2.0.0_nodes_http.json b/client/sniffer/src/test/resources/2.0.0_nodes_http.json index 4e8dbbcba58c4..e1b75d460d7d9 100644 --- a/client/sniffer/src/test/resources/2.0.0_nodes_http.json +++ b/client/sniffer/src/test/resources/2.0.0_nodes_http.json @@ -1,4 +1,9 @@ { + "_nodes": { + "total": 8, + "successful": 8, + "failed": 0 + }, "cluster_name": "opensearch", "nodes": { "qr-SOrELSaGW8SlU8nflBw": { @@ -7,20 +12,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9200", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "cluster_manager", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "m", - "data": "false", - "array.1": "1", - "master": "true" + "array": "[m, 1]", + "shard_indexing_pressure_enabled": "true" }, "http": { "bound_address": [ - "127.0.0.1:9200", - "[::1]:9200" + "[::1]:9200", + "127.0.0.1:9200" ], "publish_address": 
"127.0.0.1:9200", "max_content_length_in_bytes": 104857600 @@ -32,19 +39,23 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9201", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "cluster_manager", + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "m", - "array.1": "2", - "master": "true" + "shard_indexing_pressure_enabled": "true", + "array": "[m, 2]" }, "http": { "bound_address": [ - "127.0.0.1:9201", - "[::1]:9201" + "[::1]:9201", + "127.0.0.1:9201" ], "publish_address": "127.0.0.1:9201", "max_content_length_in_bytes": 104857600 @@ -56,20 +67,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9202", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "cluster_manager", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "m", - "data": "false", - "array.1": "3", - "master": "true" + "shard_indexing_pressure_enabled": "true", + "array": "[m, 3]" }, "http": { "bound_address": [ - "127.0.0.1:9202", - "[::1]:9202" + "[::1]:9202", + "127.0.0.1:9202" ], "publish_address": "127.0.0.1:9202", "max_content_length_in_bytes": 104857600 @@ -81,19 +94,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9203", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "d", - "array.1": "1", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[d, 1]" }, "http": { "bound_address": [ - "127.0.0.1:9203", - "[::1]:9203" + "[::1]:9203", + "127.0.0.1:9203" ], "publish_address": "127.0.0.1:9203", "max_content_length_in_bytes": 104857600 @@ -105,19 +121,22 @@ 
"host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9204", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "d", - "array.1": "2", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[d, 2]" }, "http": { "bound_address": [ - "127.0.0.1:9204", - "[::1]:9204" + "[::1]:9204", + "127.0.0.1:9204" ], "publish_address": "127.0.0.1:9204", "max_content_length_in_bytes": 104857600 @@ -129,19 +148,22 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9205", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "data", + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "3", - "array.0": "d", - "array.1": "3", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[d, 3]" }, "http": { "bound_address": [ - "127.0.0.1:9205", - "[::1]:9205" + "[::1]:9205", + "127.0.0.1:9205" ], "publish_address": "127.0.0.1:9205", "max_content_length_in_bytes": 104857600 @@ -153,20 +175,21 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": "127.0.0.1:9206", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "1", - "array.0": "c", - "data": "false", - "array.1": "1", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[c, 1]" }, "http": { "bound_address": [ - "127.0.0.1:9206", - "[::1]:9206" + "[::1]:9206", + "127.0.0.1:9206" ], "publish_address": "127.0.0.1:9206", "max_content_length_in_bytes": 104857600 @@ -178,20 +201,21 @@ "host": "127.0.0.1", "ip": "127.0.0.1", "version": "2.0.0", - "build": "de54438", - "http_address": 
"127.0.0.1:9207", + "build_type": "tar", + "build_hash": "bae3b4e4178c20ac24fece8e82099abe3b2630d0", + "roles": [ + "ingest" + ], "attributes": { "dummy": "everyone_has_me", "number": "2", - "array.0": "c", - "data": "false", - "array.1": "2", - "master": "false" + "shard_indexing_pressure_enabled": "true", + "array": "[c, 2]" }, "http": { "bound_address": [ - "127.0.0.1:9207", - "[::1]:9207" + "[::1]:9207", + "127.0.0.1:9207" ], "publish_address": "127.0.0.1:9207", "max_content_length_in_bytes": 104857600 diff --git a/client/sniffer/src/test/resources/5.0.0_nodes_http.json b/client/sniffer/src/test/resources/5.0.0_nodes_http.json deleted file mode 100644 index 4eb0443bc09d8..0000000000000 --- a/client/sniffer/src/test/resources/5.0.0_nodes_http.json +++ /dev/null @@ -1,217 +0,0 @@ -{ - "_nodes": { - "total": 8, - "successful": 8, - "failed": 0 - }, - "cluster_name": "opensearch", - "nodes": { - "0S4r3NurTYSFSb8R9SxwWA": { - "name": "m1", - "transport_address": "127.0.0.1:9300", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "m", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9200", - "127.0.0.1:9200" - ], - "publish_address": "127.0.0.1:9200", - "max_content_length_in_bytes": 104857600 - } - }, - "k_CBrMXARkS57Qb5-3Mw5g": { - "name": "m2", - "transport_address": "127.0.0.1:9301", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "master", - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "m", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9201", - "127.0.0.1:9201" - ], - "publish_address": "127.0.0.1:9201", - "max_content_length_in_bytes": 104857600 - } - }, - "6eynRPQ1RleJTeGDuTR9mw": { - "name": "m3", - "transport_address": "127.0.0.1:9302", - "host": 
"127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "m", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address": "127.0.0.1:9202", - "max_content_length_in_bytes": 104857600 - } - }, - "cbGC-ay1QNWaESvEh5513w": { - "name": "d1", - "transport_address": "127.0.0.1:9303", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "d", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9203", - "127.0.0.1:9203" - ], - "publish_address": "127.0.0.1:9203", - "max_content_length_in_bytes": 104857600 - } - }, - "LexndPpXR2ytYsU5fTElnQ": { - "name": "d2", - "transport_address": "127.0.0.1:9304", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "d", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9204", - "127.0.0.1:9204" - ], - "publish_address": "127.0.0.1:9204", - "max_content_length_in_bytes": 104857600 - } - }, - "SbNG1DKYSBu20zfOz2gDZQ": { - "name": "d3", - "transport_address": "127.0.0.1:9305", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "d", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9205", - "127.0.0.1:9205" - ], - "publish_address": "127.0.0.1:9205", - "max_content_length_in_bytes": 104857600 - } - }, - "fM4H-m2WTDWmsGsL7jIJew": { - "name": "c1", - "transport_address": "127.0.0.1:9306", - "host": "127.0.0.1", - "ip": "127.0.0.1", - 
"version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "c", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9206", - "127.0.0.1:9206" - ], - "publish_address": "127.0.0.1:9206", - "max_content_length_in_bytes": 104857600 - } - }, - "pFoh7d0BTbqqI3HKd9na5A": { - "name": "c2", - "transport_address": "127.0.0.1:9307", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "5.0.0", - "build_hash": "253032b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "c", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9207", - "127.0.0.1:9207" - ], - "publish_address": "127.0.0.1:9207", - "max_content_length_in_bytes": 104857600 - } - } - } -} diff --git a/client/sniffer/src/test/resources/6.0.0_nodes_http.json b/client/sniffer/src/test/resources/6.0.0_nodes_http.json deleted file mode 100644 index adc8f535d6aad..0000000000000 --- a/client/sniffer/src/test/resources/6.0.0_nodes_http.json +++ /dev/null @@ -1,217 +0,0 @@ -{ - "_nodes": { - "total": 8, - "successful": 8, - "failed": 0 - }, - "cluster_name": "opensearch", - "nodes": { - "ikXK_skVTfWkhONhldnbkw": { - "name": "m1", - "transport_address": "127.0.0.1:9300", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "m", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9200", - "127.0.0.1:9200" - ], - "publish_address": "127.0.0.1:9200", - "max_content_length_in_bytes": 104857600 - } - }, - "TMHa34w4RqeuYoHCfJGXZg": { - "name": "m2", - "transport_address": "127.0.0.1:9301", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "master", - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - 
"number": "2", - "array.0": "m", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9201", - "127.0.0.1:9201" - ], - "publish_address": "127.0.0.1:9201", - "max_content_length_in_bytes": 104857600 - } - }, - "lzaMRJTVT166sgVZdQ5thA": { - "name": "m3", - "transport_address": "127.0.0.1:9302", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "master", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "m", - "array.1": "3" - }, - "http": { - "bound_address": [ - "[::1]:9202", - "127.0.0.1:9202" - ], - "publish_address": "127.0.0.1:9202", - "max_content_length_in_bytes": 104857600 - } - }, - "tGP5sUecSd6BLTWk1NWF8Q": { - "name": "d1", - "transport_address": "127.0.0.1:9303", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "d", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9203", - "127.0.0.1:9203" - ], - "publish_address": "127.0.0.1:9203", - "max_content_length_in_bytes": 104857600 - } - }, - "c1UgW5ROTkSa2YnM_T56tw": { - "name": "d2", - "transport_address": "127.0.0.1:9304", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "d", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9204", - "127.0.0.1:9204" - ], - "publish_address": "127.0.0.1:9204", - "max_content_length_in_bytes": 104857600 - } - }, - "QM9yjqjmS72MstpNYV_trg": { - "name": "d3", - "transport_address": "127.0.0.1:9305", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "data", - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "3", - "array.0": "d", - "array.1": 
"3" - }, - "http": { - "bound_address": [ - "[::1]:9205", - "127.0.0.1:9205" - ], - "publish_address": "127.0.0.1:9205", - "max_content_length_in_bytes": 104857600 - } - }, - "wLtzAssoQYeX_4TstgCj0Q": { - "name": "c1", - "transport_address": "127.0.0.1:9306", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "1", - "array.0": "c", - "array.1": "1" - }, - "http": { - "bound_address": [ - "[::1]:9206", - "127.0.0.1:9206" - ], - "publish_address": "127.0.0.1:9206", - "max_content_length_in_bytes": 104857600 - } - }, - "ONOzpst8TH-ZebG7fxGwaA": { - "name": "c2", - "transport_address": "127.0.0.1:9307", - "host": "127.0.0.1", - "ip": "127.0.0.1", - "version": "6.0.0", - "build_hash": "8f0685b", - "roles": [ - "ingest" - ], - "attributes": { - "dummy": "everyone_has_me", - "number": "2", - "array.0": "c", - "array.1": "2" - }, - "http": { - "bound_address": [ - "[::1]:9207", - "127.0.0.1:9207" - ], - "publish_address": "127.0.0.1:9207", - "max_content_length_in_bytes": 104857600 - } - } - } -} diff --git a/client/sniffer/src/test/resources/create_test_nodes_info.bash b/client/sniffer/src/test/resources/create_test_nodes_info.bash index 06350be4ba205..78e67562d815b 100644 --- a/client/sniffer/src/test/resources/create_test_nodes_info.bash +++ b/client/sniffer/src/test/resources/create_test_nodes_info.bash @@ -21,15 +21,11 @@ work=$(mktemp -d) pushd ${work} >> /dev/null echo Working in ${work} -wget https://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/tar/elasticsearch/2.0.0/elasticsearch-2.0.0.tar.gz -wget https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-5.0.0.tar.gz -wget https://artifacts-no-kpi.elastic.co/downloads/elasticsearch/elasticsearch-6.0.0.tar.gz -sha1sum -c - << __SHAs -e369d8579bd3a2e8b5344278d5043f19f14cac88 elasticsearch-2.0.0.tar.gz 
-d25f6547bccec9f0b5ea7583815f96a6f50849e0 elasticsearch-5.0.0.tar.gz -__SHAs +wget https://artifacts.opensearch.org/releases/core/opensearch/1.0.0/opensearch-min-1.0.0-linux-x64.tar.gz +wget https://artifacts.opensearch.org/releases/core/opensearch/2.0.0/opensearch-min-2.0.0-linux-x64.tar.gz sha512sum -c - << __SHAs -25bb622d2fc557d8b8eded634a9b333766f7b58e701359e1bcfafee390776eb323cb7ea7a5e02e8803e25d8b1d3aabec0ec1b0cf492d0bab5689686fe440181c elasticsearch-6.0.0.tar.gz +96595cd3b173188d8a3f0f18d7bfa2457782839d06b519f01a99b4dc0280f81b08ba1d01bd1aef454feaa574cbbd04d3ad9a1f6a829182627e914f3e58f2899f opensearch-min-1.0.0-linux-x64.tar.gz +5b91456a2eb517bc48f13bec0a3f9c220494bd5fe979946dce6cfc3fa7ca00b003927157194d62f2a1c36c850eda74c70b93fbffa91bb082b2e1a17985d50976 opensearch-min-2.0.0-linux-x64.tar.gz __SHAs @@ -40,37 +36,38 @@ function do_version() { mkdir -p ${version} pushd ${version} >> /dev/null - tar xf ../opensearch-${version}.tar.gz + tar xf ../opensearch-min-${version}-linux-x64.tar.gz local http_port=9200 for node in ${nodes}; do mkdir ${node} cp -r opensearch-${version}/* ${node} - local master=$([[ "$node" =~ ^m.* ]] && echo true || echo false) - local data=$([[ "$node" =~ ^d.* ]] && echo true || echo false) - # m2 is always master and data for these test just so we have a node like that - data=$([[ "$node" == 'm2' ]] && echo true || echo ${data}) - local attr=$([ ${version} == '2.0.0' ] && echo '' || echo '.attr') + local cluster_manager=$([[ "$node" =~ ^m.* ]] && echo 'cluster_manager,' || echo '') + # 'cluster_manager' role is add in version 2.x and above, use 'master' role in 1.x + cluster_manager=$([[ ! "$cluster_manager" == '' && ${version} =~ ^1\. 
]] && echo 'master,' || echo ${cluster_manager}) + local data=$([[ "$node" =~ ^d.* ]] && echo 'data,' || echo '') + # m2 is always cluster_manager and data for these test just so we have a node like that + data=$([[ "$node" == 'm2' ]] && echo 'data,' || echo ${data}) + # setting name 'cluster.initial_cluster_manager_nodes' is add in version 2.x and above + local initial_cluster_manager_nodes=$([[ ${version} =~ ^1\. ]] && echo 'initial_master_nodes' || echo 'initial_cluster_manager_nodes') local transport_port=$((http_port+100)) - cat >> ${node}/config/opensearch.yml << __ES_YML + cat >> ${node}/config/opensearch.yml << __OPENSEARCH_YML node.name: ${node} -node.master: ${master} -node.data: ${data} -node${attr}.dummy: everyone_has_me -node${attr}.number: ${node:1} -node${attr}.array: [${node:0:1}, ${node:1}] +node.roles: [${cluster_manager} ${data} ingest] +node.attr.dummy: everyone_has_me +node.attr.number: ${node:1} +node.attr.array: [${node:0:1}, ${node:1}] http.port: ${http_port} transport.tcp.port: ${transport_port} -discovery.zen.minimum_master_nodes: 3 -discovery.zen.ping.unicast.hosts: ['localhost:9300','localhost:9301','localhost:9302'] -__ES_YML +cluster.${initial_cluster_manager_nodes}: [m1, m2, m3] +discovery.seed_hosts: ['localhost:9300','localhost:9301','localhost:9302'] +__OPENSEARCH_YML - if [ ${version} != '2.0.0' ]; then - perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options - fi + # configure the JVM heap size + perl -pi -e 's/-Xm([sx]).+/-Xm${1}512m/g' ${node}/config/jvm.options echo "starting ${version}/${node}..." 
- ${node}/bin/opensearch -d -p ${node}/pidfile + ${node}/bin/opensearch -d -p pidfile ((http_port++)) done @@ -99,9 +96,8 @@ __ES_YML popd >> /dev/null } -JAVA_HOME=$JAVA8_HOME do_version 2.0.0 -JAVA_HOME=$JAVA8_HOME do_version 5.0.0 -JAVA_HOME=$JAVA8_HOME do_version 6.0.0 +JAVA_HOME=$JAVA11_HOME do_version 1.0.0 +JAVA_HOME=$JAVA11_HOME do_version 2.0.0 popd >> /dev/null rm -rf ${work} From 81d71d25a2f7557ce2f0c9267b52324ccb069cf1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 12:19:08 -0700 Subject: [PATCH 50/75] Bump com.diffplug.spotless from 6.6.1 to 6.7.0 (#3513) Bumps com.diffplug.spotless from 6.6.1 to 6.7.0. --- updated-dependencies: - dependency-name: com.diffplug.spotless dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/build.gradle b/build.gradle index 9f2c483fa8de0..8b32d3393fe81 100644 --- a/build.gradle +++ b/build.gradle @@ -48,7 +48,7 @@ plugins { id 'lifecycle-base' id 'opensearch.docker-support' id 'opensearch.global-build-info' - id "com.diffplug.spotless" version "6.6.1" apply false + id "com.diffplug.spotless" version "6.7.0" apply false id "org.gradle.test-retry" version "1.4.0" apply false id "test-report-aggregation" id 'jacoco-report-aggregation' From 43c48cadd6a05dd645dfb1e1553afb2843d0d449 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 17:05:19 -0700 Subject: [PATCH 51/75] Bump guava from 18.0 to 23.0 in /plugins/ingest-attachment (#3357) * Bump guava from 18.0 to 23.0 in /plugins/ingest-attachment Bumps [guava](https://github.com/google/guava) from 18.0 to 23.0. 
- [Release notes](https://github.com/google/guava/releases) - [Commits](https://github.com/google/guava/compare/v18.0...v23.0) --- updated-dependencies: - dependency-name: com.google.guava:guava dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Add more ingorance of using internal java API sun.misc.Unsafe Signed-off-by: Tianli Feng Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Tianli Feng --- plugins/ingest-attachment/build.gradle | 14 ++++++++++++-- .../ingest-attachment/licenses/guava-18.0.jar.sha1 | 1 - .../ingest-attachment/licenses/guava-23.0.jar.sha1 | 1 + 3 files changed, 13 insertions(+), 3 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 1452d871a605b..456b652ff82a3 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -54,7 +54,7 @@ dependencies { api "org.apache.tika:tika-langdetect-optimaize:${versions.tika}" // Optimaize libraries/dependencies runtimeOnly "com.optimaize.languagedetector:language-detector:0.6" - runtimeOnly 'com.google.guava:guava:18.0' + runtimeOnly 'com.google.guava:guava:23.0' // Other dependencies api 'org.tukaani:xz:1.9' api 'commons-io:commons-io:2.11.0' @@ -119,11 +119,21 @@ forbiddenPatterns { thirdPartyAudit { ignoreMissingClasses() ignoreViolations( + // uses internal java api: sun.misc.Unsafe 'com.google.common.cache.Striped64', 'com.google.common.cache.Striped64$1', 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 
'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 'com.google.common.hash.Striped64', + 'com.google.common.hash.Striped64$1', + 'com.google.common.hash.Striped64$Cell', 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', - 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1' + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1' ) } diff --git a/plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 b/plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 deleted file mode 100644 index 87f7acb8158ec..0000000000000 --- a/plugins/ingest-attachment/licenses/guava-18.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cce0823396aa693798f8882e64213b1772032b09 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 b/plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 new file mode 100644 index 0000000000000..197134628d939 --- /dev/null +++ b/plugins/ingest-attachment/licenses/guava-23.0.jar.sha1 @@ -0,0 +1 @@ +c947004bb13d18182be60077ade044099e4f26f1 \ No newline at end of file From 8096fc725a24a19908ccdda230b5eff710c19be1 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 7 Jun 2022 18:15:36 -0700 Subject: [PATCH 52/75] Added bwc version 2.0.1 (#3452) Signed-off-by: Kunal Kotwani Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 245c112356178..0461af4966e92 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -41,4 +41,5 @@ BWC_VERSION: - "1.3.2" - 
"1.3.3" - "2.0.0" + - "2.0.1" - "2.1.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index a69c1f3c3bcb1..04907ee5d054b 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -88,6 +88,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; From 3a3d513bef4c4abb82cae338e2440a24baeba100 Mon Sep 17 00:00:00 2001 From: Xue Zhou <85715413+xuezhou25@users.noreply.github.com> Date: Thu, 9 Jun 2022 19:00:29 +0000 Subject: [PATCH 53/75] Add release notes for 1.3.3 (#3549) Signed-off-by: Xue Zhou --- release-notes/opensearch.release-notes-1.3.3.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 release-notes/opensearch.release-notes-1.3.3.md diff --git a/release-notes/opensearch.release-notes-1.3.3.md b/release-notes/opensearch.release-notes-1.3.3.md new file mode 100644 index 0000000000000..fd80e526166f0 --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.3.md @@ -0,0 +1,10 @@ +## Version 1.3.3 Release Notes + +### Upgrades +* Upgrade google-oauth-client to 1.33.3 ([#3502](https://github.com/opensearch-project/OpenSearch/pull/3502)) +* Upgrade log4j-core to 2.17.1 
([#3508](https://github.com/opensearch-project/OpenSearch/pull/3508)) +* Upgrade jdom2 to 2.0.6.1 ([#3509](https://github.com/opensearch-project/OpenSearch/pull/3509)) + +### Bug Fixes +* Fixing org.opensearch.monitor.os.OsProbeTests::testLogWarnCpuMessageOnlyOnes when CGroups are not available ([#2101](https://github.com/opensearch-project/OpenSearch/pull/2101)) +* Fixing org.opensearch.monitor.os.OsProbeTests > testLogWarnCpuMessageOnlyOnes when cgroups are available but cgroup stats is not ([#3448](https://github.com/opensearch-project/OpenSearch/pull/3448)) From 2f2e1ca6cd19d8bc524f8eb3d0f059575bc1f3c2 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Thu, 9 Jun 2022 16:39:00 -0500 Subject: [PATCH 54/75] [Upgrade] Lucene-9.3.0-snapshot-b7231bb (#3537) Upgrades to latest snapshot of lucene 9.3; including reducing maxFullFlushMergeWaitMillis in LuceneTest.testWrapLiveDocsNotExposeAbortedDocuments to 0 ms to ensure aborted docs are not merged away in the test with the new mergeOnRefresh default policy. 
Signed-off-by: Nicholas Walter Knize --- buildSrc/version.properties | 2 +- .../lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 | 1 - 
server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 | 1 - .../licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 | 1 - server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 | 1 + .../test/java/org/opensearch/common/lucene/LuceneTests.java | 3 +++ 46 files changed, 26 insertions(+), 23 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 
plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 
server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index fe2cfe6a63ee6..87dbad73229b4 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.3.0-snapshot-823df23 +lucene = 9.3.0-snapshot-b7231bb bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 540a48bf7415f..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -610ec9bb8001a2d2ea88e3384eb516017504139e \ No newline at end of file 
diff --git a/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..f527a3b68b6a3 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +57ae445a0050ad492ef494b692b486dfe718b564 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 7bc128d4562fa..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -43f2ea45a2d12b4c75c7ac11b85ec736c73bc07f \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..51cbf51d90626 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +b10e5bdae6df879b770060e0006bbc1c780c886d \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index bad2a0bdcfa2a..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fb46807684a5b0e28a02b2a1ea3d528e4c25aa05 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..ff57bbc283385 --- /dev/null 
+++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +f0ddc3072fd16012dafc74928f87fdfd7669ea4a \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index b2c62bcbbade1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -be94b15085b6390ed64a8e8a4f5afbcb2d4d5181 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..13dd3c8a8bb24 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d761fa983d9c21099c433731d5519651737750c1 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index c7f8fd797c589..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3a6f705a7df2007f5583215420da0725f844ac4f \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..5cba6f6700769 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +43abbbe7c3c789ac448f898981acf54e487407a6 \ No newline at end of file diff --git 
a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 28424c2dd1c7a..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea9931a34288fa6cbd894e244a101e86926ebfb8 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..62097dc39ae20 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +55df9442a35fe09d4f3f98bd2dda4d1a1dbfd996 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index d7c4b20a29db2..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c339ce0a3b02d92a804081f5ff44b99f7a468caf \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..1666e4aae21a6 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +102cbb1d619b96e1f3e524520658b9327a93aba1 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 
100644 index b4a9090408165..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a8faa5faa38ab8f545e12cf3dd914e934a2f2bfe \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..3a2d3cec6b952 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +0d5dc4dfb74d698e51dc9b95268faf6dde4b0815 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index ab4abfd7d6a49..0000000000000 --- a/server/licenses/lucene-analysis-common-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dbb5828e79780989a8758b7cbb5a1aacac0004f \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..4cb292ad20c1f --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +6c6a9569777e4f01c90ed840e5a04234dfcaf42e \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 8ff6a25c9547e..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -68ebd183f1e9edde9f2f37c60f784e4f03555eec \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 
b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..3878ed346c9ce --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +a7ef963f9f9f15fc5018c5fa68bae5cf65692ca9 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 2ec15eb0012c5..0000000000000 --- a/server/licenses/lucene-core-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ea3cb640597d93168765174207542c6765c1fe15 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..9f9f6be85c57c --- /dev/null +++ b/server/licenses/lucene-core-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +da113c963d62f0c8786d7c294dbbb63d5d7953ab \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 7b6c561ddeedf..0000000000000 --- a/server/licenses/lucene-grouping-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab2bcdbade5976e127c7e9393bf7a7e25a957d9a \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..92d0c41c6f4d2 --- /dev/null +++ b/server/licenses/lucene-grouping-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +54f65917cfa6c9c54cd0354ba333aa7e0f2980e5 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index b2aa53fcdfb83..0000000000000 --- 
a/server/licenses/lucene-highlighter-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -31ce6ff9188dea49dc4b4d082b498332cc7b86e7 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..ecab2abeb6220 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d73ebe32147c9a12d321c0b1273d5e5d797b705f \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 7918597d46763..0000000000000 --- a/server/licenses/lucene-join-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c387884f0bc00fb1c064754a69e1e81dff12c755 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..725fc883c272b --- /dev/null +++ b/server/licenses/lucene-join-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +797c92ffe35af37ab1783906fb93ed95a145a701 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index a87d3de9e2310..0000000000000 --- a/server/licenses/lucene-memory-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e278a2cfe1500b76da770aa29ecd487fea5f8dc3 \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..312a65edb6e24 --- /dev/null +++ b/server/licenses/lucene-memory-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +5714d64c39021c65dece8ee979d9ea39a327bb87 \ No newline at end of file diff --git 
a/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 18a165097d2be..0000000000000 --- a/server/licenses/lucene-misc-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77933cdffbcd0f56888a50fd1d9fb39cf6148f1a \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..b701384ab601d --- /dev/null +++ b/server/licenses/lucene-misc-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +4d401c55114367e574ed51e914661f0a97f91e88 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 4d148f3a840c8..0000000000000 --- a/server/licenses/lucene-queries-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d521efa3a111e2feab1a7f07a0cc944bbdcddf4 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..ec2f7508d35cc --- /dev/null +++ b/server/licenses/lucene-queries-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +0f165ff86546565d32a508c82ca80ac2840bcf38 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index c6e913767696a..0000000000000 --- a/server/licenses/lucene-queryparser-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -30d6f8f757a007248804ed5db624a125ada24154 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 
0000000000000..40a125ccada21 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d6fb5af1873628dc026e18b5438042143a9a9824 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 22b7769ee3b4d..0000000000000 --- a/server/licenses/lucene-sandbox-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8dd68761fade2dc4d2ea0d9d476a5172cfd22cd2 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..b4784be40d072 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +c48ab8982e6bf9429eded6a06d640db922eb2b69 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 22d9211a3b623..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -044ac03b461aaae4568f64948f783e87dae85a8b \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..6f39582081758 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +d757dc379fee639f54d0574443c5a6fd0b70613a \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index 66998393ed970..0000000000000 --- a/server/licenses/lucene-spatial3d-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ 
-1 +0,0 @@ -53a02ec5b0eabe7fdf97fea1b19eeca5a6cf1122 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..b5986970cb4da --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +6a4e6de9b40cd027233a3ed00774810c36457a6c \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 deleted file mode 100644 index e5aca63b21732..0000000000000 --- a/server/licenses/lucene-suggest-9.3.0-snapshot-823df23.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a57b91ee1c6f3f666dcac697ce6a7de9bd5abba7 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 b/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 new file mode 100644 index 0000000000000..682a0ee88868f --- /dev/null +++ b/server/licenses/lucene-suggest-9.3.0-snapshot-b7231bb.jar.sha1 @@ -0,0 +1 @@ +e793761c4a4292de0d52f066787ab5f3133382cd \ No newline at end of file diff --git a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java index 4c179309f16ba..776b44d346fb5 100644 --- a/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/opensearch/common/lucene/LuceneTests.java @@ -591,6 +591,9 @@ public void testWrapLiveDocsNotExposeAbortedDocuments() throws Exception { Directory dir = newDirectory(); IndexWriterConfig config = newIndexWriterConfig().setSoftDeletesField(Lucene.SOFT_DELETES_FIELD) .setMergePolicy(new SoftDeletesRetentionMergePolicy(Lucene.SOFT_DELETES_FIELD, MatchAllDocsQuery::new, newMergePolicy())); + // override 500ms default introduced in + // https://issues.apache.org/jira/browse/LUCENE-10078 + 
config.setMaxFullFlushMergeWaitMillis(0); IndexWriter writer = new IndexWriter(dir, config); int numDocs = between(1, 10); List liveDocs = new ArrayList<>(); From 76149a0260869d1490d9c6bde3da3b47f13e44b6 Mon Sep 17 00:00:00 2001 From: Sachin Kale Date: Fri, 10 Jun 2022 11:02:53 +0530 Subject: [PATCH 55/75] [Remote Store] Upload segments to remote store post refresh (#3460) * Add RemoteDirectory interface to copy segment files to/from remote store Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale * Add index level setting for remote store Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale * Add RemoteDirectoryFactory and use RemoteDirectory instance in RefreshListener Co-authored-by: Sachin Kale Signed-off-by: Sachin Kale * Upload segment to remote store post refresh Signed-off-by: Sachin Kale Co-authored-by: Sachin Kale --- .../opensearch/index/shard/IndexShardIT.java | 3 +- .../cluster/metadata/IndexMetadata.java | 11 + .../common/settings/IndexScopedSettings.java | 4 +- .../opensearch/common/util/FeatureFlags.java | 6 + .../org/opensearch/index/IndexModule.java | 4 + .../org/opensearch/index/IndexService.java | 28 ++- .../org/opensearch/index/IndexSettings.java | 9 + .../opensearch/index/shard/IndexShard.java | 16 +- .../shard/RemoteStoreRefreshListener.java | 87 ++++++++ .../index/store/RemoteDirectory.java | 193 ++++++++++++++++++ .../index/store/RemoteDirectoryFactory.java | 37 ++++ .../index/store/RemoteIndexInput.java | 85 ++++++++ .../index/store/RemoteIndexOutput.java | 99 +++++++++ .../opensearch/indices/IndicesService.java | 8 +- .../opensearch/plugins/IndexStorePlugin.java | 17 ++ .../common/util/FeatureFlagTests.java | 7 + .../opensearch/index/IndexSettingsTests.java | 39 ++++ .../RemoteStoreRefreshListenerTests.java | 139 +++++++++++++ .../store/RemoteDirectoryFactoryTests.java | 65 ++++++ .../index/store/RemoteDirectoryTests.java | 158 ++++++++++++++ .../index/store/RemoteIndexInputTests.java | 99 +++++++++ 
.../index/store/RemoteIndexOutputTests.java | 68 ++++++ ...dicesLifecycleListenerSingleNodeTests.java | 3 +- .../index/shard/IndexShardTestCase.java | 3 +- 24 files changed, 1176 insertions(+), 12 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteDirectory.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java create mode 100644 server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java create mode 100644 server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java create mode 100644 server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 888881d43eb11..2bf73b34247b3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -675,7 +675,8 @@ public static final IndexShard newIndexShard( () -> {}, RetentionLeaseSyncer.EMPTY, cbs, - SegmentReplicationCheckpointPublisher.EMPTY + SegmentReplicationCheckpointPublisher.EMPTY, + null ); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index ec70e642ababc..442137fb70e1f 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -283,6 +283,17 @@ public Iterator> settings() { Property.Final ); + public static final String SETTING_REMOTE_STORE = "index.remote_store"; + /** + * Used to specify if the index data should be persisted in the remote store. + */ + public static final Setting INDEX_REMOTE_STORE_SETTING = Setting.boolSetting( + SETTING_REMOTE_STORE, + false, + Property.IndexScope, + Property.Final + ); + public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 3eb68a7686c96..1a31bec5935c8 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -218,7 +218,9 @@ public final class IndexScopedSettings extends AbstractScopedSettings { */ public static final Map FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - IndexMetadata.INDEX_REPLICATION_TYPE_SETTING + IndexMetadata.INDEX_REPLICATION_TYPE_SETTING, + FeatureFlags.REMOTE_STORE, + IndexMetadata.INDEX_REMOTE_STORE_SETTING ); public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index 0b31e3814667a..fa39dc9ac5aa0 100644 --- a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -23,6 +23,12 @@ public class FeatureFlags { */ public static final 
String REPLICATION_TYPE = "opensearch.experimental.feature.replication_type.enabled"; + /** + * Gates the visibility of the index setting that allows persisting data to remote store along with local disk. + * Once the feature is ready for production release, this feature flag can be removed. + */ + public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; + /** * Used to test feature flags whose values are expected to be booleans. * This method returns true if the value is "true" (case-insensitive), diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index 49daf8293656c..2cea0e4e3e95c 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,6 +70,7 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; +import org.opensearch.index.store.RemoteDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -118,6 +119,8 @@ public final class IndexModule { private static final FsDirectoryFactory DEFAULT_DIRECTORY_FACTORY = new FsDirectoryFactory(); + private static final RemoteDirectoryFactory REMOTE_DIRECTORY_FACTORY = new RemoteDirectoryFactory(); + private static final IndexStorePlugin.RecoveryStateFactory DEFAULT_RECOVERY_STATE_FACTORY = RecoveryState::new; public static final Setting INDEX_STORE_TYPE_SETTING = new Setting<>( @@ -516,6 +519,7 @@ public IndexService newIndexService( client, queryCache, directoryFactory, + REMOTE_DIRECTORY_FACTORY, eventListener, readerWrapperFactory, mapperRegistry, diff --git a/server/src/main/java/org/opensearch/index/IndexService.java 
b/server/src/main/java/org/opensearch/index/IndexService.java index 0a6d1501f2bea..f699278919d6b 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -81,6 +81,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardClosedException; import org.opensearch.index.shard.IndexingOperationListener; +import org.opensearch.index.shard.RemoteStoreRefreshListener; import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.shard.ShardId; import org.opensearch.index.shard.ShardNotFoundException; @@ -96,6 +97,9 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.RepositoryMissingException; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -136,6 +140,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust private final NodeEnvironment nodeEnv; private final ShardStoreDeleter shardStoreDeleter; private final IndexStorePlugin.DirectoryFactory directoryFactory; + private final IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory; private final IndexStorePlugin.RecoveryStateFactory recoveryStateFactory; private final CheckedFunction readerWrapper; private final IndexCache indexCache; @@ -190,6 +195,7 @@ public IndexService( Client client, QueryCache queryCache, IndexStorePlugin.DirectoryFactory directoryFactory, + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory, IndexEventListener eventListener, Function> wrapperFactory, MapperRegistry mapperRegistry, @@ -260,6 +266,7 @@ public 
IndexService( this.eventListener = eventListener; this.nodeEnv = nodeEnv; this.directoryFactory = directoryFactory; + this.remoteDirectoryFactory = remoteDirectoryFactory; this.recoveryStateFactory = recoveryStateFactory; this.engineFactory = Objects.requireNonNull(engineFactory); this.engineConfigFactory = Objects.requireNonNull(engineConfigFactory); @@ -430,7 +437,8 @@ public synchronized IndexShard createShard( final ShardRouting routing, final Consumer globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final SegmentReplicationCheckpointPublisher checkpointPublisher + final SegmentReplicationCheckpointPublisher checkpointPublisher, + final RepositoriesService repositoriesService ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -504,6 +512,21 @@ public synchronized IndexShard createShard( } }; Directory directory = directoryFactory.newDirectory(this.indexSettings, path); + Directory remoteDirectory = null; + RemoteStoreRefreshListener remoteStoreRefreshListener = null; + if (this.indexSettings.isRemoteStoreEnabled()) { + try { + Repository repository = repositoriesService.repository(clusterService.state().metadata().clusterUUID()); + remoteDirectory = remoteDirectoryFactory.newDirectory(this.indexSettings, path, repository); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(directory, remoteDirectory); + } catch (RepositoryMissingException e) { + throw new IllegalArgumentException( + "Repository should be created before creating index with remote_store enabled setting", + e + ); + } + } + store = new Store( shardId, this.indexSettings, @@ -533,7 +556,8 @@ public synchronized IndexShard createShard( () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, circuitBreakerService, - this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null + this.indexSettings.isSegRepEnabled() && routing.primary() ? 
checkpointPublisher : null, + remoteStoreRefreshListener ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 2da9fc0c6d995..457c2a2b0680a 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -545,6 +545,7 @@ public final class IndexSettings { private final Settings nodeSettings; private final int numberOfShards; private final ReplicationType replicationType; + private final boolean isRemoteStoreEnabled; // volatile fields are updated via #updateIndexMetadata(IndexMetadata) under lock private volatile Settings settings; private volatile IndexMetadata indexMetadata; @@ -701,6 +702,7 @@ public IndexSettings(final IndexMetadata indexMetadata, final Settings nodeSetti this.indexMetadata = indexMetadata; numberOfShards = settings.getAsInt(IndexMetadata.SETTING_NUMBER_OF_SHARDS, null); replicationType = ReplicationType.parseString(settings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + isRemoteStoreEnabled = settings.getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE, false); this.searchThrottled = INDEX_SEARCH_THROTTLED.get(settings); this.queryStringLenient = QUERY_STRING_LENIENT_SETTING.get(settings); @@ -944,6 +946,13 @@ public boolean isSegRepEnabled() { return ReplicationType.SEGMENT.equals(replicationType); } + /** + * Returns if remote store is enabled for this index. + */ + public boolean isRemoteStoreEnabled() { + return isRemoteStoreEnabled; + } + /** * Returns the node settings. The settings returned from {@link #getSettings()} are a merged version of the * index settings and the node settings where node settings are overwritten by index settings. 
diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 5d11c34ca205c..bad412003df26 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -304,6 +304,8 @@ Runnable getGlobalCheckpointSyncer() { private volatile boolean useRetentionLeasesInPeerRecovery; private final ReferenceManager.RefreshListener checkpointRefreshListener; + private final RemoteStoreRefreshListener remoteStoreRefreshListener; + public IndexShard( final ShardRouting shardRouting, final IndexSettings indexSettings, @@ -325,7 +327,8 @@ public IndexShard( final Runnable globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, final CircuitBreakerService circuitBreakerService, - @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher + @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher, + @Nullable final RemoteStoreRefreshListener remoteStoreRefreshListener ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -413,6 +416,7 @@ public boolean shouldCache(Query query) { } else { this.checkpointRefreshListener = null; } + this.remoteStoreRefreshListener = remoteStoreRefreshListener; } public ThreadPool getThreadPool() { @@ -3139,11 +3143,13 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { } }; - final List internalRefreshListener; + final List internalRefreshListener = new ArrayList<>(); + internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); + if (remoteStoreRefreshListener != null && shardRouting.primary()) { + internalRefreshListener.add(remoteStoreRefreshListener); + } if (this.checkpointRefreshListener != null) { - internalRefreshListener = Arrays.asList(new RefreshMetricUpdater(refreshMetric), checkpointRefreshListener); - } else { - 
internalRefreshListener = Collections.singletonList(new RefreshMetricUpdater(refreshMetric)); + internalRefreshListener.add(checkpointRefreshListener); } return this.engineConfigFactory.newEngineConfig( diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java new file mode 100644 index 0000000000000..4b549ec485c0e --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -0,0 +1,87 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +/** + * RefreshListener implementation to upload newly created segment files to the remote store + */ +public class RemoteStoreRefreshListener implements ReferenceManager.RefreshListener { + + private final Directory storeDirectory; + private final Directory remoteDirectory; + // ToDo: This can be a map with metadata of the uploaded file as value of the map (GitHub #3398) + private final Set filesUploadedToRemoteStore; + private static final Logger logger = LogManager.getLogger(RemoteStoreRefreshListener.class); + + public RemoteStoreRefreshListener(Directory storeDirectory, Directory remoteDirectory) throws IOException { + this.storeDirectory = storeDirectory; + this.remoteDirectory = remoteDirectory; + // ToDo: Handle failures in reading list of 
files (GitHub #3397) + this.filesUploadedToRemoteStore = new HashSet<>(Arrays.asList(remoteDirectory.listAll())); + } + + @Override + public void beforeRefresh() throws IOException { + // Do Nothing + } + + /** + * Upload new segment files created as part of the last refresh to the remote segment store. + * The method also deletes segment files from remote store which are not part of local filesystem. + * @param didRefresh true if the refresh opened a new reference + * @throws IOException in case of I/O error in reading list of local files + */ + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if (didRefresh) { + Set localFiles = Set.of(storeDirectory.listAll()); + localFiles.stream().filter(file -> !filesUploadedToRemoteStore.contains(file)).forEach(file -> { + try { + remoteDirectory.copyFrom(storeDirectory, file, file, IOContext.DEFAULT); + filesUploadedToRemoteStore.add(file); + } catch (NoSuchFileException e) { + logger.info( + () -> new ParameterizedMessage("The file {} does not exist anymore. 
It can happen in case of temp files", file), + e + ); + } catch (IOException e) { + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while uploading file {} to the remote segment store", file), e); + } + }); + + Set remoteFilesToBeDeleted = new HashSet<>(); + // ToDo: Instead of deleting files in sync, mark them and delete in async/periodic flow (GitHub #3142) + filesUploadedToRemoteStore.stream().filter(file -> !localFiles.contains(file)).forEach(file -> { + try { + remoteDirectory.deleteFile(file); + remoteFilesToBeDeleted.add(file); + } catch (IOException e) { + // ToDO: Handle transient and permanent un-availability of the remote store (GitHub #3397) + logger.warn(() -> new ParameterizedMessage("Exception while deleting file {} from the remote segment store", file), e); + } + }); + + remoteFilesToBeDeleted.forEach(filesUploadedToRemoteStore::remove); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java new file mode 100644 index 0000000000000..2f8f977537327 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectory.java @@ -0,0 +1,193 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +/** + * A {@code RemoteDirectory} provides an abstraction layer for storing a list of files to a remote store. + * A remoteDirectory contains only files (no sub-folder hierarchy). This class does not support all the methods in + * the Directory interface. Currently, it contains implementation of methods which are used to copy files to/from + * the remote store. Implementation of remaining methods will be added as remote store is integrated with + * replication, peer recovery etc. + * + * @opensearch.internal + */ +public final class RemoteDirectory extends Directory { + + private final BlobContainer blobContainer; + + public RemoteDirectory(BlobContainer blobContainer) { + this.blobContainer = blobContainer; + } + + /** + * Returns names of all files stored in this directory. The output must be in sorted (UTF-16, + * java's {@link String#compareTo}) order. + */ + @Override + public String[] listAll() throws IOException { + return blobContainer.listBlobs().keySet().stream().sorted().toArray(String[]::new); + } + + /** + * Removes an existing file in the directory. + * + *

This method will not throw an exception when the file doesn't exist and simply ignores this case. + * This is a deviation from the {@code Directory} interface where it is expected to throw either + * {@link NoSuchFileException} or {@link FileNotFoundException} if {@code name} points to a non-existing file. + * + * @param name the name of an existing file. + * @throws IOException if the file exists but could not be deleted. + */ + @Override + public void deleteFile(String name) throws IOException { + // ToDo: Add a check for file existence + blobContainer.deleteBlobsIgnoringIfNotExists(Collections.singletonList(name)); + } + + /** + * Creates and returns a new instance of {@link RemoteIndexOutput} which will be used to copy files to the remote + * store. + * + *

In the {@link Directory} interface, it is expected to throw {@link java.nio.file.FileAlreadyExistsException} + * if the file already exists in the remote store. As this method does not open a file, it does not throw the + * exception. + * + * @param name the name of the file to copy to remote store. + */ + @Override + public IndexOutput createOutput(String name, IOContext context) { + return new RemoteIndexOutput(name, blobContainer); + } + + /** + * Opens a stream for reading an existing file and returns {@link RemoteIndexInput} enclosing the stream. + * + * @param name the name of an existing file. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist + */ + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + return new RemoteIndexInput(name, blobContainer.readBlob(name), fileLength(name)); + } + + /** + * Closes the directory by deleting all the files in this directory + */ + @Override + public void close() throws IOException { + blobContainer.delete(); + } + + /** + * Returns the byte length of a file in the directory. + * + * @param name the name of an existing file. + * @throws IOException in case of I/O error + * @throws NoSuchFileException if the file does not exist + */ + @Override + public long fileLength(String name) throws IOException { + // ToDo: Instead of calling remote store each time, keep a cache with segment metadata + Map metadata = blobContainer.listBlobsByPrefix(name); + if (metadata.containsKey(name)) { + return metadata.get(name).length(); + } + throw new NoSuchFileException(name); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Once soft deleting is supported segment files in the remote store, this method will provide details of + * number of files marked as deleted but not actually deleted from the remote store. 
+ * + * @throws UnsupportedOperationException always + */ + @Override + public Set getPendingDeletions() throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Temporary IndexOutput is not required while working with Remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Segment upload to the remote store will be permanent and does not require a separate sync API. + * This may change in the future if segment upload to remote store happens via cache and we need sync API to write + * the cache contents to the store permanently. + * + * @throws UnsupportedOperationException always + */ + @Override + public void sync(Collection names) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * Once metadata to be stored with each shard is finalized, syncMetaData method will be used to sync the directory + * metadata to the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public void syncMetaData() { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. + * As this method is used by IndexWriter to publish commits, the implementation of this method is required when + * IndexWriter is backed by RemoteDirectory. + * + * @throws UnsupportedOperationException always + */ + @Override + public void rename(String source, String dest) throws IOException { + throw new UnsupportedOperationException(); + + } + + /** + * Guaranteed to throw an exception and leave the directory unmodified. 
+ * Once locking segment files in remote store is supported, implementation of this method is required with + * remote store specific LockFactory. + * + * @throws UnsupportedOperationException always + */ + @Override + public Lock obtainLock(String name) throws IOException { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java new file mode 100644 index 0000000000000..eb7912a1f4a2b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteDirectoryFactory.java @@ -0,0 +1,37 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; + +import java.io.IOException; + +/** + * Factory for a remote store directory + * + * @opensearch.internal + */ +public class RemoteDirectoryFactory implements IndexStorePlugin.RemoteDirectoryFactory { + + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath path, Repository repository) throws IOException { + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + BlobPath blobPath = new BlobPath(); + blobPath = blobPath.add(indexSettings.getIndex().getName()).add(String.valueOf(path.getShardId().getId())); + BlobContainer blobContainer = ((BlobStoreRepository) 
repository).blobStore().blobContainer(blobPath); + return new RemoteDirectory(blobContainer); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java new file mode 100644 index 0000000000000..24e1128dec1b5 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexInput.java @@ -0,0 +1,85 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.IndexInput; + +import java.io.IOException; +import java.io.InputStream; + +/** + * Class for input from a file in a {@link RemoteDirectory}. Used for all input operations from the remote store. + * Currently, only methods from {@link IndexInput} that are required for reading a file from remote store are + * implemented. Remaining methods will be implemented as we open up remote store for other use cases like replication, + * peer recovery etc. 
+ * ToDo: Extend ChecksumIndexInput + * @see RemoteDirectory + * + * @opensearch.internal + */ +public class RemoteIndexInput extends IndexInput { + + private final InputStream inputStream; + private final long size; + + public RemoteIndexInput(String name, InputStream inputStream, long size) { + super(name); + this.inputStream = inputStream; + this.size = size; + } + + @Override + public byte readByte() throws IOException { + byte[] buffer = new byte[1]; + inputStream.read(buffer); + return buffer[0]; + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + inputStream.read(b, offset, len); + } + + @Override + public void close() throws IOException { + inputStream.close(); + } + + @Override + public long length() { + return size; + } + + @Override + public void seek(long pos) throws IOException { + inputStream.skip(pos); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public long getFilePointer() { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexInput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. 
+ * + * @throws UnsupportedOperationException always + */ + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + throw new UnsupportedOperationException(); + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java b/server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java new file mode 100644 index 0000000000000..2af65452a6eac --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteIndexOutput.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.DataInput; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.lucene.store.InputStreamIndexInput; + +import java.io.IOException; + +/** + * Class for output to a file in a {@link RemoteDirectory}. Used for all output operations to the remote store. + * Currently, only methods from {@link IndexOutput} that are required for uploading a segment file to remote store are + * implemented. Remaining methods will be implemented as we open up remote store for other use cases like replication, + * peer recovery etc. 
+ * ToDo: Extend ChecksumIndexInput + * @see RemoteDirectory + * + * @opensearch.internal + */ +public class RemoteIndexOutput extends IndexOutput { + + private final BlobContainer blobContainer; + + public RemoteIndexOutput(String name, BlobContainer blobContainer) { + super(name, name); + this.blobContainer = blobContainer; + } + + @Override + public void copyBytes(DataInput input, long numBytes) throws IOException { + assert input instanceof IndexInput : "input should be instance of IndexInput"; + blobContainer.writeBlob(getName(), new InputStreamIndexInput((IndexInput) input, numBytes), numBytes, false); + } + + /** + * This is a no-op. Once segment file upload to the remote store is complete, we don't need to explicitly close + * the stream. It is taken care by internal APIs of client of the remote store. + */ + @Override + public void close() throws IOException { + // do nothing + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeByte(byte b) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. + * + * @throws UnsupportedOperationException always + */ + @Override + public void writeBytes(byte[] byteArray, int offset, int length) throws IOException { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not used for the file transfer to/from the remote store. 
+ * + * @throws UnsupportedOperationException always + */ + @Override + public long getFilePointer() { + throw new UnsupportedOperationException(); + } + + /** + * Guaranteed to throw an exception and leave the RemoteIndexOutput unmodified. + * This method is not implemented as it is not directly used for the file transfer to/from the remote store. + * But the checksum is important to verify integrity of the data and that means implementing this method will + * be required for the segment upload as well. + * + * @throws UnsupportedOperationException always + */ + @Override + public long getChecksum() throws IOException { + throw new UnsupportedOperationException(); + } + +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 79fd2893fb78c..b2f6f10c19638 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -859,7 +859,13 @@ public IndexShard createShard( IndexService indexService = indexService(shardRouting.index()); assert indexService != null; RecoveryState recoveryState = indexService.createRecoveryState(shardRouting, targetNode, sourceNode); - IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher); + IndexShard indexShard = indexService.createShard( + shardRouting, + globalCheckpointSyncer, + retentionLeaseSyncer, + checkpointPublisher, + repositoriesService + ); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS diff --git a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java index 2f549fec54759..52ddf6dcf2753 100644 
--- a/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java +++ b/server/src/main/java/org/opensearch/plugins/IndexStorePlugin.java @@ -39,6 +39,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.ShardPath; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.repositories.Repository; import java.io.IOException; import java.util.Collections; @@ -66,6 +67,22 @@ interface DirectoryFactory { Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException; } + /** + * An interface that describes how to create a new remote directory instance per shard. + */ + @FunctionalInterface + interface RemoteDirectoryFactory { + /** + * Creates a new remote directory per shard. This method is called once per shard on shard creation. + * @param indexSettings the shards index settings + * @param shardPath the path the shard is using + * @param repository to get the BlobContainer details + * @return a new RemoteDirectory instance + * @throws IOException if an IOException occurs while opening the directory + */ + Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath, Repository repository) throws IOException; + } + /** * The {@link DirectoryFactory} mappings for this plugin. 
When an index is created the store type setting * {@link org.opensearch.index.IndexModule#INDEX_STORE_TYPE_SETTING} on the index will be examined and either use the default or a diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index 1084f9c658db4..a4f2b242564e2 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -21,6 +21,7 @@ public class FeatureFlagTests extends OpenSearchTestCase { @BeforeClass public static void enableFeature() { AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true")); + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REMOTE_STORE, "true")); } public void testReplicationTypeFeatureFlag() { @@ -40,4 +41,10 @@ public void testNonBooleanFeatureFlag() { assertNotNull(System.getProperty(javaVersionProperty)); assertFalse(FeatureFlags.isEnabled(javaVersionProperty)); } + + public void testRemoteStoreFeatureFlag() { + String remoteStoreFlag = FeatureFlags.REMOTE_STORE; + assertNotNull(System.getProperty(remoteStoreFlag)); + assertTrue(FeatureFlags.isEnabled(remoteStoreFlag)); + } } diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index 71433673eef5a..4b3dc041b9f54 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -41,6 +41,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.translog.Translog; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.VersionUtils; @@ -56,6 +57,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.core.StringContains.containsString; import static org.hamcrest.object.HasToString.hasToString; +import static org.opensearch.common.settings.IndexScopedSettings.FEATURE_FLAGGED_INDEX_SETTINGS; public class IndexSettingsTests extends OpenSearchTestCase { @@ -753,4 +755,41 @@ public void testIgnoreTranslogRetentionSettingsIfSoftDeletesEnabled() { assertThat(indexSettings.getTranslogRetentionAge().millis(), equalTo(-1L)); assertThat(indexSettings.getTranslogRetentionSize().getBytes(), equalTo(-1L)); } + + public void testRemoteStoreDefaultSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertFalse(settings.isRemoteStoreEnabled()); + } + + public void testRemoteStoreExplicitSetting() { + IndexMetadata metadata = newIndexMeta( + "index", + Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REMOTE_STORE, true) + .build() + ); + IndexSettings settings = new IndexSettings(metadata, Settings.EMPTY); + assertTrue(settings.isRemoteStoreEnabled()); + } + + public void testUpdateRemoteStoreFails() { + Set> remoteStoreSettingSet = new HashSet<>(); + remoteStoreSettingSet.add(FEATURE_FLAGGED_INDEX_SETTINGS.get(FeatureFlags.REMOTE_STORE)); + IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); + IllegalArgumentException error = expectThrows( + IllegalArgumentException.class, + () -> settings.updateSettings( + Settings.builder().put("index.remote_store", randomBoolean()).build(), + Settings.builder(), + Settings.builder(), + "index" + ) + ); + assertEquals(error.getMessage(), "final index setting [index.remote_store], not updateable"); + } } diff --git 
a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java new file mode 100644 index 0000000000000..af92d821a9043 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -0,0 +1,139 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.NoSuchFileException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.doThrow; + +public class RemoteStoreRefreshListenerTests extends OpenSearchTestCase { + private Directory storeDirectory; + private Directory remoteDirectory; + + private RemoteStoreRefreshListener remoteStoreRefreshListener; + + public void setup(String[] remoteFiles) throws IOException { + storeDirectory = mock(Directory.class); + remoteDirectory = mock(Directory.class); + when(remoteDirectory.listAll()).thenReturn(remoteFiles); + remoteStoreRefreshListener = new RemoteStoreRefreshListener(storeDirectory, remoteDirectory); + } + + public void testAfterRefreshFalse() throws IOException { + setup(new String[0]); + remoteStoreRefreshListener.afterRefresh(false); + verify(storeDirectory, times(0)).listAll(); + } + + public void testAfterRefreshTrueNoLocalFiles() throws IOException { + setup(new String[0]); + + when(storeDirectory.listAll()).thenReturn(new String[0]); + + remoteStoreRefreshListener.afterRefresh(true); + 
verify(storeDirectory).listAll(); + verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); + verify(remoteDirectory, times(0)).deleteFile(any()); + } + + public void testAfterRefreshOnlyUploadFiles() throws IOException { + setup(new String[0]); + + String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + verify(remoteDirectory, times(0)).deleteFile(any()); + } + + public void testAfterRefreshOnlyUploadAndDelete() throws IOException { + setup(new String[] { "0.si", "0.cfs" }); + + String[] localFiles = new String[] { "segments_1", "1.si", "1.cfs", "1.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.si", "1.si", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); + verify(remoteDirectory).deleteFile("0.si"); + verify(remoteDirectory).deleteFile("0.cfs"); + } + + public void testAfterRefreshOnlyDelete() throws IOException { + setup(new String[] { "0.si", "0.cfs" }); + + String[] localFiles = new String[] { "0.si" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + + remoteStoreRefreshListener.afterRefresh(true); + 
verify(storeDirectory).listAll(); + verify(remoteDirectory, times(0)).copyFrom(any(), any(), any(), any()); + verify(remoteDirectory).deleteFile("0.cfs"); + } + + public void testAfterRefreshTempLocalFile() throws IOException { + setup(new String[0]); + + String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs.tmp" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + doThrow(new NoSuchFileException("0.cfs.tmp")).when(remoteDirectory) + .copyFrom(storeDirectory, "0.cfs.tmp", "0.cfs.tmp", IOContext.DEFAULT); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); + verify(remoteDirectory, times(0)).deleteFile(any()); + } + + public void testAfterRefreshConsecutive() throws IOException { + setup(new String[0]); + + String[] localFiles = new String[] { "segments_1", "0.si", "0.cfs", "0.cfe" }; + when(storeDirectory.listAll()).thenReturn(localFiles); + doThrow(new IOException("0.cfs")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfe", IOContext.DEFAULT); + doThrow(new IOException("0.cfe")).when(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + + remoteStoreRefreshListener.afterRefresh(true); + verify(storeDirectory).listAll(); + verify(remoteDirectory).copyFrom(storeDirectory, "segments_1", "segments_1", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.si", "0.si", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfe", "0.cfe", IOContext.DEFAULT); + verify(remoteDirectory, times(0)).deleteFile(any()); + + String[] localFilesSecondRefresh = new String[] { "segments_1", "0.cfs", "1.cfs", "1.cfe" }; + 
when(storeDirectory.listAll()).thenReturn(localFilesSecondRefresh); + + remoteStoreRefreshListener.afterRefresh(true); + + verify(remoteDirectory).copyFrom(storeDirectory, "0.cfs", "0.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfs", "1.cfs", IOContext.DEFAULT); + verify(remoteDirectory).copyFrom(storeDirectory, "1.cfe", "1.cfe", IOContext.DEFAULT); + verify(remoteDirectory).deleteFile("0.si"); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java new file mode 100644 index 0000000000000..d781fad9ab99c --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryFactoryTests.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.Directory; +import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.blobstore.BlobStore; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardId; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Collections; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; + +public class RemoteDirectoryFactoryTests extends OpenSearchTestCase { + + 
private RemoteDirectoryFactory remoteDirectoryFactory; + + @Before + public void setup() { + remoteDirectoryFactory = new RemoteDirectoryFactory(); + } + + public void testNewDirectory() throws IOException { + Settings settings = Settings.builder().build(); + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); + Path tempDir = createTempDir().resolve(indexSettings.getUUID()).resolve("0"); + ShardPath shardPath = new ShardPath(false, tempDir, tempDir, new ShardId(indexSettings.getIndex(), 0)); + BlobStoreRepository repository = mock(BlobStoreRepository.class); + BlobStore blobStore = mock(BlobStore.class); + BlobContainer blobContainer = mock(BlobContainer.class); + when(repository.blobStore()).thenReturn(blobStore); + when(blobStore.blobContainer(any())).thenReturn(blobContainer); + when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); + + Directory directory = remoteDirectoryFactory.newDirectory(indexSettings, shardPath, repository); + assertTrue(directory instanceof RemoteDirectory); + ArgumentCaptor blobPathCaptor = ArgumentCaptor.forClass(BlobPath.class); + verify(blobStore).blobContainer(blobPathCaptor.capture()); + BlobPath blobPath = blobPathCaptor.getValue(); + assertEquals("foo/0/", blobPath.buildAsString()); + + directory.listAll(); + verify(blobContainer).listBlobs(); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java new file mode 100644 index 0000000000000..c2c365d9140df --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteDirectoryTests.java @@ -0,0 +1,158 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.junit.Before; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobMetadata; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.NoSuchFileException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.mockito.Mockito.*; + +public class RemoteDirectoryTests extends OpenSearchTestCase { + private BlobContainer blobContainer; + + private RemoteDirectory remoteDirectory; + + @Before + public void setup() { + blobContainer = mock(BlobContainer.class); + remoteDirectory = new RemoteDirectory(blobContainer); + } + + public void testListAllEmpty() throws IOException { + when(blobContainer.listBlobs()).thenReturn(Collections.emptyMap()); + + String[] actualFileNames = remoteDirectory.listAll(); + String[] expectedFileName = new String[] {}; + assertArrayEquals(expectedFileName, actualFileNames); + } + + public void testListAll() throws IOException { + Map fileNames = Stream.of("abc", "xyz", "pqr", "lmn", "jkl") + .collect(Collectors.toMap(filename -> filename, filename -> new PlainBlobMetadata(filename, 100))); + + when(blobContainer.listBlobs()).thenReturn(fileNames); + + String[] actualFileNames = remoteDirectory.listAll(); + String[] expectedFileName = new String[] { "abc", "jkl", "lmn", "pqr", "xyz" }; + assertArrayEquals(expectedFileName, actualFileNames); + } + + public void testListAllException() throws IOException { + when(blobContainer.listBlobs()).thenThrow(new IOException("Error reading blob store")); + + assertThrows(IOException.class, () -> remoteDirectory.listAll()); 
+ } + + public void testDeleteFile() throws IOException { + remoteDirectory.deleteFile("segment_1"); + + verify(blobContainer).deleteBlobsIgnoringIfNotExists(Collections.singletonList("segment_1")); + } + + public void testDeleteFileException() throws IOException { + doThrow(new IOException("Error writing to blob store")).when(blobContainer) + .deleteBlobsIgnoringIfNotExists(Collections.singletonList("segment_1")); + + assertThrows(IOException.class, () -> remoteDirectory.deleteFile("segment_1")); + } + + public void testCreateOutput() { + IndexOutput indexOutput = remoteDirectory.createOutput("segment_1", IOContext.DEFAULT); + assertTrue(indexOutput instanceof RemoteIndexOutput); + assertEquals("segment_1", indexOutput.getName()); + } + + public void testOpenInput() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + Map fileInfo = new HashMap<>(); + fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); + when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + IndexInput indexInput = remoteDirectory.openInput("segment_1", IOContext.DEFAULT); + assertTrue(indexInput instanceof RemoteIndexInput); + assertEquals(100, indexInput.length()); + } + + public void testOpenInputIOException() throws IOException { + when(blobContainer.readBlob("segment_1")).thenThrow(new IOException("Error while reading")); + + assertThrows(IOException.class, () -> remoteDirectory.openInput("segment_1", IOContext.DEFAULT)); + } + + public void testOpenInputNoSuchFileException() throws IOException { + InputStream mockInputStream = mock(InputStream.class); + when(blobContainer.readBlob("segment_1")).thenReturn(mockInputStream); + when(blobContainer.listBlobsByPrefix("segment_1")).thenThrow(new NoSuchFileException("segment_1")); + + assertThrows(NoSuchFileException.class, () -> remoteDirectory.openInput("segment_1", IOContext.DEFAULT)); + } + + public void 
testClose() throws IOException { + remoteDirectory.close(); + + verify(blobContainer).delete(); + } + + public void testCloseIOException() throws IOException { + when(blobContainer.delete()).thenThrow(new IOException("Error while writing to blob store")); + + assertThrows(IOException.class, () -> remoteDirectory.close()); + } + + public void testFileLength() throws IOException { + Map fileInfo = new HashMap<>(); + fileInfo.put("segment_1", new PlainBlobMetadata("segment_1", 100)); + when(blobContainer.listBlobsByPrefix("segment_1")).thenReturn(fileInfo); + + assertEquals(100, remoteDirectory.fileLength("segment_1")); + } + + public void testFileLengthIOException() throws IOException { + when(blobContainer.listBlobsByPrefix("segment_1")).thenThrow(new NoSuchFileException("segment_1")); + + assertThrows(IOException.class, () -> remoteDirectory.fileLength("segment_1")); + } + + public void testGetPendingDeletions() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.getPendingDeletions()); + } + + public void testCreateTempOutput() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.createTempOutput("segment_1", "tmp", IOContext.DEFAULT)); + } + + public void testSync() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.sync(Collections.emptyList())); + } + + public void testRename() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.rename("segment_1", "segment_2")); + } + + public void testObtainLock() { + assertThrows(UnsupportedOperationException.class, () -> remoteDirectory.obtainLock("segment_1")); + } + +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java new file mode 100644 index 0000000000000..c2f81c035e424 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexInputTests.java @@ -0,0 +1,99 @@ +/* + * SPDX-License-Identifier: 
Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.store; + +import org.junit.Before; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.io.InputStream; + +import static org.mockito.Mockito.*; + +public class RemoteIndexInputTests extends OpenSearchTestCase { + + private static final String FILENAME = "segment_1"; + private static final long FILESIZE = 200; + + private InputStream inputStream; + private RemoteIndexInput remoteIndexInput; + + @Before + public void setup() { + inputStream = mock(InputStream.class); + remoteIndexInput = new RemoteIndexInput(FILENAME, inputStream, FILESIZE); + } + + public void testReadByte() throws IOException { + InputStream inputStream = spy(InputStream.class); + remoteIndexInput = new RemoteIndexInput(FILENAME, inputStream, FILESIZE); + + when(inputStream.read()).thenReturn(10); + + assertEquals(10, remoteIndexInput.readByte()); + + verify(inputStream).read(any()); + } + + public void testReadByteIOException() throws IOException { + when(inputStream.read(any())).thenThrow(new IOException("Error reading")); + + assertThrows(IOException.class, () -> remoteIndexInput.readByte()); + } + + public void testReadBytes() throws IOException { + byte[] buffer = new byte[10]; + remoteIndexInput.readBytes(buffer, 10, 20); + + verify(inputStream).read(buffer, 10, 20); + } + + public void testReadBytesIOException() throws IOException { + byte[] buffer = new byte[10]; + when(inputStream.read(buffer, 10, 20)).thenThrow(new IOException("Error reading")); + + assertThrows(IOException.class, () -> remoteIndexInput.readBytes(buffer, 10, 20)); + } + + public void testClose() throws IOException { + remoteIndexInput.close(); + + verify(inputStream).close(); + } + + public void testCloseIOException() throws IOException { + doThrow(new IOException("Error 
closing")).when(inputStream).close(); + + assertThrows(IOException.class, () -> remoteIndexInput.close()); + } + + public void testLength() { + assertEquals(FILESIZE, remoteIndexInput.length()); + } + + public void testSeek() throws IOException { + remoteIndexInput.seek(10); + + verify(inputStream).skip(10); + } + + public void testSeekIOException() throws IOException { + when(inputStream.skip(10)).thenThrow(new IOException("Error reading")); + + assertThrows(IOException.class, () -> remoteIndexInput.seek(10)); + } + + public void testGetFilePointer() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.getFilePointer()); + } + + public void testSlice() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexInput.slice("Slice middle", 50, 100)); + } +} diff --git a/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java new file mode 100644 index 0000000000000..64975f2ac4892 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/store/RemoteIndexOutputTests.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.apache.lucene.store.IndexInput; +import org.junit.Before; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.lucene.store.InputStreamIndexInput; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; + +public class RemoteIndexOutputTests extends OpenSearchTestCase { + private static final String FILENAME = "segment_1"; + + private BlobContainer blobContainer; + + private RemoteIndexOutput remoteIndexOutput; + + @Before + public void setup() { + blobContainer = mock(BlobContainer.class); + remoteIndexOutput = new RemoteIndexOutput(FILENAME, blobContainer); + } + + public void testCopyBytes() throws IOException { + IndexInput indexInput = mock(IndexInput.class); + remoteIndexOutput.copyBytes(indexInput, 100); + + verify(blobContainer).writeBlob(eq(FILENAME), any(InputStreamIndexInput.class), eq(100L), eq(false)); + } + + public void testCopyBytesIOException() throws IOException { + doThrow(new IOException("Error writing")).when(blobContainer) + .writeBlob(eq(FILENAME), any(InputStreamIndexInput.class), eq(100L), eq(false)); + + IndexInput indexInput = mock(IndexInput.class); + assertThrows(IOException.class, () -> remoteIndexOutput.copyBytes(indexInput, 100)); + } + + public void testWriteByte() { + byte b = 10; + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.writeByte(b)); + } + + public void testWriteBytes() { + byte[] buffer = new byte[10]; + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.writeBytes(buffer, 50, 60)); + } + + public void testGetFilePointer() { + assertThrows(UnsupportedOperationException.class, () -> remoteIndexOutput.getFilePointer()); + } + + public void testGetChecksum() { + assertThrows(UnsupportedOperationException.class, () -> 
remoteIndexOutput.getChecksum()); + } +} diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java index 0989bf869f18e..213a22539971f 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java @@ -153,7 +153,8 @@ public void afterIndexRemoved(Index index, IndexSettings indexSettings, IndexRem newRouting, s -> {}, RetentionLeaseSyncer.EMPTY, - SegmentReplicationCheckpointPublisher.EMPTY + SegmentReplicationCheckpointPublisher.EMPTY, + null ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 371fa6d102304..62c52ab636255 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -525,7 +525,8 @@ protected IndexShard newShard( globalCheckpointSyncer, retentionLeaseSyncer, breakerService, - checkpointPublisher + checkpointPublisher, + null ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); success = true; From 50387bb7bc1f714b38031faf5aaaa6ddb1332843 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Mon, 13 Jun 2022 10:55:09 -0400 Subject: [PATCH 56/75] Fixing VerifyVersionConstantsIT test failure (#3574) Signed-off-by: Andriy Redko --- server/src/main/java/org/opensearch/Version.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 04907ee5d054b..ec33e674c4d5f 100644 --- a/server/src/main/java/org/opensearch/Version.java 
+++ b/server/src/main/java/org/opensearch/Version.java @@ -89,7 +89,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); - public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0); + public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; From 88509d5ba18b61c2b5829541a5aa534ffc1ba665 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 21:27:01 -0700 Subject: [PATCH 57/75] Bump jettison from 1.4.1 to 1.5.0 in /plugins/discovery-azure-classic (#3571) * Bump jettison from 1.4.1 to 1.5.0 in /plugins/discovery-azure-classic Bumps [jettison](https://github.com/jettison-json/jettison) from 1.4.1 to 1.5.0. - [Release notes](https://github.com/jettison-json/jettison/releases) - [Commits](https://github.com/jettison-json/jettison/compare/jettison-1.4.1...jettison-1.5.0) --- updated-dependencies: - dependency-name: org.codehaus.jettison:jettison dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-azure-classic/build.gradle | 2 +- .../discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 | 1 - .../discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 create mode 100644 plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 diff --git a/plugins/discovery-azure-classic/build.gradle b/plugins/discovery-azure-classic/build.gradle index 575b8858b16ba..5755ff55bfff9 100644 --- a/plugins/discovery-azure-classic/build.gradle +++ b/plugins/discovery-azure-classic/build.gradle @@ -59,7 +59,7 @@ dependencies { api "com.sun.jersey:jersey-client:${versions.jersey}" api "com.sun.jersey:jersey-core:${versions.jersey}" api "com.sun.jersey:jersey-json:${versions.jersey}" - api 'org.codehaus.jettison:jettison:1.4.1' + api 'org.codehaus.jettison:jettison:1.5.0' api 'com.sun.xml.bind:jaxb-impl:2.2.3-1' // HACK: javax.xml.bind was removed from default modules in java 9, so we pull the api in here, diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 deleted file mode 100644 index 815d87d917f2e..0000000000000 --- a/plugins/discovery-azure-classic/licenses/jettison-1.4.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d16bbcbac93446942c9e5da04530159afbe3e65 \ No newline at end of file diff --git a/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 b/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 new file mode 100644 index 0000000000000..ec93f83474541 --- /dev/null +++ b/plugins/discovery-azure-classic/licenses/jettison-1.5.0.jar.sha1 @@ -0,0 +1 @@ +933c7df7a4b78c9a9322f431014ea699b1fc0cc0 \ No newline at 
end of file From 56875ff79cfdb35f1c30b85488afb1f1e1d4f1be Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 21:39:23 -0700 Subject: [PATCH 58/75] Bump google-api-services-storage from v1-rev20200814-1.30.10 to v1-rev20220608-1.32.1 in /plugins/repository-gcs (#3573) * Bump google-api-services-storage in /plugins/repository-gcs Bumps google-api-services-storage from v1-rev20200814-1.30.10 to v1-rev20220608-1.32.1. --- updated-dependencies: - dependency-name: com.google.apis:google-api-services-storage dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Upgrade Google HTTP Client to 1.42.0 Signed-off-by: Xue Zhou Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Xue Zhou --- plugins/repository-gcs/build.gradle | 4 ++-- ...oogle-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 | 1 - ...google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 | 1 + .../licenses/google-http-client-1.35.0.jar.sha1 | 1 - .../licenses/google-http-client-1.42.0.jar.sha1 | 1 + 5 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 0e1c2125f5d81..097e96fcd8fdc 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -58,7 +58,7 @@ dependencies { api 'com.google.cloud:google-cloud-core:2.5.10' runtimeOnly 'com.google.guava:guava:30.1.1-jre' api 
'com.google.guava:failureaccess:1.0.1' - api 'com.google.http-client:google-http-client:1.35.0' + api 'com.google.http-client:google-http-client:1.42.0' api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" @@ -82,7 +82,7 @@ dependencies { api 'io.grpc:grpc-context:1.46.0' api 'io.opencensus:opencensus-api:0.18.0' api 'io.opencensus:opencensus-contrib-http-util:0.18.0' - api 'com.google.apis:google-api-services-storage:v1-rev20200814-1.30.10' + api 'com.google.apis:google-api-services-storage:v1-rev20220608-1.32.1' testImplementation project(':test:fixtures:gcs-fixture') } diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 deleted file mode 100644 index e399aa5865413..0000000000000 --- a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20200814-1.30.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fe3b480958961fc7144da10ce3653065d5eb5490 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 new file mode 100644 index 0000000000000..07aaadb2664b2 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev20220608-1.32.1.jar.sha1 @@ -0,0 +1 @@ +74724addc6cecac408dad3a6a26423b7647b3724 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 deleted file mode 100644 index 802a6ab3a8d04..0000000000000 --- a/plugins/repository-gcs/licenses/google-http-client-1.35.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2348dd57d5417c29388bd430f5055dca863c600 \ No newline at end of file 
diff --git a/plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 new file mode 100644 index 0000000000000..9c20d9f12d4b0 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-1.42.0.jar.sha1 @@ -0,0 +1 @@ +4f319ce80ba6888d04a38234916c43d5486842a5 \ No newline at end of file From 02639804d82ad0c25768f43400f502635b174a83 Mon Sep 17 00:00:00 2001 From: Rishab Nahata Date: Tue, 14 Jun 2022 10:13:22 +0530 Subject: [PATCH 59/75] Add flat_skew setting to node overload decider (#3563) * Add flat_skew setting to node overload decider Signed-off-by: Rishab Nahata --- .../allocation/AwarenessAllocationIT.java | 139 +++++++++++ .../NodeLoadAwareAllocationDecider.java | 26 +- .../common/settings/ClusterSettings.java | 1 + .../NodeLoadAwareAllocationTests.java | 222 ++++++++++++++---- 4 files changed, 338 insertions(+), 50 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java index 224db09d99a99..2b73c5da27606 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/allocation/AwarenessAllocationIT.java @@ -45,14 +45,17 @@ import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import static org.opensearch.cluster.routing.ShardRoutingState.STARTED; import static 
org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.empty; @@ -351,4 +354,140 @@ public void testAwarenessZonesIncrementalNodes() { assertThat(counts.get(B_1), equalTo(2)); assertThat(counts.get(noZoneNode), equalTo(2)); } + + public void testThreeZoneOneReplicaWithForceZoneValueAndLoadAwareness() throws Exception { + int nodeCountPerAZ = 5; + int numOfShards = 30; + int numOfReplica = 1; + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .put("cluster.routing.allocation.load_awareness.skew_factor", "0.0") + .put("cluster.routing.allocation.load_awareness.provisioned_capacity", Integer.toString(nodeCountPerAZ * 3)) + .build(); + + logger.info("--> starting 15 nodes on zones 'a' & 'b' & 'c'"); + List nodes_in_zone_a = internalCluster().startNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build() + ); + List nodes_in_zone_b = internalCluster().startNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build() + ); + List nodes_in_zone_c = internalCluster().startNodes( + nodeCountPerAZ, + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + // Creating index with 30 primary and 1 replica + createIndex( + "test-1", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplica) + .build() + ); + + ClusterHealthResponse health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1") + .setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus() + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + 
assertFalse(health.isTimedOut()); + + ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + ObjectIntHashMap counts = new ObjectIntHashMap<>(); + + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + for (ShardRouting shardRouting : indexShardRoutingTable) { + counts.addTo(clusterState.nodes().get(shardRouting.currentNodeId()).getName(), 1); + } + } + } + + assertThat(counts.size(), equalTo(nodeCountPerAZ * 3)); + // All shards should be started + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(numOfShards * (numOfReplica + 1))); + + // stopping half nodes in zone a + int nodesToStop = nodeCountPerAZ / 2; + List nodeDataPathSettings = new ArrayList<>(); + for (int i = 0; i < nodesToStop; i++) { + nodeDataPathSettings.add(internalCluster().dataPathSettings(nodes_in_zone_a.get(i))); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodes_in_zone_a.get(i))); + } + + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1") + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3 - nodesToStop)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + // Creating another index with 30 primary and 1 replica + createIndex( + "test-2", + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplica) + .build() + ); + + health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1", "test-2") + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3 - nodesToStop)) + .setWaitForNoRelocatingShards(true) + 
.setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + assertFalse(health.isTimedOut()); + + // Restarting the nodes back + for (int i = 0; i < nodesToStop; i++) { + internalCluster().startNode( + Settings.builder() + .put("node.name", nodes_in_zone_a.get(i)) + .put(nodeDataPathSettings.get(i)) + .put(commonSettings) + .put("node.attr.zone", "a") + .build() + ); + } + client().admin().cluster().prepareReroute().setRetryFailed(true).get(); + + health = client().admin() + .cluster() + .prepareHealth() + .setIndices("test-1", "test-2") + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes(Integer.toString(nodeCountPerAZ * 3)) + .setWaitForGreenStatus() + .setWaitForActiveShards(2 * numOfShards * (numOfReplica + 1)) + .setWaitForNoRelocatingShards(true) + .setWaitForNoInitializingShards(true) + .execute() + .actionGet(); + clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); + + // All shards should be started now and cluster health should be green + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2 * numOfShards * (numOfReplica + 1))); + assertThat(health.isTimedOut(), equalTo(false)); + } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java index 8e2824163709d..c43fb3be214a9 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/NodeLoadAwareAllocationDecider.java @@ -33,11 +33,13 @@ * *

* and prevent allocation on the surviving nodes of the under capacity cluster - * based on overload factor defined as a percentage by + * based on overload factor defined as a percentage and flat skew as absolute allowed skewness by + *

*
  * cluster.routing.allocation.load_awareness.skew_factor: X
+ * cluster.routing.allocation.load_awareness.flat_skew: N
  * 
- * The total limit per node based on skew_factor doesn't limit primaries that previously + * The total limit per node based on skew_factor and flat_skew doesn't limit primaries that previously * existed on the disk as those shards are force allocated by * {@link AllocationDeciders#canForceAllocatePrimary(ShardRouting, RoutingNode, RoutingAllocation)} * however new primaries due to index creation, snapshot restore etc can be controlled via the below settings. @@ -74,6 +76,13 @@ public class NodeLoadAwareAllocationDecider extends AllocationDecider { Setting.Property.Dynamic, Property.NodeScope ); + public static final Setting CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING = Setting.intSetting( + "cluster.routing.allocation.load_awareness.flat_skew", + 2, + 2, + Property.Dynamic, + Property.NodeScope + ); private volatile int provisionedCapacity; @@ -81,12 +90,15 @@ public class NodeLoadAwareAllocationDecider extends AllocationDecider { private volatile boolean allowUnassignedPrimaries; + private volatile int flatSkew; + private static final Logger logger = LogManager.getLogger(NodeLoadAwareAllocationDecider.class); public NodeLoadAwareAllocationDecider(Settings settings, ClusterSettings clusterSettings) { this.skewFactor = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.get(settings); this.provisionedCapacity = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.get(settings); this.allowUnassignedPrimaries = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING.get(settings); + this.flatSkew = CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING.get(settings); clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING, this::setSkewFactor); clusterSettings.addSettingsUpdateConsumer( CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING, @@ -96,6 +108,7 @@ public NodeLoadAwareAllocationDecider(Settings settings, ClusterSettings 
cluster CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING, this::setAllowUnassignedPrimaries ); + clusterSettings.addSettingsUpdateConsumer(CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING, this::setFlatSkew); } private void setAllowUnassignedPrimaries(boolean allowUnassignedPrimaries) { @@ -110,6 +123,10 @@ private void setProvisionedCapacity(int provisionedCapacity) { this.provisionedCapacity = provisionedCapacity; } + private void setFlatSkew(int flatSkew) { + this.flatSkew = flatSkew; + } + @Override public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { return underCapacity(shardRouting, node, allocation, (count, limit) -> count >= limit); @@ -146,7 +163,7 @@ private Decision underCapacity( Metadata metadata = allocation.metadata(); float expectedAvgShardsPerNode = (float) metadata.getTotalNumberOfShards() / provisionedCapacity; int nodeShardCount = node.numberOfOwningShards(); - int limit = (int) Math.ceil(expectedAvgShardsPerNode * (1 + skewFactor / 100.0)); + int limit = flatSkew + (int) Math.ceil(expectedAvgShardsPerNode * (1 + skewFactor / 100.0)); if (decider.test(nodeShardCount, limit)) { logger.debug( () -> new ParameterizedMessage( @@ -163,10 +180,11 @@ private Decision underCapacity( Decision.NO, NAME, "too many shards [%d] allocated to this node, limit per node [%d] considering" - + " overload factor [%.2f] based on capacity [%d]", + + " overload factor [%.2f] and flat skew [%d] based on capacity [%d]", nodeShardCount, limit, skewFactor, + flatSkew, provisionedCapacity ); } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 1d0039c26670a..27de358bb2350 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -560,6 +560,7 @@ public void 
apply(Settings value, Settings current, Settings previous) { NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING, NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_ALLOW_UNASSIGNED_PRIMARIES_SETTING, + NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_FLAT_SKEW_SETTING, ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENABLED, ShardIndexingPressureSettings.SHARD_INDEXING_PRESSURE_ENFORCED, ShardIndexingPressureSettings.REQUEST_SIZE_WINDOW, diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java index d2e7e0e7e636a..c4dcae84581cb 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/NodeLoadAwareAllocationTests.java @@ -22,7 +22,6 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.UnassignedInfo; -import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.NodeLoadAwareAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; import org.opensearch.common.settings.Settings; @@ -106,9 +105,11 @@ public void testNewUnassignedPrimaryAllocationOnOverload() { .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node1", singletonMap("zone", "zone_1")))) .build(); - // 4 existing shards from this node's local store get started + // 4 existing shards from this node's local store get started and cluster rebalances newState = strategy.reroute(newState, "reroute"); - newState 
= startInitializingShardsAndReroute(strategy, newState); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(32)); // add back node2 when skewness is still breached @@ -282,11 +283,14 @@ public void testExistingPrimariesAllocationOnOverload() { newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); newState = strategy.reroute(newState, "reroute"); - newState = startInitializingShardsAndReroute(strategy, newState); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + // 28 shards should be assigned (14 on each node -> 8 * 1.5 + 2) logger.info("limits should be applied on newly create primaries"); - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(24)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(16)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(28)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(12)); assertEquals( 12L, @@ -298,7 +302,7 @@ public void testExistingPrimariesAllocationOnOverload() { ); assertEquals( - 4L, + 0L, newState.getRoutingNodes() .shardsWithState(UNASSIGNED) .stream() @@ -306,7 +310,7 @@ public void testExistingPrimariesAllocationOnOverload() { .count() ); - assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(12)); + assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(14)); logger.info("--> Remove node4 from zone holding primaries"); newState = removeNodes(newState, strategy, "node4"); @@ -339,10 +343,10 @@ public void testExistingPrimariesAllocationOnOverload() { logger.info("--> do another reroute, make sure nothing moves"); 
assertThat(strategy.reroute(newState, "reroute").routingTable(), sameInstance(newState.routingTable())); - assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(12)); - assertThat(newState.getRoutingNodes().node("node5").size(), equalTo(12)); + assertThat(newState.getRoutingNodes().node("node4").size(), equalTo(14)); + assertThat(newState.getRoutingNodes().node("node5").size(), equalTo(14)); - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(24)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(28)); newState = ClusterState.builder(newState) .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node1", singletonMap("zone", "zone_1")))) @@ -436,7 +440,8 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(30)); + // Each node can take 12 shards each (2 + ceil(8*1.2)) + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(36)); for (ShardRouting shard : newState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.NODE_LEFT); @@ -458,10 +463,12 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); newState = strategy.reroute(newState, "reroute"); - newState = startInitializingShardsAndReroute(strategy, newState); + while (!newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty()) { + newState = startInitializingShardsAndReroute(strategy, newState); + } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(20)); + 
assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(66)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(14)); logger.info("add another index with 60 shards"); metadata = Metadata.builder(newState.metadata()) @@ -482,8 +489,8 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(120)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(20)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(126)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(14)); logger.info("change settings to allow unassigned primaries"); strategy = createAllocationServiceWithAdditionalSettings( @@ -499,7 +506,7 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { ); for (RoutingNode node : newState.getRoutingNodes()) { - assertThat(node.size(), equalTo(40)); + assertThat(node.size(), equalTo(42)); } logger.info("add another index with 5 shards"); @@ -513,15 +520,15 @@ public void testSingleZoneOneReplicaLimitsShardAllocationOnOverload() { ) .build(); updatedRoutingTable = RoutingTable.builder(newState.routingTable()).addAsNew(metadata.index("test3")).build(); - // increases avg shard per node to 145/5 = 29, overload factor 1.2, total allowed 35 per node and NO primaries get assigned - // since total owning shards are 40 per node already + // increases avg shard per node to 145/5 = 29, overload factor 1.2, total allowed 35+2=37 per node and NO primaries get assigned + // since total owning shards are 42 per node already newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); newState = strategy.reroute(newState, "reroute"); newState = startInitializingShardsAndReroute(strategy, newState); - 
assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(120)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(25)); + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(126)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(19)); assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).stream().filter(ShardRouting::primary).count(), equalTo(5L)); } @@ -600,21 +607,24 @@ public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(5)); - assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(5)); + assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(7)); + assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(7)); // add the removed node newState = addNodes(newState, strategy, "zone3", "node11"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); - newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(5)); + assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(6)); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); // add the removed node newState = addNodes(newState, strategy, "zone3", "node12"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); - newState = startInitializingShardsAndReroute(strategy, newState); + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, 
newState); + } assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(5)); // add the removed node @@ -674,13 +684,14 @@ public void testThreeZoneOneReplicaLimitsShardAllocationOnOverload() { logger.info("--> add five new node in new zone and reroute"); clusterState = addNodes(clusterState, strategy, "zone2", "node6", "node7", "node8", "node9", "node10"); + // Each node can take 7 shards each now (2 + ceil(4*1.2)) assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(30)); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(25)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(30)); logger.info("--> complete relocation"); clusterState = startInitializingShardsAndReroute(strategy, clusterState); - assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(55)); + assertThat(clusterState.getRoutingNodes().shardsWithState(ShardRoutingState.STARTED).size(), equalTo(60)); logger.info("--> do another reroute, make sure nothing moves"); assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); @@ -707,6 +718,7 @@ public void testThreeZoneOneReplicaLimitsShardAllocationOnOverload() { newState = startInitializingShardsAndReroute(strategy, newState); } + // Each node can now have 5 shards each assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(5)); assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(5)); @@ -791,8 +803,9 @@ public void testThreeZoneTwoReplicaLimitsShardAllocationOnOverloadAcrossZones() newState = startInitializingShardsAndReroute(strategy, newState); } // ensure minority zone doesn't get overloaded - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(53)); - 
assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(10)); + // each node can take 10 shards each (2 + ceil(7*1.1)) + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(61)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(2)); for (ShardRouting shard : newState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.NODE_LEFT); } @@ -912,15 +925,20 @@ public void testSingleZoneOneReplicaLimitsReplicaAllocationOnOverload() { clusterState = startInitializingShardsAndReroute(strategy, clusterState); assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(20)); - // assert replicas are not assigned but primaries are - logger.info("--> replicas are not initializing"); - assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); + // Each node can take 11 shards each (2 + ceil(8*1.1)), hence 2 replicas will also start + logger.info("--> 2 replicas are initializing"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(2)); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.INDEX_CREATED); assertFalse(shard.primary()); } + logger.info("--> start the shards (replicas)"); + while (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + } + logger.info("--> do another reroute, make sure nothing moves"); assertThat(strategy.reroute(clusterState, "reroute").routingTable(), sameInstance(clusterState.routingTable())); @@ -929,10 +947,12 @@ public void testSingleZoneOneReplicaLimitsReplicaAllocationOnOverload() { assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(18)); - 
clusterState = startInitializingShardsAndReroute(strategy, clusterState); + while (clusterState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + } logger.info("--> replicas are started"); - assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(38)); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(40)); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertEquals(shard.unassignedInfo().getReason(), UnassignedInfo.Reason.INDEX_CREATED); @@ -1012,11 +1032,12 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() newState = startInitializingShardsAndReroute(strategy, newState); } - assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(50)); - assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(10)); + // Each node can take 7 shards max ( 2 + ceil(4*1.2)) + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); for (RoutingNode node : newState.getRoutingNodes()) { - assertThat(node.size(), equalTo(5)); + assertThat(node.size(), equalTo(6)); } // add the removed node @@ -1025,9 +1046,7 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() .build(); newState = strategy.reroute(newState, "reroute"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(5)); // add the removed node newState = ClusterState.builder(newState) @@ -1035,9 +1054,7 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() .build(); newState = strategy.reroute(newState, 
"reroute"); - assertThat(newState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(5)); newState = startInitializingShardsAndReroute(strategy, newState); - assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(5)); // add the removed node newState = ClusterState.builder(newState) @@ -1068,6 +1085,120 @@ public void testThreeZoneTwoReplicaLimitsReplicaAllocationUnderFullZoneFailure() assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); } + public void testThreeZoneOneReplicaWithSkewFactorZeroAllShardsAssignedAfterRecovery() { + AllocationService strategy = createAllocationServiceWithAdditionalSettings( + org.opensearch.common.collect.Map.of( + NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_PROVISIONED_CAPACITY_SETTING.getKey(), + 15, + NodeLoadAwareAllocationDecider.CLUSTER_ROUTING_ALLOCATION_LOAD_AWARENESS_SKEW_FACTOR_SETTING.getKey(), + 0, + "cluster.routing.allocation.awareness.force.zone.values", + "zone1,zone2,zone3" + ) + ); + + logger.info("Building initial routing table for 'testThreeZoneOneReplicaWithSkewFactorZeroAllShardsAssignedAfterRecovery'"); + + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(30).numberOfReplicas(1)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding five nodes on same zone and do rerouting"); + clusterState = addNodes(clusterState, strategy, "zone1", "node1", "node2", "node3", "node4", "node5"); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(30)); + + logger.info("--> start the shards 
(primaries)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + logger.info("--> add five new node in new zone and reroute"); + clusterState = addNodes(clusterState, strategy, "zone2", "node6", "node7", "node8", "node9", "node10"); + + logger.info("--> complete relocation"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + + ClusterState newState = addNodes(clusterState, strategy, "zone3", "node11", "node12", "node13", "node14", "node15"); + + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); + + assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node13").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node14").size(), equalTo(4)); + assertThat(newState.getRoutingNodes().node("node15").size(), equalTo(4)); + + logger.info("--> Removing three nodes from zone3"); + newState = removeNodes(newState, strategy, "node11", "node12", "node13"); + + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + + // Each node can take 6 shards max (2 + ceil(4*1.0)), so all shards should be assigned + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(60)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); + + logger.info("add another index with 30 primary 1 replica"); + metadata = Metadata.builder(newState.metadata()) + .put( + IndexMetadata.builder("test1") + .settings( + 
settings(Version.CURRENT).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 30) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + ) + ) + .build(); + RoutingTable updatedRoutingTable = RoutingTable.builder(newState.routingTable()).addAsNew(metadata.index("test1")).build(); + + newState = ClusterState.builder(newState).metadata(metadata).routingTable(updatedRoutingTable).build(); + newState = strategy.reroute(newState, "reroute"); + + newState = startInitializingShardsAndReroute(strategy, newState); + + // add the removed node + newState = ClusterState.builder(newState) + .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node11", singletonMap("zone", "zone3")))) + .build(); + newState = strategy.reroute(newState, "reroute"); + + newState = startInitializingShardsAndReroute(strategy, newState); + + // add the removed node + newState = ClusterState.builder(newState) + .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node12", singletonMap("zone", "zone3")))) + .build(); + newState = strategy.reroute(newState, "reroute"); + + newState = startInitializingShardsAndReroute(strategy, newState); + + // add the removed node + newState = ClusterState.builder(newState) + .nodes(DiscoveryNodes.builder(newState.nodes()).add(newNode("node13", singletonMap("zone", "zone3")))) + .build(); + newState = strategy.reroute(newState, "reroute"); + + while (newState.getRoutingNodes().shardsWithState(INITIALIZING).isEmpty() == false) { + newState = startInitializingShardsAndReroute(strategy, newState); + } + assertThat(newState.getRoutingNodes().node("node13").size(), equalTo(8)); + assertThat(newState.getRoutingNodes().node("node12").size(), equalTo(8)); + assertThat(newState.getRoutingNodes().node("node11").size(), equalTo(8)); + // ensure all shards are assigned + assertThat(newState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(120)); + assertThat(newState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(0)); + } + private ClusterState 
removeNodes(ClusterState clusterState, AllocationService allocationService, String... nodeIds) { DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder(clusterState.getNodes()); org.opensearch.common.collect.List.of(nodeIds).forEach(nodeId -> nodeBuilder.remove(nodeId)); @@ -1097,7 +1228,6 @@ private Settings buildSettings(Map settingsValue) { .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getKey(), 20) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 20) .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), 20) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") .put("cluster.routing.allocation.awareness.attributes", "zone"); settingsValue.forEach((k, v) -> { if (v instanceof Integer) settingsBuilder.put(k, (Integer) (v)); From e55040277664628acaffb4a7c42c2fb2ba4906d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 22:03:30 -0700 Subject: [PATCH 60/75] Bump xmlbeans from 5.0.3 to 5.1.0 in /plugins/ingest-attachment (#3572) * Bump xmlbeans from 5.0.3 to 5.1.0 in /plugins/ingest-attachment Bumps xmlbeans from 5.0.3 to 5.1.0. --- updated-dependencies: - dependency-name: org.apache.xmlbeans:xmlbeans dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/ingest-attachment/build.gradle | 2 +- plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 | 1 - plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index 456b652ff82a3..86694b9bc9da7 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -79,7 +79,7 @@ dependencies { api "org.apache.poi:poi:${versions.poi}" api "org.apache.poi:poi-ooxml-lite:${versions.poi}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 'org.apache.xmlbeans:xmlbeans:5.0.3' + api 'org.apache.xmlbeans:xmlbeans:5.1.0' api 'org.apache.commons:commons-collections4:4.4' // MS Office api "org.apache.poi:poi-scratchpad:${versions.poi}" diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 deleted file mode 100644 index 7451ee17640d6..0000000000000 --- a/plugins/ingest-attachment/licenses/xmlbeans-5.0.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e1ef1382ae9dfb2438b82b6dd575566355c2f30f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 new file mode 100644 index 0000000000000..85f757b61048c --- /dev/null +++ b/plugins/ingest-attachment/licenses/xmlbeans-5.1.0.jar.sha1 @@ -0,0 +1 @@ +3534ab896663e6f6d8a2cf46882d7407641d7a31 \ No newline at end of file From 5daa3a772db0c0e9d5550ea0415f515bfdf98fb7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jun 2022 22:03:59 -0700 Subject: [PATCH 61/75] Bump google-oauth-client from 1.34.0 to 1.34.1 in /plugins/discovery-gce (#3570) * Bump google-oauth-client from 1.34.0 to 1.34.1 in /plugins/discovery-gce Bumps [google-oauth-client](https://github.com/googleapis/google-oauth-java-client) from 1.34.0 to 1.34.1. - [Release notes](https://github.com/googleapis/google-oauth-java-client/releases) - [Changelog](https://github.com/googleapis/google-oauth-java-client/blob/main/CHANGELOG.md) - [Commits](https://github.com/googleapis/google-oauth-java-client/compare/v1.34.0...v1.34.1) --- updated-dependencies: - dependency-name: com.google.oauth-client:google-oauth-client dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/discovery-gce/build.gradle | 2 +- .../discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 | 1 - .../discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 create mode 100644 plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 diff --git a/plugins/discovery-gce/build.gradle b/plugins/discovery-gce/build.gradle index 983a2907e4e67..c8b52d3afcd45 100644 --- a/plugins/discovery-gce/build.gradle +++ b/plugins/discovery-gce/build.gradle @@ -24,7 +24,7 @@ versions << [ dependencies { api "com.google.apis:google-api-services-compute:v1-rev160-${versions.google}" api "com.google.api-client:google-api-client:${versions.google}" - api "com.google.oauth-client:google-oauth-client:1.34.0" + api "com.google.oauth-client:google-oauth-client:1.34.1" api "com.google.http-client:google-http-client:${versions.google}" 
api "com.google.http-client:google-http-client-jackson2:${versions.google}" api 'com.google.code.findbugs:jsr305:3.0.2' diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 deleted file mode 100644 index 57c5c16b34deb..0000000000000 --- a/plugins/discovery-gce/licenses/google-oauth-client-1.34.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a0dc471bd498c62280120037a42d410c0e36f5d6 \ No newline at end of file diff --git a/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 new file mode 100644 index 0000000000000..a8434bd380761 --- /dev/null +++ b/plugins/discovery-gce/licenses/google-oauth-client-1.34.1.jar.sha1 @@ -0,0 +1 @@ +4a4f88c5e13143f882268c98239fb85c3b2c6cb2 \ No newline at end of file From f933133108060e4ddbb68ecd6917cfbd9b461b19 Mon Sep 17 00:00:00 2001 From: Anshu Agarwal Date: Tue, 14 Jun 2022 11:29:52 +0530 Subject: [PATCH 62/75] Fix for bug showing incorrect awareness attributes count in AwarenessAllocationDecider (#3428) * Fix for bug showing incorrect awareness attributes count in AwarenessAllocationDecider Signed-off-by: Anshu Agarwal --- .../decider/AwarenessAllocationDecider.java | 13 ++- .../allocation/AwarenessAllocationTests.java | 92 +++++++++++++++++++ 2 files changed, 101 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index a873129723577..3d7ba09c839fc 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -33,11 +33,14 @@ package org.opensearch.cluster.routing.allocation.decider; import 
java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.function.Function; import com.carrotsearch.hppc.ObjectIntHashMap; +import com.carrotsearch.hppc.cursors.ObjectCursor; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RoutingNode; import org.opensearch.cluster.routing.ShardRouting; @@ -207,12 +210,14 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout int numberOfAttributes = nodesPerAttribute.size(); List fullValues = forcedAwarenessAttributes.get(awarenessAttribute); + if (fullValues != null) { - for (String fullValue : fullValues) { - if (shardPerAttribute.containsKey(fullValue) == false) { - numberOfAttributes++; - } + // If forced awareness is enabled, numberOfAttributes = count(distinct((union(discovered_attributes, forced_attributes))) + Set attributesSet = new HashSet<>(fullValues); + for (ObjectCursor stringObjectCursor : nodesPerAttribute.keys()) { + attributesSet.add(stringObjectCursor.value); } + numberOfAttributes = attributesSet.size(); } // TODO should we remove ones that are not part of full list? 
diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java index c9e427a178515..b2adcd21cd8c9 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/AwarenessAllocationTests.java @@ -35,23 +35,32 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.RoutingNode; +import org.opensearch.cluster.routing.RoutingNodes; import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider; +import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; + +import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import 
java.util.Map; import static java.util.Collections.singletonMap; @@ -971,4 +980,87 @@ public void testMultipleAwarenessAttributes() { assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(2)); assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(0)); } + + public void testAllocationExplainForUnassignedShardsWithUnbalancedZones() { + Settings settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_REPLICAS_RECOVERIES_SETTING.getKey(), 10) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + AllocationService strategy = createAllocationService(settings); + + logger.info("Building initial routing table for 'testAllocationExplainForUnassignedShardsWithUnbalancedZones'"); + Metadata metadata = Metadata.builder() + .put(IndexMetadata.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(2)) + .build(); + + RoutingTable initialRoutingTable = RoutingTable.builder().addAsNew(metadata.index("test")).build(); + + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metadata(metadata) + .routingTable(initialRoutingTable) + .build(); + + logger.info("--> adding 3 nodes in different zones and do rerouting"); + clusterState = ClusterState.builder(clusterState) + .nodes( + DiscoveryNodes.builder() + .add(newNode("A-0", singletonMap("zone", "a"))) + .add(newNode("A-1", singletonMap("zone", "a"))) + .add(newNode("B-0", singletonMap("zone", "b"))) + ) + .build(); + clusterState = strategy.reroute(clusterState, "reroute"); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), 
equalTo(0)); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + + logger.info("--> start the shard (primary)"); + clusterState = startInitializingShardsAndReroute(strategy, clusterState); + assertThat(clusterState.getRoutingNodes().shardsWithState(STARTED).size(), equalTo(1)); + assertThat(clusterState.getRoutingNodes().shardsWithState(INITIALIZING).size(), equalTo(1)); + // One Shard is unassigned due to forced zone awareness + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); + + List unassignedShards = clusterState.getRoutingTable().shardsWithState(UNASSIGNED); + + ClusterSettings EMPTY_CLUSTER_SETTINGS = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); + // Add a new node in zone c + clusterState = ClusterState.builder(clusterState) + .nodes(DiscoveryNodes.builder(clusterState.nodes()).add(newNode("C-0", singletonMap("zone", "c")))) + .build(); + + final AwarenessAllocationDecider decider = new AwarenessAllocationDecider(settings, EMPTY_CLUSTER_SETTINGS); + + final RoutingAllocation allocation = new RoutingAllocation( + new AllocationDeciders(Collections.singleton(decider)), + clusterState.getRoutingNodes(), + clusterState, + null, + null, + 0L + ); + allocation.debugDecision(true); + + Decision decision = null; + RoutingNodes nodes = clusterState.getRoutingNodes(); + + for (RoutingNode node : nodes) { + // Try to allocate unassigned shard to A-0, fails because of forced zone awareness + if (node.nodeId().equals("A-0")) { + decision = decider.canAllocate(unassignedShards.get(0), node, allocation); + assertEquals(Decision.Type.NO, decision.type()); + assertEquals( + decision.getExplanation(), + "there are too many copies of the shard allocated to nodes with attribute" + + " [zone], there are [3] total configured shard copies for this shard id and [3]" + + " total attribute values, expected the allocated shard count per attribute [2] to" + + " be 
less than or equal to the upper bound of the required number of shards per attribute [1]" + ); + } + + } + } } From 17584c51f157009a8ba07fdce9063497908c423a Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Tue, 14 Jun 2022 09:57:45 -0400 Subject: [PATCH 63/75] Added bwc version 1.3.4 (#3552) Signed-off-by: GitHub Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 0461af4966e92..378c0f52da3ad 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -40,6 +40,7 @@ BWC_VERSION: - "1.3.1" - "1.3.2" - "1.3.3" + - "1.3.4" - "2.0.0" - "2.0.1" - "2.1.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index ec33e674c4d5f..2cc8cde2cf0f3 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -87,6 +87,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_1 = new Version(1030199, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1); + public static final Version V_1_3_4 = new Version(1030499, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_3_0); From f6c1629bec348d1ea78239274ed9f849ddfd2601 Mon Sep 17 00:00:00 2001 From: Yaliang Wu Date: Tue, 14 Jun 2022 22:48:48 +0000 
Subject: [PATCH 64/75] Support dynamic node role (#3436) * Support unknown node role Currently OpenSearch only supports several built-in nodes like data node role. If specify unknown node role, OpenSearch node will fail to start. This limit how to extend OpenSearch to support some extension function. For example, user may prefer to run ML tasks on some dedicated node which doesn't serve as any built-in node roles. So the ML tasks won't impact OpenSearch core function. This PR removed the limitation and user can specify any node role and OpenSearch will start node correctly with that unknown role. This opens the door for plugin developer to run specific tasks on dedicated nodes. Issue: https://github.com/opensearch-project/OpenSearch/issues/2877 Signed-off-by: Yaliang Wu * fix cat nodes rest API spec Signed-off-by: Yaliang Wu * fix mixed cluster IT failure Signed-off-by: Yaliang Wu * add DynamicRole Signed-off-by: Yaliang Wu * change generator method name Signed-off-by: Yaliang Wu * fix failed docker test Signed-off-by: Yaliang Wu * transform role name to lower case to avoid confusion Signed-off-by: Yaliang Wu * transform the node role abbreviation to lower case Signed-off-by: Yaliang Wu * fix checkstyle Signed-off-by: Yaliang Wu * add test for case-insensitive role name change Signed-off-by: Yaliang Wu --- .../resources/rest-api-spec/test/11_nodes.yml | 8 +- .../rest-api-spec/test/cat.nodes/10_basic.yml | 12 +-- .../cluster/node/DiscoveryNode.java | 15 +++- .../cluster/node/DiscoveryNodeRole.java | 50 ++++++++++-- .../rest/action/cat/RestNodesAction.java | 14 +++- .../node/DiscoveryNodeRoleGenerator.java | 16 ++++ .../cluster/node/DiscoveryNodeRoleTests.java | 23 +++++- .../cluster/node/DiscoveryNodeTests.java | 11 +++ .../node/NodeRoleSettingsTests.java | 20 +++++ .../rest/action/cat/RestNodesActionTests.java | 76 ++++++++++++++++--- 10 files changed, 210 insertions(+), 35 deletions(-) create mode 100644 
server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleGenerator.java diff --git a/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml b/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml index a6b78645087f4..1c10166e96eeb 100644 --- a/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml +++ b/distribution/docker/src/test/resources/rest-api-spec/test/11_nodes.yml @@ -24,8 +24,8 @@ - match: $body: | - / #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role cluster_manager name - ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ + / #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role node.roles cluster_manager name + ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) (\s+ (-|\w+(,\w+)*+))? \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ - do: cat.nodes: @@ -33,8 +33,8 @@ - match: $body: | - /^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role \s+ cluster_manager \s+ name \n - ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ + /^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role \s+ node\.roles \s+ cluster_manager \s+ name \n + ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) (\s+ (-|\w+(,\w+)*+ ))? 
\s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ - do: cat.nodes: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml index f04c674d420ee..525d705de88f1 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.nodes/10_basic.yml @@ -10,9 +10,9 @@ v: true node_selector: # Only send request to nodes in <2.0 versions, especially during ':qa:mixed-cluster:v1.x.x#mixedClusterTest'. - # Because YAML REST test takes the minimum OpenSearch version in the cluster to apply the filter in 'skip' section, + # Because YAML REST test takes the minimum OpenSearch version in the cluster to apply the filter in 'skip' section, # see OpenSearchClientYamlSuiteTestCase#initAndResetContext() for detail. - # During 'mixedClusterTest', the cluster can be mixed with nodes in 1.x and 2.x versions, + # During 'mixedClusterTest', the cluster can be mixed with nodes in 1.x and 2.x versions, # so node_selector is required, and only filtering version in 'skip' is not enough. version: "1.0.0 - 1.4.99" @@ -32,8 +32,8 @@ - match: $body: | - / #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role cluster_manager name - ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ + / #ip heap.percent ram.percent cpu load_1m load_5m load_15m node.role node.roles cluster_manager name + ^ ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)?\s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) (\s+ (-|\w+(,\w+)*+))? 
\s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ - do: cat.nodes: @@ -41,8 +41,8 @@ - match: $body: | - /^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role \s+ cluster_manager \s+ name \n - ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ + /^ ip \s+ heap\.percent \s+ ram\.percent \s+ cpu \s+ load_1m \s+ load_5m \s+ load_15m \s+ node\.role (\s+ node\.roles)? \s+ cluster_manager \s+ name \n + ((\d{1,3}\.){3}\d{1,3} \s+ \d+ \s+ \d* \s+ (-)?\d* \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ ((-)?\d*(\.\d+)?)? \s+ (-|[cdhilmrstvw]{1,11}) (\s+ (-|\w+(,\w+)*+ ))? \s+ [-*x] \s+ (\S+\s?)+ \n)+ $/ - do: cat.nodes: diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 38e4cb6d8791a..0d55624a35998 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -52,6 +52,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; @@ -328,7 +329,11 @@ public DiscoveryNode(StreamInput in) throws IOException { } final DiscoveryNodeRole role = roleMap.get(roleName); if (role == null) { - roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); + if (in.getVersion().onOrAfter(Version.V_2_1_0)) { + roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); + } else { + roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); + } } else { assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; assert 
roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" @@ -567,10 +572,12 @@ private static Map rolesToMap(final Stream roleMap = rolesToMap(DiscoveryNodeRole.BUILT_IN_ROLES.stream()); public static DiscoveryNodeRole getRoleFromRoleName(final String roleName) { - if (roleMap.containsKey(roleName) == false) { - throw new IllegalArgumentException("unknown role [" + roleName + "]"); + // As we are supporting dynamic role, should make role name case-insensitive to avoid confusion of role name like "Data"/"DATA" + String lowerCasedRoleName = Objects.requireNonNull(roleName).toLowerCase(Locale.ROOT); + if (roleMap.containsKey(lowerCasedRoleName)) { + return roleMap.get(lowerCasedRoleName); } - return roleMap.get(roleName); + return new DiscoveryNodeRole.DynamicRole(lowerCasedRoleName, lowerCasedRoleName, false); } public static Set getPossibleRoles() { diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 26ace4b9d80c1..5685667c05b1a 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -58,7 +58,6 @@ public abstract class DiscoveryNodeRole implements Comparable private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(DiscoveryNodeRole.class); public static final String MASTER_ROLE_DEPRECATION_MESSAGE = "Assigning [master] role in setting [node.roles] is deprecated. To promote inclusive language, please use [cluster_manager] role instead."; - private final String roleName; /** @@ -95,6 +94,8 @@ public final boolean canContainData() { private final boolean isKnownRole; + private final boolean isDynamicRole; + /** * Whether this role is known by this node, or is an {@link DiscoveryNodeRole.UnknownRole}. 
*/ @@ -102,6 +103,10 @@ public final boolean isKnownRole() { return isKnownRole; } + public final boolean isDynamicRole() { + return isDynamicRole; + } + public boolean isEnabledByDefault(final Settings settings) { return legacySetting() != null && legacySetting().get(settings); } @@ -111,18 +116,21 @@ protected DiscoveryNodeRole(final String roleName, final String roleNameAbbrevia } protected DiscoveryNodeRole(final String roleName, final String roleNameAbbreviation, final boolean canContainData) { - this(true, roleName, roleNameAbbreviation, canContainData); + this(true, false, roleName, roleNameAbbreviation, canContainData); } private DiscoveryNodeRole( final boolean isKnownRole, + final boolean isDynamicRole, final String roleName, final String roleNameAbbreviation, final boolean canContainData ) { this.isKnownRole = isKnownRole; - this.roleName = Objects.requireNonNull(roleName); - this.roleNameAbbreviation = Objects.requireNonNull(roleNameAbbreviation); + this.isDynamicRole = isDynamicRole; + // As we are supporting dynamic role, should make role name case-insensitive to avoid confusion of role name like "Data"/"DATA" + this.roleName = Objects.requireNonNull(roleName).toLowerCase(Locale.ROOT); + this.roleNameAbbreviation = Objects.requireNonNull(roleNameAbbreviation).toLowerCase(Locale.ROOT); this.canContainData = canContainData; } @@ -153,12 +161,13 @@ public final boolean equals(Object o) { return roleName.equals(that.roleName) && roleNameAbbreviation.equals(that.roleNameAbbreviation) && canContainData == that.canContainData - && isKnownRole == that.isKnownRole; + && isKnownRole == that.isKnownRole + && isDynamicRole == that.isDynamicRole; } @Override public final int hashCode() { - return Objects.hash(isKnownRole, roleName(), roleNameAbbreviation(), canContainData()); + return Objects.hash(isKnownRole, isDynamicRole, roleName(), roleNameAbbreviation(), canContainData()); } @Override @@ -178,6 +187,7 @@ public final String toString() { + ", canContainData=" 
+ canContainData + (isKnownRole ? "" : ", isKnownRole=false") + + (isDynamicRole ? "" : ", isDynamicRole=false") + '}'; } @@ -311,7 +321,7 @@ static class UnknownRole extends DiscoveryNodeRole { * @param canContainData whether or not nodes with the role can contain data */ UnknownRole(final String roleName, final String roleNameAbbreviation, final boolean canContainData) { - super(false, roleName, roleNameAbbreviation, canContainData); + super(false, false, roleName, roleNameAbbreviation, canContainData); } @Override @@ -323,6 +333,32 @@ public Setting legacySetting() { } + /** + * Represents a dynamic role. This can occur if a custom role that not in {@link DiscoveryNodeRole#BUILT_IN_ROLES} added for a node. + * Some plugin can support extension function with dynamic roles. For example, ML plugin may run machine learning tasks on nodes + * with "ml" dynamic role. + */ + static class DynamicRole extends DiscoveryNodeRole { + + /** + * Construct a dynamic role with the specified role name and role name abbreviation. + * + * @param roleName the role name + * @param roleNameAbbreviation the role name abbreviation + * @param canContainData whether or not nodes with the role can contain data + */ + DynamicRole(final String roleName, final String roleNameAbbreviation, final boolean canContainData) { + super(false, true, roleName, roleNameAbbreviation, canContainData); + } + + @Override + public Setting legacySetting() { + // return null as dynamic role has no legacy setting + return null; + } + + } + /** * Check if the role is {@link #CLUSTER_MANAGER_ROLE} or {@link #MASTER_ROLE}. * @deprecated As of 2.0, because promoting inclusive language. MASTER_ROLE is deprecated. 
diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index 661a53e3d37b8..aaa0413dc4c5f 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -195,10 +195,12 @@ protected Table getTableWithHeader(final RestRequest request) { table.addCell("load_5m", "alias:l;text-align:right;desc:5m load avg"); table.addCell("load_15m", "alias:l;text-align:right;desc:15m load avg"); table.addCell("uptime", "default:false;alias:u;text-align:right;desc:node uptime"); + // TODO: Deprecate "node.role", use "node.roles" which shows full node role names table.addCell( "node.role", "alias:r,role,nodeRole;desc:m:master eligible node, d:data node, i:ingest node, -:coordinating node only" ); + table.addCell("node.roles", "alias:rs,all roles;desc: -:coordinating node only"); // TODO: Remove the header alias 'master', after removing MASTER_ROLE. It's added for compatibility when using parameter 'h=master'. table.addCell("cluster_manager", "alias:cm,m,master;desc:*:current cluster manager"); table.addCell("name", "alias:n;desc:node name"); @@ -423,12 +425,22 @@ Table buildTable( table.addCell(jvmStats == null ? null : jvmStats.getUptime()); final String roles; + final String allRoles; if (node.getRoles().isEmpty()) { roles = "-"; + allRoles = "-"; } else { - roles = node.getRoles().stream().map(DiscoveryNodeRole::roleNameAbbreviation).sorted().collect(Collectors.joining()); + List knownNodeRoles = node.getRoles() + .stream() + .filter(DiscoveryNodeRole::isKnownRole) + .collect(Collectors.toList()); + roles = knownNodeRoles.size() > 0 + ? 
knownNodeRoles.stream().map(DiscoveryNodeRole::roleNameAbbreviation).sorted().collect(Collectors.joining()) + : "-"; + allRoles = node.getRoles().stream().map(DiscoveryNodeRole::roleName).sorted().collect(Collectors.joining(",")); } table.addCell(roles); + table.addCell(allRoles); table.addCell(clusterManagerId == null ? "x" : clusterManagerId.equals(node.getId()) ? "*" : "-"); table.addCell(node.getName()); diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleGenerator.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleGenerator.java new file mode 100644 index 0000000000000..c1aa9390fec94 --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleGenerator.java @@ -0,0 +1,16 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.node; + +public class DiscoveryNodeRoleGenerator { + + public static DiscoveryNodeRole createDynamicRole(String roleName) { + return new DiscoveryNodeRole.DynamicRole(roleName, roleName, false); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java index d1acec2832b7c..f906a0f937d28 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeRoleTests.java @@ -38,6 +38,7 @@ import java.util.Arrays; import java.util.HashSet; +import java.util.Locale; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; @@ -117,7 +118,7 @@ public void testDiscoveryNodeRoleEqualsHashCode() { } - public void testUnknownRoleIsDistinctFromKnownRoles() { + public void testUnknownRoleIsDistinctFromKnownOrDynamicRoles() { for 
(DiscoveryNodeRole buildInRole : DiscoveryNodeRole.BUILT_IN_ROLES) { final DiscoveryNodeRole.UnknownRole unknownDataRole = new DiscoveryNodeRole.UnknownRole( buildInRole.roleName(), @@ -126,6 +127,15 @@ public void testUnknownRoleIsDistinctFromKnownRoles() { ); assertNotEquals(buildInRole, unknownDataRole); assertNotEquals(buildInRole.toString(), unknownDataRole.toString()); + final DiscoveryNodeRole.DynamicRole dynamicRole = new DiscoveryNodeRole.DynamicRole( + buildInRole.roleName(), + buildInRole.roleNameAbbreviation(), + buildInRole.canContainData() + ); + assertNotEquals(buildInRole, dynamicRole); + assertNotEquals(buildInRole.toString(), dynamicRole.toString()); + assertNotEquals(unknownDataRole, dynamicRole); + assertNotEquals(unknownDataRole.toString(), dynamicRole.toString()); } } @@ -138,4 +148,15 @@ public void testIsClusterManager() { assertTrue(DiscoveryNodeRole.MASTER_ROLE.isClusterManager()); assertFalse(randomFrom(DiscoveryNodeRole.DATA_ROLE.isClusterManager(), DiscoveryNodeRole.INGEST_ROLE.isClusterManager())); } + + public void testRoleNameIsCaseInsensitive() { + String roleName = "TestRole"; + String roleNameAbbreviation = "T"; + DiscoveryNodeRole unknownRole = new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, false); + assertEquals(roleName.toLowerCase(Locale.ROOT), unknownRole.roleName()); + assertEquals(roleNameAbbreviation.toLowerCase(Locale.ROOT), unknownRole.roleNameAbbreviation()); + DiscoveryNodeRole dynamicRole = new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, false); + assertEquals(roleName.toLowerCase(Locale.ROOT), dynamicRole.roleName()); + assertEquals(roleNameAbbreviation.toLowerCase(Locale.ROOT), dynamicRole.roleNameAbbreviation()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 1b7f698ae1f5c..abd1cae1ed97d 100644 --- 
a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -44,6 +44,7 @@ import java.net.InetAddress; import java.util.Collections; import java.util.HashSet; +import java.util.Locale; import java.util.Set; import java.util.stream.Collectors; @@ -193,4 +194,14 @@ private void runTestDiscoveryNodeIsRemoteClusterClient(final Settings settings, } } + public void testGetRoleFromRoleNameIsCaseInsensitive() { + String dataRoleName = "DATA"; + DiscoveryNodeRole dataNodeRole = DiscoveryNode.getRoleFromRoleName(dataRoleName); + assertEquals(DiscoveryNodeRole.DATA_ROLE, dataNodeRole); + + String dynamicRoleName = "TestRole"; + DiscoveryNodeRole dynamicNodeRole = DiscoveryNode.getRoleFromRoleName(dynamicRoleName); + assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleName()); + assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleNameAbbreviation()); + } } diff --git a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java index c875fec1979d1..3248b97b8b71f 100644 --- a/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java +++ b/server/src/test/java/org/opensearch/node/NodeRoleSettingsTests.java @@ -15,6 +15,7 @@ import java.util.Arrays; import java.util.Collections; +import java.util.List; import static org.hamcrest.Matchers.containsString; @@ -54,4 +55,23 @@ public void testMasterRoleDeprecationMessage() { assertEquals(Collections.singletonList(DiscoveryNodeRole.MASTER_ROLE), NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings)); assertWarnings(DiscoveryNodeRole.MASTER_ROLE_DEPRECATION_MESSAGE); } + + public void testUnknownNodeRoleAndBuiltInRoleCanCoexist() { + String testRole = "test_role"; + Settings roleSettings = Settings.builder().put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), "data, " + testRole).build(); + List nodeRoles = 
NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings); + assertEquals(2, nodeRoles.size()); + assertEquals(DiscoveryNodeRole.DATA_ROLE, nodeRoles.get(0)); + assertEquals(testRole, nodeRoles.get(1).roleName()); + assertEquals(testRole, nodeRoles.get(1).roleNameAbbreviation()); + } + + public void testUnknownNodeRoleOnly() { + String testRole = "test_role"; + Settings roleSettings = Settings.builder().put(NodeRoleSettings.NODE_ROLES_SETTING.getKey(), testRole).build(); + List nodeRoles = NodeRoleSettings.NODE_ROLES_SETTING.get(roleSettings); + assertEquals(1, nodeRoles.size()); + assertEquals(testRole, nodeRoles.get(0).roleName()); + assertEquals(testRole, nodeRoles.get(0).roleNameAbbreviation()); + } } diff --git a/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java b/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java index 593ad2907797e..6485ddd3bbc94 100644 --- a/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java +++ b/server/src/test/java/org/opensearch/rest/action/cat/RestNodesActionTests.java @@ -40,7 +40,10 @@ import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodeRoleGenerator; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.Table; import org.opensearch.common.settings.Settings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestRequest; @@ -48,6 +51,11 @@ import org.junit.Before; import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; @@ -64,18 +72,15 @@ public void setUpAction() { } public void 
testBuildTableDoesNotThrowGivenNullNodeInfoAndStats() { - ClusterName clusterName = new ClusterName("cluster-1"); - DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); - builder.add(new DiscoveryNode("node-1", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT)); - DiscoveryNodes discoveryNodes = builder.build(); - ClusterState clusterState = mock(ClusterState.class); - when(clusterState.nodes()).thenReturn(discoveryNodes); - - ClusterStateResponse clusterStateResponse = new ClusterStateResponse(clusterName, clusterState, false); - NodesInfoResponse nodesInfoResponse = new NodesInfoResponse(clusterName, Collections.emptyList(), Collections.emptyList()); - NodesStatsResponse nodesStatsResponse = new NodesStatsResponse(clusterName, Collections.emptyList(), Collections.emptyList()); - - action.buildTable(false, new FakeRestRequest(), clusterStateResponse, nodesInfoResponse, nodesStatsResponse); + testBuildTableWithRoles(emptySet(), (table) -> { + Map> nodeInfoMap = table.getAsMap(); + List cells = nodeInfoMap.get("node.role"); + assertEquals(1, cells.size()); + assertEquals("-", cells.get(0).value); + cells = nodeInfoMap.get("node.roles"); + assertEquals(1, cells.size()); + assertEquals("-", cells.get(0).value); + }); } public void testCatNodesWithLocalDeprecationWarning() { @@ -89,4 +94,51 @@ public void testCatNodesWithLocalDeprecationWarning() { terminate(threadPool); } + + public void testBuildTableWithDynamicRoleOnly() { + Set roles = new HashSet<>(); + String roleName = "test_role"; + DiscoveryNodeRole testRole = DiscoveryNodeRoleGenerator.createDynamicRole(roleName); + roles.add(testRole); + + testBuildTableWithRoles(roles, (table) -> { + Map> nodeInfoMap = table.getAsMap(); + List cells = nodeInfoMap.get("node.roles"); + assertEquals(1, cells.size()); + assertEquals(roleName, cells.get(0).value); + }); + } + + public void testBuildTableWithBothBuiltInAndDynamicRoles() { + Set roles = new HashSet<>(); + 
roles.add(DiscoveryNodeRole.DATA_ROLE); + String roleName = "test_role"; + DiscoveryNodeRole testRole = DiscoveryNodeRoleGenerator.createDynamicRole(roleName); + roles.add(testRole); + + testBuildTableWithRoles(roles, (table) -> { + Map> nodeInfoMap = table.getAsMap(); + List cells = nodeInfoMap.get("node.roles"); + assertEquals(1, cells.size()); + assertEquals("data,test_role", cells.get(0).value); + }); + } + + private void testBuildTableWithRoles(Set roles, Consumer verificationFunction) { + ClusterName clusterName = new ClusterName("cluster-1"); + DiscoveryNodes.Builder builder = DiscoveryNodes.builder(); + + builder.add(new DiscoveryNode("node-1", buildNewFakeTransportAddress(), emptyMap(), roles, Version.CURRENT)); + DiscoveryNodes discoveryNodes = builder.build(); + ClusterState clusterState = mock(ClusterState.class); + when(clusterState.nodes()).thenReturn(discoveryNodes); + + ClusterStateResponse clusterStateResponse = new ClusterStateResponse(clusterName, clusterState, false); + NodesInfoResponse nodesInfoResponse = new NodesInfoResponse(clusterName, Collections.emptyList(), Collections.emptyList()); + NodesStatsResponse nodesStatsResponse = new NodesStatsResponse(clusterName, Collections.emptyList(), Collections.emptyList()); + + Table table = action.buildTable(false, new FakeRestRequest(), clusterStateResponse, nodesInfoResponse, nodesStatsResponse); + + verificationFunction.accept(table); + } } From a4aacdcc98b5efc2905ea31a52a5c4fb4695eee1 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 15 Jun 2022 06:01:34 -0700 Subject: [PATCH 65/75] Rename package 'o.o.action.support.master' to 'o.o.action.support.clustermanager' (#3556) * Rename package org.opensearch.action.support.master to org.opensearch.action.support.clustermanager Signed-off-by: Tianli Feng * Rename classes with master term in the package org.opensearch.action.support.master Signed-off-by: Tianli Feng * Deprecate classes in org.opensearch.action.support.master Signed-off-by: Tianli 
Feng * Remove pakcage o.o.action.support.master Signed-off-by: Tianli Feng * Move package-info back Signed-off-by: Tianli Feng * Move package-info to new folder Signed-off-by: Tianli Feng * Correct the package-info Signed-off-by: Tianli Feng --- .../java/org/opensearch/client/ClusterClient.java | 2 +- .../java/org/opensearch/client/IndicesClient.java | 2 +- .../java/org/opensearch/client/IngestClient.java | 2 +- .../opensearch/client/RestHighLevelClient.java | 2 +- .../org/opensearch/client/SnapshotClient.java | 2 +- .../client/indices/CloseIndexResponse.java | 2 +- .../client/indices/CreateIndexResponse.java | 2 +- .../client/indices/PutIndexTemplateRequest.java | 7 +++++-- .../client/indices/rollover/RolloverResponse.java | 2 +- .../org/opensearch/client/ClusterClientIT.java | 2 +- .../client/ClusterRequestConvertersTests.java | 2 +- .../org/opensearch/client/IndicesClientIT.java | 2 +- .../client/IndicesRequestConvertersTests.java | 2 +- .../org/opensearch/client/IngestClientIT.java | 2 +- .../client/IngestRequestConvertersTests.java | 2 +- .../opensearch/client/RequestConvertersTests.java | 8 ++++---- .../java/org/opensearch/client/SnapshotIT.java | 2 +- .../client/SnapshotRequestConvertersTests.java | 2 +- .../client/core/AcknowledgedResponseTests.java | 8 ++++---- .../ClusterClientDocumentationIT.java | 2 +- .../IndicesClientDocumentationIT.java | 2 +- .../IngestClientDocumentationIT.java | 2 +- .../SnapshotClientDocumentationIT.java | 2 +- .../StoredScriptsDocumentationIT.java | 2 +- .../client/indices/CloseIndexResponseTests.java | 4 ++-- .../repositories/url/URLSnapshotRestoreIT.java | 2 +- .../index/mapper/size/SizeMappingIT.java | 2 +- .../azure/AzureStorageCleanupThirdPartyTests.java | 2 +- .../gcs/GoogleCloudStorageThirdPartyTests.java | 2 +- .../repositories/hdfs/HdfsRepositoryTests.java | 2 +- .../opensearch/repositories/hdfs/HdfsTests.java | 2 +- .../s3/S3RepositoryThirdPartyTests.java | 2 +- .../admin/indices/create/CreateIndexIT.java | 2 +- 
.../indices/datastream/DataStreamTestCase.java | 2 +- .../opensearch/action/bulk/BulkIntegrationIT.java | 2 +- .../IndexingClusterManagerFailoverIT.java | 2 +- .../org/opensearch/aliases/IndexAliasesIT.java | 2 +- .../org/opensearch/blocks/SimpleBlocksIT.java | 2 +- .../cluster/coordination/RareClusterStateIT.java | 2 +- .../cluster/shards/ClusterShardLimitIT.java | 2 +- .../opensearch/index/seqno/RetentionLeaseIT.java | 2 +- .../indices/IndicesOptionsIntegrationIT.java | 2 +- .../mapping/UpdateMappingIntegrationIT.java | 2 +- .../state/CloseWhileRelocatingShardsIT.java | 2 +- .../indices/state/OpenCloseIndexIT.java | 2 +- .../org/opensearch/ingest/IngestClientIT.java | 2 +- .../IngestProcessorNotInstalledOnAllNodesIT.java | 2 +- .../search/suggest/CompletionSuggestSearchIT.java | 2 +- .../org/opensearch/snapshots/CloneSnapshotIT.java | 2 +- .../snapshots/ConcurrentSnapshotsIT.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 2 +- .../org/opensearch/snapshots/RepositoriesIT.java | 2 +- .../ClusterAllocationExplainRequest.java | 4 ++-- .../ClusterAllocationExplainRequestBuilder.java | 4 ++-- .../TransportClusterAllocationExplainAction.java | 4 ++-- .../AddVotingConfigExclusionsRequest.java | 4 ++-- .../ClearVotingConfigExclusionsRequest.java | 4 ++-- .../TransportAddVotingConfigExclusionsAction.java | 4 ++-- ...ransportClearVotingConfigExclusionsAction.java | 4 ++-- .../cluster/health/ClusterHealthRequest.java | 4 ++-- .../health/ClusterHealthRequestBuilder.java | 4 ++-- .../health/TransportClusterHealthAction.java | 4 ++-- .../cleanup/CleanupRepositoryRequest.java | 2 +- .../cleanup/CleanupRepositoryRequestBuilder.java | 4 ++-- .../cleanup/TransportCleanupRepositoryAction.java | 6 ++++-- .../delete/DeleteRepositoryAction.java | 2 +- .../delete/DeleteRepositoryRequest.java | 2 +- .../delete/DeleteRepositoryRequestBuilder.java | 4 ++-- .../delete/TransportDeleteRepositoryAction.java | 6 +++--- .../repositories/get/GetRepositoriesRequest.java | 4 ++-- 
.../get/GetRepositoriesRequestBuilder.java | 4 ++-- .../get/TransportGetRepositoriesAction.java | 4 ++-- .../repositories/put/PutRepositoryAction.java | 2 +- .../repositories/put/PutRepositoryRequest.java | 2 +- .../put/PutRepositoryRequestBuilder.java | 4 ++-- .../put/TransportPutRepositoryAction.java | 6 +++--- .../verify/TransportVerifyRepositoryAction.java | 4 ++-- .../verify/VerifyRepositoryRequest.java | 2 +- .../verify/VerifyRepositoryRequestBuilder.java | 4 ++-- .../cluster/reroute/ClusterRerouteRequest.java | 2 +- .../reroute/ClusterRerouteRequestBuilder.java | 2 +- .../cluster/reroute/ClusterRerouteResponse.java | 2 +- .../reroute/TransportClusterRerouteAction.java | 4 ++-- .../settings/ClusterGetSettingsRequest.java | 4 ++-- .../settings/ClusterUpdateSettingsRequest.java | 2 +- .../ClusterUpdateSettingsRequestBuilder.java | 2 +- .../settings/ClusterUpdateSettingsResponse.java | 2 +- .../TransportClusterUpdateSettingsAction.java | 4 ++-- .../shards/ClusterSearchShardsRequest.java | 6 ++++-- .../shards/ClusterSearchShardsRequestBuilder.java | 4 ++-- .../TransportClusterSearchShardsAction.java | 4 ++-- .../snapshots/clone/CloneSnapshotAction.java | 2 +- .../snapshots/clone/CloneSnapshotRequest.java | 7 +++++-- .../clone/CloneSnapshotRequestBuilder.java | 6 +++--- .../clone/TransportCloneSnapshotAction.java | 6 +++--- .../snapshots/create/CreateSnapshotRequest.java | 4 ++-- .../create/CreateSnapshotRequestBuilder.java | 4 ++-- .../create/TransportCreateSnapshotAction.java | 4 ++-- .../snapshots/delete/DeleteSnapshotAction.java | 2 +- .../snapshots/delete/DeleteSnapshotRequest.java | 4 ++-- .../delete/DeleteSnapshotRequestBuilder.java | 6 +++--- .../delete/TransportDeleteSnapshotAction.java | 6 +++--- .../snapshots/get/GetSnapshotsRequest.java | 4 ++-- .../snapshots/get/GetSnapshotsRequestBuilder.java | 4 ++-- .../get/TransportGetSnapshotsAction.java | 4 ++-- .../snapshots/restore/RestoreSnapshotRequest.java | 4 ++-- 
.../restore/RestoreSnapshotRequestBuilder.java | 4 ++-- .../restore/TransportRestoreSnapshotAction.java | 4 ++-- .../snapshots/status/SnapshotsStatusRequest.java | 4 ++-- .../status/SnapshotsStatusRequestBuilder.java | 4 ++-- .../status/TransportSnapshotsStatusAction.java | 4 ++-- .../admin/cluster/state/ClusterStateRequest.java | 4 ++-- .../cluster/state/ClusterStateRequestBuilder.java | 4 ++-- .../state/TransportClusterStateAction.java | 4 ++-- .../storedscripts/DeleteStoredScriptAction.java | 2 +- .../storedscripts/DeleteStoredScriptRequest.java | 2 +- .../DeleteStoredScriptRequestBuilder.java | 4 ++-- .../storedscripts/GetStoredScriptRequest.java | 4 ++-- .../GetStoredScriptRequestBuilder.java | 4 ++-- .../storedscripts/PutStoredScriptAction.java | 2 +- .../storedscripts/PutStoredScriptRequest.java | 2 +- .../PutStoredScriptRequestBuilder.java | 4 ++-- .../TransportDeleteStoredScriptAction.java | 6 +++--- .../TransportGetStoredScriptAction.java | 4 ++-- .../TransportPutStoredScriptAction.java | 6 +++--- .../cluster/tasks/PendingClusterTasksRequest.java | 4 ++-- .../tasks/PendingClusterTasksRequestBuilder.java | 4 ++-- .../tasks/TransportPendingClusterTasksAction.java | 4 ++-- .../admin/indices/alias/IndicesAliasesAction.java | 2 +- .../indices/alias/IndicesAliasesRequest.java | 2 +- .../alias/IndicesAliasesRequestBuilder.java | 4 ++-- .../alias/TransportIndicesAliasesAction.java | 6 +++--- .../alias/get/BaseAliasesRequestBuilder.java | 4 ++-- .../indices/alias/get/GetAliasesRequest.java | 4 ++-- .../alias/get/TransportGetAliasesAction.java | 4 ++-- .../admin/indices/close/CloseIndexRequest.java | 2 +- .../indices/close/CloseIndexRequestBuilder.java | 2 +- .../admin/indices/close/CloseIndexResponse.java | 2 +- .../indices/close/TransportCloseIndexAction.java | 4 ++-- .../admin/indices/create/AutoCreateAction.java | 4 ++-- .../admin/indices/create/CreateIndexRequest.java | 2 +- .../indices/create/CreateIndexRequestBuilder.java | 2 +- 
.../admin/indices/create/CreateIndexResponse.java | 2 +- .../create/TransportCreateIndexAction.java | 4 ++-- .../delete/DeleteDanglingIndexAction.java | 2 +- .../delete/DeleteDanglingIndexRequest.java | 2 +- .../TransportDeleteDanglingIndexAction.java | 8 +++++--- .../import_index/ImportDanglingIndexAction.java | 2 +- .../import_index/ImportDanglingIndexRequest.java | 2 +- .../TransportImportDanglingIndexAction.java | 2 +- .../datastream/CreateDataStreamAction.java | 8 ++++---- .../datastream/DeleteDataStreamAction.java | 10 +++++----- .../indices/datastream/GetDataStreamAction.java | 8 ++++---- .../admin/indices/delete/DeleteIndexAction.java | 2 +- .../admin/indices/delete/DeleteIndexRequest.java | 2 +- .../indices/delete/DeleteIndexRequestBuilder.java | 4 ++-- .../delete/TransportDeleteIndexAction.java | 6 +++--- .../exists/indices/IndicesExistsRequest.java | 4 ++-- .../indices/IndicesExistsRequestBuilder.java | 4 ++-- .../indices/TransportIndicesExistsAction.java | 4 ++-- .../action/admin/indices/get/GetIndexRequest.java | 2 +- .../admin/indices/get/GetIndexRequestBuilder.java | 2 +- .../indices/get/TransportGetIndexAction.java | 2 +- .../indices/mapping/get/GetMappingsRequest.java | 2 +- .../mapping/get/GetMappingsRequestBuilder.java | 2 +- .../mapping/get/TransportGetMappingsAction.java | 2 +- .../indices/mapping/put/AutoPutMappingAction.java | 2 +- .../indices/mapping/put/PutMappingAction.java | 2 +- .../indices/mapping/put/PutMappingRequest.java | 4 ++-- .../mapping/put/PutMappingRequestBuilder.java | 4 ++-- .../put/TransportAutoPutMappingAction.java | 6 +++--- .../mapping/put/TransportPutMappingAction.java | 6 +++--- .../admin/indices/open/OpenIndexRequest.java | 2 +- .../indices/open/OpenIndexRequestBuilder.java | 2 +- .../admin/indices/open/OpenIndexResponse.java | 2 +- .../indices/open/TransportOpenIndexAction.java | 4 ++-- .../indices/readonly/AddIndexBlockRequest.java | 2 +- .../readonly/AddIndexBlockRequestBuilder.java | 2 +- 
.../indices/readonly/AddIndexBlockResponse.java | 2 +- .../readonly/TransportAddIndexBlockAction.java | 4 ++-- .../admin/indices/rollover/RolloverRequest.java | 2 +- .../indices/rollover/RolloverRequestBuilder.java | 7 +++++-- .../admin/indices/rollover/RolloverResponse.java | 2 +- .../indices/rollover/TransportRolloverAction.java | 4 ++-- .../indices/settings/get/GetSettingsRequest.java | 4 ++-- .../settings/get/GetSettingsRequestBuilder.java | 4 ++-- .../settings/get/TransportGetSettingsAction.java | 4 ++-- .../put/TransportUpdateSettingsAction.java | 6 +++--- .../settings/put/UpdateSettingsAction.java | 2 +- .../settings/put/UpdateSettingsRequest.java | 2 +- .../put/UpdateSettingsRequestBuilder.java | 4 ++-- .../shards/IndicesShardStoreRequestBuilder.java | 4 ++-- .../indices/shards/IndicesShardStoresRequest.java | 6 ++++-- .../shards/TransportIndicesShardStoresAction.java | 4 ++-- .../admin/indices/shrink/ResizeRequest.java | 2 +- .../indices/shrink/ResizeRequestBuilder.java | 2 +- .../indices/shrink/TransportResizeAction.java | 4 ++-- .../delete/DeleteComponentTemplateAction.java | 6 +++--- .../DeleteComposableIndexTemplateAction.java | 6 +++--- .../delete/DeleteIndexTemplateAction.java | 2 +- .../delete/DeleteIndexTemplateRequest.java | 4 ++-- .../delete/DeleteIndexTemplateRequestBuilder.java | 6 +++--- .../TransportDeleteComponentTemplateAction.java | 6 +++--- ...nsportDeleteComposableIndexTemplateAction.java | 6 +++--- .../TransportDeleteIndexTemplateAction.java | 8 +++++--- .../template/get/GetComponentTemplateAction.java | 4 ++-- .../get/GetComposableIndexTemplateAction.java | 4 ++-- .../template/get/GetIndexTemplatesRequest.java | 4 ++-- .../get/GetIndexTemplatesRequestBuilder.java | 4 ++-- .../get/TransportGetComponentTemplateAction.java | 4 ++-- ...TransportGetComposableIndexTemplateAction.java | 4 ++-- .../get/TransportGetIndexTemplatesAction.java | 6 ++++-- .../post/SimulateIndexTemplateRequest.java | 4 ++-- 
.../template/post/SimulateTemplateAction.java | 4 ++-- .../TransportSimulateIndexTemplateAction.java | 4 ++-- .../post/TransportSimulateTemplateAction.java | 4 ++-- .../template/put/PutComponentTemplateAction.java | 6 +++--- .../put/PutComposableIndexTemplateAction.java | 6 +++--- .../template/put/PutIndexTemplateAction.java | 2 +- .../template/put/PutIndexTemplateRequest.java | 7 +++++-- .../put/PutIndexTemplateRequestBuilder.java | 6 +++--- .../put/TransportPutComponentTemplateAction.java | 6 +++--- ...TransportPutComposableIndexTemplateAction.java | 6 +++--- .../put/TransportPutIndexTemplateAction.java | 6 +++--- .../post/TransportUpgradeSettingsAction.java | 6 +++--- .../upgrade/post/UpgradeSettingsAction.java | 2 +- .../upgrade/post/UpgradeSettingsRequest.java | 2 +- .../action/ingest/DeletePipelineAction.java | 2 +- .../action/ingest/DeletePipelineRequest.java | 2 +- .../ingest/DeletePipelineRequestBuilder.java | 2 +- .../ingest/DeletePipelineTransportAction.java | 6 +++--- .../action/ingest/GetPipelineRequest.java | 4 ++-- .../action/ingest/GetPipelineRequestBuilder.java | 4 ++-- .../action/ingest/GetPipelineTransportAction.java | 4 ++-- .../action/ingest/PutPipelineAction.java | 2 +- .../action/ingest/PutPipelineRequest.java | 2 +- .../action/ingest/PutPipelineRequestBuilder.java | 2 +- .../action/ingest/PutPipelineTransportAction.java | 6 +++--- .../AcknowledgedRequest.java | 4 ++-- .../AcknowledgedRequestBuilder.java | 4 ++-- .../AcknowledgedResponse.java | 2 +- ...lusterManagerNodeOperationRequestBuilder.java} | 10 +++++----- ...erManagerNodeReadOperationRequestBuilder.java} | 12 ++++++------ .../ClusterManagerNodeReadRequest.java} | 9 +++++---- .../ClusterManagerNodeRequest.java} | 8 ++++---- .../ShardsAcknowledgedResponse.java | 2 +- .../TransportClusterManagerNodeAction.java} | 14 +++++++------- .../TransportClusterManagerNodeReadAction.java} | 11 ++++++----- .../info/ClusterInfoRequest.java | 6 +++--- .../info/ClusterInfoRequestBuilder.java | 6 
+++--- .../info/TransportClusterInfoAction.java | 6 +++--- .../info}/package-info.java | 4 ++-- .../info => clustermanager}/package-info.java | 4 ++-- .../org/opensearch/client/ClusterAdminClient.java | 2 +- .../org/opensearch/client/IndicesAdminClient.java | 2 +- .../opensearch/client/support/AbstractClient.java | 2 +- .../action/index/MappingUpdatedAction.java | 4 ++-- .../metadata/MetadataCreateDataStreamService.java | 2 +- .../metadata/MetadataIndexTemplateService.java | 8 ++++---- .../cluster/metadata/TemplateUpgradeService.java | 2 +- .../java/org/opensearch/ingest/IngestService.java | 2 +- .../CompletionPersistentTaskAction.java | 15 ++++++--------- .../persistent/RemovePersistentTaskAction.java | 15 ++++++--------- .../persistent/StartPersistentTaskAction.java | 15 ++++++--------- .../UpdatePersistentTaskStatusAction.java | 15 ++++++--------- .../java/org/opensearch/rest/BaseRestHandler.java | 4 ++-- .../dangling/RestDeleteDanglingIndexAction.java | 2 +- .../dangling/RestImportDanglingIndexAction.java | 2 +- .../rest/action/cat/RestIndicesAction.java | 2 +- .../java/org/opensearch/script/ScriptService.java | 2 +- .../opensearch/snapshots/SnapshotsService.java | 4 ++-- .../UpdateIndexShardSnapshotStatusRequest.java | 4 ++-- .../RenamedTimeoutRequestParameterTests.java | 6 +++--- .../reroute/ClusterRerouteRequestTests.java | 6 +++--- .../ShardsAcknowledgedResponseTests.java | 2 +- .../TransportClusterManagerNodeActionTests.java} | 8 ++++---- .../TransportMasterNodeActionUtils.java | 8 ++++---- .../MetadataIndexTemplateServiceTests.java | 2 +- .../metadata/TemplateUpgradeServiceTests.java | 2 +- .../indices/cluster/ClusterStateChanges.java | 10 +++++----- .../settings/InternalOrPrivateSettingsPlugin.java | 8 ++++---- .../blobstore/BlobStoreRepositoryTests.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 2 +- .../snapshots/AbstractSnapshotIntegTestCase.java | 2 +- .../java/org/opensearch/test/TestCluster.java | 2 +- 
.../test/hamcrest/OpenSearchAssertions.java | 4 ++-- 286 files changed, 559 insertions(+), 545 deletions(-) rename server/src/internalClusterTest/java/org/opensearch/action/support/{master => clustermanager}/IndexingClusterManagerFailoverIT.java (98%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/AcknowledgedRequest.java (93%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/AcknowledgedRequestBuilder.java (94%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/AcknowledgedResponse.java (98%) rename server/src/main/java/org/opensearch/action/support/{master/MasterNodeOperationRequestBuilder.java => clustermanager/ClusterManagerNodeOperationRequestBuilder.java} (83%) rename server/src/main/java/org/opensearch/action/support/{master/MasterNodeReadOperationRequestBuilder.java => clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java} (77%) rename server/src/main/java/org/opensearch/action/support/{master/MasterNodeReadRequest.java => clustermanager/ClusterManagerNodeReadRequest.java} (86%) rename server/src/main/java/org/opensearch/action/support/{master/MasterNodeRequest.java => clustermanager/ClusterManagerNodeRequest.java} (89%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/ShardsAcknowledgedResponse.java (98%) rename server/src/main/java/org/opensearch/action/support/{master/TransportMasterNodeAction.java => clustermanager/TransportClusterManagerNodeAction.java} (96%) rename server/src/main/java/org/opensearch/action/support/{master/TransportMasterNodeReadAction.java => clustermanager/TransportClusterManagerNodeReadAction.java} (87%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/info/ClusterInfoRequest.java (93%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager}/info/ClusterInfoRequestBuilder.java (91%) rename 
server/src/main/java/org/opensearch/action/support/{master => clustermanager}/info/TransportClusterInfoAction.java (93%) rename server/src/main/java/org/opensearch/action/support/{master => clustermanager/info}/package-info.java (63%) rename server/src/main/java/org/opensearch/action/support/{master/info => clustermanager}/package-info.java (66%) rename server/src/test/java/org/opensearch/action/support/{master => clustermanager}/ShardsAcknowledgedResponseTests.java (97%) rename server/src/test/java/org/opensearch/action/support/{master/TransportMasterNodeActionTests.java => clustermanager/TransportClusterManagerNodeActionTests.java} (98%) rename server/src/test/java/org/opensearch/action/support/{master => clustermanager}/TransportMasterNodeActionUtils.java (79%) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java index 10cfec9497862..1c943ec24411a 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java @@ -39,7 +39,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.cluster.RemoteInfoResponse; import org.opensearch.client.indices.ComponentTemplatesExistRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java index 9b4586ec6bf89..2a1d471e73eb5 100644 --- 
a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java @@ -52,7 +52,7 @@ import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.AnalyzeResponse; import org.opensearch.client.indices.CloseIndexRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java index cd304019e771c..512d0eb09ed84 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java @@ -39,7 +39,7 @@ import org.opensearch.action.ingest.PutPipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index f3360630a26b7..50864ed829944 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -66,7 +66,7 @@ import org.opensearch.action.search.SearchRequest; import 
org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchScrollRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.update.UpdateRequest; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.core.CountRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java index 85a793dec24ce..78c140dc8f4d4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java @@ -51,7 +51,7 @@ import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java index 817d1c08532c6..3740f4f3fc5ab 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java @@ -33,7 +33,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import 
org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java index 7e1ea2894961d..b7a94eb5ea8b8 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.client.indices; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ConstructingObjectParser; import org.opensearch.common.xcontent.ObjectParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java index f248e8efa24f4..5f43ec7f1d0fe 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/PutIndexTemplateRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; @@ -69,7 +69,10 @@ /** * A request to create an index template. 
*/ -public class PutIndexTemplateRequest extends MasterNodeRequest implements IndicesRequest, ToXContentFragment { +public class PutIndexTemplateRequest extends ClusterManagerNodeRequest + implements + IndicesRequest, + ToXContentFragment { private String name; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java index 0303dba2535e7..415f3dbec249f 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java @@ -32,7 +32,7 @@ package org.opensearch.client.indices.rollover; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ConstructingObjectParser; import org.opensearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 71b869fb59e7b..40059af46774f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.cluster.RemoteConnectionInfo; import 
org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.cluster.RemoteInfoResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index ec6847630dc92..e1c232103b207 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index f9c8851f8839e..aa7af5a9d1250 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -65,7 +65,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.AnalyzeResponse; import org.opensearch.client.indices.CloseIndexRequest; diff --git 
a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index bf6d6c922fdd7..a277e65d2ac33 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -53,7 +53,7 @@ import org.opensearch.action.admin.indices.shrink.ResizeType; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.CloseIndexRequest; import org.opensearch.client.indices.CreateDataStreamRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java index 78a3202f35892..e85ddc21b8fda 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.ingest.SimulateDocumentVerboseResult; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java 
b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index 200069ade1ea2..c65fa95c5e92a 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -40,7 +40,7 @@ import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequest; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index 4f0b2ac0d88a1..1f1b4543cf704 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -61,8 +61,8 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.master.AcknowledgedRequest; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.action.update.UpdateRequest; import org.opensearch.client.RequestConverters.EndpointBuilder; @@ -2128,7 +2128,7 @@ static void setRandomTimeoutTimeValue(Consumer setter, TimeValue defa } } - static void 
setRandomClusterManagerTimeout(MasterNodeRequest request, Map expectedParams) { + static void setRandomClusterManagerTimeout(ClusterManagerNodeRequest request, Map expectedParams) { setRandomClusterManagerTimeout(request::masterNodeTimeout, expectedParams); } @@ -2145,7 +2145,7 @@ static void setRandomClusterManagerTimeout(Consumer setter, Map { @Override - protected org.opensearch.action.support.master.AcknowledgedResponse createServerTestInstance(XContentType xContentType) { - return new org.opensearch.action.support.master.AcknowledgedResponse(randomBoolean()); + protected org.opensearch.action.support.clustermanager.AcknowledgedResponse createServerTestInstance(XContentType xContentType) { + return new org.opensearch.action.support.clustermanager.AcknowledgedResponse(randomBoolean()); } @Override @@ -55,7 +55,7 @@ protected AcknowledgedResponse doParseToClientInstance(XContentParser parser) th @Override protected void assertInstances( - org.opensearch.action.support.master.AcknowledgedResponse serverTestInstance, + org.opensearch.action.support.clustermanager.AcknowledgedResponse serverTestInstance, AcknowledgedResponse clientInstance ) { assertThat(clientInstance.isAcknowledged(), is(serverTestInstance.isAcknowledged())); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java index 7762813aa53ce..f75c6a10a8afe 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.support.ActiveShardCount; -import 
org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index 3fbe7f63b09a2..9e6bdd8d769a6 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -63,7 +63,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.GetAliasesResponse; import org.opensearch.client.RequestOptions; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java index a6157f2903103..46417659eddad 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java @@ -44,7 +44,7 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; import org.opensearch.action.ingest.SimulateProcessorResult; 
-import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java index 0ca3c0cd5a598..46473402ab69c 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java @@ -52,7 +52,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java index c9c36a6165c45..0d36348a6a96d 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import 
org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java index 3fa35f6fffd22..a5c8086118fcd 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java @@ -32,8 +32,8 @@ package org.opensearch.client.indices; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.client.AbstractResponseTestCase; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java index aa274549f3a9b..b819722d59f13 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java @@ 
-35,7 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; diff --git a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java index 3a430331167f6..24ec8f0eaf4c5 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java @@ -33,7 +33,7 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.opensearch.action.get.GetResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugin.mapper.MapperSizePlugin; diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index 6d71a65a35a4c..fe4223a5aca87 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ 
b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -42,7 +42,7 @@ import org.junit.AfterClass; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.MockSecureSettings; diff --git a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index f1b2f78a37380..f4979c6caaddb 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -32,7 +32,7 @@ package org.opensearch.repositories.gcs; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java index 4e12de7cce212..d7209e47bff11 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java @@ -34,7 +34,7 @@ import 
com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; import org.opensearch.common.settings.Settings; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index d46d0b2092d2a..61a990b4d5525 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.common.settings.Settings; diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index bc2839d066092..952d8214cb91f 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -31,7 +31,7 @@ package org.opensearch.repositories.s3; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.MockSecureSettings; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index 3ef2a63c7d0ac..e772583697cb9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -40,7 +40,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 7b0d917504a2f..8f2bdbdcc5973 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -12,7 +12,7 @@ import org.opensearch.action.admin.indices.rollover.RolloverResponse; import org.opensearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.opensearch.action.support.master.AcknowledgedResponse; 
+import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.Template; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java index e2a1363f163da..93f75e3918391 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.ingest.PutPipelineRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.bytes.BytesReference; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingClusterManagerFailoverIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java similarity index 98% rename from server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingClusterManagerFailoverIT.java rename to server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java index 14e0dd94ea640..60ba9a4bfcf98 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/support/master/IndexingClusterManagerFailoverIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/clustermanager/IndexingClusterManagerFailoverIT.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.DocWriteResponse; import org.opensearch.action.index.IndexResponse; diff --git a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java index 574046509de75..46a5dc421fbb6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java @@ -42,7 +42,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest.RefreshPolicy; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexAbstraction; diff --git a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java index 8ede3e25b2e1a..f1f5260f8f2f0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java @@ -42,7 +42,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git 
a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index 61b186c951ce8..f5273803fa716 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ -40,7 +40,7 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.ActionResponse; import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlocks; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index a92849a077376..1259a011147b8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java 
b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java index ed6074b39c8a7..df62797e1194d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java @@ -34,7 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 1f3d865811939..2504e676acf41 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java @@ -51,7 +51,7 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java index 51ff5de34240a..4e6c6519c2055 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java +++ 
b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -36,7 +36,7 @@ import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.action.index.MappingUpdatedAction; import org.opensearch.cluster.metadata.MappingMetadata; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java index 3d70622e122c0..11587d1232ec1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -33,7 +33,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.IndexRoutingTable; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java index ca1e1399f8fdc..df5372d65fda3 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.search.SearchResponse; 
import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index 404b13aae5b9c..fbfb4c3c3479d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -48,7 +48,7 @@ import org.opensearch.action.ingest.SimulateDocumentBaseResult; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.update.UpdateRequest; import org.opensearch.client.Requests; import org.opensearch.common.bytes.BytesReference; diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index a615cceffb5df..585e4755a54ad 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -33,7 +33,7 @@ package org.opensearch.ingest; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import 
org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; import org.opensearch.node.NodeService; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 690564fe1cac8..0fb856efdda1e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -42,7 +42,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index d5f36608941d5..147e0e98e5b33 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.unit.TimeValue; 
diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index 04ec3f027f908..08059b49213ee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -43,7 +43,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.SnapshotsInProgress; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 2eca8555e1388..29b58eab9b865 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -48,7 +48,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index e72110f4c4efd..27aeda1262db6 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java index 0102cc517dbcd..9e682445e7dc4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.allocation; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -50,7 +50,7 @@ * * @opensearch.internal */ -public class ClusterAllocationExplainRequest extends MasterNodeRequest { +public class ClusterAllocationExplainRequest extends ClusterManagerNodeRequest { private static final ObjectParser PARSER = 
new ObjectParser<>("cluster/allocation/explain"); static { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java index 240520241c42b..d85cb3929873d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/ClusterAllocationExplainRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.allocation; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class ClusterAllocationExplainRequestBuilder extends MasterNodeOperationRequestBuilder< +public class ClusterAllocationExplainRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< ClusterAllocationExplainRequest, ClusterAllocationExplainResponse, ClusterAllocationExplainRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java index 233cc506a32c1..16721d5a9ec07 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java @@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import 
org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterInfo; import org.opensearch.cluster.ClusterInfoService; import org.opensearch.cluster.ClusterState; @@ -68,7 +68,7 @@ * * @opensearch.internal */ -public class TransportClusterAllocationExplainAction extends TransportMasterNodeAction< +public class TransportClusterAllocationExplainAction extends TransportClusterManagerNodeAction< ClusterAllocationExplainRequest, ClusterAllocationExplainResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index ba44fdfeb8ff6..a8035a10e8d91 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -33,7 +33,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.node.DiscoveryNode; @@ -59,7 +59,7 @@ * * @opensearch.internal */ -public class AddVotingConfigExclusionsRequest extends MasterNodeRequest { +public class AddVotingConfigExclusionsRequest extends ClusterManagerNodeRequest { public static final String DEPRECATION_MESSAGE = "nodeDescription is deprecated and will be removed, use nodeIds or nodeNames instead"; private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(AddVotingConfigExclusionsRequest.class); private final String[] nodeDescriptions; diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java index cbe19abe069b2..5ba91c1f8f239 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/ClearVotingConfigExclusionsRequest.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.configuration; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class ClearVotingConfigExclusionsRequest extends MasterNodeRequest { +public class ClearVotingConfigExclusionsRequest extends ClusterManagerNodeRequest { private boolean waitForRemoval = true; private TimeValue timeout = TimeValue.timeValueSeconds(30); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index c9b27f4822fcd..ab72ce964668f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -37,7 +37,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import 
org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateObserver.Listener; @@ -71,7 +71,7 @@ * * @opensearch.internal */ -public class TransportAddVotingConfigExclusionsAction extends TransportMasterNodeAction< +public class TransportAddVotingConfigExclusionsAction extends TransportClusterManagerNodeAction< AddVotingConfigExclusionsRequest, AddVotingConfigExclusionsResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 3791b3e8301ee..3a9da6cebef53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -37,7 +37,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateObserver.Listener; @@ -65,7 +65,7 @@ * * @opensearch.internal */ -public class TransportClearVotingConfigExclusionsAction extends TransportMasterNodeAction< +public class TransportClearVotingConfigExclusionsAction extends TransportClusterManagerNodeAction< ClearVotingConfigExclusionsRequest, ClearVotingConfigExclusionsResponse> { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index 026cf2274452e..8567694bd3880 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; import org.opensearch.common.io.stream.StreamInput; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class ClusterHealthRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class ClusterHealthRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices; private IndicesOptions indicesOptions = IndicesOptions.lenientExpandHidden(); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java index d1e68e0a22510..3874bf31e1e23 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class ClusterHealthRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterHealthRequest, ClusterHealthResponse, ClusterHealthRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java index 09082536dfbbb..6120317dfeace 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -39,7 +39,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -72,7 +72,7 @@ * * @opensearch.internal */ -public class TransportClusterHealthAction extends TransportMasterNodeReadAction { +public class TransportClusterHealthAction extends TransportClusterManagerNodeReadAction { private static final Logger logger = LogManager.getLogger(TransportClusterHealthAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index 0f265681cd241..852ef9e2b173b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java index fc5365e7e836d..95c4fb372572f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class CleanupRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder< +public class CleanupRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CleanupRepositoryRequest, CleanupRepositoryResponse, CleanupRepositoryRequestBuilder> { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index fb972136bf695..25513e6c9d7da 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -40,7 +40,7 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.RepositoryCleanupInProgress; @@ -85,7 +85,9 @@ * * @opensearch.internal */ -public final class TransportCleanupRepositoryAction extends TransportMasterNodeAction { +public final class TransportCleanupRepositoryAction extends TransportClusterManagerNodeAction< + CleanupRepositoryRequest, + CleanupRepositoryResponse> { private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java index 2031e4f7a716f..5f17afe2abf76 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.ActionType; 
-import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Unregister repository action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index a3f4bb768c649..2e28a3fd4f41d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index ffef8d5b41979..f2fcb0bd8857c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.repositories.delete; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import 
org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java index 97a0463df0e41..6ce7f411e7ef4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportDeleteRepositoryAction extends TransportMasterNodeAction { +public class TransportDeleteRepositoryAction extends TransportClusterManagerNodeAction { private final RepositoriesService repositoriesService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java index 9e93b7ab68dc3..d042e60638c47 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.get; import 
org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ * * @opensearch.internal */ -public class GetRepositoriesRequest extends MasterNodeReadRequest { +public class GetRepositoriesRequest extends ClusterManagerNodeReadRequest { private String[] repositories = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java index 2174d02c6852e..4b93aff4c25bc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.get; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class GetRepositoriesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetRepositoriesRequest, GetRepositoriesResponse, GetRepositoriesRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index e7cef381a2346..6e61752c78656 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -62,7 +62,7 @@ * * @opensearch.internal */ -public class TransportGetRepositoriesAction extends TransportMasterNodeReadAction { +public class TransportGetRepositoriesAction extends TransportClusterManagerNodeReadAction { @Inject public TransportGetRepositoriesAction( diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java index c2f90d869d873..9e56d1dfb3560 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Register repository action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 1bdc8e024447d..8ab8d40936c67 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index 6e1b2795b6375..bcf6aeceebedd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.repositories.put; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java index 1d47dbb0fd194..1f4603ab87070 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportPutRepositoryAction extends TransportMasterNodeAction { +public class TransportPutRepositoryAction extends TransportClusterManagerNodeAction { private final RepositoriesService repositoriesService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 661e99aa1dee3..a673f34058a83 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import 
org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportVerifyRepositoryAction extends TransportMasterNodeAction { +public class TransportVerifyRepositoryAction extends TransportClusterManagerNodeAction { private final RepositoriesService repositoriesService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index 001030f6a67f5..3cd28e9a05206 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.verify; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 85c6d4e341e72..c405fb9bc12cd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.verify; 
-import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder< +public class VerifyRepositoryRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< VerifyRepositoryRequest, VerifyRepositoryResponse, VerifyRepositoryRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index 1ca5ca1148b87..ad50b7c44aec4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.reroute; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 01d52cb43320d..30eb0a4f36b3a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.reroute; 
-import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index dcddc98bdc43a..9f0609a77b1c6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.reroute; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.RoutingExplanations; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 406ff4bcd8e06..5080ce2c0fd67 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -42,7 +42,7 @@ import org.opensearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.opensearch.action.admin.indices.shards.IndicesShardStoresResponse; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import 
org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -75,7 +75,7 @@ * * @opensearch.internal */ -public class TransportClusterRerouteAction extends TransportMasterNodeAction { +public class TransportClusterRerouteAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportClusterRerouteAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java index 01aeb0f6ec988..e1d5b62c59966 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterGetSettingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.action.admin.cluster.state.ClusterStateRequest; /** @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ClusterGetSettingsRequest extends MasterNodeReadRequest { +public class ClusterGetSettingsRequest extends ClusterManagerNodeReadRequest { private boolean includeDefaults = false; @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index f3f7db03ac67e..50ca3ee204797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 4d08c94f78b6a..2978b27d726db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.settings; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index a4edd1d99148a..f7a66572fb174 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.settings; -import 
org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index af5da6f538d67..0799a8bd22b45 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -38,7 +38,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -63,7 +63,7 @@ * * @opensearch.internal */ -public class TransportClusterUpdateSettingsAction extends TransportMasterNodeAction< +public class TransportClusterUpdateSettingsAction extends TransportClusterManagerNodeAction< ClusterUpdateSettingsRequest, ClusterUpdateSettingsResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index e79697a415f1e..3ab19cc595d98 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -49,7 +49,9 @@ * * @opensearch.internal */ -public class ClusterSearchShardsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class ClusterSearchShardsRequest extends ClusterManagerNodeReadRequest + implements + IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; @Nullable diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 26246197cbfa8..53940e47bb0df 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.shards; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class ClusterSearchShardsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< 
ClusterSearchShardsRequest, ClusterSearchShardsResponse, ClusterSearchShardsRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index 1d0173b2446dd..ae2d2aeb827ba 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -63,7 +63,7 @@ * * @opensearch.internal */ -public class TransportClusterSearchShardsAction extends TransportMasterNodeReadAction< +public class TransportClusterSearchShardsAction extends TransportClusterManagerNodeReadAction< ClusterSearchShardsRequest, ClusterSearchShardsResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java index c6fe102544a7e..189b6aa7b7544 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.clone; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for cloning a snapshot diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 7044a7412058a..5ece7ac6f1c40 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -51,7 +51,10 @@ * * @opensearch.internal */ -public class CloneSnapshotRequest extends MasterNodeRequest implements IndicesRequest.Replaceable, ToXContentObject { +public class CloneSnapshotRequest extends ClusterManagerNodeRequest + implements + IndicesRequest.Replaceable, + ToXContentObject { private final String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java index c2dd9b2b491f2..14e87bd622cf2 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequestBuilder.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.IndicesOptions; -import 
org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.Strings; @@ -44,7 +44,7 @@ * * @opensearch.internal */ -public class CloneSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< +public class CloneSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CloneSnapshotRequest, AcknowledgedResponse, CloneSnapshotRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java index a17d19bb870fa..c1946792f43db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/TransportCloneSnapshotAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public final class TransportCloneSnapshotAction extends TransportMasterNodeAction { +public final class TransportCloneSnapshotAction extends TransportClusterManagerNodeAction { private 
final SnapshotsService snapshotsService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index b9d96ed2a752d..7b4a92497c41b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.StreamInput; @@ -78,7 +78,7 @@ * * @opensearch.internal */ -public class CreateSnapshotRequest extends MasterNodeRequest +public class CreateSnapshotRequest extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable, ToXContentObject { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 3f74e7d24bcdb..40d440419819c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.create; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< +public class CreateSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< CreateSnapshotRequest, CreateSnapshotResponse, CreateSnapshotRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index f05980f2eb41f..4b28bafc258cf 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class TransportCreateSnapshotAction extends TransportMasterNodeAction { +public class TransportCreateSnapshotAction extends TransportClusterManagerNodeAction { private final SnapshotsService snapshotsService; @Inject diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java index 0b98a4b31fd53..60d9cadc0aede 
100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Delete snapshot action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java index d446221e8e175..61c4fdb9d5c14 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.snapshots.SnapshotsService; @@ -50,7 +50,7 @@ * * @opensearch.internal */ -public class DeleteSnapshotRequest extends MasterNodeRequest { +public class DeleteSnapshotRequest extends ClusterManagerNodeRequest { private String repository; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 684d96d1aa8d9..ad41d94227da8 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.snapshots.delete; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< +public class DeleteSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteSnapshotRequest, AcknowledgedResponse, DeleteSnapshotRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index ad71f970edcd5..7bbd91a4b4a03 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import 
org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportDeleteSnapshotAction extends TransportMasterNodeAction { +public class TransportDeleteSnapshotAction extends TransportClusterManagerNodeAction { private final SnapshotsService snapshotsService; @Inject diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 822598bd7f78b..bb50cbf4316a9 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ * * @opensearch.internal */ -public class GetSnapshotsRequest extends MasterNodeRequest { +public class GetSnapshotsRequest extends ClusterManagerNodeRequest { public static final String ALL_SNAPSHOTS = "_all"; public static final String CURRENT_SNAPSHOT = "_current"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 46317a3493650..3434f1cb47a99 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.get; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder< +public class GetSnapshotsRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< GetSnapshotsRequest, GetSnapshotsResponse, GetSnapshotsRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index dab87c72c5dce..0be3f8be0bc80 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -39,7 +39,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.block.ClusterBlockException; @@ -79,7 +79,7 @@ * * @opensearch.internal */ -public class TransportGetSnapshotsAction extends TransportMasterNodeAction { +public class TransportGetSnapshotsAction extends TransportClusterManagerNodeAction { private static final Logger logger = 
LogManager.getLogger(TransportGetSnapshotsAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index ec3809fb52516..1b673217a248b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -35,7 +35,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -64,7 +64,7 @@ * * @opensearch.internal */ -public class RestoreSnapshotRequest extends MasterNodeRequest implements ToXContentObject { +public class RestoreSnapshotRequest extends ClusterManagerNodeRequest implements ToXContentObject { private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(RestoreSnapshotRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 34a6586d52917..68397851699fb 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.restore; import org.opensearch.action.support.IndicesOptions; -import 
org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; @@ -46,7 +46,7 @@ * * @opensearch.internal */ -public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder< +public class RestoreSnapshotRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< RestoreSnapshotRequest, RestoreSnapshotResponse, RestoreSnapshotRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index 2deed9f2dc93b..fa7c0c6efa469 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class TransportRestoreSnapshotAction extends TransportMasterNodeAction { +public class TransportRestoreSnapshotAction extends TransportClusterManagerNodeAction { private final RestoreService restoreService; @Inject diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java index d5c7c63b0db43..7afa7a25c7c0e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ * * @opensearch.internal */ -public class SnapshotsStatusRequest extends MasterNodeRequest { +public class SnapshotsStatusRequest extends ClusterManagerNodeRequest { private String repository = "_all"; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 3e281ce8059d1..55f156d4a470e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder< 
+public class SnapshotsStatusRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< SnapshotsStatusRequest, SnapshotsStatusResponse, SnapshotsStatusRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index dec169a6633cf..31b19848f59f4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -40,7 +40,7 @@ import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.block.ClusterBlockException; @@ -89,7 +89,7 @@ * * @opensearch.internal */ -public class TransportSnapshotsStatusAction extends TransportMasterNodeAction { +public class TransportSnapshotsStatusAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportSnapshotsStatusAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java index bf2204a9f8e15..1c6afaf08a89c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import 
org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -48,7 +48,7 @@ * * @opensearch.internal */ -public class ClusterStateRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class ClusterStateRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { public static final TimeValue DEFAULT_WAIT_FOR_NODE_TIMEOUT = TimeValue.timeValueMinutes(1); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 530d99f5db808..b9bfeca9f7386 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.state; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.unit.TimeValue; @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class ClusterStateRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< ClusterStateRequest, ClusterStateResponse, ClusterStateRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java index 885769dd200cf..673153c40bf46 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -37,7 +37,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.NotMasterException; @@ -63,7 +63,7 @@ * * @opensearch.internal */ -public class TransportClusterStateAction extends TransportMasterNodeReadAction { +public class TransportClusterStateAction extends TransportClusterManagerNodeReadAction { private final Logger logger = LogManager.getLogger(getClass()); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java index 3645ef21d2e12..483004a3365c5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for deleting stored scripts diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index 93d2c3ba3c452..a23f2fea698fd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index 34e0d429f2098..c77ebfa85422f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.storedscripts; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java index 2a51bd9ad3eef..bdff2e27b0f2d 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -46,7 +46,7 @@ * * @opensearch.internal */ -public class GetStoredScriptRequest extends MasterNodeReadRequest { +public class GetStoredScriptRequest extends ClusterManagerNodeReadRequest { protected String id; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java index 3c8b74c240f29..ae969963be62f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/GetStoredScriptRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.storedscripts; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class GetStoredScriptRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class GetStoredScriptRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetStoredScriptRequest, GetStoredScriptResponse, GetStoredScriptRequestBuilder> { diff --git 
a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java index 2845d895a69e8..cc571c2f26136 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for putting stored script diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 2bddf2823f962..8b9eb83bb531c 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index ef3c14df29627..b829cc3466f70 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.storedscripts; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java index 1550af534e5bf..60990e14e1a57 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportDeleteStoredScriptAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportDeleteStoredScriptAction extends TransportMasterNodeAction { +public class 
TransportDeleteStoredScriptAction extends TransportClusterManagerNodeAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java index b3f5890de40b9..d2d5a49fcde23 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportGetStoredScriptAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class TransportGetStoredScriptAction extends TransportMasterNodeReadAction { +public class TransportGetStoredScriptAction extends TransportClusterManagerNodeReadAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java index fa0e97d4c02f1..c8ae0c213b3dc 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/TransportPutStoredScriptAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import 
org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportPutStoredScriptAction extends TransportMasterNodeAction { +public class TransportPutStoredScriptAction extends TransportClusterManagerNodeAction { private final ScriptService scriptService; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java index 27f5e3bc9b991..ec9a830abf1b8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.tasks; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.io.stream.StreamInput; import java.io.IOException; @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class PendingClusterTasksRequest extends MasterNodeReadRequest { +public class PendingClusterTasksRequest extends ClusterManagerNodeReadRequest { public PendingClusterTasksRequest() {} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java index 
08afa81a8194d..b5e77f291a701 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.tasks; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class PendingClusterTasksRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< PendingClusterTasksRequest, PendingClusterTasksResponse, PendingClusterTasksRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 13a805e1e49f0..8962f0395cc6f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; @@ -55,7 +55,7 @@ * * @opensearch.internal */ -public class TransportPendingClusterTasksAction extends 
TransportMasterNodeReadAction< +public class TransportPendingClusterTasksAction extends TransportClusterManagerNodeReadAction< PendingClusterTasksRequest, PendingClusterTasksResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java index 4d735e984c34e..9ce10c2853ff6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.alias; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for listing index aliases diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 62f51aa3f3bff..0119b892dadf8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.AliasesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.metadata.AliasAction; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java index 13c57cc781925..ebc1fc9e9e2ce 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 2e5cf23360125..90bc246fe34e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -38,8 +38,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.RequestValidators; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -75,7 +75,7 @@ * * @opensearch.internal */ -public class TransportIndicesAliasesAction extends 
TransportMasterNodeAction { +public class TransportIndicesAliasesAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportIndicesAliasesAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java index 82f9d9a35dd2c..a21b8e97a6d6e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/BaseAliasesRequestBuilder.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; @@ -46,7 +46,7 @@ */ public abstract class BaseAliasesRequestBuilder< Response extends ActionResponse, - Builder extends BaseAliasesRequestBuilder> extends MasterNodeReadOperationRequestBuilder< + Builder extends BaseAliasesRequestBuilder> extends ClusterManagerNodeReadOperationRequestBuilder< GetAliasesRequest, Response, Builder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java index 46f2ee8765910..81dd9a6045cf5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/GetAliasesRequest.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.AliasesRequest; 
import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -46,7 +46,7 @@ * * @opensearch.internal */ -public class GetAliasesRequest extends MasterNodeReadRequest implements AliasesRequest { +public class GetAliasesRequest extends ClusterManagerNodeReadRequest implements AliasesRequest { private String[] indices = Strings.EMPTY_ARRAY; private String[] aliases = Strings.EMPTY_ARRAY; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index 1996b11901c3a..a2f975ff9cbbc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -33,7 +33,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -64,7 +64,7 @@ * * @opensearch.internal */ -public class TransportGetAliasesAction extends TransportMasterNodeReadAction { +public class TransportGetAliasesAction extends TransportClusterManagerNodeReadAction { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TransportGetAliasesAction.class); private final SystemIndices systemIndices; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index b16cabfda4d67..529767a00af82 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java index b3b53a0043c70..15307c821178c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 1fc9017359a8c..4206b4e9e0926 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -34,7 +34,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java index 0084977d0fdf0..5f3ed38a05228 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -38,7 +38,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -64,7 +64,7 @@ * * @opensearch.internal */ -public class TransportCloseIndexAction extends TransportMasterNodeAction { +public class TransportCloseIndexAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportCloseIndexAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java index 
6b510291f1ccb..b931ab4a924ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/AutoCreateAction.java @@ -36,7 +36,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.ActiveShardsObserver; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; @@ -79,7 +79,7 @@ private AutoCreateAction() { * * @opensearch.internal */ - public static final class TransportAction extends TransportMasterNodeAction { + public static final class TransportAction extends TransportClusterManagerNodeAction { private final ActiveShardsObserver activeShardsObserver; private final MetadataCreateIndexService createIndexService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 95837d82be7ac..28db8dad69084 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -42,7 +42,7 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java index 4c5780b87b3f2..2de77681aa127 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java index 871576d8e336a..fca1f7cce71d9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java index 859a9d6b21bd3..8b2a62304b71c 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class TransportCreateIndexAction extends TransportMasterNodeAction { +public class TransportCreateIndexAction extends TransportClusterManagerNodeAction { private final MetadataCreateIndexService createIndexService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java index 6559ef4cd89bd..2ccc422f2edd6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * This action causes a dangling index to be considered as deleted by the cluster. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index 4fad5498de375..3ded069dd6d89 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java index 495e8cb1fcac8..df3c5c4ff99ac 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/TransportDeleteDanglingIndexAction.java @@ -44,8 +44,8 @@ import org.opensearch.action.admin.indices.dangling.list.ListDanglingIndicesResponse; import org.opensearch.action.admin.indices.dangling.list.NodeListDanglingIndicesResponse; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.node.NodeClient; import 
org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; @@ -73,7 +73,9 @@ * * @opensearch.internal */ -public class TransportDeleteDanglingIndexAction extends TransportMasterNodeAction { +public class TransportDeleteDanglingIndexAction extends TransportClusterManagerNodeAction< + DeleteDanglingIndexRequest, + AcknowledgedResponse> { private static final Logger logger = LogManager.getLogger(TransportDeleteDanglingIndexAction.class); private final Settings settings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java index 5f7a096b1d749..308720aa6139f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.import_index; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Represents a request to import a particular dangling index. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index 73fbad248b8b1..0b442e33f1e21 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.import_index; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 2010515249371..1b6102cbbc2fd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -50,7 +50,7 @@ import org.opensearch.action.admin.indices.dangling.find.NodeFindDanglingIndexResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.inject.Inject; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java index 955f1f9475360..c5c37e06137d2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/CreateDataStreamAction.java @@ -38,9 +38,9 @@ import org.opensearch.action.ValidateActions; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -134,7 +134,7 @@ public IndicesOptions indicesOptions() { * * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeAction { + public static class TransportAction extends TransportClusterManagerNodeAction { private final MetadataCreateDataStreamService metadataCreateDataStreamService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java index 95ac25c47e842..1b3485ad65203 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/DeleteDataStreamAction.java @@ -39,9 
+39,9 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; @@ -94,7 +94,7 @@ private DeleteDataStreamAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest implements IndicesRequest.Replaceable { + public static class Request extends ClusterManagerNodeRequest implements IndicesRequest.Replaceable { private String[] names; @@ -164,7 +164,7 @@ public IndicesRequest indices(String... 
indices) { * * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeAction { + public static class TransportAction extends TransportClusterManagerNodeAction { private final MetadataDeleteIndexService deleteIndexService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java index c8270424e42df..6140d10bd293c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/datastream/GetDataStreamAction.java @@ -40,8 +40,8 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -92,7 +92,7 @@ private GetDataStreamAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeReadRequest implements IndicesRequest.Replaceable { + public static class Request extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] names; @@ -287,7 +287,7 @@ public int hashCode() { * * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeReadAction { + public static class TransportAction extends TransportClusterManagerNodeReadAction { private static final Logger logger = LogManager.getLogger(TransportAction.class); diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java index 696c1244c7504..a3aa9e751a8ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for deleting an index diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java index 7475121a910c4..b8100502a2e0a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index 33f6342e94139..a1cee63875a77 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.delete; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 70cb6d8115f15..0cc1a164603f6 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -38,8 +38,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -63,7 +63,7 @@ * * @opensearch.internal */ -public class TransportDeleteIndexAction extends TransportMasterNodeAction { +public class TransportDeleteIndexAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportDeleteIndexAction.class); diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index 89c237a990dc8..0b83478933cd4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -49,7 +49,7 @@ * * @opensearch.internal */ -public class IndicesExistsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class IndicesExistsRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java index 9b83d2b29302c..8459bbd8b874e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/IndicesExistsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.exists.indices; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import 
org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class IndicesExistsRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class IndicesExistsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< IndicesExistsRequest, IndicesExistsResponse, IndicesExistsRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java index 49ab15dadb19a..a7f73a203f4c5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/exists/indices/TransportIndicesExistsAction.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -54,7 +54,7 @@ * * @opensearch.internal */ -public class TransportIndicesExistsAction extends TransportMasterNodeReadAction { +public class TransportIndicesExistsAction extends TransportClusterManagerNodeReadAction { @Inject public TransportIndicesExistsAction( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java index 9a7fae9f84a98..ee0b204c77aa3 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.ArrayUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java index 3019191e5570e..ed106c44ea36a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/GetIndexRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.get; -import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java index 493d3354a1b70..0142e70d18221 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/get/TransportGetIndexAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.info.TransportClusterInfoAction; +import 
org.opensearch.action.support.clustermanager.info.TransportClusterInfoAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexMetadata; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java index 2c9bec8398b66..1fd9323edd2f8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.info.ClusterInfoRequest; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; import org.opensearch.common.io.stream.StreamInput; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java index 85bf8c2ffd9c6..0a6d7cac79133 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/GetMappingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.mapping.get; -import org.opensearch.action.support.master.info.ClusterInfoRequestBuilder; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index 1edbba547a917..71438ad300e0c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -36,7 +36,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.info.TransportClusterInfoAction; +import org.opensearch.action.support.clustermanager.info.TransportClusterInfoAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; import org.opensearch.cluster.metadata.MappingMetadata; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java index f2430eb54db9b..6f0cad2fe178d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action to automatically put field mappings. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java index 8bca1b59ee2e2..9088d1241ad2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action to put field mappings. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 85fd74f0762a5..a02dd620b8661 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -39,8 +39,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 
78115e1fab4ec..f0e0876dbf877 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java index 5252fd24fd2fa..e42a6841867ea 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportAutoPutMappingAction.java @@ -33,8 +33,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -55,7 +55,7 @@ * * @opensearch.internal */ -public class TransportAutoPutMappingAction extends 
TransportMasterNodeAction { +public class TransportAutoPutMappingAction extends TransportClusterManagerNodeAction { private final MetadataMappingService metadataMappingService; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index ec5a92ada4454..33385c421722c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -38,8 +38,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.RequestValidators; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -66,7 +66,7 @@ * * @opensearch.internal */ -public class TransportPutMappingAction extends TransportMasterNodeAction { +public class TransportPutMappingAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportPutMappingAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java index c6c1c2dc8f0cb..21c5fcd6ed1c2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java @@ -36,7 +36,7 @@ 
import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java index bf09c3f173491..2760fb43a727f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java index f7bd4cf31aa17..38ec7226d3c68 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.open; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import 
org.opensearch.common.xcontent.ConstructingObjectParser; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java index 6cd3c0682e851..aa17027aa3e6a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -38,7 +38,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.OpenIndexClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -60,7 +60,7 @@ * * @opensearch.internal */ -public class TransportOpenIndexAction extends TransportMasterNodeAction { +public class TransportOpenIndexAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportOpenIndexAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java index 7d208b5e0ac77..7715480fcaca5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import 
org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java index 8322ba19f433e..66ff659c6a90a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.readonly; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 6a07a645f9315..22b12d195b9c3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -33,7 +33,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java index 80af0a2c2dcc9..b505ba5927f66 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/TransportAddIndexBlockAction.java @@ -38,7 +38,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -65,7 +65,7 @@ * * @opensearch.internal */ -public class TransportAddIndexBlockAction extends TransportMasterNodeAction { +public class TransportAddIndexBlockAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportAddIndexBlockAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index db5dd0af6ab2a..3216fc9ce0b71 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -36,7 +36,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import 
org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java index bec084450b389..ed598c14acec3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -33,7 +33,7 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeValue; @@ -44,7 +44,10 @@ * * @opensearch.internal */ -public class RolloverRequestBuilder extends MasterNodeOperationRequestBuilder { +public class RolloverRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< + RolloverRequest, + RolloverResponse, + RolloverRequestBuilder> { public RolloverRequestBuilder(OpenSearchClient client, RolloverAction action) { super(client, action, new RolloverRequest()); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index 330d258f9461f..ed08595f55cea 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -32,7 +32,7 @@ package 
org.opensearch.action.admin.indices.rollover; -import org.opensearch.action.support.master.ShardsAcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java index 1a2f4be522e2b..8ab8061039aa9 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -40,7 +40,7 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActiveShardsObserver; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -72,7 +72,7 @@ * * @opensearch.internal */ -public class TransportRolloverAction extends TransportMasterNodeAction { +public class TransportRolloverAction extends TransportClusterManagerNodeAction { private final MetadataRolloverService rolloverService; private final ActiveShardsObserver activeShardsObserver; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java index bf68a66d24c5a..b917e6734e932 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequest.java @@ -36,7 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.ValidateActions; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -50,7 +50,7 @@ * * @opensearch.internal */ -public class GetSettingsRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class GetSettingsRequest extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, true, true, true); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java index d978ffd90386a..84cd4e8682e93 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/GetSettingsRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.settings.get; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class GetSettingsRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class 
GetSettingsRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetSettingsRequest, GetSettingsResponse, GetSettingsRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java index 4f55cf3c4b5ca..000d6d70d7af7 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/get/TransportGetSettingsAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -60,7 +60,7 @@ * * @opensearch.internal */ -public class TransportGetSettingsAction extends TransportMasterNodeReadAction { +public class TransportGetSettingsAction extends TransportClusterManagerNodeReadAction { private final SettingsFilter settingsFilter; private final IndexScopedSettings indexScopedSettings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 52ce7dffbad80..4b6dd3a28c3bf 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -37,8 +37,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; 
import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -60,7 +60,7 @@ * * @opensearch.internal */ -public class TransportUpdateSettingsAction extends TransportMasterNodeAction { +public class TransportUpdateSettingsAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportUpdateSettingsAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java index 2333a2aad6bc6..aa26acb7e3fc5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.settings.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Action for updating index settings diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 2b0452301a5f5..fd6aac7696013 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 7501f0c7798de..459b16c2a9b7e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.settings.put; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java index 6eec8ccb63d20..bc9633f2bd2db 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoreRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class IndicesShardStoreRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class IndicesShardStoreRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< IndicesShardStoresRequest, IndicesShardStoresResponse, IndicesShardStoreRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java index 0b66e314731d1..e73addbc56ce0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -48,7 +48,9 @@ * * @opensearch.internal */ -public class IndicesShardStoresRequest extends MasterNodeReadRequest implements IndicesRequest.Replaceable { +public class IndicesShardStoresRequest extends ClusterManagerNodeReadRequest + 
implements + IndicesRequest.Replaceable { private String[] indices = Strings.EMPTY_ARRAY; private IndicesOptions indicesOptions = IndicesOptions.strictExpand(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java index 077bee78021c3..b2f2c9e5d03a3 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/TransportIndicesShardStoresAction.java @@ -37,7 +37,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -81,7 +81,7 @@ * * @opensearch.internal */ -public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAction< +public class TransportIndicesShardStoresAction extends TransportClusterManagerNodeReadAction< IndicesShardStoresRequest, IndicesShardStoresResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..969263df5621a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; import 
org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 418e83a5431ec..0dcaf1c524df5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index 24c5466c2ba0b..7ebcafcd5549d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.admin.indices.stats.IndexShardStats; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; 
import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -69,7 +69,7 @@ * * @opensearch.internal */ -public class TransportResizeAction extends TransportMasterNodeAction { +public class TransportResizeAction extends TransportClusterManagerNodeAction { private final MetadataCreateIndexService createIndexService; private final Client client; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java index 70c5a42f6e7ae..78cd4b7bc19c1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComponentTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -62,7 +62,7 @@ private DeleteComponentTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java index ae1ca59d2ed56..388e3d8f80748 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteComposableIndexTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -63,7 +63,7 @@ private DeleteComposableIndexTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java index 789d03f8e8d8c..5773fcf93c49e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.template.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for deleting an index template diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java 
b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java index e3a92107670ff..cf8c2762990d1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequest.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.template.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class DeleteIndexTemplateRequest extends MasterNodeRequest { +public class DeleteIndexTemplateRequest extends ClusterManagerNodeRequest { private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java index 2fc958ba93c6b..8f272a98d57a0 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateRequestBuilder.java @@ -31,8 +31,8 @@ package org.opensearch.action.admin.indices.template.delete; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class 
DeleteIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder< +public class DeleteIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< DeleteIndexTemplateRequest, AcknowledgedResponse, DeleteIndexTemplateRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java index 491b3e520ee51..cf481480d6806 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComponentTemplateAction.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -56,7 +56,7 @@ * * @opensearch.internal */ -public class TransportDeleteComponentTemplateAction extends TransportMasterNodeAction< +public class TransportDeleteComponentTemplateAction extends TransportClusterManagerNodeAction< DeleteComponentTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java index 
4e7f3f292d810..44a30189d8252 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteComposableIndexTemplateAction.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -56,7 +56,7 @@ * * @opensearch.internal */ -public class TransportDeleteComposableIndexTemplateAction extends TransportMasterNodeAction< +public class TransportDeleteComposableIndexTemplateAction extends TransportClusterManagerNodeAction< DeleteComposableIndexTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 89668fd0ed164..08fc6e4f17d5c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import 
org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -56,7 +56,9 @@ * * @opensearch.internal */ -public class TransportDeleteIndexTemplateAction extends TransportMasterNodeAction { +public class TransportDeleteIndexTemplateAction extends TransportClusterManagerNodeAction< + DeleteIndexTemplateRequest, + AcknowledgedResponse> { private static final Logger logger = LogManager.getLogger(TransportDeleteIndexTemplateAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 6722be7a066a4..c1b25f7b31fba 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.metadata.ComponentTemplate; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; @@ -67,7 +67,7 @@ private GetComponentTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeReadRequest { + public static class Request extends ClusterManagerNodeReadRequest { @Nullable private String name; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 79760e9914784..4b4ba92a47d8b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; @@ -67,7 +67,7 @@ private GetComposableIndexTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeReadRequest { + public static class Request extends ClusterManagerNodeReadRequest { @Nullable private String name; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java index 806b1ee0b9162..c0a990c0d8bbd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequest.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.template.get; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import 
org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -46,7 +46,7 @@ * * @opensearch.internal */ -public class GetIndexTemplatesRequest extends MasterNodeReadRequest { +public class GetIndexTemplatesRequest extends ClusterManagerNodeReadRequest { private String[] names; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java index d5e5845955dae..09de1733239fc 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/GetIndexTemplatesRequestBuilder.java @@ -31,7 +31,7 @@ package org.opensearch.action.admin.indices.template.get; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -39,7 +39,7 @@ * * @opensearch.internal */ -public class GetIndexTemplatesRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class GetIndexTemplatesRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetIndexTemplatesRequest, GetIndexTemplatesResponse, GetIndexTemplatesRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java index b7efb584ba92f..c6016ad78a681 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComponentTemplateAction.java @@ -35,7 +35,7 @@ import org.opensearch.ResourceNotFoundException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class TransportGetComponentTemplateAction extends TransportMasterNodeReadAction< +public class TransportGetComponentTemplateAction extends TransportClusterManagerNodeReadAction< GetComponentTemplateAction.Request, GetComponentTemplateAction.Response> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java index e3eb619b52569..405dc7afc769f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetComposableIndexTemplateAction.java @@ -35,7 +35,7 @@ import org.opensearch.ResourceNotFoundException; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class 
TransportGetComposableIndexTemplateAction extends TransportMasterNodeReadAction< +public class TransportGetComposableIndexTemplateAction extends TransportClusterManagerNodeReadAction< GetComposableIndexTemplateAction.Request, GetComposableIndexTemplateAction.Response> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 993f40fd4f625..44969022c9e06 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -34,7 +34,7 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -57,7 +57,9 @@ * * @opensearch.internal */ -public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAction { +public class TransportGetIndexTemplatesAction extends TransportClusterManagerNodeReadAction< + GetIndexTemplatesRequest, + GetIndexTemplatesResponse> { @Inject public TransportGetIndexTemplatesAction( diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 3db0ec47c5df2..d2ebccc058f78 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -49,7 +49,7 @@ * * @opensearch.internal */ -public class SimulateIndexTemplateRequest extends MasterNodeReadRequest { +public class SimulateIndexTemplateRequest extends ClusterManagerNodeReadRequest { private String indexName; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java index 8cd5aa53f3373..210e8030093e1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/SimulateTemplateAction.java @@ -36,7 +36,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.ValidateActions; import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Nullable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -64,7 +64,7 @@ private SimulateTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeReadRequest { + public static class Request extends ClusterManagerNodeReadRequest { @Nullable private String templateName; 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java index 89d04dc3f59f5..51a634e876886 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateIndexTemplateAction.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -81,7 +81,7 @@ * * @opensearch.internal */ -public class TransportSimulateIndexTemplateAction extends TransportMasterNodeReadAction< +public class TransportSimulateIndexTemplateAction extends TransportClusterManagerNodeReadAction< SimulateIndexTemplateRequest, SimulateIndexTemplateResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java index 5b14d0a00ebfe..5b7395b3bc3a1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/post/TransportSimulateTemplateAction.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import 
org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -67,7 +67,7 @@ * * @opensearch.internal */ -public class TransportSimulateTemplateAction extends TransportMasterNodeReadAction< +public class TransportSimulateTemplateAction extends TransportClusterManagerNodeReadAction< SimulateTemplateAction.Request, SimulateIndexTemplateResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java index ff2fdeaab8582..4df98a57b01f1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComponentTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.ComponentTemplate; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; @@ -65,7 +65,7 @@ private PutComponentTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private final String name; @Nullable private String cause; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java index 17cbe83faa625..1facbc137a754 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutComposableIndexTemplateAction.java @@ -36,8 +36,8 @@ import org.opensearch.action.ActionType; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.ComposableIndexTemplate; import org.opensearch.common.Nullable; @@ -70,7 +70,7 @@ private PutComposableIndexTemplateAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest implements IndicesRequest { + public static class Request extends ClusterManagerNodeRequest implements IndicesRequest { private final String name; @Nullable private String cause; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java index 06a9f6fbba409..eb21b81350fda 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.template.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * An action for putting an index template into the cluster state diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index 4c826477978fc..3cfb1a3ed0637 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -39,7 +39,7 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; @@ -80,7 +80,10 @@ * * @opensearch.internal */ -public class PutIndexTemplateRequest extends MasterNodeRequest implements IndicesRequest, ToXContentObject { +public class PutIndexTemplateRequest extends ClusterManagerNodeRequest + implements + IndicesRequest, + ToXContentObject { private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(PutIndexTemplateRequest.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java index df12dc5d66998..42ff1fb2aab4c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateRequestBuilder.java @@ -32,8 +32,8 @@ package 
org.opensearch.action.admin.indices.template.put; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; @@ -48,7 +48,7 @@ * * @opensearch.internal */ -public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder< +public class PutIndexTemplateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder< PutIndexTemplateRequest, AcknowledgedResponse, PutIndexTemplateRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java index 469c20e497822..4d63b338d999d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComponentTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -59,7 +59,7 @@ * * 
@opensearch.internal */ -public class TransportPutComponentTemplateAction extends TransportMasterNodeAction< +public class TransportPutComponentTemplateAction extends TransportClusterManagerNodeAction< PutComponentTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java index 19485afe5d706..73039c85596a8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutComposableIndexTemplateAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -55,7 +55,7 @@ * * @opensearch.internal */ -public class TransportPutComposableIndexTemplateAction extends TransportMasterNodeAction< +public class TransportPutComposableIndexTemplateAction extends TransportClusterManagerNodeAction< PutComposableIndexTemplateAction.Request, AcknowledgedResponse> { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 778e0b374e2aa..fb7696e207ca2 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -36,8 +36,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -59,7 +59,7 @@ * * @opensearch.internal */ -public class TransportPutIndexTemplateAction extends TransportMasterNodeAction { +public class TransportPutIndexTemplateAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportPutIndexTemplateAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java index 4c352a172c040..1faec4330e16e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/TransportUpgradeSettingsAction.java @@ -37,8 +37,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; import org.opensearch.cluster.block.ClusterBlockException; @@ -58,7 +58,7 @@ * * @opensearch.internal */ -public class TransportUpgradeSettingsAction extends TransportMasterNodeAction { +public class TransportUpgradeSettingsAction extends TransportClusterManagerNodeAction { private static final Logger logger = LogManager.getLogger(TransportUpgradeSettingsAction.class); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java index 05944e781d109..4c42b4abbf678 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.upgrade.post; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action for upgrading index settings diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java index d6b784e44befb..0fe8e83e30258 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java @@ -34,7 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; -import 
org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java index 6017be9747912..82bb78a9b89d6 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action to delete a pipeline diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java index 0bd102849eee8..8e770d49d6771 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java index 6a2eb494e8d3f..d26f0ba509ec8 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java index 62fa2cbc595a6..a490c68401466 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineTransportAction.java @@ -34,8 +34,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class DeletePipelineTransportAction extends TransportMasterNodeAction { +public class DeletePipelineTransportAction extends TransportClusterManagerNodeAction { private final IngestService ingestService; diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java index f6ae98a5ea193..1c7b7efe61455 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequest.java @@ 
-33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -45,7 +45,7 @@ * * @opensearch.internal */ -public class GetPipelineRequest extends MasterNodeReadRequest { +public class GetPipelineRequest extends ClusterManagerNodeReadRequest { private String[] ids; diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java index 6ba0d4fbfe2f7..bdc13523ffdc6 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.ingest; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; /** @@ -40,7 +40,7 @@ * * @opensearch.internal */ -public class GetPipelineRequestBuilder extends MasterNodeReadOperationRequestBuilder< +public class GetPipelineRequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder< GetPipelineRequest, GetPipelineResponse, GetPipelineRequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java index 9c7af1cfe3419..3a5493bfa4b36 100644 --- a/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/GetPipelineTransportAction.java @@ 
-34,7 +34,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class GetPipelineTransportAction extends TransportMasterNodeReadAction { +public class GetPipelineTransportAction extends TransportClusterManagerNodeReadAction { @Inject public GetPipelineTransportAction( diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java index 1fcbd783d246b..be47bff8f4e92 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionType; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; /** * Transport action to put a new pipeline diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java index d5fbaa46810f7..fcba2e720e8c6 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; import 
org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java index fec2cdef089e4..57c29147f1176 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java index 6f21744e5df08..c294321d39c43 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineTransportAction.java @@ -36,8 +36,8 @@ import org.opensearch.action.admin.cluster.node.info.NodeInfo; import org.opensearch.action.admin.cluster.node.info.NodesInfoRequest; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.OriginSettingClient; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; @@ -63,7 +63,7 
@@ * * @opensearch.internal */ -public class PutPipelineTransportAction extends TransportMasterNodeAction { +public class PutPipelineTransportAction extends TransportClusterManagerNodeAction { private final IngestService ingestService; private final OriginSettingClient client; diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java similarity index 93% rename from server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java index 7f665b4e658a1..b67356d2567b5 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.cluster.ack.AckedRequest; import org.opensearch.common.io.stream.StreamInput; @@ -46,7 +46,7 @@ * * @opensearch.internal */ -public abstract class AcknowledgedRequest> extends MasterNodeRequest +public abstract class AcknowledgedRequest> extends ClusterManagerNodeRequest implements AckedRequest { diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java similarity index 94% rename from server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java index 7a0824c6d30ca..fa957f159ec9d 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionType; import org.opensearch.client.OpenSearchClient; @@ -43,7 +43,7 @@ public abstract class AcknowledgedRequestBuilder< Request extends AcknowledgedRequest, Response extends AcknowledgedResponse, - RequestBuilder extends AcknowledgedRequestBuilder> extends MasterNodeOperationRequestBuilder< + RequestBuilder extends AcknowledgedRequestBuilder> extends ClusterManagerNodeOperationRequestBuilder< Request, Response, RequestBuilder> { diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java similarity index 98% rename from server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java index 415e52b68e368..1db116ffaf74a 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionResponse; import org.opensearch.common.ParseField; diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java similarity index 83% rename from server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java index b327e76a25873..6d8509a0671f2 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeOperationRequestBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionType; import org.opensearch.action.ActionRequestBuilder; @@ -43,14 +43,14 @@ * * @opensearch.internal */ -public abstract class MasterNodeOperationRequestBuilder< - Request extends MasterNodeRequest, +public abstract class ClusterManagerNodeOperationRequestBuilder< + Request extends ClusterManagerNodeRequest, Response extends ActionResponse, - RequestBuilder extends MasterNodeOperationRequestBuilder> extends ActionRequestBuilder< + RequestBuilder extends ClusterManagerNodeOperationRequestBuilder> extends ActionRequestBuilder< Request, Response> { - protected MasterNodeOperationRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + protected ClusterManagerNodeOperationRequestBuilder(OpenSearchClient client, ActionType action, Request request) { super(client, action, request); } diff --git 
a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java similarity index 77% rename from server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java index d36b419577cab..b0ac743e6a1dc 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadOperationRequestBuilder.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionType; import org.opensearch.action.ActionResponse; @@ -41,13 +41,13 @@ * * @opensearch.internal */ -public abstract class MasterNodeReadOperationRequestBuilder< - Request extends MasterNodeReadRequest, +public abstract class ClusterManagerNodeReadOperationRequestBuilder< + Request extends ClusterManagerNodeReadRequest, Response extends ActionResponse, - RequestBuilder extends MasterNodeReadOperationRequestBuilder> extends - MasterNodeOperationRequestBuilder { + RequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder> extends + ClusterManagerNodeOperationRequestBuilder { - protected MasterNodeReadOperationRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + protected ClusterManagerNodeReadOperationRequestBuilder(OpenSearchClient client, ActionType action, Request request) { super(client, action, request); } diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java 
b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadRequest.java similarity index 86% rename from server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadRequest.java index 3188f632c5ec8..e9fd0c77a5ec6 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeReadRequest.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -42,13 +42,14 @@ * * @opensearch.internal */ -public abstract class MasterNodeReadRequest> extends MasterNodeRequest { +public abstract class ClusterManagerNodeReadRequest> extends + ClusterManagerNodeRequest { protected boolean local = false; - protected MasterNodeReadRequest() {} + protected ClusterManagerNodeReadRequest() {} - protected MasterNodeReadRequest(StreamInput in) throws IOException { + protected ClusterManagerNodeReadRequest(StreamInput in) throws IOException { super(in); local = in.readBoolean(); } diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java similarity index 89% rename from server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java index 34a8c65dde491..9cce7562a988b 100644 --- a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ClusterManagerNodeRequest.java 
@@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionRequest; import org.opensearch.common.io.stream.StreamInput; @@ -44,15 +44,15 @@ * * @opensearch.internal */ -public abstract class MasterNodeRequest> extends ActionRequest { +public abstract class ClusterManagerNodeRequest> extends ActionRequest { public static final TimeValue DEFAULT_MASTER_NODE_TIMEOUT = TimeValue.timeValueSeconds(30); protected TimeValue masterNodeTimeout = DEFAULT_MASTER_NODE_TIMEOUT; - protected MasterNodeRequest() {} + protected ClusterManagerNodeRequest() {} - protected MasterNodeRequest(StreamInput in) throws IOException { + protected ClusterManagerNodeRequest(StreamInput in) throws IOException { super(in); masterNodeTimeout = in.readTimeValue(); } diff --git a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java similarity index 98% rename from server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java index d100874296844..dc24adcfa0ca1 100644 --- a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java similarity index 96% rename from server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java index 4e0a180fe0cd4..507a019390ff9 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeAction.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -71,10 +71,10 @@ * * @opensearch.internal */ -public abstract class TransportMasterNodeAction, Response extends ActionResponse> extends - HandledTransportAction { +public abstract class TransportClusterManagerNodeAction, Response extends ActionResponse> + extends HandledTransportAction { - private static final Logger logger = LogManager.getLogger(TransportMasterNodeAction.class); + private static final Logger logger = LogManager.getLogger(TransportClusterManagerNodeAction.class); protected final ThreadPool threadPool; protected final TransportService transportService; @@ -83,7 +83,7 @@ public abstract class TransportMasterNodeAction(listener, TransportMasterNodeAction.this::read) { + new ActionListenerResponseHandler(listener, TransportClusterManagerNodeAction.this::read) { @Override public void handleException(final TransportException exp) { Throwable 
cause = exp.unwrapCause(); diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java similarity index 87% rename from server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java index 8514174bca8c7..1bfd7faa90262 100644 --- a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeReadAction.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; @@ -46,10 +46,11 @@ * * @opensearch.internal */ -public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends - TransportMasterNodeAction { +public abstract class TransportClusterManagerNodeReadAction< + Request extends ClusterManagerNodeReadRequest, + Response extends ActionResponse> extends TransportClusterManagerNodeAction { - protected TransportMasterNodeReadAction( + protected TransportClusterManagerNodeReadAction( String actionName, TransportService transportService, ClusterService clusterService, @@ -61,7 +62,7 @@ protected TransportMasterNodeReadAction( this(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); } - protected TransportMasterNodeReadAction( + protected TransportClusterManagerNodeReadAction( String actionName, boolean checkSizeLimit, TransportService transportService, diff --git a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java 
b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java similarity index 93% rename from server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java index ae3b2350f574c..d0b9d291c16c7 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequest.java @@ -30,12 +30,12 @@ * GitHub history for details. */ -package org.opensearch.action.support.master.info; +package org.opensearch.action.support.clustermanager.info; import org.opensearch.Version; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ * * @opensearch.internal */ -public abstract class ClusterInfoRequest> extends MasterNodeReadRequest +public abstract class ClusterInfoRequest> extends ClusterManagerNodeReadRequest implements IndicesRequest.Replaceable { diff --git a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java similarity index 91% rename from server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java index 09ed9b590d9c4..77c1c3656ce59 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/support/clustermanager/info/ClusterInfoRequestBuilder.java @@ -29,12 +29,12 @@ * GitHub history for details. */ -package org.opensearch.action.support.master.info; +package org.opensearch.action.support.clustermanager.info; import org.opensearch.action.ActionType; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.MasterNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.util.ArrayUtils; @@ -46,7 +46,7 @@ public abstract class ClusterInfoRequestBuilder< Request extends ClusterInfoRequest, Response extends ActionResponse, - Builder extends ClusterInfoRequestBuilder> extends MasterNodeReadOperationRequestBuilder< + Builder extends ClusterInfoRequestBuilder> extends ClusterManagerNodeReadOperationRequestBuilder< Request, Response, Builder> { diff --git a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java similarity index 93% rename from server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java index e3ad4afcad02e..caf89fc7b6c8e 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/TransportClusterInfoAction.java @@ -29,12 +29,12 @@ * GitHub history for details. 
*/ -package org.opensearch.action.support.master.info; +package org.opensearch.action.support.clustermanager.info; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.TransportMasterNodeReadAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; @@ -50,7 +50,7 @@ * @opensearch.internal */ public abstract class TransportClusterInfoAction, Response extends ActionResponse> extends - TransportMasterNodeReadAction { + TransportClusterManagerNodeReadAction { public TransportClusterInfoAction( String actionName, diff --git a/server/src/main/java/org/opensearch/action/support/master/package-info.java b/server/src/main/java/org/opensearch/action/support/clustermanager/info/package-info.java similarity index 63% rename from server/src/main/java/org/opensearch/action/support/master/package-info.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/info/package-info.java index b0f6f7942b688..7f21d5d22ec2e 100644 --- a/server/src/main/java/org/opensearch/action/support/master/package-info.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/info/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Master Node transport handlers. */ -package org.opensearch.action.support.master; +/** Cluster Manager Node Information transport handlers. 
*/ +package org.opensearch.action.support.clustermanager.info; diff --git a/server/src/main/java/org/opensearch/action/support/master/info/package-info.java b/server/src/main/java/org/opensearch/action/support/clustermanager/package-info.java similarity index 66% rename from server/src/main/java/org/opensearch/action/support/master/info/package-info.java rename to server/src/main/java/org/opensearch/action/support/clustermanager/package-info.java index 6ae2eb5465db5..13d604ed71e3d 100644 --- a/server/src/main/java/org/opensearch/action/support/master/info/package-info.java +++ b/server/src/main/java/org/opensearch/action/support/clustermanager/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Master Node Information transport handlers. */ -package org.opensearch.action.support.master.info; +/** Cluster Manager Node transport handlers. */ +package org.opensearch.action.support.clustermanager; diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index f4eaa979ff18c..8907de6b0bac7 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -130,7 +130,7 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequestBuilder; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; import org.opensearch.tasks.TaskId; diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index c9cd0d0900b5a..ede22df071821 100644 --- 
a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -124,7 +124,7 @@ import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 6cc0827310bd1..8465d410b8ea2 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -339,7 +339,7 @@ import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.action.search.SearchScrollRequestBuilder; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.termvectors.MultiTermVectorsAction; import org.opensearch.action.termvectors.MultiTermVectorsRequest; import org.opensearch.action.termvectors.MultiTermVectorsRequestBuilder; diff --git a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java index 414152dd0af05..a183e195707af 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/MappingUpdatedAction.java @@ -37,7 +37,7 @@ import 
org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.mapping.put.AutoPutMappingAction; import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.client.Client; import org.opensearch.client.IndicesAdminClient; import org.opensearch.cluster.service.ClusterService; @@ -108,7 +108,7 @@ public void setClient(Client client) { /** * Update mappings on the cluster-manager node, waiting for the change to be committed, * but not for the mapping update to be applied on all nodes. The timeout specified by - * {@code timeout} is the cluster-manager node timeout ({@link MasterNodeRequest#masterNodeTimeout()}), + * {@code timeout} is the cluster-manager node timeout ({@link ClusterManagerNodeRequest#masterNodeTimeout()}), * potentially waiting for a cluster-manager node to be available. */ public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java index 412d4dba628cb..97f198e087a93 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -40,7 +40,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.ActiveShardsObserver; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import 
org.opensearch.cluster.ack.ClusterStateUpdateRequest; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java index 9c734fd7b3bdc..2ea0b6b5de2e9 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexTemplateService.java @@ -41,8 +41,8 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.action.support.master.AcknowledgedResponse; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.service.ClusterService; @@ -1526,7 +1526,7 @@ public static class PutRequest { String mappings = null; List aliases = new ArrayList<>(); - TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; + TimeValue masterTimeout = ClusterManagerNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; public PutRequest(String cause, String name) { this.cause = cause; @@ -1598,7 +1598,7 @@ public boolean acknowledged() { */ public static class RemoveRequest { final String name; - TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; + TimeValue masterTimeout = ClusterManagerNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; public RemoveRequest(String name) { this.name = name; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java index e514ad60e5eaf..12faf25731657 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java @@ -40,7 +40,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index b8256fe896da4..ac740c304d1f9 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -44,7 +44,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; diff --git a/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java index 69f6c7ca6c233..1dda269a68491 100644 --- a/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/CompletionPersistentTaskAction.java @@ -35,9 +35,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import 
org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -75,7 +75,7 @@ private CompletionPersistentTaskAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String taskId; @@ -139,10 +139,7 @@ public int hashCode() { * * @opensearch.internal */ - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - CompletionPersistentTaskAction.Request, - PersistentTaskResponse, - CompletionPersistentTaskAction.RequestBuilder> { + public static class RequestBuilder extends ClusterManagerNodeOperationRequestBuilder { protected RequestBuilder(OpenSearchClient client, CompletionPersistentTaskAction action) { super(client, action, new Request()); @@ -154,7 +151,7 @@ protected RequestBuilder(OpenSearchClient client, CompletionPersistentTaskAction * * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeAction { + public static class TransportAction extends TransportClusterManagerNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java index 4f706ac1c48fb..56436d0e1aa0c 100644 --- 
a/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/RemovePersistentTaskAction.java @@ -35,9 +35,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -72,7 +72,7 @@ private RemovePersistentTaskAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String taskId; @@ -121,10 +121,7 @@ public int hashCode() { * * @opensearch.internal */ - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - RemovePersistentTaskAction.Request, - PersistentTaskResponse, - RemovePersistentTaskAction.RequestBuilder> { + public static class RequestBuilder extends ClusterManagerNodeOperationRequestBuilder { protected RequestBuilder(OpenSearchClient client, RemovePersistentTaskAction action) { super(client, action, new Request()); @@ -142,7 +139,7 @@ public final RequestBuilder setTaskId(String taskId) { * * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeAction { + public static class TransportAction extends TransportClusterManagerNodeAction { private final PersistentTasksClusterService 
persistentTasksClusterService; diff --git a/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java b/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java index 122313e6a8ebf..6f2f3a565427b 100644 --- a/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java +++ b/server/src/main/java/org/opensearch/persistent/StartPersistentTaskAction.java @@ -35,9 +35,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -75,7 +75,7 @@ private StartPersistentTaskAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String taskId; @@ -173,10 +173,7 @@ public void setParams(PersistentTaskParams params) { * * @opensearch.internal */ - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - StartPersistentTaskAction.Request, - PersistentTaskResponse, - StartPersistentTaskAction.RequestBuilder> { + public static class RequestBuilder extends ClusterManagerNodeOperationRequestBuilder { protected RequestBuilder(OpenSearchClient client, StartPersistentTaskAction action) { super(client, action, new Request()); @@ -204,7 +201,7 @@ public RequestBuilder 
setRequest(PersistentTaskParams params) { * * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeAction { + public static class TransportAction extends TransportClusterManagerNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java b/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java index 5cbbcee937247..aee79ac1ba3ea 100644 --- a/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java +++ b/server/src/main/java/org/opensearch/persistent/UpdatePersistentTaskStatusAction.java @@ -35,9 +35,9 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; @@ -74,7 +74,7 @@ private UpdatePersistentTaskStatusAction() { * * @opensearch.internal */ - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String taskId; private long allocationId = -1L; @@ -148,10 +148,7 @@ public int hashCode() { * * @opensearch.internal */ - public static class RequestBuilder extends MasterNodeOperationRequestBuilder< - UpdatePersistentTaskStatusAction.Request, - 
PersistentTaskResponse, - UpdatePersistentTaskStatusAction.RequestBuilder> { + public static class RequestBuilder extends ClusterManagerNodeOperationRequestBuilder { protected RequestBuilder(OpenSearchClient client, UpdatePersistentTaskStatusAction action) { super(client, action, new Request()); @@ -174,7 +171,7 @@ public final RequestBuilder setState(PersistentTaskState state) { * @opensearch.internal */ - public static class TransportAction extends TransportMasterNodeAction { + public static class TransportAction extends TransportClusterManagerNodeAction { private final PersistentTasksClusterService persistentTasksClusterService; diff --git a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java index 4b4c020b43e74..d42350b34b1c0 100644 --- a/server/src/main/java/org/opensearch/rest/BaseRestHandler.java +++ b/server/src/main/java/org/opensearch/rest/BaseRestHandler.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.spell.LevenshteinDistance; import org.apache.lucene.util.CollectionUtil; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.CheckedConsumer; import org.opensearch.common.collect.Tuple; @@ -215,7 +215,7 @@ protected Set responseParams() { * @param logMsgKeyPrefix the key prefix of a deprecation message to avoid duplicate messages. 
*/ public static void parseDeprecatedMasterTimeoutParameter( - MasterNodeRequest mnr, + ClusterManagerNodeRequest mnr, RestRequest request, DeprecationLogger logger, String logMsgKeyPrefix diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index d3ac05ddf8f5f..c4de2a6d12d98 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.rest.action.admin.cluster.dangling; import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index 85aa973d16336..4e27354928e97 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -40,7 +40,7 @@ import java.util.List; import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import 
org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java index 9df7fe3c8fe5b..bdcfb5b577547 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestIndicesAction.java @@ -78,7 +78,7 @@ import static java.util.Arrays.asList; import static java.util.Collections.unmodifiableList; -import static org.opensearch.action.support.master.MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; +import static org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT; import static org.opensearch.rest.RestRequest.Method.GET; /** diff --git a/server/src/main/java/org/opensearch/script/ScriptService.java b/server/src/main/java/org/opensearch/script/ScriptService.java index 303fc5ccbcf88..a643a31ed4123 100644 --- a/server/src/main/java/org/opensearch/script/ScriptService.java +++ b/server/src/main/java/org/opensearch/script/ScriptService.java @@ -39,7 +39,7 @@ import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 106359c485f86..37e9c6d51abd0 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ 
b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -48,7 +48,7 @@ import org.opensearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.GroupedActionListener; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; @@ -3621,7 +3621,7 @@ private void startExecutableClones(SnapshotsInProgress snapshotsInProgress, @Nul } } - private class UpdateSnapshotStatusAction extends TransportMasterNodeAction< + private class UpdateSnapshotStatusAction extends TransportClusterManagerNodeAction< UpdateIndexShardSnapshotStatusRequest, UpdateIndexShardSnapshotStatusResponse> { UpdateSnapshotStatusAction( diff --git a/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java b/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java index 7eac935825783..db7dcf3cc5c75 100644 --- a/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java +++ b/server/src/main/java/org/opensearch/snapshots/UpdateIndexShardSnapshotStatusRequest.java @@ -32,7 +32,7 @@ package org.opensearch.snapshots; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ * * @opensearch.internal */ -public class UpdateIndexShardSnapshotStatusRequest extends MasterNodeRequest { +public class UpdateIndexShardSnapshotStatusRequest extends 
ClusterManagerNodeRequest { private final Snapshot snapshot; private final ShardId shardId; private final SnapshotsInProgress.ShardSnapshotStatus status; diff --git a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java index 648766681a377..155e75653bbf9 100644 --- a/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java +++ b/server/src/test/java/org/opensearch/action/RenamedTimeoutRequestParameterTests.java @@ -10,7 +10,7 @@ import org.junit.After; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.client.node.NodeClient; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.logging.DeprecationLogger; @@ -671,8 +671,8 @@ public void testPutStoredScript() { assertWarnings(MASTER_TIMEOUT_DEPRECATED_MESSAGE, "empty templates should no longer be used"); } - private MasterNodeRequest getMasterNodeRequest() { - return new MasterNodeRequest() { + private ClusterManagerNodeRequest getMasterNodeRequest() { + return new ClusterManagerNodeRequest() { @Override public ActionRequestValidationException validate() { return null; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java index d48eb1619d36c..5dc1adf4e1352 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestTests.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.reroute; -import org.opensearch.action.support.master.AcknowledgedRequest; -import 
org.opensearch.action.support.master.MasterNodeRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; import org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocateReplicaAllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; @@ -231,7 +231,7 @@ private RestRequest toRestRequest(ClusterRerouteRequest original) throws IOExcep if (original.isRetryFailed() || randomBoolean()) { params.put("retry_failed", Boolean.toString(original.isRetryFailed())); } - if (false == original.masterNodeTimeout().equals(MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT) || randomBoolean()) { + if (false == original.masterNodeTimeout().equals(ClusterManagerNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT) || randomBoolean()) { params.put("cluster_manager_timeout", original.masterNodeTimeout().toString()); } if (original.getCommands() != null) { diff --git a/server/src/test/java/org/opensearch/action/support/master/ShardsAcknowledgedResponseTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java similarity index 97% rename from server/src/test/java/org/opensearch/action/support/master/ShardsAcknowledgedResponseTests.java rename to server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java index 90a428a765053..9adef2732083d 100644 --- a/server/src/test/java/org/opensearch/action/support/master/ShardsAcknowledgedResponseTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponseTests.java @@ -29,7 +29,7 @@ * GitHub history for details. 
*/ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.Version; import org.opensearch.common.io.stream.NamedWriteableRegistry; diff --git a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java similarity index 98% rename from server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java rename to server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java index 512749346588e..879a2f8ea953f 100644 --- a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportClusterManagerNodeActionTests.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.OpenSearchException; import org.opensearch.Version; @@ -86,7 +86,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; -public class TransportMasterNodeActionTests extends OpenSearchTestCase { +public class TransportClusterManagerNodeActionTests extends OpenSearchTestCase { private static ThreadPool threadPool; private ClusterService clusterService; @@ -156,7 +156,7 @@ void assertListenerThrows(String msg, ActionFuture listener, Class klass) } } - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { Request() {} Request(StreamInput in) throws IOException { @@ -198,7 +198,7 @@ public void writeTo(StreamOutput out) throws IOException { } } - class Action extends TransportMasterNodeAction { + class Action extends TransportClusterManagerNodeAction { Action(String actionName, 
TransportService transportService, ClusterService clusterService, ThreadPool threadPool) { super( actionName, diff --git a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java similarity index 79% rename from server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java rename to server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java index 391103eb5cebd..3927cd1d13040 100644 --- a/server/src/test/java/org/opensearch/action/support/master/TransportMasterNodeActionUtils.java +++ b/server/src/test/java/org/opensearch/action/support/clustermanager/TransportMasterNodeActionUtils.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.action.support.master; +package org.opensearch.action.support.clustermanager; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionResponse; @@ -39,11 +39,11 @@ public class TransportMasterNodeActionUtils { /** - * Allows to directly call {@link TransportMasterNodeAction#masterOperation(MasterNodeRequest, ClusterState, ActionListener)} which is + * Allows to directly call {@link TransportClusterManagerNodeAction#masterOperation(ClusterManagerNodeRequest, ClusterState, ActionListener)} which is * a protected method. 
*/ - public static , Response extends ActionResponse> void runClusterManagerOperation( - TransportMasterNodeAction clusterManagerNodeAction, + public static , Response extends ActionResponse> void runClusterManagerOperation( + TransportClusterManagerNodeAction clusterManagerNodeAction, Request request, ClusterState clusterState, ActionListener actionListener diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index c3a16a1e25bc8..bfc388119c609 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.opensearch.cluster.service.ClusterService; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java index 1e52fa380793e..ed7195df367bc 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -36,7 +36,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.IndicesAdminClient; diff --git a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java index 8139ceec4611f..c37a610b28bc9 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/opensearch/indices/cluster/ClusterStateChanges.java @@ -55,9 +55,9 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.DestructiveOperations; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; -import org.opensearch.action.support.master.TransportMasterNodeActionUtils; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.action.support.clustermanager.TransportMasterNodeActionUtils; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.ClusterStateTaskExecutor.ClusterTasksResult; @@ -463,8 +463,8 @@ private ClusterState runTasks(ClusterStateTaskExecutor executor, ClusterS } } - private , Response extends ActionResponse> ClusterState execute( - TransportMasterNodeAction masterNodeAction, + private , Response extends ActionResponse> ClusterState execute( + TransportClusterManagerNodeAction masterNodeAction, Request request, ClusterState clusterState ) { diff --git a/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java b/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java index 
a44dc66d6feac..775b4bb185881 100644 --- a/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java +++ b/server/src/test/java/org/opensearch/indices/settings/InternalOrPrivateSettingsPlugin.java @@ -38,8 +38,8 @@ import org.opensearch.action.ActionResponse; import org.opensearch.action.ActionType; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.master.MasterNodeRequest; -import org.opensearch.action.support.master.TransportMasterNodeAction; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; @@ -90,7 +90,7 @@ public UpdateInternalOrPrivateAction() { super(NAME, UpdateInternalOrPrivateAction.Response::new); } - public static class Request extends MasterNodeRequest { + public static class Request extends ClusterManagerNodeRequest { private String index; private String key; @@ -139,7 +139,7 @@ public void writeTo(StreamOutput out) throws IOException {} } - public static class TransportUpdateInternalOrPrivateAction extends TransportMasterNodeAction< + public static class TransportUpdateInternalOrPrivateAction extends TransportClusterManagerNodeAction< UpdateInternalOrPrivateAction.Request, UpdateInternalOrPrivateAction.Response> { diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 14f9a46169fbb..5ce970e0633d2 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import 
org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 68a6af25a7c82..4a18415751718 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -100,7 +100,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.TransportAction; import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.action.update.UpdateHelper; import org.opensearch.client.AdminClient; import org.opensearch.client.node.NodeClient; diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 3594bf9f53ca4..e3569b08ee617 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -38,7 +38,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.master.AcknowledgedResponse; +import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index 26081d947431d..407d9cef1f63c 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -40,7 +40,7 @@ import org.opensearch.action.admin.indices.datastream.DeleteDataStreamAction; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 16d44d1f8eeb4..96edfdb40e531 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -51,8 +51,8 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.action.support.master.AcknowledgedRequestBuilder; -import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import 
org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata; From 677dd7c889dc5a9a5caee0ba847c2978f5180e6f Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Wed, 15 Jun 2022 12:21:06 -0400 Subject: [PATCH 66/75] Fixing flakiness of ShuffleForcedMergePolicyTests (#3591) Signed-off-by: Andriy Redko --- .../opensearch/lucene/index/ShuffleForcedMergePolicyTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java index e583c12473a30..8842b7db5c6b4 100644 --- a/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java +++ b/server/src/test/java/org/opensearch/lucene/index/ShuffleForcedMergePolicyTests.java @@ -58,7 +58,7 @@ public class ShuffleForcedMergePolicyTests extends BaseMergePolicyTestCase { public void testDiagnostics() throws IOException { try (Directory dir = newDirectory()) { - IndexWriterConfig iwc = newIndexWriterConfig(); + IndexWriterConfig iwc = newIndexWriterConfig().setMaxFullFlushMergeWaitMillis(0); MergePolicy mp = new ShuffleForcedMergePolicy(newTieredMergePolicy()); iwc.setMergePolicy(mp); boolean sorted = random().nextBoolean(); From 6c1a18a84b957d2df9ee8f913482e241cb4adb66 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Wed, 15 Jun 2022 13:00:15 -0700 Subject: [PATCH 67/75] Deprecate classes in org.opensearch.action.support.master (#3593) Signed-off-by: Tianli Feng --- .../support/master/AcknowledgedRequest.java | 55 ++++++++++++ .../master/AcknowledgedRequestBuilder.java | 53 +++++++++++ .../support/master/AcknowledgedResponse.java | 56 ++++++++++++ .../MasterNodeOperationRequestBuilder.java | 57 ++++++++++++ ...MasterNodeReadOperationRequestBuilder.java | 58 ++++++++++++ .../support/master/MasterNodeReadRequest.java | 53 +++++++++++ 
.../support/master/MasterNodeRequest.java | 52 +++++++++++ .../master/ShardsAcknowledgedResponse.java | 50 +++++++++++ .../master/TransportMasterNodeAction.java | 89 ++++++++++++++++++ .../master/TransportMasterNodeReadAction.java | 90 +++++++++++++++++++ .../master/info/ClusterInfoRequest.java | 53 +++++++++++ .../info/ClusterInfoRequestBuilder.java | 53 +++++++++++ .../info/TransportClusterInfoAction.java | 62 +++++++++++++ .../support/master/info/package-info.java | 15 ++++ .../action/support/master/package-info.java | 15 ++++ 15 files changed, 811 insertions(+) create mode 100644 server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java create mode 
100644 server/src/main/java/org/opensearch/action/support/master/info/package-info.java create mode 100644 server/src/main/java/org/opensearch/action/support/master/package-info.java diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java new file mode 100644 index 0000000000000..857f4dc26a111 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Abstract class that allows to mark action requests that support acknowledgements. 
+ * Facilitates consistency across different api. + * + * @opensearch.internal + */ +public abstract class AcknowledgedRequest> extends + org.opensearch.action.support.clustermanager.AcknowledgedRequest { + + protected AcknowledgedRequest() { + super(); + } + + protected AcknowledgedRequest(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java new file mode 100644 index 0000000000000..e247734691eca --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.ActionType; +import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.client.OpenSearchClient; + +/** + * Base request builder for cluster-manager node operations that support acknowledgements + * + * @opensearch.internal + */ +public abstract class AcknowledgedRequestBuilder< + Request extends AcknowledgedRequest, + Response extends AcknowledgedResponse, + RequestBuilder extends AcknowledgedRequestBuilder> extends + org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder { + + protected AcknowledgedRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + super(client, action, request); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java new file mode 100644 index 0000000000000..86ae1c313a8e6 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java @@ -0,0 +1,56 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.support.master; + +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * A response that indicates that a request has been acknowledged + * + * @opensearch.internal + */ +public class AcknowledgedResponse extends org.opensearch.action.support.clustermanager.AcknowledgedResponse { + + public AcknowledgedResponse(StreamInput in) throws IOException { + super(in); + } + + public AcknowledgedResponse(StreamInput in, boolean readAcknowledged) throws IOException { + super(in, readAcknowledged); + } + + public AcknowledgedResponse(boolean acknowledged) { + super(acknowledged); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java new file mode 100644 index 0000000000000..9c96c45e11847 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeOperationRequestBuilder.java @@ -0,0 +1,57 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.client.OpenSearchClient; + +/** + * Base request builder for cluster-manager node operations + * + * @opensearch.internal + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeOperationRequestBuilder} + */ +@Deprecated +public abstract class MasterNodeOperationRequestBuilder< + Request extends ClusterManagerNodeRequest, + Response extends ActionResponse, + RequestBuilder extends MasterNodeOperationRequestBuilder> extends + ClusterManagerNodeOperationRequestBuilder { + + protected MasterNodeOperationRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + super(client, action, request); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java new file mode 100644 index 0000000000000..2ac6b2ba05b4c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadOperationRequestBuilder.java @@ -0,0 +1,58 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadOperationRequestBuilder; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.client.OpenSearchClient; + +/** + * Base request builder for cluster-manager node read operations that can be executed on the local node as well + * + * @opensearch.internal + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeReadOperationRequestBuilder} + */ +@Deprecated +public abstract class MasterNodeReadOperationRequestBuilder< + Request extends ClusterManagerNodeReadRequest, + Response extends ActionResponse, + RequestBuilder extends ClusterManagerNodeReadOperationRequestBuilder> extends + ClusterManagerNodeOperationRequestBuilder { + + protected MasterNodeReadOperationRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + super(client, action, request); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java new file mode 100644 index 0000000000000..d8c407722ed8d --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeReadRequest.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeReadRequest; +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Base request for cluster-manager based read operations that allows to read the cluster state from the local node if needed + * + * @opensearch.internal + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeReadRequest} + */ +@Deprecated +public abstract class MasterNodeReadRequest> extends ClusterManagerNodeReadRequest { + protected MasterNodeReadRequest() {} + + protected MasterNodeReadRequest(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java new file mode 100644 index 0000000000000..ca72e29913326 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/MasterNodeRequest.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * A based request for cluster-manager based operation. 
+ * + * @opensearch.internal + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeRequest} + */ +@Deprecated +public abstract class MasterNodeRequest> extends ClusterManagerNodeRequest { + + protected MasterNodeRequest(StreamInput in) throws IOException { + super(in); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java new file mode 100644 index 0000000000000..ac22c0d4eb542 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.master; + +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Transport response for shard acknowledgements + * + * @opensearch.internal + */ +public abstract class ShardsAcknowledgedResponse extends org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse { + + protected ShardsAcknowledgedResponse(StreamInput in, boolean readShardsAcknowledged) throws IOException { + super(in, readShardsAcknowledged); + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java new file mode 100644 index 0000000000000..5805baad0946b --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeAction.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. 
See + * GitHub history for details. + */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +/** + * A base class for operations that needs to be performed on the cluster-manager node. + * + * @opensearch.internal + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeRequest} + */ +@Deprecated +public abstract class TransportMasterNodeAction, Response extends ActionResponse> extends + TransportClusterManagerNodeAction { + + protected TransportMasterNodeAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + } + + protected TransportMasterNodeAction( + String actionName, + boolean canTripCircuitBreaker, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + actionName, + canTripCircuitBreaker, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + +} diff --git 
a/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java new file mode 100644 index 0000000000000..9b3d34ad3d931 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/TransportMasterNodeReadAction.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.master; + +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeReadAction; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +/** + * A base class for read operations that needs to be performed on the cluster-manager node. + * Can also be executed on the local node if needed. + * + * @opensearch.internal + * @deprecated As of 2.1, because supporting inclusive language, replaced by {@link ClusterManagerNodeRequest} + */ +@Deprecated +public abstract class TransportMasterNodeReadAction, Response extends ActionResponse> extends + TransportClusterManagerNodeReadAction { + + protected TransportMasterNodeReadAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super(actionName, true, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + } + + protected TransportMasterNodeReadAction( + String actionName, + boolean checkSizeLimit, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super( + actionName, + checkSizeLimit, + transportService, + clusterService, + threadPool, + actionFilters, + request, + indexNameExpressionResolver + ); + } + +} diff --git 
a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java new file mode 100644 index 0000000000000..1f59fca1b4081 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequest.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.master.info; + +import org.opensearch.common.io.stream.StreamInput; + +import java.io.IOException; + +/** + * Transport request for cluster information + * + * @opensearch.internal + */ +public abstract class ClusterInfoRequest> extends + org.opensearch.action.support.clustermanager.info.ClusterInfoRequest { + + public ClusterInfoRequest() {} + + public ClusterInfoRequest(StreamInput in) throws IOException { + super(in); + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java new file mode 100644 index 0000000000000..c13dbe296dff2 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/info/ClusterInfoRequestBuilder.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.action.support.master.info; + +import org.opensearch.action.ActionType; +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.clustermanager.info.ClusterInfoRequest; +import org.opensearch.client.OpenSearchClient; + +/** + * Transport request builder for cluster information + * + * @opensearch.internal + */ +public abstract class ClusterInfoRequestBuilder< + Request extends ClusterInfoRequest, + Response extends ActionResponse, + Builder extends ClusterInfoRequestBuilder> extends + org.opensearch.action.support.clustermanager.info.ClusterInfoRequestBuilder { + + protected ClusterInfoRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + super(client, action, request); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java new file mode 100644 index 0000000000000..26d31b874f2c0 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/info/TransportClusterInfoAction.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.action.support.master.info; + +import org.opensearch.action.ActionResponse; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +/** + * Perform cluster information action + * + * @opensearch.internal + */ +public abstract class TransportClusterInfoAction, Response extends ActionResponse> extends + org.opensearch.action.support.clustermanager.info.TransportClusterInfoAction { + + public TransportClusterInfoAction( + String actionName, + TransportService transportService, + ClusterService clusterService, + ThreadPool threadPool, + ActionFilters actionFilters, + Writeable.Reader request, + IndexNameExpressionResolver indexNameExpressionResolver + ) { + super(actionName, transportService, clusterService, threadPool, actionFilters, request, indexNameExpressionResolver); + } + +} diff --git a/server/src/main/java/org/opensearch/action/support/master/info/package-info.java b/server/src/main/java/org/opensearch/action/support/master/info/package-info.java new file mode 100644 index 0000000000000..8f21383c1b90c --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/info/package-info.java @@ -0,0 +1,15 @@ +/* + * 
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Master Node Information transport handlers. + * + * As of 2.1, because supporting inclusive language, replaced by {@link org.opensearch.action.support.clustermanager.info} + */ +@Deprecated +package org.opensearch.action.support.master.info; diff --git a/server/src/main/java/org/opensearch/action/support/master/package-info.java b/server/src/main/java/org/opensearch/action/support/master/package-info.java new file mode 100644 index 0000000000000..9e90d96986fe1 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/master/package-info.java @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * Master Node transport handlers. 
+ * + * As of 2.1, because supporting inclusive language, replaced by {@link org.opensearch.action.support.clustermanager} + */ +@Deprecated +package org.opensearch.action.support.master; From 688e795cf771fb55b6da97edf128e9b491097003 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Wed, 15 Jun 2022 15:51:01 -0700 Subject: [PATCH 68/75] Add release notes for version 2.0.1 (#3595) Signed-off-by: Kunal Kotwani --- release-notes/opensearch.release-notes-2.0.1.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 release-notes/opensearch.release-notes-2.0.1.md diff --git a/release-notes/opensearch.release-notes-2.0.1.md b/release-notes/opensearch.release-notes-2.0.1.md new file mode 100644 index 0000000000000..e2918d14a055c --- /dev/null +++ b/release-notes/opensearch.release-notes-2.0.1.md @@ -0,0 +1,9 @@ +## 2022-06-15 Version 2.0.1 Release Notes + +### Bug Fixes +* Fixing the Node Sniffer RestClient support for OpenSearch 2.x ([#3522](https://github.com/opensearch-project/OpenSearch/pull/3522)) +* Adding MainResponse version override cluster setting ([#3536](https://github.com/opensearch-project/OpenSearch/pull/3536)) + +### Tests +* Fixing Docker test for Ubuntu ([#3465](https://github.com/opensearch-project/OpenSearch/pull/3465)) +* [Type removal] Fixing the cluster upgrade backward compatibility test ([#3531](https://github.com/opensearch-project/OpenSearch/pull/3531)) From 94945272912259159fae6393d5d856fdf909e15b Mon Sep 17 00:00:00 2001 From: George Apaaboah <35894485+GeorgeAp@users.noreply.github.com> Date: Thu, 16 Jun 2022 21:24:52 +0200 Subject: [PATCH 69/75] Fix NPE when minBound/maxBound is not set before being called. 
(#3605) Signed-off-by: George Apaaboah --- .../histogram/HistogramAggregationBuilder.java | 4 ++-- .../search/aggregations/bucket/HistogramTests.java | 13 +++++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index ceb7709907057..72ce0e5e831d5 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -227,12 +227,12 @@ public HistogramAggregationBuilder offset(double offset) { /** Get the current minimum bound that is set on this builder. */ public double minBound() { - return extendedBounds.getMin(); + return DoubleBounds.getEffectiveMin(extendedBounds); } /** Get the current maximum bound that is set on this builder. */ public double maxBound() { - return extendedBounds.getMax(); + return DoubleBounds.getEffectiveMax(extendedBounds); } protected DoubleBounds extendedBounds() { diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java index e233bee85462c..b4ec55543f494 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/HistogramTests.java @@ -103,6 +103,19 @@ public void testInvalidBounds() { assertThat(ex.getMessage(), equalTo("max bound [0.4] must be greater than min bound [0.5]")); } + /** + * Check that minBound/maxBound does not throw {@link NullPointerException} when called before being set. 
+ */ + public void testMinBoundMaxBoundDefaultValues() { + HistogramAggregationBuilder factory = new HistogramAggregationBuilder("foo"); + + double minValue = factory.minBound(); + double maxValue = factory.maxBound(); + + assertThat(minValue, equalTo(Double.POSITIVE_INFINITY)); + assertThat(maxValue, equalTo(Double.NEGATIVE_INFINITY)); + } + private List randomOrder() { List orders = new ArrayList<>(); switch (randomInt(4)) { From 5b3a1643ac1d60b18550277c9979e9962e5ce502 Mon Sep 17 00:00:00 2001 From: "opensearch-trigger-bot[bot]" <98922864+opensearch-trigger-bot[bot]@users.noreply.github.com> Date: Thu, 16 Jun 2022 18:49:40 -0400 Subject: [PATCH 70/75] Added bwc version 2.0.2 (#3613) Co-authored-by: opensearch-ci-bot --- .ci/bwcVersions | 1 + server/src/main/java/org/opensearch/Version.java | 1 + 2 files changed, 2 insertions(+) diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 378c0f52da3ad..f2d4aef40b4d9 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -43,4 +43,5 @@ BWC_VERSION: - "1.3.4" - "2.0.0" - "2.0.1" + - "2.0.2" - "2.1.0" diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 2cc8cde2cf0f3..1672378fb8225 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -90,6 +90,7 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_1_3_4 = new Version(1030499, org.apache.lucene.util.Version.LUCENE_8_10_1); public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_0_1 = new Version(2000199, org.apache.lucene.util.Version.LUCENE_9_1_0); + public static final Version V_2_0_2 = new Version(2000299, org.apache.lucene.util.Version.LUCENE_9_1_0); public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_3_0_0 = new Version(3000099, 
org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version CURRENT = V_3_0_0; From bc3dffb2098d7a11609e51aaaec056f94a9afa90 Mon Sep 17 00:00:00 2001 From: Ahmad AbuKhalil <105249973+aabukhalil@users.noreply.github.com> Date: Fri, 17 Jun 2022 11:32:10 -0700 Subject: [PATCH 71/75] Fix false positive query timeouts due to using cached time (#3454) * Fix false positive query timeouts due to using cached time Signed-off-by: Ahmad AbuKhalil * delegate nanoTime call to SearchContext Signed-off-by: Ahmad AbuKhalil * add override to SearchContext getRelativeTimeInMillis to force non cached time Signed-off-by: Ahmad AbuKhalil --- .../NanoTimeVsCurrentTimeMillisBenchmark.java | 42 +++++++++++++ .../search/internal/SearchContext.java | 16 ++++- .../opensearch/search/query/QueryPhase.java | 32 +++++++--- .../search/DefaultSearchContextTests.java | 4 ++ .../search/query/QueryPhaseTests.java | 59 +++++++++++++++++++ 5 files changed, 143 insertions(+), 10 deletions(-) create mode 100644 benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java b/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java new file mode 100644 index 0000000000000..4e4662a8f0427 --- /dev/null +++ b/benchmarks/src/main/java/org/opensearch/benchmark/time/NanoTimeVsCurrentTimeMillisBenchmark.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.benchmark.time; + +import org.openjdk.jmh.annotations.*; + +import java.util.concurrent.TimeUnit; + +@Fork(3) +@Warmup(iterations = 10) +@Measurement(iterations = 20) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.NANOSECONDS) +@State(Scope.Benchmark) +@SuppressWarnings("unused") // invoked by benchmarking framework +public class NanoTimeVsCurrentTimeMillisBenchmark { + private volatile long var = 0; + + @Benchmark + public long currentTimeMillis() { + return System.currentTimeMillis(); + } + + @Benchmark + public long nanoTime() { + return System.nanoTime(); + } + + /* + * this acts as upper bound of how time is cached in org.opensearch.threadpool.ThreadPool + * */ + @Benchmark + public long accessLongVar() { + return var++; + } +} diff --git a/server/src/main/java/org/opensearch/search/internal/SearchContext.java b/server/src/main/java/org/opensearch/search/internal/SearchContext.java index 60438014f8d53..76d0d7b72c6b4 100644 --- a/server/src/main/java/org/opensearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/opensearch/search/internal/SearchContext.java @@ -388,10 +388,24 @@ public final boolean hasOnlySuggest() { /** * Returns time in milliseconds that can be used for relative time calculations. - * WARN: This is not the epoch time. + * WARN: This is not the epoch time and can be a cached time. */ public abstract long getRelativeTimeInMillis(); + /** + * Returns time in milliseconds that can be used for relative time calculations. this method will fall back to + * {@link SearchContext#getRelativeTimeInMillis()} (which might be a cached time) if useCache was set to true else it will be just be a + * wrapper of {@link System#nanoTime()} converted to milliseconds. + * @param useCache to allow using cached time if true or forcing calling {@link System#nanoTime()} if false + * @return Returns time in milliseconds that can be used for relative time calculations. 
+ */ + public long getRelativeTimeInMillis(boolean useCache) { + if (useCache) { + return getRelativeTimeInMillis(); + } + return TimeValue.nsecToMSec(System.nanoTime()); + } + /** Return a view of the additional query collector managers that should be run for this context. */ public abstract Map, CollectorManager> queryCollectorManagers(); diff --git a/server/src/main/java/org/opensearch/search/query/QueryPhase.java b/server/src/main/java/org/opensearch/search/query/QueryPhase.java index 8de44a7448023..94e1524cccb7d 100644 --- a/server/src/main/java/org/opensearch/search/query/QueryPhase.java +++ b/server/src/main/java/org/opensearch/search/query/QueryPhase.java @@ -257,15 +257,7 @@ static boolean executeInternal(SearchContext searchContext, QueryPhaseSearcher q final Runnable timeoutRunnable; if (timeoutSet) { - final long startTime = searchContext.getRelativeTimeInMillis(); - final long timeout = searchContext.timeout().millis(); - final long maxTime = startTime + timeout; - timeoutRunnable = searcher.addQueryCancellation(() -> { - final long time = searchContext.getRelativeTimeInMillis(); - if (time > maxTime) { - throw new TimeExceededException(); - } - }); + timeoutRunnable = searcher.addQueryCancellation(createQueryTimeoutChecker(searchContext)); } else { timeoutRunnable = null; } @@ -309,6 +301,28 @@ static boolean executeInternal(SearchContext searchContext, QueryPhaseSearcher q } } + /** + * Create runnable which throws {@link TimeExceededException} when the runnable is called after timeout + runnable creation time + * exceeds currentTime + * @param searchContext to extract timeout from and to get relative time from + * @return the created runnable + */ + static Runnable createQueryTimeoutChecker(final SearchContext searchContext) { + /* for startTime, relative non-cached precise time must be used to prevent false positive timeouts. 
+ * Using cached time for startTime will fail and produce false positive timeouts when maxTime = (startTime + timeout) falls in + * next time cache slot(s) AND time caching lifespan > passed timeout */ + final long startTime = searchContext.getRelativeTimeInMillis(false); + final long maxTime = startTime + searchContext.timeout().millis(); + return () -> { + /* As long as startTime is non cached time, using cached time here might only produce false negative timeouts within the time + * cache life span which is acceptable */ + final long time = searchContext.getRelativeTimeInMillis(); + if (time > maxTime) { + throw new TimeExceededException(); + } + }; + } + private static boolean searchWithCollector( SearchContext searchContext, ContextIndexSearcher searcher, diff --git a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java index 3c83f899dd1b5..0ec7d090f1a04 100644 --- a/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java +++ b/server/src/test/java/org/opensearch/search/DefaultSearchContextTests.java @@ -368,6 +368,10 @@ protected Engine.Searcher acquireSearcherInternal(String source) { ParsedQuery parsedQuery = ParsedQuery.parsedMatchAllQuery(); context3.sliceBuilder(null).parsedQuery(parsedQuery).preProcess(false); assertEquals(context3.query(), context3.buildFilteredQuery(parsedQuery.query())); + // make sure getPreciseRelativeTimeInMillis is same as System.nanoTime() + long timeToleranceInMs = 10; + long currTime = TimeValue.nsecToMSec(System.nanoTime()); + assertTrue(Math.abs(context3.getRelativeTimeInMillis(false) - currTime) <= timeToleranceInMs); when(queryShardContext.getIndexSettings()).thenReturn(indexSettings); when(queryShardContext.fieldMapper(anyString())).thenReturn(mock(MappedFieldType.class)); diff --git a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java 
b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java index 2234c8a980923..9b2edccff82ee 100644 --- a/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java +++ b/server/src/test/java/org/opensearch/search/query/QueryPhaseTests.java @@ -85,6 +85,7 @@ import org.apache.lucene.util.FixedBitSet; import org.opensearch.action.search.SearchShardTask; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.index.mapper.MappedFieldType; import org.opensearch.index.mapper.MapperService; @@ -105,6 +106,7 @@ import org.opensearch.search.sort.SortAndFormats; import org.opensearch.tasks.TaskCancelledException; import org.opensearch.test.TestSearchContext; +import org.opensearch.threadpool.ThreadPool; import java.io.IOException; import java.util.ArrayList; @@ -117,9 +119,14 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.opensearch.search.query.TopDocsCollectorContext.hasInfMaxScore; public class QueryPhaseTests extends IndexShardTestCase { @@ -1079,6 +1086,58 @@ public void testCancellationDuringPreprocess() throws IOException { } } + public void testQueryTimeoutChecker() throws Exception { + long timeCacheLifespan = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(Settings.EMPTY).millis(); + long timeTolerance = timeCacheLifespan / 20; + + // should throw time exceed exception for sure after timeCacheLifespan*2+timeTolerance (next's next cached time 
is available) + assertThrows( + QueryPhase.TimeExceededException.class, + () -> createTimeoutCheckerThenWaitThenRun(timeCacheLifespan, timeCacheLifespan * 2 + timeTolerance, true, false) + ); + + // should not throw time exceed exception after timeCacheLifespan+timeTolerance because new cached time - init time < timeout + createTimeoutCheckerThenWaitThenRun(timeCacheLifespan, timeCacheLifespan + timeTolerance, true, false); + + // should not throw time exceed exception after timeout < timeCacheLifespan when cached time didn't change + createTimeoutCheckerThenWaitThenRun(timeCacheLifespan / 2, timeCacheLifespan / 2 + timeTolerance, false, true); + createTimeoutCheckerThenWaitThenRun(timeCacheLifespan / 4, timeCacheLifespan / 2 + timeTolerance, false, true); + } + + private void createTimeoutCheckerThenWaitThenRun( + long timeout, + long sleepAfterCreation, + boolean checkCachedTimeChanged, + boolean checkCachedTimeHasNotChanged + ) throws Exception { + long timeCacheLifespan = ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.get(Settings.EMPTY).millis(); + long timeTolerance = timeCacheLifespan / 20; + long currentTimeDiffWithCachedTime = TimeValue.nsecToMSec(System.nanoTime()) - threadPool.relativeTimeInMillis(); + // need to run this test approximately at the start of cached time window + long timeToAlignTimeWithCachedTimeOffset = timeCacheLifespan - currentTimeDiffWithCachedTime + timeTolerance; + Thread.sleep(timeToAlignTimeWithCachedTimeOffset); + + long initialRelativeCachedTime = threadPool.relativeTimeInMillis(); + SearchContext mockedSearchContext = mock(SearchContext.class); + when(mockedSearchContext.timeout()).thenReturn(TimeValue.timeValueMillis(timeout)); + when(mockedSearchContext.getRelativeTimeInMillis()).thenAnswer(invocation -> threadPool.relativeTimeInMillis()); + when(mockedSearchContext.getRelativeTimeInMillis(eq(false))).thenCallRealMethod(); + Runnable queryTimeoutChecker = QueryPhase.createQueryTimeoutChecker(mockedSearchContext); + // make sure 
next time slot become available + Thread.sleep(sleepAfterCreation); + if (checkCachedTimeChanged) { + assertNotEquals(initialRelativeCachedTime, threadPool.relativeTimeInMillis()); + } + if (checkCachedTimeHasNotChanged) { + assertEquals(initialRelativeCachedTime, threadPool.relativeTimeInMillis()); + } + queryTimeoutChecker.run(); + verify(mockedSearchContext, times(1)).timeout(); + verify(mockedSearchContext, times(1)).getRelativeTimeInMillis(eq(false)); + verify(mockedSearchContext, atLeastOnce()).getRelativeTimeInMillis(); + verifyNoMoreInteractions(mockedSearchContext); + } + private static class TestSearchContextWithRewriteAndCancellation extends TestSearchContext { private TestSearchContextWithRewriteAndCancellation( From 75b86b9a0389d932fea5422c25ccba377b505585 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Fri, 17 Jun 2022 13:51:30 -0700 Subject: [PATCH 72/75] Fix random gradle check failure issue 3584. (#3627) --- .../engine/NRTReplicationEngineTests.java | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 6aa00bb9312dd..d3496fcb5d13a 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -110,8 +110,9 @@ public void testUpdateSegments() throws Exception { engine.refresh("test"); - nrtEngine.updateSegments(engine.getLatestSegmentInfos(), engine.getProcessedLocalCheckpoint()); - assertMatchingSegmentsAndCheckpoints(nrtEngine); + final SegmentInfos latestPrimaryInfos = engine.getLatestSegmentInfos(); + nrtEngine.updateSegments(latestPrimaryInfos, engine.getProcessedLocalCheckpoint()); + assertMatchingSegmentsAndCheckpoints(nrtEngine, latestPrimaryInfos); // assert a doc from the operations exists. 
final ParsedDocument parsedDoc = createParsedDoc(operations.stream().findFirst().get().id(), null); @@ -139,8 +140,9 @@ public void testUpdateSegments() throws Exception { ); } - nrtEngine.updateSegments(engine.getLastCommittedSegmentInfos(), engine.getProcessedLocalCheckpoint()); - assertMatchingSegmentsAndCheckpoints(nrtEngine); + final SegmentInfos primaryInfos = engine.getLastCommittedSegmentInfos(); + nrtEngine.updateSegments(primaryInfos, engine.getProcessedLocalCheckpoint()); + assertMatchingSegmentsAndCheckpoints(nrtEngine, primaryInfos); assertEquals( nrtEngine.getTranslog().getGeneration().translogFileGeneration, @@ -196,14 +198,14 @@ public void testTrimTranslogOps() throws Exception { } } - private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine) throws IOException { + private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine, SegmentInfos expectedSegmentInfos) + throws IOException { assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint()); assertEquals(engine.getProcessedLocalCheckpoint(), nrtEngine.getProcessedLocalCheckpoint()); assertEquals(engine.getLocalCheckpointTracker().getMaxSeqNo(), nrtEngine.getLocalCheckpointTracker().getMaxSeqNo()); - assertEquals(engine.getLatestSegmentInfos().files(true), nrtEngine.getLatestSegmentInfos().files(true)); - assertEquals(engine.getLatestSegmentInfos().getUserData(), nrtEngine.getLatestSegmentInfos().getUserData()); - assertEquals(engine.getLatestSegmentInfos().getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); - assertEquals(engine.segments(true), nrtEngine.segments(true)); + assertEquals(expectedSegmentInfos.files(true), nrtEngine.getLatestSegmentInfos().files(true)); + assertEquals(expectedSegmentInfos.getUserData(), nrtEngine.getLatestSegmentInfos().getUserData()); + assertEquals(expectedSegmentInfos.getVersion(), nrtEngine.getLatestSegmentInfos().getVersion()); } private void assertSearcherHits(Engine engine, 
int hits) { From c3f4730b1dceb91da0cf47f7a62fe9c8b245359b Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Mon, 20 Jun 2022 13:23:29 +0530 Subject: [PATCH 73/75] Re-adding code which was removed Signed-off-by: Bharathwaj G --- .../action/search/TransportSearchAction.java | 76 +++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java index ebb0f21d6fe16..1ca477942cdf6 100644 --- a/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/opensearch/action/search/TransportSearchAction.java @@ -65,6 +65,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AtomicArray; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.index.Index; import org.opensearch.index.query.Rewriteable; @@ -297,6 +298,81 @@ void executeOnShardTarget( ); } + public void executeRequest( + Task task, + SearchRequest searchRequest, + String actionName, + boolean includeSearchContext, + SinglePhaseSearchAction phaseSearchAction, + ActionListener listener + ) { + executeRequest(task, searchRequest, new SearchAsyncActionProvider() { + @Override + public AbstractSearchAsyncAction asyncSearchAction( + SearchTask task, + SearchRequest searchRequest, + Executor executor, + GroupShardsIterator shardsIts, + SearchTimeProvider timeProvider, + BiFunction connectionLookup, + ClusterState clusterState, + Map aliasFilter, + Map concreteIndexBoosts, + Map> indexRoutings, + ActionListener listener, + boolean preFilter, + ThreadPool threadPool, + SearchResponse.Clusters clusters + ) { + return new AbstractSearchAsyncAction( + actionName, + logger, + searchTransportService, + connectionLookup, + aliasFilter, + concreteIndexBoosts, + 
indexRoutings, + executor, + searchRequest, + listener, + shardsIts, + timeProvider, + clusterState, + task, + new ArraySearchPhaseResults<>(shardsIts.size()), + searchRequest.getMaxConcurrentShardRequests(), + clusters + ) { + @Override + protected void executePhaseOnShard( + SearchShardIterator shardIt, + SearchShardTarget shard, + SearchActionListener listener + ) { + final Transport.Connection connection = getConnection(shard.getClusterAlias(), shard.getNodeId()); + phaseSearchAction.executeOnShardTarget(task, shard, connection, listener); + } + + @Override + protected SearchPhase getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { + return new SearchPhase(getName()) { + @Override + public void run() { + final AtomicArray atomicArray = results.getAtomicArray(); + sendSearchResponse(InternalSearchResponse.empty(), atomicArray); + } + }; + } + + @Override + boolean buildPointInTimeFromSearchResults() { + return includeSearchContext; + } + }; + } + }, listener); + } + private void executeRequest( Task task, SearchRequest searchRequest, From 93f8db717f7e9729df0f7c54217db1378a24cc6b Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Fri, 24 Jun 2022 12:18:01 +0530 Subject: [PATCH 74/75] Addressing comments - adding tests Signed-off-by: Bharathwaj G --- .../rest-api-spec/api/delete_pit.json | 3 +- .../rest-api-spec/test/pit/10_basic.yml | 129 ++++++++++++++++++ .../action/search/DeletePitInfo.java | 1 + .../action/search/DeletePitResponse.java | 4 +- .../search/DeletePitResponseTests.java | 67 +++++++++ .../opensearch/search/SearchServiceTests.java | 12 +- 6 files changed, 211 insertions(+), 5 deletions(-) create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml create mode 100644 server/src/test/java/org/opensearch/search/DeletePitResponseTests.java diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json index 
d674eb80e4722..b54d9f76204f4 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/delete_pit.json @@ -16,7 +16,8 @@ ] }, "body":{ - "description":"A comma-separated list of pit IDs to clear" + "description":"A comma-separated list of pit IDs to clear", + "required":true } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml new file mode 100644 index 0000000000000..89d0345d03fb5 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/pit/10_basic.yml @@ -0,0 +1,129 @@ +"Create PIT, Search with PIT ID and Delete": + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {id: pit_id} + - match: { _shards.total: 1} + - match: { _shards.successful: 1} + - match: { _shards.failed: 0} + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + - do: + index: + index: test_pit + id: 44 + body: { foo: 3 } + + - do: + indices.refresh: {} + + - do: + search: + rest_total_hits_as_int: true + size: 1 + sort: foo + body: + query: + match_all: {} + pit: {"id": "$pit_id", "keep_alive":"10m"} + + - match: {hits.total: 2 } + - length: {hits.hits: 1 } + - match: {hits.hits.0._id: "42" } + + + - do: + search: + rest_total_hits_as_int: true + index: test_pit + size: 1 + sort: foo + body: + query: + match_all: {} + + - match: {hits.total: 3 } + - length: {hits.hits: 1 } + + - do: + delete_pit: + body: + "pit_id": [$pit_id] + + - match: {pits.0.pitId: $pit_id} + - match: 
{pits.0.succeeded: true } + +--- +"Delete all": + - do: + indices.create: + index: test_pit + - do: + index: + index: test_pit + id: 42 + body: { foo: 1 } + + - do: + index: + index: test_pit + id: 43 + body: { foo: 2 } + + - do: + indices.refresh: {} + + - do: + create_pit: + allow_partial_pit_creation: true + index: test_pit + keep_alive: 23h + + - set: {id: pit_id} + - match: { _shards.total: 1} + - match: { _shards.successful: 1} + - match: { _shards.failed: 0} + + - do: + delete_all_pits: {} + + - match: {pits.0.pitId: $pit_id} + - match: {pits.0.succeeded: true } + + - do: + delete_all_pits: { } + + - match: { pits: []} diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 25dd48b208a48..d825a35c956fe 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -79,4 +79,5 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.endObject(); return builder; } + } diff --git a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java index cc377d99e6d2e..fab16196e5c21 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitResponse.java @@ -58,9 +58,7 @@ public List getDeletePitResults() { @Override public RestStatus status() { - for (DeletePitInfo deletePitResult : deletePitResults) { - if (!deletePitResult.isSucceeded()) return NOT_FOUND; - } + if(deletePitResults.isEmpty()) return NOT_FOUND; return OK; } diff --git a/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java new file mode 100644 index 0000000000000..1ff956f4b1d9d --- /dev/null +++ 
b/server/src/test/java/org/opensearch/search/DeletePitResponseTests.java @@ -0,0 +1,67 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search; + +import org.opensearch.action.search.DeletePitInfo; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.xcontent.ToXContent; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentHelper; +import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.test.OpenSearchTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertToXContentEquivalent; + +public class DeletePitResponseTests extends OpenSearchTestCase { + + public void testDeletePitResponseToXContent() throws IOException { + DeletePitInfo deletePitInfo = new DeletePitInfo(true, "pitId"); + List deletePitInfoList = new ArrayList<>(); + deletePitInfoList.add(deletePitInfo); + DeletePitResponse deletePitResponse = new DeletePitResponse(deletePitInfoList); + + try (XContentBuilder builder = JsonXContent.contentBuilder()) { + deletePitResponse.toXContent(builder, ToXContent.EMPTY_PARAMS); + } + assertEquals(true, deletePitResponse.getDeletePitResults().get(0).getPitId().equals("pitId")); + assertEquals(true, deletePitResponse.getDeletePitResults().get(0).isSucceeded()); + } + + public void testDeletePitResponseToAndFromXContent() throws IOException { + XContentType xContentType = randomFrom(XContentType.values()); + DeletePitResponse originalResponse = createDeletePitResponseTestItem(); + ; + BytesReference originalBytes 
= toShuffledXContent(originalResponse, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); + DeletePitResponse parsedResponse; + try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) { + parsedResponse = DeletePitResponse.fromXContent(parser); + } + assertEquals( + originalResponse.getDeletePitResults().get(0).isSucceeded(), + parsedResponse.getDeletePitResults().get(0).isSucceeded() + ); + assertEquals(originalResponse.getDeletePitResults().get(0).getPitId(), parsedResponse.getDeletePitResults().get(0).getPitId()); + BytesReference parsedBytes = XContentHelper.toXContent(parsedResponse, xContentType, randomBoolean()); + assertToXContentEquivalent(originalBytes, parsedBytes, xContentType); + } + + private static DeletePitResponse createDeletePitResponseTestItem() { + DeletePitInfo deletePitInfo = new DeletePitInfo(randomBoolean(), "pitId"); + List deletePitInfoList = new ArrayList<>(); + deletePitInfoList.add(deletePitInfo); + return new DeletePitResponse(deletePitInfoList); + } +} diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 111bd24bf480d..b7a5b45259308 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -40,7 +40,17 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.OriginalIndices; import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.search.*; +import org.opensearch.action.search.ClearScrollRequest; +import org.opensearch.action.search.DeletePitResponse; +import org.opensearch.action.search.PitSearchContextIdForNode; +import org.opensearch.action.search.SearchContextIdForNode; +import org.opensearch.action.search.SearchPhaseExecutionException; +import org.opensearch.action.search.SearchRequest; +import org.opensearch.action.search.SearchResponse; +import 
org.opensearch.action.search.SearchShardTask; +import org.opensearch.action.search.SearchType; +import org.opensearch.action.search.UpdatePitContextRequest; +import org.opensearch.action.search.UpdatePitContextResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; From 20e5216349a9bb265df2d3e4f4958aafe37be714 Mon Sep 17 00:00:00 2001 From: Bharathwaj G Date: Mon, 27 Jun 2022 11:43:58 +0530 Subject: [PATCH 75/75] resolving conflicts Signed-off-by: Bharathwaj G --- .../org/opensearch/client/ClusterClient.java | 2 +- .../org/opensearch/client/IndicesClient.java | 2 +- .../org/opensearch/client/IngestClient.java | 2 +- .../client/RestHighLevelClient.java | 2 +- .../org/opensearch/client/SnapshotClient.java | 2 +- .../client/indices/CloseIndexResponse.java | 2 +- .../client/indices/CreateIndexResponse.java | 2 +- .../indices/rollover/RolloverResponse.java | 2 +- .../opensearch/client/ClusterClientIT.java | 2 +- .../client/ClusterRequestConvertersTests.java | 2 +- .../opensearch/client/IndicesClientIT.java | 2 +- .../client/IndicesRequestConvertersTests.java | 2 +- .../org/opensearch/client/IngestClientIT.java | 2 +- .../client/IngestRequestConvertersTests.java | 2 +- .../org/opensearch/client/SnapshotIT.java | 2 +- .../SnapshotRequestConvertersTests.java | 2 +- .../core/AcknowledgedResponseTests.java | 8 +- .../ClusterClientDocumentationIT.java | 2 +- .../IndicesClientDocumentationIT.java | 2 +- .../IngestClientDocumentationIT.java | 2 +- .../SnapshotClientDocumentationIT.java | 2 +- .../StoredScriptsDocumentationIT.java | 2 +- .../indices/CloseIndexResponseTests.java | 4 +- .../url/URLSnapshotRestoreIT.java | 2 +- .../index/mapper/size/SizeMappingIT.java | 2 +- .../AzureStorageCleanupThirdPartyTests.java | 2 +- .../GoogleCloudStorageThirdPartyTests.java | 2 +- .../hdfs/HdfsRepositoryTests.java | 2 +- .../repositories/hdfs/HdfsTests.java | 2 +- 
.../aws-java-sdk-sts-1.11.749.jar.sha1 | 1 - .../s3/S3RepositoryThirdPartyTests.java | 2 +- .../admin/indices/create/CreateIndexIT.java | 2 +- .../datastream/DataStreamTestCase.java | 2 +- .../action/bulk/BulkIntegrationIT.java | 2 +- .../opensearch/aliases/IndexAliasesIT.java | 2 +- .../org/opensearch/blocks/SimpleBlocksIT.java | 2 +- .../coordination/RareClusterStateIT.java | 2 +- .../cluster/shards/ClusterShardLimitIT.java | 2 +- .../index/seqno/RetentionLeaseIT.java | 2 +- .../indices/IndicesOptionsIntegrationIT.java | 2 +- .../mapping/UpdateMappingIntegrationIT.java | 2 +- .../state/CloseWhileRelocatingShardsIT.java | 2 +- .../indices/state/OpenCloseIndexIT.java | 2 +- .../org/opensearch/ingest/IngestClientIT.java | 2 +- ...gestProcessorNotInstalledOnAllNodesIT.java | 2 +- .../suggest/CompletionSuggestSearchIT.java | 2 +- .../opensearch/snapshots/CloneSnapshotIT.java | 2 +- .../snapshots/ConcurrentSnapshotsIT.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 2 +- .../opensearch/snapshots/RepositoriesIT.java | 2 +- .../cleanup/CleanupRepositoryRequest.java | 2 +- .../delete/DeleteRepositoryAction.java | 2 +- .../delete/DeleteRepositoryRequest.java | 2 +- .../DeleteRepositoryRequestBuilder.java | 4 +- .../repositories/put/PutRepositoryAction.java | 2 +- .../put/PutRepositoryRequest.java | 2 +- .../put/PutRepositoryRequestBuilder.java | 4 +- .../verify/VerifyRepositoryRequest.java | 2 +- .../reroute/ClusterRerouteRequest.java | 2 +- .../reroute/ClusterRerouteRequestBuilder.java | 2 +- .../reroute/ClusterRerouteResponse.java | 2 +- .../ClusterUpdateSettingsRequest.java | 2 +- .../ClusterUpdateSettingsRequestBuilder.java | 2 +- .../ClusterUpdateSettingsResponse.java | 2 +- .../snapshots/clone/CloneSnapshotAction.java | 2 +- .../delete/DeleteSnapshotAction.java | 2 +- .../DeleteStoredScriptAction.java | 2 +- .../DeleteStoredScriptRequest.java | 2 +- .../DeleteStoredScriptRequestBuilder.java | 4 +- .../storedscripts/PutStoredScriptAction.java | 2 +- 
.../storedscripts/PutStoredScriptRequest.java | 2 +- .../PutStoredScriptRequestBuilder.java | 4 +- .../indices/alias/IndicesAliasesAction.java | 2 +- .../indices/alias/IndicesAliasesRequest.java | 2 +- .../alias/IndicesAliasesRequestBuilder.java | 4 +- .../indices/close/CloseIndexRequest.java | 2 +- .../close/CloseIndexRequestBuilder.java | 2 +- .../indices/close/CloseIndexResponse.java | 2 +- .../indices/create/CreateIndexRequest.java | 2 +- .../create/CreateIndexRequestBuilder.java | 2 +- .../indices/create/CreateIndexResponse.java | 2 +- .../delete/DeleteDanglingIndexAction.java | 2 +- .../delete/DeleteDanglingIndexRequest.java | 2 +- .../ImportDanglingIndexAction.java | 2 +- .../ImportDanglingIndexRequest.java | 2 +- .../TransportImportDanglingIndexAction.java | 2 +- .../indices/delete/DeleteIndexAction.java | 2 +- .../indices/delete/DeleteIndexRequest.java | 2 +- .../delete/DeleteIndexRequestBuilder.java | 4 +- .../mapping/put/AutoPutMappingAction.java | 2 +- .../indices/mapping/put/PutMappingAction.java | 2 +- .../mapping/put/PutMappingRequest.java | 4 +- .../mapping/put/PutMappingRequestBuilder.java | 4 +- .../admin/indices/open/OpenIndexRequest.java | 2 +- .../indices/open/OpenIndexRequestBuilder.java | 2 +- .../admin/indices/open/OpenIndexResponse.java | 2 +- .../readonly/AddIndexBlockRequest.java | 2 +- .../readonly/AddIndexBlockRequestBuilder.java | 2 +- .../readonly/AddIndexBlockResponse.java | 2 +- .../indices/rollover/RolloverRequest.java | 2 +- .../indices/rollover/RolloverResponse.java | 2 +- .../settings/put/UpdateSettingsAction.java | 2 +- .../settings/put/UpdateSettingsRequest.java | 2 +- .../put/UpdateSettingsRequestBuilder.java | 4 +- .../admin/indices/shrink/ResizeRequest.java | 2 +- .../indices/shrink/ResizeRequestBuilder.java | 2 +- .../delete/DeleteIndexTemplateAction.java | 2 +- .../template/put/PutIndexTemplateAction.java | 2 +- .../upgrade/post/UpgradeSettingsAction.java | 2 +- .../upgrade/post/UpgradeSettingsRequest.java | 2 +- 
.../action/ingest/DeletePipelineAction.java | 2 +- .../action/ingest/DeletePipelineRequest.java | 2 +- .../ingest/DeletePipelineRequestBuilder.java | 2 +- .../action/ingest/PutPipelineAction.java | 2 +- .../action/ingest/PutPipelineRequest.java | 2 +- .../ingest/PutPipelineRequestBuilder.java | 2 +- .../clustermanager/AcknowledgedRequest.java | 105 ------------ .../AcknowledgedRequestBuilder.java | 73 --------- .../clustermanager/AcknowledgedResponse.java | 149 ------------------ .../ShardsAcknowledgedResponse.java | 117 -------------- .../support/master/AcknowledgedRequest.java | 62 +++++++- .../master/AcknowledgedRequestBuilder.java | 34 +++- .../support/master/AcknowledgedResponse.java | 99 +++++++++++- .../master/ShardsAcknowledgedResponse.java | 71 ++++++++- .../opensearch/client/ClusterAdminClient.java | 2 +- .../opensearch/client/IndicesAdminClient.java | 2 +- .../client/support/AbstractClient.java | 2 +- .../MetadataCreateDataStreamService.java | 2 +- .../metadata/TemplateUpgradeService.java | 2 +- .../org/opensearch/ingest/IngestService.java | 2 +- .../RestDeleteDanglingIndexAction.java | 2 +- .../RestImportDanglingIndexAction.java | 2 +- .../org/opensearch/script/ScriptService.java | 2 +- .../MetadataIndexTemplateServiceTests.java | 2 +- .../metadata/TemplateUpgradeServiceTests.java | 2 +- .../blobstore/BlobStoreRepositoryTests.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 2 +- .../AbstractSnapshotIntegTestCase.java | 2 +- .../java/org/opensearch/test/TestCluster.java | 2 +- .../test/hamcrest/OpenSearchAssertions.java | 4 +- 140 files changed, 393 insertions(+), 608 deletions(-) delete mode 100644 plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 delete mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java delete mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java delete mode 100644 
server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java delete mode 100644 server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java index 1c943ec24411a..10cfec9497862 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterClient.java @@ -39,7 +39,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.cluster.RemoteInfoResponse; import org.opensearch.client.indices.ComponentTemplatesExistRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java index 2a1d471e73eb5..9b4586ec6bf89 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesClient.java @@ -52,7 +52,7 @@ import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import 
org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.AnalyzeResponse; import org.opensearch.client.indices.CloseIndexRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java index 512d0eb09ed84..cd304019e771c 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestClient.java @@ -39,7 +39,7 @@ import org.opensearch.action.ingest.PutPipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import java.io.IOException; import java.util.Collections; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 765cdcc75c03e..0c4d41b595b5c 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -68,7 +68,7 @@ import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.search.SearchScrollRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.update.UpdateRequest; import org.opensearch.action.update.UpdateResponse; import org.opensearch.client.core.CountRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java index 
78c140dc8f4d4..85a793dec24ce 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotClient.java @@ -51,7 +51,7 @@ import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java index 3740f4f3fc5ab..817d1c08532c6 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CloseIndexResponse.java @@ -33,7 +33,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ConstructingObjectParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java index b7a94eb5ea8b8..7e1ea2894961d 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/CreateIndexResponse.java @@ -32,7 +32,7 @@ package 
org.opensearch.client.indices; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ConstructingObjectParser; import org.opensearch.common.xcontent.ObjectParser; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java index 415f3dbec249f..0303dba2535e7 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/rollover/RolloverResponse.java @@ -32,7 +32,7 @@ package org.opensearch.client.indices.rollover; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ConstructingObjectParser; import org.opensearch.common.xcontent.XContentParser; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 40059af46774f..71b869fb59e7b 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsResponse; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import 
org.opensearch.client.cluster.RemoteConnectionInfo; import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.client.cluster.RemoteInfoResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index 557783727f748..27adc18fd37b8 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.client.cluster.RemoteInfoRequest; import org.opensearch.cluster.health.ClusterHealthStatus; import org.opensearch.common.Priority; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index aa7af5a9d1250..f9c8851f8839e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -65,7 +65,7 @@ import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.AnalyzeResponse; import 
org.opensearch.client.indices.CloseIndexRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index a277e65d2ac33..bf6d6c922fdd7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -53,7 +53,7 @@ import org.opensearch.action.admin.indices.shrink.ResizeType; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.client.indices.AnalyzeRequest; import org.opensearch.client.indices.CloseIndexRequest; import org.opensearch.client.indices.CreateDataStreamRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java index e85ddc21b8fda..78a3202f35892 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestClientIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.ingest.SimulateDocumentVerboseResult; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; diff --git 
a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index c65fa95c5e92a..200069ade1ea2 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -40,7 +40,7 @@ import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java index 94af656676f25..1d93dae5b2c5b 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotIT.java @@ -51,7 +51,7 @@ import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java 
b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java index b3d1a5c1ce2fc..10baaa2e53dd4 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java @@ -45,7 +45,7 @@ import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java index 30ca5865532a3..a517e37dc8764 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/core/AcknowledgedResponseTests.java @@ -40,12 +40,12 @@ import static org.hamcrest.Matchers.is; public class AcknowledgedResponseTests extends AbstractResponseTestCase< - org.opensearch.action.support.clustermanager.AcknowledgedResponse, + org.opensearch.action.support.master.AcknowledgedResponse, AcknowledgedResponse> { @Override - protected org.opensearch.action.support.clustermanager.AcknowledgedResponse createServerTestInstance(XContentType xContentType) { - return new org.opensearch.action.support.clustermanager.AcknowledgedResponse(randomBoolean()); + protected org.opensearch.action.support.master.AcknowledgedResponse createServerTestInstance(XContentType xContentType) { + return new 
org.opensearch.action.support.master.AcknowledgedResponse(randomBoolean()); } @Override @@ -55,7 +55,7 @@ protected AcknowledgedResponse doParseToClientInstance(XContentParser parser) th @Override protected void assertInstances( - org.opensearch.action.support.clustermanager.AcknowledgedResponse serverTestInstance, + org.opensearch.action.support.master.AcknowledgedResponse serverTestInstance, AcknowledgedResponse clientInstance ) { assertThat(clientInstance.isAcknowledged(), is(serverTestInstance.isAcknowledged())); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java index ae85c6750d9f2..baebd12e22a99 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/ClusterClientDocumentationIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java index 197612ebae63b..85c5d622f6f60 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java +++ 
b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IndicesClientDocumentationIT.java @@ -63,7 +63,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.GetAliasesResponse; import org.opensearch.client.RequestOptions; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java index 9ec8860fc9d8d..5654791347832 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/IngestClientDocumentationIT.java @@ -44,7 +44,7 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; import org.opensearch.action.ingest.SimulateProcessorResult; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java index bc90031f60471..c70f5dbade5d3 100644 --- 
a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/SnapshotClientDocumentationIT.java @@ -52,7 +52,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java index dcae326ea2783..11978a5377e1e 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/StoredScriptsDocumentationIT.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptResponse; import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestHighLevelClient; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java 
b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java index a5c8086118fcd..3fa35f6fffd22 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/indices/CloseIndexResponseTests.java @@ -32,8 +32,8 @@ package org.opensearch.client.indices; import org.opensearch.OpenSearchStatusException; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.client.AbstractResponseTestCase; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.LoggingDeprecationHandler; diff --git a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java index b819722d59f13..aa274549f3a9b 100644 --- a/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java +++ b/modules/repository-url/src/internalClusterTest/java/org/opensearch/repositories/url/URLSnapshotRestoreIT.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.ByteSizeUnit; diff --git 
a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java index 24ec8f0eaf4c5..3a430331167f6 100644 --- a/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java +++ b/plugins/mapper-size/src/internalClusterTest/java/org/opensearch/index/mapper/size/SizeMappingIT.java @@ -33,7 +33,7 @@ import org.opensearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.opensearch.action.get.GetResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; import org.opensearch.plugin.mapper.MapperSizePlugin; diff --git a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java index fe4223a5aca87..6d71a65a35a4c 100644 --- a/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java +++ b/plugins/repository-azure/src/internalClusterTest/java/org/opensearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -42,7 +42,7 @@ import org.junit.AfterClass; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.MockSecureSettings; diff --git 
a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java index f4979c6caaddb..f1b2f78a37380 100644 --- a/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java +++ b/plugins/repository-gcs/src/internalClusterTest/java/org/opensearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -32,7 +32,7 @@ package org.opensearch.repositories.gcs; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java index d7209e47bff11..4e12de7cce212 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsRepositoryTests.java @@ -34,7 +34,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakFilters; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.settings.MockSecureSettings; import org.opensearch.common.settings.SecureSettings; import org.opensearch.common.settings.Settings; diff --git a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java 
b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java index 61a990b4d5525..d46d0b2092d2a 100644 --- a/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java +++ b/plugins/repository-hdfs/src/test/java/org/opensearch/repositories/hdfs/HdfsTests.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.common.settings.Settings; diff --git a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 b/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 deleted file mode 100644 index 29c9a93542058..0000000000000 --- a/plugins/repository-s3/licenses/aws-java-sdk-sts-1.11.749.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -724bd22c0ff41c496469e18f9bea12bdfb2f7540 \ No newline at end of file diff --git a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java index 952d8214cb91f..bc2839d066092 100644 --- a/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java +++ b/plugins/repository-s3/src/internalClusterTest/java/org/opensearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -31,7 +31,7 @@ package org.opensearch.repositories.s3; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import 
org.opensearch.common.settings.MockSecureSettings; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java index e772583697cb9..3ef2a63c7d0ac 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/CreateIndexIT.java @@ -40,7 +40,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java index 8f2bdbdcc5973..7b0d917504a2f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/datastream/DataStreamTestCase.java @@ -12,7 +12,7 @@ import org.opensearch.action.admin.indices.rollover.RolloverResponse; import org.opensearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction; import org.opensearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.ComposableIndexTemplate; import 
org.opensearch.cluster.metadata.DataStream; import org.opensearch.cluster.metadata.Template; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java index 93f75e3918391..e2a1363f163da 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.ingest.PutPipelineRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.replication.ReplicationRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.bytes.BytesReference; diff --git a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java index 46a5dc421fbb6..574046509de75 100644 --- a/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/aliases/IndexAliasesIT.java @@ -42,7 +42,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.WriteRequest.RefreshPolicy; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.AliasMetadata; import org.opensearch.cluster.metadata.IndexAbstraction; diff --git a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java 
b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java index f1f5260f8f2f0..8ede3e25b2e1a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/blocks/SimpleBlocksIT.java @@ -42,7 +42,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java index f5273803fa716..61b186c951ce8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/RareClusterStateIT.java @@ -40,7 +40,7 @@ import org.opensearch.action.ActionRequestBuilder; import org.opensearch.action.ActionResponse; import org.opensearch.action.index.IndexResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlocks; diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java index 1259a011147b8..a92849a077376 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/shards/ClusterShardLimitIT.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; diff --git a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java index df62797e1194d..ed6074b39c8a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/seqno/RetentionLeaseIT.java @@ -34,7 +34,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.support.replication.ReplicationResponse; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.ShardRouting; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java index 2504e676acf41..1f3d865811939 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesOptionsIntegrationIT.java 
@@ -51,7 +51,7 @@ import org.opensearch.action.search.SearchRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java index 39e0d1d78e4ea..da3dcdc6b750e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/mapping/UpdateMappingIntegrationIT.java @@ -36,7 +36,7 @@ import org.opensearch.action.admin.indices.refresh.RefreshResponse; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.action.index.MappingUpdatedAction; import org.opensearch.cluster.metadata.MappingMetadata; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java index 11587d1232ec1..3d70622e122c0 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/CloseWhileRelocatingShardsIT.java @@ -33,7 +33,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import 
org.opensearch.action.admin.cluster.reroute.ClusterRerouteRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.IndexRoutingTable; diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java index df5372d65fda3..ca1e1399f8fdc 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/state/OpenCloseIndexIT.java @@ -41,7 +41,7 @@ import org.opensearch.action.search.SearchResponse; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java index fbfb4c3c3479d..404b13aae5b9c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestClientIT.java @@ -48,7 +48,7 @@ import org.opensearch.action.ingest.SimulateDocumentBaseResult; import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import 
org.opensearch.action.update.UpdateRequest; import org.opensearch.client.Requests; import org.opensearch.common.bytes.BytesReference; diff --git a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java index 585e4755a54ad..a615cceffb5df 100644 --- a/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/ingest/IngestProcessorNotInstalledOnAllNodesIT.java @@ -33,7 +33,7 @@ package org.opensearch.ingest; import org.opensearch.OpenSearchParseException; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; import org.opensearch.node.NodeService; diff --git a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java index 0fb856efdda1e..690564fe1cac8 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/suggest/CompletionSuggestSearchIT.java @@ -42,7 +42,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchPhaseExecutionException; import org.opensearch.action.search.SearchResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.FieldMemoryStats; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.Fuzziness; diff --git 
a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 147e0e98e5b33..d5f36608941d5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexStatus; import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.common.unit.TimeValue; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java index 08059b49213ee..04ec3f027f908 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/ConcurrentSnapshotsIT.java @@ -43,7 +43,7 @@ import org.opensearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.SnapshotDeletionsInProgress; import org.opensearch.cluster.SnapshotsInProgress; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java 
b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 29b58eab9b865..2eca8555e1388 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -48,7 +48,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.ClusterState; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java index 27aeda1262db6..e72110f4c4efd 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RepositoriesIT.java @@ -35,7 +35,7 @@ import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.metadata.RepositoriesMetadata; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java index 
852ef9e2b173b..0f265681cd241 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/CleanupRepositoryRequest.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.repositories.cleanup; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java index 5f17afe2abf76..2031e4f7a716f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Unregister repository action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java index 2e28a3fd4f41d..a3f4bb768c649 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequest.java @@ -33,7 +33,7 @@ package 
org.opensearch.action.admin.cluster.repositories.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java index f2fcb0bd8857c..ffef8d5b41979 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.repositories.delete; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java index 9e56d1dfb3560..c2f90d869d873 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import 
org.opensearch.action.support.master.AcknowledgedResponse; /** * Register repository action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 8ab8d40936c67..1bdc8e024447d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.put; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java index bcf6aeceebedd..6e1b2795b6375 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.repositories.put; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import 
org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java index 3cd28e9a05206..001030f6a67f5 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.repositories.verify; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java index eaa223635a432..806fa80691202 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.reroute; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; import org.opensearch.cluster.routing.allocation.command.AllocationCommands; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java 
b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java index 30eb0a4f36b3a..01d52cb43320d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.reroute; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.routing.allocation.command.AllocationCommand; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 9f0609a77b1c6..dcddc98bdc43a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.reroute; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.allocation.RoutingExplanations; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java index 50ca3ee204797..f3f7db03ac67e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequest.java @@ -33,7 
+33,7 @@ package org.opensearch.action.admin.cluster.settings; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 2978b27d726db..4d08c94f78b6a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.settings; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index f7a66572fb174..a4edd1d99148a 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.cluster.settings; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import 
org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java index 189b6aa7b7544..c6fe102544a7e 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/clone/CloneSnapshotAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.clone; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for cloning a snapshot diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java index 60d9cadc0aede..0b98a4b31fd53 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/delete/DeleteSnapshotAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Delete snapshot action diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java index 483004a3365c5..3645ef21d2e12 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for deleting stored scripts diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java index a23f2fea698fd..93d2c3ba3c452 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java index c77ebfa85422f..34e0d429f2098 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/DeleteStoredScriptRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.storedscripts; -import 
org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java index cc571c2f26136..2845d895a69e8 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for putting stored script diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 8b9eb83bb531c..2bddf2823f962 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.cluster.storedscripts; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.StreamInput; import 
org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java index b829cc3466f70..ef3c14df29627 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.cluster.storedscripts; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java index 9ce10c2853ff6..4d735e984c34e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.alias; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for listing index aliases diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 
0119b892dadf8..62f51aa3f3bff 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.AliasesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.AliasAction; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java index ebc1fc9e9e2ce..13c57cc781925 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequestBuilder.java @@ -32,8 +32,8 @@ package org.opensearch.action.admin.indices.alias; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.index.query.QueryBuilder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index 529767a00af82..b16cabfda4d67 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -37,7 +37,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java index 15307c821178c..b3b53a0043c70 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 4206b4e9e0926..1fc9017359a8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -34,7 +34,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; 
+import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 28db8dad69084..95837d82be7ac 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -42,7 +42,7 @@ import org.opensearch.action.admin.indices.mapping.put.PutMappingRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java index 2de77681aa127..4c5780b87b3f2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.admin.indices.alias.Alias; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.settings.Settings; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java index fca1f7cce71d9..871576d8e336a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java index 2ccc422f2edd6..6559ef4cd89bd 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * This action causes a dangling index to be considered as deleted by the cluster. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java index 3ded069dd6d89..4fad5498de375 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/delete/DeleteDanglingIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.delete; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java index 308720aa6139f..5f7a096b1d749 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.import_index; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Represents a request to import a particular dangling index. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java index 0b442e33f1e21..73fbad248b8b1 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/ImportDanglingIndexRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.dangling.import_index; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java index 1b6102cbbc2fd..2010515249371 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/import_index/TransportImportDanglingIndexAction.java @@ -50,7 +50,7 @@ import org.opensearch.action.admin.indices.dangling.find.NodeFindDanglingIndexResponse; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.HandledTransportAction; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.inject.Inject; diff --git 
a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java index a3aa9e751a8ec..696c1244c7504 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for deleting an index diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java index b8100502a2e0a..7475121a910c4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java index a1cee63875a77..33f6342e94139 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java +++ 
b/server/src/main/java/org/opensearch/action/admin/indices/delete/DeleteIndexRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.delete; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java index 6f0cad2fe178d..f2430eb54db9b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/AutoPutMappingAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action to automatically put field mappings. 
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java index 9088d1241ad2a..8bca1b59ee2e2 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action to put field mappings. diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index a02dd620b8661..85fd74f0762a5 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -39,8 +39,8 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java index 
f0e0876dbf877..78115e1fab4ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.mapping.put; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java index 21c5fcd6ed1c2..c6c1c2dc8f0cb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequest.java @@ -36,7 +36,7 @@ import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.util.CollectionUtils; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java index 2760fb43a727f..bf09c3f173491 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java index 38ec7226d3c68..f7bd4cf31aa17 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/open/OpenIndexResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.open; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.ConstructingObjectParser; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java index 7715480fcaca5..7d208b5e0ac77 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import 
org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java index 66ff659c6a90a..8322ba19f433e 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.readonly; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 22b12d195b9c3..6a07a645f9315 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -33,7 +33,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import 
org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java index 3216fc9ce0b71..db5dd0af6ab2a 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverRequest.java @@ -36,7 +36,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java index ed08595f55cea..330d258f9461f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/rollover/RolloverResponse.java @@ -32,7 +32,7 @@ package org.opensearch.action.admin.indices.rollover; -import org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse; +import org.opensearch.action.support.master.ShardsAcknowledgedResponse; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java index aa26acb7e3fc5..2333a2aad6bc6 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.settings.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Action for updating index settings diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index 98f4695337dce..cab5f6bc58863 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java index 459b16c2a9b7e..7501f0c7798de 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/settings/put/UpdateSettingsRequestBuilder.java @@ -33,8 +33,8 @@ package org.opensearch.action.admin.indices.settings.put; import 
org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 969263df5621a..50784e60a3f19 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -38,7 +38,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 0dcaf1c524df5..418e83a5431ec 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -34,7 +34,7 @@ import org.opensearch.action.ActionType; import org.opensearch.action.admin.indices.create.CreateIndexRequest; import org.opensearch.action.support.ActiveShardCount; -import 
org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java index 5773fcf93c49e..789d03f8e8d8c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/delete/DeleteIndexTemplateAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.template.delete; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for deleting an index template diff --git a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java index eb21b81350fda..06a9f6fbba409 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/template/put/PutIndexTemplateAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.template.put; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * An action for putting an index template into the cluster state diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java 
b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java index 4c42b4abbf678..05944e781d109 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.admin.indices.upgrade.post; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action for upgrading index settings diff --git a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java index 0fe8e83e30258..d6b784e44befb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/upgrade/post/UpgradeSettingsRequest.java @@ -34,7 +34,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java index 82bb78a9b89d6..6017be9747912 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineAction.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionType; -import 
org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action to delete a pipeline diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java index 8e770d49d6771..0bd102849eee8 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java index d26f0ba509ec8..6a2eb494e8d3f 100644 --- a/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/DeletePipelineRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; /** diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java index be47bff8f4e92..1fcbd783d246b 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineAction.java @@ -33,7 +33,7 
@@ package org.opensearch.action.ingest; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; /** * Transport action to put a new pipeline diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java index fcba2e720e8c6..d5fbaa46810f7 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestValidationException; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; +import org.opensearch.action.support.master.AcknowledgedRequest; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java index 57c29147f1176..fec2cdef089e4 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequestBuilder.java @@ -33,7 +33,7 @@ package org.opensearch.action.ingest; import org.opensearch.action.ActionRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java 
b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java deleted file mode 100644 index b67356d2567b5..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequest.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.support.clustermanager; - -import org.opensearch.cluster.ack.AckedRequest; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.TimeValue; - -import java.io.IOException; - -import static org.opensearch.common.unit.TimeValue.timeValueSeconds; - -/** - * Abstract class that allows to mark action requests that support acknowledgements. - * Facilitates consistency across different api. 
- * - * @opensearch.internal - */ -public abstract class AcknowledgedRequest> extends ClusterManagerNodeRequest - implements - AckedRequest { - - public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); - - protected TimeValue timeout = DEFAULT_ACK_TIMEOUT; - - protected AcknowledgedRequest() {} - - protected AcknowledgedRequest(StreamInput in) throws IOException { - super(in); - this.timeout = in.readTimeValue(); - } - - /** - * Allows to set the timeout - * @param timeout timeout as a string (e.g. 1s) - * @return the request itself - */ - @SuppressWarnings("unchecked") - public final Request timeout(String timeout) { - this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout"); - return (Request) this; - } - - /** - * Allows to set the timeout - * @param timeout timeout as a {@link TimeValue} - * @return the request itself - */ - @SuppressWarnings("unchecked") - public final Request timeout(TimeValue timeout) { - this.timeout = timeout; - return (Request) this; - } - - /** - * Returns the current timeout - * @return the current timeout as a {@link TimeValue} - */ - public final TimeValue timeout() { - return timeout; - } - - @Override - public TimeValue ackTimeout() { - return timeout; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeTimeValue(timeout); - } - -} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java deleted file mode 100644 index fa957f159ec9d..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedRequestBuilder.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * 
compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.support.clustermanager; - -import org.opensearch.action.ActionType; -import org.opensearch.client.OpenSearchClient; -import org.opensearch.common.unit.TimeValue; - -/** - * Base request builder for cluster-manager node operations that support acknowledgements - * - * @opensearch.internal - */ -public abstract class AcknowledgedRequestBuilder< - Request extends AcknowledgedRequest, - Response extends AcknowledgedResponse, - RequestBuilder extends AcknowledgedRequestBuilder> extends ClusterManagerNodeOperationRequestBuilder< - Request, - Response, - RequestBuilder> { - - protected AcknowledgedRequestBuilder(OpenSearchClient client, ActionType action, Request request) { - super(client, action, request); - } - - /** - * Sets the maximum wait for acknowledgement from other nodes - */ - @SuppressWarnings("unchecked") - public RequestBuilder setTimeout(TimeValue timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } - - /** - * Timeout to wait for the operation to be acknowledged by current cluster 
nodes. Defaults - * to {@code 10s}. - */ - @SuppressWarnings("unchecked") - public RequestBuilder setTimeout(String timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } -} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java deleted file mode 100644 index 1db116ffaf74a..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/AcknowledgedResponse.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.action.support.clustermanager; - -import org.opensearch.action.ActionResponse; -import org.opensearch.common.ParseField; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.ObjectParser; -import org.opensearch.common.xcontent.ToXContentObject; -import org.opensearch.common.xcontent.XContentBuilder; -import org.opensearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.util.Objects; - -import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * A response that indicates that a request has been acknowledged - * - * @opensearch.internal - */ -public class AcknowledgedResponse extends ActionResponse implements ToXContentObject { - - private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); - - protected static void declareAcknowledgedField(ConstructingObjectParser objectParser) { - objectParser.declareField( - constructorArg(), - (parser, context) -> parser.booleanValue(), - ACKNOWLEDGED, - ObjectParser.ValueType.BOOLEAN - ); - } - - protected boolean acknowledged; - - public AcknowledgedResponse(StreamInput in) throws IOException { - super(in); - acknowledged = in.readBoolean(); - } - - public AcknowledgedResponse(StreamInput in, boolean readAcknowledged) throws IOException { - super(in); - if (readAcknowledged) { - acknowledged = in.readBoolean(); - } - } - - public AcknowledgedResponse(boolean acknowledged) { - this.acknowledged = acknowledged; - } - - /** - * Returns whether the response is acknowledged or not - * @return true if the response is acknowledged, false otherwise - */ - public final boolean isAcknowledged() { - return acknowledged; - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeBoolean(acknowledged); - } - - @Override - public final 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged()); - addCustomFields(builder, params); - builder.endObject(); - return builder; - } - - protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { - - } - - /** - * A generic parser that simply parses the acknowledged flag - */ - private static final ConstructingObjectParser ACKNOWLEDGED_FLAG_PARSER = new ConstructingObjectParser<>( - "acknowledged_flag", - true, - args -> (Boolean) args[0] - ); - - static { - ACKNOWLEDGED_FLAG_PARSER.declareField( - constructorArg(), - (parser, context) -> parser.booleanValue(), - ACKNOWLEDGED, - ObjectParser.ValueType.BOOLEAN - ); - } - - public static AcknowledgedResponse fromXContent(XContentParser parser) throws IOException { - return new AcknowledgedResponse(ACKNOWLEDGED_FLAG_PARSER.apply(parser, null)); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - AcknowledgedResponse that = (AcknowledgedResponse) o; - return isAcknowledged() == that.isAcknowledged(); - } - - @Override - public int hashCode() { - return Objects.hash(isAcknowledged()); - } -} diff --git a/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java deleted file mode 100644 index dc24adcfa0ca1..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/clustermanager/ShardsAcknowledgedResponse.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. 
- */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.support.clustermanager; - -import org.opensearch.common.ParseField; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.xcontent.ConstructingObjectParser; -import org.opensearch.common.xcontent.ObjectParser; -import org.opensearch.common.xcontent.XContentBuilder; - -import java.io.IOException; -import java.util.Objects; - -import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; - -/** - * Transport response for shard acknowledgements - * - * @opensearch.internal - */ -public abstract class ShardsAcknowledgedResponse extends AcknowledgedResponse { - - protected static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged"); - - protected static void declareAcknowledgedAndShardsAcknowledgedFields( - ConstructingObjectParser objectParser - ) { - declareAcknowledgedField(objectParser); - objectParser.declareField( - constructorArg(), - (parser, context) -> parser.booleanValue(), - SHARDS_ACKNOWLEDGED, - 
ObjectParser.ValueType.BOOLEAN - ); - } - - private final boolean shardsAcknowledged; - - protected ShardsAcknowledgedResponse(StreamInput in, boolean readShardsAcknowledged) throws IOException { - super(in); - if (readShardsAcknowledged) { - this.shardsAcknowledged = in.readBoolean(); - } else { - this.shardsAcknowledged = false; - } - } - - protected ShardsAcknowledgedResponse(boolean acknowledged, boolean shardsAcknowledged) { - super(acknowledged); - assert acknowledged || shardsAcknowledged == false; // if it's not acknowledged, then shards acked should be false too - this.shardsAcknowledged = shardsAcknowledged; - } - - /** - * Returns true if the requisite number of shards were started before - * returning from the index creation operation. If {@link #isAcknowledged()} - * is false, then this also returns false. - */ - public boolean isShardsAcknowledged() { - return shardsAcknowledged; - } - - protected void writeShardsAcknowledged(StreamOutput out) throws IOException { - out.writeBoolean(shardsAcknowledged); - } - - @Override - protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { - builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcknowledged()); - } - - @Override - public boolean equals(Object o) { - if (super.equals(o)) { - ShardsAcknowledgedResponse that = (ShardsAcknowledgedResponse) o; - return isShardsAcknowledged() == that.isShardsAcknowledged(); - } - return false; - } - - @Override - public int hashCode() { - return Objects.hash(super.hashCode(), isShardsAcknowledged()); - } - -} diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java index 857f4dc26a111..e6d657244dfce 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequest.java @@ -31,25 +31,75 @@ 
package org.opensearch.action.support.master; -import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.cluster.ack.AckedRequest; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.unit.TimeValue; import java.io.IOException; +import static org.opensearch.common.unit.TimeValue.timeValueSeconds; + /** * Abstract class that allows to mark action requests that support acknowledgements. * Facilitates consistency across different api. * * @opensearch.internal */ -public abstract class AcknowledgedRequest> extends - org.opensearch.action.support.clustermanager.AcknowledgedRequest { +public abstract class AcknowledgedRequest> extends MasterNodeRequest + implements + AckedRequest { - protected AcknowledgedRequest() { - super(); - } + public static final TimeValue DEFAULT_ACK_TIMEOUT = timeValueSeconds(30); + + protected TimeValue timeout = DEFAULT_ACK_TIMEOUT; + + protected AcknowledgedRequest() {} protected AcknowledgedRequest(StreamInput in) throws IOException { super(in); + this.timeout = in.readTimeValue(); + } + + /** + * Allows to set the timeout + * @param timeout timeout as a string (e.g. 
1s) + * @return the request itself + */ + @SuppressWarnings("unchecked") + public final Request timeout(String timeout) { + this.timeout = TimeValue.parseTimeValue(timeout, this.timeout, getClass().getSimpleName() + ".timeout"); + return (Request) this; + } + + /** + * Allows to set the timeout + * @param timeout timeout as a {@link TimeValue} + * @return the request itself + */ + @SuppressWarnings("unchecked") + public final Request timeout(TimeValue timeout) { + this.timeout = timeout; + return (Request) this; } + + /** + * Returns the current timeout + * @return the current timeout as a {@link TimeValue} + */ + public final TimeValue timeout() { + return timeout; + } + + @Override + public TimeValue ackTimeout() { + return timeout; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeTimeValue(timeout); + } + } diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java index e247734691eca..bffada456b4c2 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedRequestBuilder.java @@ -32,9 +32,8 @@ package org.opensearch.action.support.master; import org.opensearch.action.ActionType; -import org.opensearch.action.support.clustermanager.AcknowledgedRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; import org.opensearch.client.OpenSearchClient; +import org.opensearch.common.unit.TimeValue; /** * Base request builder for cluster-manager node operations that support acknowledgements @@ -42,12 +41,33 @@ * @opensearch.internal */ public abstract class AcknowledgedRequestBuilder< - Request extends AcknowledgedRequest, - Response extends AcknowledgedResponse, - RequestBuilder extends AcknowledgedRequestBuilder> extends - 
org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder { + Request extends AcknowledgedRequest, + Response extends AcknowledgedResponse, + RequestBuilder extends AcknowledgedRequestBuilder> extends MasterNodeOperationRequestBuilder< + Request, + Response, + RequestBuilder> { - protected AcknowledgedRequestBuilder(OpenSearchClient client, ActionType action, Request request) { + protected AcknowledgedRequestBuilder(OpenSearchClient client, ActionType action, Request request) { super(client, action, request); } + + /** + * Sets the maximum wait for acknowledgement from other nodes + */ + @SuppressWarnings("unchecked") + public RequestBuilder setTimeout(TimeValue timeout) { + request.timeout(timeout); + return (RequestBuilder) this; + } + + /** + * Timeout to wait for the operation to be acknowledged by current cluster nodes. Defaults + * to {@code 10s}. + */ + @SuppressWarnings("unchecked") + public RequestBuilder setTimeout(String timeout) { + request.timeout(timeout); + return (RequestBuilder) this; + } } diff --git a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java index 86ae1c313a8e6..05f2f8d3f291a 100644 --- a/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/master/AcknowledgedResponse.java @@ -31,26 +31,119 @@ package org.opensearch.action.support.master; +import org.opensearch.action.ActionResponse; +import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.ToXContentObject; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.xcontent.XContentParser; import 
java.io.IOException; +import java.util.Objects; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * A response that indicates that a request has been acknowledged * * @opensearch.internal */ -public class AcknowledgedResponse extends org.opensearch.action.support.clustermanager.AcknowledgedResponse { +public class AcknowledgedResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); + + protected static void declareAcknowledgedField(ConstructingObjectParser objectParser) { + objectParser.declareField( + constructorArg(), + (parser, context) -> parser.booleanValue(), + ACKNOWLEDGED, + ObjectParser.ValueType.BOOLEAN + ); + } + + protected boolean acknowledged; public AcknowledgedResponse(StreamInput in) throws IOException { super(in); + acknowledged = in.readBoolean(); } public AcknowledgedResponse(StreamInput in, boolean readAcknowledged) throws IOException { - super(in, readAcknowledged); + super(in); + if (readAcknowledged) { + acknowledged = in.readBoolean(); + } } public AcknowledgedResponse(boolean acknowledged) { - super(acknowledged); + this.acknowledged = acknowledged; + } + + /** + * Returns whether the response is acknowledged or not + * @return true if the response is acknowledged, false otherwise + */ + public final boolean isAcknowledged() { + return acknowledged; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(acknowledged); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(ACKNOWLEDGED.getPreferredName(), isAcknowledged()); + addCustomFields(builder, params); + builder.endObject(); + return builder; + } + + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + + } + + /** + * A generic parser that simply parses the 
acknowledged flag + */ + private static final ConstructingObjectParser ACKNOWLEDGED_FLAG_PARSER = new ConstructingObjectParser<>( + "acknowledged_flag", + true, + args -> (Boolean) args[0] + ); + + static { + ACKNOWLEDGED_FLAG_PARSER.declareField( + constructorArg(), + (parser, context) -> parser.booleanValue(), + ACKNOWLEDGED, + ObjectParser.ValueType.BOOLEAN + ); + } + + public static AcknowledgedResponse fromXContent(XContentParser parser) throws IOException { + return new AcknowledgedResponse(ACKNOWLEDGED_FLAG_PARSER.apply(parser, null)); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AcknowledgedResponse that = (AcknowledgedResponse) o; + return isAcknowledged() == that.isAcknowledged(); + } + + @Override + public int hashCode() { + return Objects.hash(isAcknowledged()); } } diff --git a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java b/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java index ac22c0d4eb542..07819217018f8 100644 --- a/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/action/support/master/ShardsAcknowledgedResponse.java @@ -32,19 +32,86 @@ package org.opensearch.action.support.master; +import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.xcontent.ConstructingObjectParser; +import org.opensearch.common.xcontent.ObjectParser; +import org.opensearch.common.xcontent.XContentBuilder; import java.io.IOException; +import java.util.Objects; + +import static org.opensearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * Transport response for shard acknowledgements * * @opensearch.internal */ -public abstract class ShardsAcknowledgedResponse 
extends org.opensearch.action.support.clustermanager.ShardsAcknowledgedResponse { +public abstract class ShardsAcknowledgedResponse extends AcknowledgedResponse { + + protected static final ParseField SHARDS_ACKNOWLEDGED = new ParseField("shards_acknowledged"); + + protected static void declareAcknowledgedAndShardsAcknowledgedFields( + ConstructingObjectParser objectParser + ) { + declareAcknowledgedField(objectParser); + objectParser.declareField( + constructorArg(), + (parser, context) -> parser.booleanValue(), + SHARDS_ACKNOWLEDGED, + ObjectParser.ValueType.BOOLEAN + ); + } + + private final boolean shardsAcknowledged; protected ShardsAcknowledgedResponse(StreamInput in, boolean readShardsAcknowledged) throws IOException { - super(in, readShardsAcknowledged); + super(in); + if (readShardsAcknowledged) { + this.shardsAcknowledged = in.readBoolean(); + } else { + this.shardsAcknowledged = false; + } + } + + protected ShardsAcknowledgedResponse(boolean acknowledged, boolean shardsAcknowledged) { + super(acknowledged); + assert acknowledged || shardsAcknowledged == false; // if it's not acknowledged, then shards acked should be false too + this.shardsAcknowledged = shardsAcknowledged; + } + + /** + * Returns true if the requisite number of shards were started before + * returning from the index creation operation. If {@link #isAcknowledged()} + * is false, then this also returns false. 
+ */ + public boolean isShardsAcknowledged() { + return shardsAcknowledged; + } + + protected void writeShardsAcknowledged(StreamOutput out) throws IOException { + out.writeBoolean(shardsAcknowledged); + } + + @Override + protected void addCustomFields(XContentBuilder builder, Params params) throws IOException { + builder.field(SHARDS_ACKNOWLEDGED.getPreferredName(), isShardsAcknowledged()); + } + + @Override + public boolean equals(Object o) { + if (super.equals(o)) { + ShardsAcknowledgedResponse that = (ShardsAcknowledgedResponse) o; + return isShardsAcknowledged() == that.isShardsAcknowledged(); + } + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), isShardsAcknowledged()); } } diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index 8907de6b0bac7..f4eaa979ff18c 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -130,7 +130,7 @@ import org.opensearch.action.ingest.SimulatePipelineRequest; import org.opensearch.action.ingest.SimulatePipelineRequestBuilder; import org.opensearch.action.ingest.SimulatePipelineResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.xcontent.XContentType; import org.opensearch.tasks.TaskId; diff --git a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java index ede22df071821..c9cd0d0900b5a 100644 --- a/server/src/main/java/org/opensearch/client/IndicesAdminClient.java +++ b/server/src/main/java/org/opensearch/client/IndicesAdminClient.java @@ -124,7 +124,7 @@ import 
org.opensearch.action.admin.indices.validate.query.ValidateQueryRequest; import org.opensearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.opensearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.metadata.IndexMetadata.APIBlock; import org.opensearch.common.Nullable; diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 5c1205db057b4..f99454a8a8913 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -342,7 +342,7 @@ import org.opensearch.action.search.SearchScrollRequest; import org.opensearch.action.search.SearchScrollRequestBuilder; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.termvectors.MultiTermVectorsAction; import org.opensearch.action.termvectors.MultiTermVectorsRequest; import org.opensearch.action.termvectors.MultiTermVectorsRequestBuilder; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java index 97f198e087a93..412d4dba628cb 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -40,7 +40,7 @@ import org.opensearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; import org.opensearch.action.support.ActiveShardCount; import 
org.opensearch.action.support.ActiveShardsObserver; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ack.ClusterStateUpdateRequest; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java index d3b5e721922ae..01cadf3910267 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/TemplateUpgradeService.java @@ -40,7 +40,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; diff --git a/server/src/main/java/org/opensearch/ingest/IngestService.java b/server/src/main/java/org/opensearch/ingest/IngestService.java index ac740c304d1f9..b8256fe896da4 100644 --- a/server/src/main/java/org/opensearch/ingest/IngestService.java +++ b/server/src/main/java/org/opensearch/ingest/IngestService.java @@ -44,7 +44,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.AckedClusterStateUpdateTask; import 
org.opensearch.cluster.ClusterChangedEvent; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java index 02aa4d110e494..0cf0b76a25e23 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestDeleteDanglingIndexAction.java @@ -33,7 +33,7 @@ package org.opensearch.rest.action.admin.cluster.dangling; import org.opensearch.action.admin.indices.dangling.delete.DeleteDanglingIndexRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java index 7c25e1d14ee3a..f2405afdab834 100644 --- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/dangling/RestImportDanglingIndexAction.java @@ -40,7 +40,7 @@ import java.util.List; import org.opensearch.action.admin.indices.dangling.import_index.ImportDanglingIndexRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.node.NodeClient; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.rest.BaseRestHandler; diff --git a/server/src/main/java/org/opensearch/script/ScriptService.java 
b/server/src/main/java/org/opensearch/script/ScriptService.java index a643a31ed4123..303fc5ccbcf88 100644 --- a/server/src/main/java/org/opensearch/script/ScriptService.java +++ b/server/src/main/java/org/opensearch/script/ScriptService.java @@ -39,7 +39,7 @@ import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.PutStoredScriptRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.AckedClusterStateUpdateTask; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterState; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index bfc388119c609..c3a16a1e25bc8 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.alias.Alias; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.MetadataIndexTemplateService.PutRequest; import org.opensearch.cluster.service.ClusterService; diff --git a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java index ed7195df367bc..1e52fa380793e 100644 --- 
a/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -36,7 +36,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequest; import org.opensearch.action.admin.indices.template.put.PutIndexTemplateRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.AdminClient; import org.opensearch.client.Client; import org.opensearch.client.IndicesAdminClient; diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 5ce970e0633d2..14f9a46169fbb 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 4a18415751718..68a6af25a7c82 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -100,7 +100,7 @@ import 
org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.TransportAction; import org.opensearch.action.support.WriteRequest; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.action.update.UpdateHelper; import org.opensearch.client.AdminClient; import org.opensearch.client.node.NodeClient; diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index e3569b08ee617..3594bf9f53ca4 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -38,7 +38,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; diff --git a/test/framework/src/main/java/org/opensearch/test/TestCluster.java b/test/framework/src/main/java/org/opensearch/test/TestCluster.java index 407d9cef1f63c..26081d947431d 100644 --- a/test/framework/src/main/java/org/opensearch/test/TestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/TestCluster.java @@ -40,7 +40,7 @@ import org.opensearch.action.admin.indices.datastream.DeleteDataStreamAction; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.opensearch.action.support.IndicesOptions; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import 
org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.IndexTemplateMetadata; diff --git a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java index 96edfdb40e531..16d44d1f8eeb4 100644 --- a/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java +++ b/test/framework/src/main/java/org/opensearch/test/hamcrest/OpenSearchAssertions.java @@ -51,8 +51,8 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.broadcast.BroadcastResponse; -import org.opensearch.action.support.clustermanager.AcknowledgedRequestBuilder; -import org.opensearch.action.support.clustermanager.AcknowledgedResponse; +import org.opensearch.action.support.master.AcknowledgedRequestBuilder; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.metadata.IndexMetadata;