From 77074b4a81f88bab44e9e39e0b6eb9323880e740 Mon Sep 17 00:00:00 2001 From: Shourya <114977491+shourya035@users.noreply.github.com> Date: Tue, 1 Aug 2023 17:45:40 +0530 Subject: [PATCH 01/24] [Remote Store] Add Segment download stats to remotestore stats API (#8718) --------- Signed-off-by: Shourya Dutta Biswas <114977491+shourya035@users.noreply.github.com> Signed-off-by: Shourya <114977491+shourya035@users.noreply.github.com> Signed-off-by: Ashish Singh Co-authored-by: Ashish Singh --- .../RemoteStoreBackpressureIT.java | 10 +- .../remotestore/RemoteStoreStatsIT.java | 491 +++++++++++++++++- .../remotestore/stats/RemoteStoreStats.java | 172 ++++-- .../stats/RemoteStoreStatsResponse.java | 51 +- .../TransportRemoteStoreStatsAction.java | 11 +- .../RemoteRefreshSegmentPressureService.java | 49 +- ...java => RemoteSegmentTransferTracker.java} | 31 +- .../shard/RemoteStoreRefreshListener.java | 6 +- .../store/DirectoryFileTransferTracker.java | 195 +++++++ .../org/opensearch/index/store/Store.java | 54 +- .../stats/RemoteStoreStatsResponseTests.java | 114 +++- .../stats/RemoteStoreStatsTestHelper.java | 290 +++++++++-- .../stats/RemoteStoreStatsTests.java | 190 ++++++- .../TransportRemoteStoreStatsActionTests.java | 14 +- ...oteRefreshSegmentPressureServiceTests.java | 6 +- ...=> RemoteSegmentTransferTrackerTests.java} | 218 ++++++-- .../RemoteStoreRefreshListenerTests.java | 14 +- 17 files changed, 1656 insertions(+), 260 deletions(-) rename server/src/main/java/org/opensearch/index/remote/{RemoteRefreshSegmentTracker.java => RemoteSegmentTransferTracker.java} (94%) create mode 100644 server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java rename server/src/test/java/org/opensearch/index/remote/{RemoteRefreshSegmentTrackerTests.java => RemoteSegmentTransferTrackerTests.java} (66%) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java index 608d7d9d02581..9641c013bf226 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBackpressureIT.java @@ -17,7 +17,7 @@ import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.repositories.RepositoriesService; import org.opensearch.snapshots.mockstore.MockRepository; import org.opensearch.test.OpenSearchIntegTestCase; @@ -92,7 +92,7 @@ private void validateBackpressure( assertTrue(ex.getMessage().contains("rejected execution on primary shard")); assertTrue(ex.getMessage().contains(breachMode)); - RemoteRefreshSegmentTracker.Stats stats = stats(); + RemoteSegmentTransferTracker.Stats stats = stats(); assertTrue(stats.bytesLag > 0); assertTrue(stats.refreshTimeLagMs > 0); assertTrue(stats.localRefreshNumber - stats.remoteRefreshNumber > 0); @@ -102,7 +102,7 @@ private void validateBackpressure( .setRandomControlIOExceptionRate(0d); assertBusy(() -> { - RemoteRefreshSegmentTracker.Stats finalStats = stats(); + RemoteSegmentTransferTracker.Stats finalStats = stats(); assertEquals(0, finalStats.bytesLag); assertEquals(0, finalStats.refreshTimeLagMs); assertEquals(0, 
finalStats.localRefreshNumber - finalStats.remoteRefreshNumber); @@ -115,11 +115,11 @@ private void validateBackpressure( deleteRepo(); } - private RemoteRefreshSegmentTracker.Stats stats() { + private RemoteSegmentTransferTracker.Stats stats() { String shardId = "0"; RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); - List matches = Arrays.stream(response.getShards()) + List matches = Arrays.stream(response.getRemoteStoreStats()) .filter(stat -> indexShardId.equals(stat.getStats().shardId.toString())) .collect(Collectors.toList()); assertEquals(1, matches.size()); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 76ef153fab963..840e3a07ed255 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -9,18 +9,32 @@ package org.opensearch.remotestore; import org.junit.Before; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsResponse; +import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.allocation.command.MoveAllocationCommand; +import org.opensearch.common.settings.Settings; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.test.OpenSearchIntegTestCase; +import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 3) public class RemoteStoreStatsIT extends RemoteStoreBaseIntegTestCase { @@ -50,14 +64,41 @@ public void testStatsResponseFromAllNodes() { for (String node : nodes) { RemoteStoreStatsResponse response = client(node).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); assertTrue(response.getSuccessfulShards() > 0); - assertTrue(response.getShards() != null && response.getShards().length != 0); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length != 0); final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); - List matches = Arrays.stream(response.getShards()) + List matches = Arrays.stream(response.getRemoteStoreStats()) .filter(stat -> indexShardId.equals(stat.getStats().shardId.toString())) .collect(Collectors.toList()); assertEquals(1, matches.size()); - 
RemoteRefreshSegmentTracker.Stats stats = matches.get(0).getStats(); - assertResponseStats(stats); + RemoteSegmentTransferTracker.Stats stats = matches.get(0).getStats(); + validateUploadStats(stats); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + } + + // Step 3 - Enable replicas on the existing indices and ensure that download + // stats are being populated as well + changeReplicaCountAndEnsureGreen(1); + for (String node : nodes) { + RemoteStoreStatsResponse response = client(node).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, shardId).get(); + assertTrue(response.getSuccessfulShards() > 0); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length != 0); + final String indexShardId = String.format(Locale.ROOT, "[%s][%s]", INDEX_NAME, shardId); + List matches = Arrays.stream(response.getRemoteStoreStats()) + .filter(stat -> indexShardId.equals(stat.getStats().shardId.toString())) + .collect(Collectors.toList()); + assertEquals(2, matches.size()); + for (RemoteStoreStats stat : matches) { + ShardRouting routing = stat.getShardRouting(); + validateShardRouting(routing); + RemoteSegmentTransferTracker.Stats stats = stat.getStats(); + if (routing.primary()) { + validateUploadStats(stats); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + } else { + validateDownloadStats(stats); + assertEquals(0, stats.totalUploadsStarted); + } + } } } @@ -79,10 +120,31 @@ public void testStatsResponseAllShards() { .cluster() .prepareRemoteStoreStats(INDEX_NAME, null); RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); - assertTrue(response.getSuccessfulShards() == 3); - assertTrue(response.getShards() != null && response.getShards().length == 3); - RemoteRefreshSegmentTracker.Stats stats = response.getShards()[0].getStats(); - assertResponseStats(stats); + assertEquals(3, response.getSuccessfulShards()); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 3); + RemoteSegmentTransferTracker.Stats stats = response.getRemoteStoreStats()[0].getStats(); + validateUploadStats(stats); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + + // Step 3 - Enable replicas on the existing indices and ensure that download + // stats are being populated as well + changeReplicaCountAndEnsureGreen(1); + response = client(node).admin().cluster().prepareRemoteStoreStats(INDEX_NAME, null).get(); + assertEquals(6, response.getSuccessfulShards()); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 6); + for (RemoteStoreStats stat : response.getRemoteStoreStats()) { + ShardRouting routing = stat.getShardRouting(); + validateShardRouting(routing); + stats = stat.getStats(); + if (routing.primary()) { + validateUploadStats(stats); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + } else { + validateDownloadStats(stats); + assertEquals(0, stats.totalUploadsStarted); + } + } + } public void testStatsResponseFromLocalNode() { @@ -105,29 +167,398 @@ public void testStatsResponseFromLocalNode() { .prepareRemoteStoreStats(INDEX_NAME, null); remoteStoreStatsRequestBuilder.setLocal(true); RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); - assertTrue(response.getSuccessfulShards() == 1); - assertTrue(response.getShards() != null && response.getShards().length == 1); - RemoteRefreshSegmentTracker.Stats stats = 
response.getShards()[0].getStats(); - assertResponseStats(stats); + assertEquals(1, response.getSuccessfulShards()); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length == 1); + RemoteSegmentTransferTracker.Stats stats = response.getRemoteStoreStats()[0].getStats(); + validateUploadStats(stats); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + } + changeReplicaCountAndEnsureGreen(1); + for (String node : nodes) { + RemoteStoreStatsRequestBuilder remoteStoreStatsRequestBuilder = client(node).admin() + .cluster() + .prepareRemoteStoreStats(INDEX_NAME, null); + remoteStoreStatsRequestBuilder.setLocal(true); + RemoteStoreStatsResponse response = remoteStoreStatsRequestBuilder.get(); + assertTrue(response.getSuccessfulShards() > 0); + assertTrue(response.getRemoteStoreStats() != null && response.getRemoteStoreStats().length != 0); + for (RemoteStoreStats stat : response.getRemoteStoreStats()) { + ShardRouting routing = stat.getShardRouting(); + validateShardRouting(routing); + RemoteSegmentTransferTracker.Stats stats = stat.getStats(); + if (routing.primary()) { + validateUploadStats(stats); + assertEquals(0, stats.directoryFileTransferTrackerStats.transferredBytesStarted); + } else { + validateDownloadStats(stats); + assertEquals(0, stats.totalUploadsStarted); + } + } + } + } + + public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exception { + // Scenario: + // - Create index with single primary and single replica shard + // - Disable Refresh Interval for the index + // - Index documents + // - Trigger refresh and flush + // - Assert that download stats == upload stats + // - Repeat this step for random times (between 5 and 10) + + // Create index with 1 pri and 1 replica and refresh interval disabled + createIndex( + INDEX_NAME, + Settings.builder().put(remoteStoreIndexSettings(1, 1)).put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1).build() + ); + ensureGreen(INDEX_NAME); + + // Manually invoke a refresh + refresh(INDEX_NAME); + + // Get zero state values + // Extract and assert zero state primary stats + RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + RemoteSegmentTransferTracker.Stats zeroStatePrimaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) + .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()) + .get(0) + .getStats(); + assertTrue( + zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded + && zeroStatePrimaryStats.totalUploadsSucceeded == 1 + ); + assertTrue( + zeroStatePrimaryStats.uploadBytesStarted == zeroStatePrimaryStats.uploadBytesSucceeded + && zeroStatePrimaryStats.uploadBytesSucceeded > 0 + ); + assertTrue(zeroStatePrimaryStats.totalUploadsFailed == 0 && zeroStatePrimaryStats.uploadBytesFailed == 0); + + // Extract and assert zero state replica stats + RemoteSegmentTransferTracker.Stats zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) + .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()) + .get(0) + .getStats(); + assertTrue( + zeroStateReplicaStats.directoryFileTransferTrackerStats.transferredBytesStarted == 0 + && zeroStateReplicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0 + ); + + // Index documents + for (int i = 1; i <= randomIntBetween(5, 10); i++) { + 
indexSingleDoc(INDEX_NAME); + // Running Flush & Refresh manually + flushAndRefresh(INDEX_NAME); + ensureGreen(INDEX_NAME); + + // Poll for RemoteStore Stats + assertBusy(() -> { + RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + // Iterate through the response and extract the relevant segment upload and download stats + List primaryStatsList = Arrays.stream(response.getRemoteStoreStats()) + .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()); + assertEquals(1, primaryStatsList.size()); + List replicaStatsList = Arrays.stream(response.getRemoteStoreStats()) + .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()); + assertEquals(1, replicaStatsList.size()); + RemoteSegmentTransferTracker.Stats primaryStats = primaryStatsList.get(0).getStats(); + RemoteSegmentTransferTracker.Stats replicaStats = replicaStatsList.get(0).getStats(); + // Assert Upload syncs - zero state uploads == download syncs + assertTrue(primaryStats.totalUploadsStarted > 0); + assertTrue(primaryStats.totalUploadsSucceeded > 0); + assertTrue( + replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0 + && primaryStats.uploadBytesStarted + - zeroStatePrimaryStats.uploadBytesStarted == replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertTrue( + replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 + && primaryStats.uploadBytesSucceeded + - zeroStatePrimaryStats.uploadBytesSucceeded == replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + // Assert zero failures + assertEquals(0, primaryStats.uploadBytesFailed); + assertEquals(0, replicaStats.directoryFileTransferTrackerStats.transferredBytesFailed); + }, 60, TimeUnit.SECONDS); + } + } + + public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() throws Exception { + // Scenario: + // - Create index with single primary and N-1 replica shards (N = no of data nodes) + // - Disable Refresh Interval for the index + // - Index documents + // - Trigger refresh and flush + // - Assert that download stats == upload stats + // - Repeat this step for random times (between 5 and 10) + + // Create index + int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); + createIndex( + INDEX_NAME, + Settings.builder() + .put(remoteStoreIndexSettings(dataNodeCount - 1, 1)) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), -1) + .build() + ); + ensureGreen(INDEX_NAME); + + // Manually invoke a refresh + refresh(INDEX_NAME); + + // Get zero state values + // Extract and assert zero state primary stats + RemoteStoreStatsResponse zeroStateResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); + RemoteSegmentTransferTracker.Stats zeroStatePrimaryStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats()) + .filter(remoteStoreStats -> remoteStoreStats.getShardRouting().primary()) + .collect(Collectors.toList()) + .get(0) + .getStats(); + assertTrue( + zeroStatePrimaryStats.totalUploadsStarted == zeroStatePrimaryStats.totalUploadsSucceeded + && zeroStatePrimaryStats.totalUploadsSucceeded == 1 + ); + assertTrue( + zeroStatePrimaryStats.uploadBytesStarted == zeroStatePrimaryStats.uploadBytesSucceeded + && zeroStatePrimaryStats.uploadBytesSucceeded > 0 + ); + assertTrue(zeroStatePrimaryStats.totalUploadsFailed == 0 && 
zeroStatePrimaryStats.uploadBytesFailed == 0);
+
+        // Extract and assert zero state replica stats
+        List<RemoteStoreStats> zeroStateReplicaStats = Arrays.stream(zeroStateResponse.getRemoteStoreStats())
+            .filter(remoteStoreStats -> !remoteStoreStats.getShardRouting().primary())
+            .collect(Collectors.toList());
+        zeroStateReplicaStats.forEach(stats -> {
+            assertTrue(
+                stats.getStats().directoryFileTransferTrackerStats.transferredBytesStarted == 0
+                    && stats.getStats().directoryFileTransferTrackerStats.transferredBytesSucceeded == 0
+            );
+        });
+
+        int currentNodesInCluster = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
+        for (int i = 0; i < randomIntBetween(5, 10); i++) {
+            indexSingleDoc(INDEX_NAME);
+            // Running Flush & Refresh manually
+            flushAndRefresh(INDEX_NAME);
+
+            assertBusy(() -> {
+                RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
+                assertEquals(currentNodesInCluster, response.getSuccessfulShards());
+                long uploadsStarted = 0, uploadsSucceeded = 0, uploadsFailed = 0;
+                long uploadBytesStarted = 0, uploadBytesSucceeded = 0, uploadBytesFailed = 0;
+                List<Long> downloadBytesStarted = new ArrayList<>(), downloadBytesSucceeded = new ArrayList<>(), downloadBytesFailed =
+                    new ArrayList<>();
+
+                // Assert that stats for the primary shard and the replica shard set are equal
+                for (RemoteStoreStats eachStatsObject : response.getRemoteStoreStats()) {
+                    RemoteSegmentTransferTracker.Stats stats = eachStatsObject.getStats();
+                    if (eachStatsObject.getShardRouting().primary()) {
+                        uploadBytesStarted = stats.uploadBytesStarted;
+                        uploadBytesSucceeded = stats.uploadBytesSucceeded;
+                        uploadBytesFailed = stats.uploadBytesFailed;
+                    } else {
+                        downloadBytesStarted.add(stats.directoryFileTransferTrackerStats.transferredBytesStarted);
+                        downloadBytesSucceeded.add(stats.directoryFileTransferTrackerStats.transferredBytesSucceeded);
+                        downloadBytesFailed.add(stats.directoryFileTransferTrackerStats.transferredBytesFailed);
+                    }
+                }
+
+                assertEquals(0, uploadsFailed);
+                assertEquals(0, uploadBytesFailed);
+                for (int j = 0; j < response.getSuccessfulShards() - 1; j++) {
+                    assertEquals(uploadBytesStarted - zeroStatePrimaryStats.uploadBytesStarted, (long) downloadBytesStarted.get(j));
+                    assertEquals(uploadBytesSucceeded - zeroStatePrimaryStats.uploadBytesSucceeded, (long) downloadBytesSucceeded.get(j));
+                    assertEquals(0, (long) downloadBytesFailed.get(j));
+                }
+            }, 60, TimeUnit.SECONDS);
+        }
+    }
+
+    public void testStatsOnShardRelocation() {
+        // Scenario:
+        // - Create index with single primary and single replica shard
+        // - Index documents
+        // - Reroute replica shard to one of the remaining nodes
+        // - Assert that remote store stats reflect the new node ID
+
+        // Create index
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1));
+        ensureGreen(INDEX_NAME);
+        // Index docs
+        indexDocs();
+
+        // Fetch current set of nodes in the cluster
+        List<String> currentNodesInCluster = getClusterState().nodes()
+            .getDataNodes()
+            .values()
+            .stream()
+            .map(DiscoveryNode::getId)
+            .collect(Collectors.toList());
+        DiscoveryNode[] discoveryNodesForIndex = client().admin().cluster().prepareSearchShards(INDEX_NAME).get().getNodes();
+
+        // Fetch nodes with shard copies of the created index
+        List<String> nodeIdsWithShardCopies = new ArrayList<>();
+        Arrays.stream(discoveryNodesForIndex).forEach(eachNode -> nodeIdsWithShardCopies.add(eachNode.getId()));
+
+        // Fetch nodes that do not have any copies of the index
+        List<String> nodeIdsWithoutShardCopy = currentNodesInCluster.stream()
+            .filter(eachNode -> !nodeIdsWithShardCopies.contains(eachNode))
+            .collect(Collectors.toList());
+        assertEquals(1, nodeIdsWithoutShardCopy.size());
+
+        // Manually reroute shard to a node which does not have any shard copy at present
+        ShardRouting replicaShardRouting = getClusterState().routingTable()
+            .index(INDEX_NAME)
+            .shard(0)
+            .assignedShards()
+            .stream()
+            .filter(shard -> !shard.primary())
+            .collect(Collectors.toList())
+            .get(0);
+        String sourceNode = replicaShardRouting.currentNodeId();
+        String destinationNode = nodeIdsWithoutShardCopy.get(0);
+        relocateShard(0, sourceNode, destinationNode);
+        RemoteStoreStats[] allShardsStats = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get().getRemoteStoreStats();
+        RemoteStoreStats replicaShardStat = Arrays.stream(allShardsStats)
+            .filter(eachStat -> !eachStat.getShardRouting().primary())
+            .collect(Collectors.toList())
+            .get(0);
+
+        // Assert that remote store stats reflect the new shard state
+        assertEquals(ShardRoutingState.STARTED, replicaShardStat.getShardRouting().state());
+        assertEquals(destinationNode, replicaShardStat.getShardRouting().currentNodeId());
+    }
+
+    public void testStatsOnShardUnassigned() throws IOException {
+        // Scenario:
+        // - Create index with single primary and two replica shards
+        // - Index documents
+        // - Stop one data node
+        // - Assert:
+        //   a. Total shard count in the response object is equal to the previous node count
+        //   b. Successful shard count in the response object is equal to the new node count
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(2, 1));
+        ensureGreen(INDEX_NAME);
+        indexDocs();
+        int dataNodeCountBeforeStop = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
+        internalCluster().stopRandomDataNode();
+        RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
+        int dataNodeCountAfterStop = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
+        assertEquals(dataNodeCountBeforeStop, response.getTotalShards());
+        assertEquals(dataNodeCountAfterStop, response.getSuccessfulShards());
+    }
+
+    public void testStatsOnRemoteStoreRestore() throws IOException {
+        // Creating an index with primary shard count == total nodes in cluster and 0 replicas
+        int dataNodeCount = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes();
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(0, dataNodeCount));
+        ensureGreen(INDEX_NAME);
+
+        // Index some docs to ensure that segments are uploaded to the remote store
+        indexDocs();
+        refresh(INDEX_NAME);
+
+        // Stop one data node to force the index into a red state
+        internalCluster().stopRandomDataNode();
+        ensureRed(INDEX_NAME);
+
+        // Start another data node to restore the previous data node capacity
+        internalCluster().startDataOnlyNode();
+
+        // Restore index from remote store
+        assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
+        client().admin()
+            .cluster()
+            .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(INDEX_NAME).restoreAllShards(true), PlainActionFuture.newFuture());
+
+        // Ensure that the index is green
+        ensureGreen(INDEX_NAME);
+
+        // Index some more docs to force segment uploads to remote store
+        indexDocs();
+
+        RemoteStoreStatsResponse remoteStoreStatsResponse = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get();
+        Arrays.stream(remoteStoreStatsResponse.getRemoteStoreStats()).forEach(statObject -> {
+            RemoteSegmentTransferTracker.Stats segmentTracker = statObject.getStats();
+            // Assert that we have both upload and download stats for the index
+            assertTrue(
+                segmentTracker.totalUploadsStarted > 0 && segmentTracker.totalUploadsSucceeded > 0 && segmentTracker.totalUploadsFailed == 0
+            );
+            assertTrue(
+                segmentTracker.directoryFileTransferTrackerStats.transferredBytesStarted > 0
+                    && segmentTracker.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0
+            );
+        });
+    }
+
+    public void testNonZeroPrimaryStatsOnNewlyCreatedIndexWithZeroDocs() throws Exception {
+        // Create an index with one primary and one replica shard
+        createIndex(INDEX_NAME, remoteStoreIndexSettings(1, 1));
+        ensureGreen(INDEX_NAME);
+        refresh(INDEX_NAME);
+
+        // Ensure that the index has 0 documents in it
+        assertEquals(0, client().admin().indices().prepareStats(INDEX_NAME).get().getTotal().docs.getCount());
+
+        // Assert that within 5 seconds the upload stats on the primary move to a non-zero value,
+        // while the replica still reports no download activity
+        assertBusy(() -> {
+            RemoteStoreStats[] remoteStoreStats = client().admin()
+                .cluster()
+                .prepareRemoteStoreStats(INDEX_NAME, "0")
+                .get()
+                .getRemoteStoreStats();
+            Arrays.stream(remoteStoreStats).forEach(statObject -> {
+                RemoteSegmentTransferTracker.Stats segmentTracker = statObject.getStats();
+                if (statObject.getShardRouting().primary()) {
+                    assertTrue(
+                        segmentTracker.totalUploadsSucceeded == 1
+                            && segmentTracker.totalUploadsStarted == segmentTracker.totalUploadsSucceeded
+                            && segmentTracker.totalUploadsFailed == 0
+                    );
+                } else {
+                    assertTrue(
+                        segmentTracker.directoryFileTransferTrackerStats.transferredBytesStarted == 0
+                            && segmentTracker.directoryFileTransferTrackerStats.transferredBytesSucceeded == 0
+                    );
+                }
+            });
+        }, 5, TimeUnit.SECONDS);
+    }
+
     private void indexDocs() {
-        // Indexing documents along with refreshes and flushes.
for (int i = 0; i < randomIntBetween(5, 10); i++) { if (randomBoolean()) { flush(INDEX_NAME); } else { refresh(INDEX_NAME); } - int numberOfOperations = randomIntBetween(20, 50); + int numberOfOperations = randomIntBetween(10, 30); for (int j = 0; j < numberOfOperations; j++) { indexSingleDoc(INDEX_NAME); } } } - private void assertResponseStats(RemoteRefreshSegmentTracker.Stats stats) { + private void changeReplicaCountAndEnsureGreen(int replicaCount) { + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, replicaCount)) + ); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureGreen(INDEX_NAME); + } + + private void relocateShard(int shardId, String sourceNode, String destNode) { + assertAcked(client().admin().cluster().prepareReroute().add(new MoveAllocationCommand(INDEX_NAME, shardId, sourceNode, destNode))); + ensureGreen(INDEX_NAME); + } + + private void validateUploadStats(RemoteSegmentTransferTracker.Stats stats) { assertEquals(0, stats.refreshTimeLagMs); assertEquals(stats.localRefreshNumber, stats.remoteRefreshNumber); assertTrue(stats.uploadBytesStarted > 0); @@ -143,4 +574,32 @@ private void assertResponseStats(RemoteRefreshSegmentTracker.Stats stats) { assertTrue(stats.uploadBytesPerSecMovingAverage > 0); assertTrue(stats.uploadTimeMovingAverage > 0); } + + private void validateDownloadStats(RemoteSegmentTransferTracker.Stats stats) { + assertTrue(stats.directoryFileTransferTrackerStats.lastTransferTimestampMs > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesStarted > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0); + assertEquals(stats.directoryFileTransferTrackerStats.transferredBytesFailed, 0); + assertTrue(stats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesMovingAverage > 0); + assertTrue(stats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage > 0); + } + + // Validate if the shardRouting obtained from cluster state contains the exact same routing object + // parameters as obtained from the remote store stats API + private void validateShardRouting(ShardRouting routing) { + Stream currentRoutingTable = getClusterState().routingTable() + .getIndicesRouting() + .get(INDEX_NAME) + .shard(routing.id()) + .assignedShards() + .stream(); + assertTrue( + currentRoutingTable.anyMatch( + r -> (r.currentNodeId().equals(routing.currentNodeId()) + && r.state().equals(routing.state()) + && r.primary() == routing.primary()) + ) + ); + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java index 5ac9c1cf5f74c..6b4c9a26ab19b 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStats.java @@ -11,9 +11,10 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import 
org.opensearch.index.remote.RemoteSegmentTransferTracker; import java.io.IOException; @@ -24,72 +25,128 @@ */ public class RemoteStoreStats implements Writeable, ToXContentFragment { - private final RemoteRefreshSegmentTracker.Stats remoteSegmentUploadShardStats; + private final RemoteSegmentTransferTracker.Stats remoteSegmentShardStats; - public RemoteStoreStats(RemoteRefreshSegmentTracker.Stats remoteSegmentUploadShardStats) { - this.remoteSegmentUploadShardStats = remoteSegmentUploadShardStats; + private final ShardRouting shardRouting; + + public RemoteStoreStats(RemoteSegmentTransferTracker.Stats remoteSegmentUploadShardStats, ShardRouting shardRouting) { + this.remoteSegmentShardStats = remoteSegmentUploadShardStats; + this.shardRouting = shardRouting; } public RemoteStoreStats(StreamInput in) throws IOException { - remoteSegmentUploadShardStats = in.readOptionalWriteable(RemoteRefreshSegmentTracker.Stats::new); + this.remoteSegmentShardStats = in.readOptionalWriteable(RemoteSegmentTransferTracker.Stats::new); + this.shardRouting = new ShardRouting(in); + } + + public RemoteSegmentTransferTracker.Stats getStats() { + return remoteSegmentShardStats; } - public RemoteRefreshSegmentTracker.Stats getStats() { - return remoteSegmentUploadShardStats; + public ShardRouting getShardRouting() { + return shardRouting; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject() - .field(Fields.SHARD_ID, remoteSegmentUploadShardStats.shardId) - .field(Fields.LOCAL_REFRESH_TIMESTAMP, remoteSegmentUploadShardStats.localRefreshClockTimeMs) - .field(Fields.REMOTE_REFRESH_TIMESTAMP, remoteSegmentUploadShardStats.remoteRefreshClockTimeMs) - .field(Fields.REFRESH_TIME_LAG_IN_MILLIS, remoteSegmentUploadShardStats.refreshTimeLagMs) - .field(Fields.REFRESH_LAG, remoteSegmentUploadShardStats.localRefreshNumber - remoteSegmentUploadShardStats.remoteRefreshNumber) - .field(Fields.BYTES_LAG, remoteSegmentUploadShardStats.bytesLag) - - .field(Fields.BACKPRESSURE_REJECTION_COUNT, remoteSegmentUploadShardStats.rejectionCount) - .field(Fields.CONSECUTIVE_FAILURE_COUNT, remoteSegmentUploadShardStats.consecutiveFailuresCount); - - builder.startObject(Fields.TOTAL_REMOTE_REFRESH); - builder.field(SubFields.STARTED, remoteSegmentUploadShardStats.totalUploadsStarted) - .field(SubFields.SUCCEEDED, remoteSegmentUploadShardStats.totalUploadsSucceeded) - .field(SubFields.FAILED, remoteSegmentUploadShardStats.totalUploadsFailed); + builder.startObject(); + buildShardRouting(builder); + builder.startObject(Fields.SEGMENT); + builder.startObject(SubFields.DOWNLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesStarted != 0) { + buildDownloadStats(builder); + } + builder.endObject(); + builder.startObject(SubFields.UPLOAD); + // Ensuring that we are not showing 0 metrics to the user + if (remoteSegmentShardStats.totalUploadsStarted != 0) { + buildUploadStats(builder); + } builder.endObject(); - - builder.startObject(Fields.TOTAL_UPLOADS_IN_BYTES); - builder.field(SubFields.STARTED, remoteSegmentUploadShardStats.uploadBytesStarted) - .field(SubFields.SUCCEEDED, remoteSegmentUploadShardStats.uploadBytesSucceeded) - .field(SubFields.FAILED, remoteSegmentUploadShardStats.uploadBytesFailed); builder.endObject(); + return builder.endObject(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + 
out.writeOptionalWriteable(remoteSegmentShardStats); + shardRouting.writeTo(out); + } - builder.startObject(Fields.REMOTE_REFRESH_SIZE_IN_BYTES); - builder.field(SubFields.LAST_SUCCESSFUL, remoteSegmentUploadShardStats.lastSuccessfulRemoteRefreshBytes); - builder.field(SubFields.MOVING_AVG, remoteSegmentUploadShardStats.uploadBytesMovingAverage); + private void buildUploadStats(XContentBuilder builder) throws IOException { + builder.field(UploadStatsFields.LOCAL_REFRESH_TIMESTAMP, remoteSegmentShardStats.localRefreshClockTimeMs) + .field(UploadStatsFields.REMOTE_REFRESH_TIMESTAMP, remoteSegmentShardStats.remoteRefreshClockTimeMs) + .field(UploadStatsFields.REFRESH_TIME_LAG_IN_MILLIS, remoteSegmentShardStats.refreshTimeLagMs) + .field(UploadStatsFields.REFRESH_LAG, remoteSegmentShardStats.localRefreshNumber - remoteSegmentShardStats.remoteRefreshNumber) + .field(UploadStatsFields.BYTES_LAG, remoteSegmentShardStats.bytesLag) + .field(UploadStatsFields.BACKPRESSURE_REJECTION_COUNT, remoteSegmentShardStats.rejectionCount) + .field(UploadStatsFields.CONSECUTIVE_FAILURE_COUNT, remoteSegmentShardStats.consecutiveFailuresCount); + builder.startObject(UploadStatsFields.TOTAL_SYNCS_TO_REMOTE) + .field(SubFields.STARTED, remoteSegmentShardStats.totalUploadsStarted) + .field(SubFields.SUCCEEDED, remoteSegmentShardStats.totalUploadsSucceeded) + .field(SubFields.FAILED, remoteSegmentShardStats.totalUploadsFailed); builder.endObject(); + builder.startObject(UploadStatsFields.TOTAL_UPLOADS_IN_BYTES) + .field(SubFields.STARTED, remoteSegmentShardStats.uploadBytesStarted) + .field(SubFields.SUCCEEDED, remoteSegmentShardStats.uploadBytesSucceeded) + .field(SubFields.FAILED, remoteSegmentShardStats.uploadBytesFailed); + builder.endObject(); + builder.startObject(UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES) + .field(SubFields.LAST_SUCCESSFUL, remoteSegmentShardStats.lastSuccessfulRemoteRefreshBytes) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadBytesMovingAverage); + builder.endObject(); + builder.startObject(UploadStatsFields.UPLOAD_LATENCY_IN_BYTES_PER_SEC) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadBytesPerSecMovingAverage); + builder.endObject(); + builder.startObject(UploadStatsFields.REMOTE_REFRESH_LATENCY_IN_MILLIS) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.uploadTimeMovingAverage); + builder.endObject(); + } - builder.startObject(Fields.UPLOAD_LATENCY_IN_BYTES_PER_SEC); - builder.field(SubFields.MOVING_AVG, remoteSegmentUploadShardStats.uploadBytesPerSecMovingAverage); + private void buildDownloadStats(XContentBuilder builder) throws IOException { + builder.field( + DownloadStatsFields.LAST_SYNC_TIMESTAMP, + remoteSegmentShardStats.directoryFileTransferTrackerStats.lastTransferTimestampMs + ); + builder.startObject(DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES) + .field(SubFields.STARTED, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesStarted) + .field(SubFields.SUCCEEDED, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesSucceeded) + .field(SubFields.FAILED, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesFailed); builder.endObject(); - builder.startObject(Fields.REMOTE_REFRESH_LATENCY_IN_MILLIS); - builder.field(SubFields.MOVING_AVG, remoteSegmentUploadShardStats.uploadTimeMovingAverage); + builder.startObject(DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES) + .field(SubFields.LAST_SUCCESSFUL, remoteSegmentShardStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes) + 
.field(SubFields.MOVING_AVG, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesMovingAverage); builder.endObject(); + builder.startObject(DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC) + .field(SubFields.MOVING_AVG, remoteSegmentShardStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage); builder.endObject(); + } - return builder; + private void buildShardRouting(XContentBuilder builder) throws IOException { + builder.startObject(Fields.ROUTING); + builder.field(RoutingFields.STATE, shardRouting.state()); + builder.field(RoutingFields.PRIMARY, shardRouting.primary()); + builder.field(RoutingFields.NODE_ID, shardRouting.currentNodeId()); + builder.endObject(); } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalWriteable(remoteSegmentUploadShardStats); + static final class Fields { + static final String ROUTING = "routing"; + static final String SEGMENT = "segment"; + static final String TRANSLOG = "translog"; + } + + static final class RoutingFields { + static final String STATE = "state"; + static final String PRIMARY = "primary"; + static final String NODE_ID = "node"; } /** * Fields for remote store stats response */ - static final class Fields { - static final String SHARD_ID = "shard_id"; - + static final class UploadStatsFields { /** * Lag in terms of bytes b/w local and remote store */ @@ -128,7 +185,7 @@ static final class Fields { /** * Represents the number of remote refreshes */ - static final String TOTAL_REMOTE_REFRESH = "total_remote_refresh"; + static final String TOTAL_SYNCS_TO_REMOTE = "total_syncs_to_remote"; /** * Represents the total uploads to remote store in bytes @@ -151,21 +208,46 @@ static final class Fields { static final String REMOTE_REFRESH_LATENCY_IN_MILLIS = "remote_refresh_latency_in_millis"; } + static final class DownloadStatsFields { + /** + * Last successful sync from remote in milliseconds + */ + static final String LAST_SYNC_TIMESTAMP = "last_sync_timestamp"; + + /** + * Total bytes of segment files downloaded from the remote store for a specific shard + */ + static final String TOTAL_DOWNLOADS_IN_BYTES = "total_downloads_in_bytes"; + + /** + * Size of each segment file downloaded from the remote store + */ + static final String DOWNLOAD_SIZE_IN_BYTES = "download_size_in_bytes"; + + /** + * Speed (in bytes/sec) for segment file downloads + */ + static final String DOWNLOAD_SPEED_IN_BYTES_PER_SEC = "download_speed_in_bytes_per_sec"; + } + /** - * Reusable sub fields for {@link Fields} + * Reusable sub fields for {@link UploadStatsFields} and {@link DownloadStatsFields} */ static final class SubFields { static final String STARTED = "started"; static final String SUCCEEDED = "succeeded"; static final String FAILED = "failed"; + static final String DOWNLOAD = "download"; + static final String UPLOAD = "upload"; + /** - * Moving avg over last N values stat for a {@link Fields} + * Moving avg over last N values stat */ static final String MOVING_AVG = "moving_avg"; /** - * Most recent successful attempt stat for a {@link Fields} + * Most recent successful attempt stat */ static final String LAST_SUCCESSFUL = "last_successful"; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java index 20023e30a271e..f6613c1d2ac50 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponse.java
@@ -17,7 +17,10 @@ import org.opensearch.core.xcontent.XContentBuilder;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * Remote Store stats response
@@ -26,49 +29,71 @@
  */
 public class RemoteStoreStatsResponse extends BroadcastResponse {
 
-    private final RemoteStoreStats[] shards;
+    private final RemoteStoreStats[] remoteStoreStats;
 
     public RemoteStoreStatsResponse(StreamInput in) throws IOException {
         super(in);
-        shards = in.readArray(RemoteStoreStats::new, RemoteStoreStats[]::new);
+        remoteStoreStats = in.readArray(RemoteStoreStats::new, RemoteStoreStats[]::new);
     }
 
     public RemoteStoreStatsResponse(
-        RemoteStoreStats[] shards,
+        RemoteStoreStats[] remoteStoreStats,
         int totalShards,
         int successfulShards,
         int failedShards,
         List<DefaultShardOperationFailedException> shardFailures
     ) {
         super(totalShards, successfulShards, failedShards, shardFailures);
-        this.shards = shards;
+        this.remoteStoreStats = remoteStoreStats;
     }
 
-    public RemoteStoreStats[] getShards() {
-        return this.shards;
+    public RemoteStoreStats[] getRemoteStoreStats() {
+        return this.remoteStoreStats;
     }
 
-    public RemoteStoreStats getAt(int position) {
-        return shards[position];
+    public Map<String, Map<Integer, List<RemoteStoreStats>>> groupByIndexAndShards() {
+        Map<String, Map<Integer, List<RemoteStoreStats>>> indexWiseStats = new HashMap<>();
+        for (RemoteStoreStats shardStat : remoteStoreStats) {
+            indexWiseStats.computeIfAbsent(shardStat.getShardRouting().getIndexName(), k -> new HashMap<>())
+                .computeIfAbsent(shardStat.getShardRouting().getId(), k -> new ArrayList<>())
+                .add(shardStat);
+        }
+        return indexWiseStats;
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeArray(shards);
+        out.writeArray(remoteStoreStats);
     }
 
     @Override
     protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
-        builder.startArray("stats");
-        for (RemoteStoreStats shard : shards) {
-            shard.toXContent(builder, params);
+        Map<String, Map<Integer, List<RemoteStoreStats>>> indexWiseStats = groupByIndexAndShards();
+        builder.startObject(Fields.INDICES);
+        for (String indexName : indexWiseStats.keySet()) {
+            builder.startObject(indexName);
+            builder.startObject(Fields.SHARDS);
+            for (int shardId : indexWiseStats.get(indexName).keySet()) {
+                builder.startArray(Integer.toString(shardId));
+                for (RemoteStoreStats shardStat : indexWiseStats.get(indexName).get(shardId)) {
+                    shardStat.toXContent(builder, params);
+                }
+                builder.endArray();
+            }
+            builder.endObject();
+            builder.endObject();
         }
-        builder.endArray();
+        builder.endObject();
     }
 
     @Override
     public String toString() {
         return Strings.toString(XContentType.JSON, this, true, false);
     }
+
+    static final class Fields {
+        static final String SHARDS = "shards";
+        static final String INDICES = "indices";
+    }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java
index 434abd1207f50..37835a5add3d6 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsAction.java
@@ -24,7 +24,7 @@
 import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.index.IndexService; import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardNotFoundException; import org.opensearch.indices.IndicesService; @@ -49,6 +49,7 @@ public class TransportRemoteStoreStatsAction extends TransportBroadcastByNodeAct RemoteStoreStats> { private final IndicesService indicesService; + private final RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService; @Inject @@ -95,7 +96,6 @@ protected ShardsIterator shards(ClusterState clusterState, RemoteStoreStatsReque || (shardRouting.currentNodeId() == null || shardRouting.currentNodeId().equals(clusterState.getNodes().getLocalNodeId())) ) - .filter(ShardRouting::primary) .filter( shardRouting -> Boolean.parseBoolean( clusterState.getMetadata().index(shardRouting.index()).getSettings().get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) @@ -153,11 +153,10 @@ protected RemoteStoreStats shardOperation(RemoteStoreStatsRequest request, Shard throw new ShardNotFoundException(indexShard.shardId()); } - RemoteRefreshSegmentTracker remoteRefreshSegmentTracker = remoteRefreshSegmentPressureService.getRemoteRefreshSegmentTracker( + RemoteSegmentTransferTracker remoteSegmentTransferTracker = remoteRefreshSegmentPressureService.getRemoteRefreshSegmentTracker( indexShard.shardId() ); - assert Objects.nonNull(remoteRefreshSegmentTracker); - - return new RemoteStoreStats(remoteRefreshSegmentTracker.stats()); + assert Objects.nonNull(remoteSegmentTransferTracker); + return new RemoteStoreStats(remoteSegmentTransferTracker.stats(), indexShard.routingEntry()); } } diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java b/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java index 3f1161f0c5e03..6f6364ac3b8a6 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureService.java @@ -26,7 +26,7 @@ import java.util.function.BiConsumer; /** - * Service used to validate if the incoming indexing request should be rejected based on the {@link RemoteRefreshSegmentTracker}. + * Service used to validate if the incoming indexing request should be rejected based on the {@link RemoteSegmentTransferTracker}. * * @opensearch.internal */ @@ -37,7 +37,7 @@ public class RemoteRefreshSegmentPressureService implements IndexEventListener { /** * Keeps map of remote-backed index shards and their corresponding backpressure tracker. */ - private final Map trackerMap = ConcurrentCollections.newConcurrentMap(); + private final Map trackerMap = ConcurrentCollections.newConcurrentMap(); /** * Remote refresh segment pressure settings which is used for creation of the backpressure tracker and as well as rejection. @@ -57,12 +57,12 @@ public RemoteRefreshSegmentPressureService(ClusterService clusterService, Settin } /** - * Get {@code RemoteRefreshSegmentTracker} only if the underlying Index has remote segments integration enabled. + * Get {@code RemoteSegmentTransferTracker} only if the underlying Index has remote segments integration enabled. * * @param shardId shard id * @return the tracker if index is remote-backed, else null. 
*/ - public RemoteRefreshSegmentTracker getRemoteRefreshSegmentTracker(ShardId shardId) { + public RemoteSegmentTransferTracker getRemoteRefreshSegmentTracker(ShardId shardId) { return trackerMap.get(shardId); } @@ -74,8 +74,9 @@ public void afterIndexShardCreated(IndexShard indexShard) { ShardId shardId = indexShard.shardId(); trackerMap.put( shardId, - new RemoteRefreshSegmentTracker( + new RemoteSegmentTransferTracker( shardId, + indexShard.store().getDirectoryFileTransferTracker(), pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -86,8 +87,8 @@ public void afterIndexShardCreated(IndexShard indexShard) { @Override public void afterIndexShardClosed(ShardId shardId, IndexShard indexShard, Settings indexSettings) { - RemoteRefreshSegmentTracker remoteRefreshSegmentTracker = trackerMap.remove(shardId); - if (remoteRefreshSegmentTracker != null) { + RemoteSegmentTransferTracker remoteSegmentTransferTracker = trackerMap.remove(shardId); + if (remoteSegmentTransferTracker != null) { logger.trace("Deleted tracker for shardId={}", shardId); } } @@ -107,34 +108,34 @@ public boolean isSegmentsUploadBackpressureEnabled() { * @param shardId shardId for which the validation needs to be done. */ public void validateSegmentsUploadLag(ShardId shardId) { - RemoteRefreshSegmentTracker remoteRefreshSegmentTracker = getRemoteRefreshSegmentTracker(shardId); + RemoteSegmentTransferTracker remoteSegmentTransferTracker = getRemoteRefreshSegmentTracker(shardId); // condition 1 - This will be null for non-remote backed indexes // condition 2 - This will be zero if the remote store is - if (remoteRefreshSegmentTracker == null || remoteRefreshSegmentTracker.getRefreshSeqNoLag() == 0) { + if (remoteSegmentTransferTracker == null || remoteSegmentTransferTracker.getRefreshSeqNoLag() == 0) { return; } for (LagValidator lagValidator : lagValidators) { - if (lagValidator.validate(remoteRefreshSegmentTracker, shardId) == false) { - remoteRefreshSegmentTracker.incrementRejectionCount(lagValidator.name()); - throw new OpenSearchRejectedExecutionException(lagValidator.rejectionMessage(remoteRefreshSegmentTracker, shardId)); + if (lagValidator.validate(remoteSegmentTransferTracker, shardId) == false) { + remoteSegmentTransferTracker.incrementRejectionCount(lagValidator.name()); + throw new OpenSearchRejectedExecutionException(lagValidator.rejectionMessage(remoteSegmentTransferTracker, shardId)); } } } void updateUploadBytesMovingAverageWindowSize(int updatedSize) { - updateMovingAverageWindowSize(RemoteRefreshSegmentTracker::updateUploadBytesMovingAverageWindowSize, updatedSize); + updateMovingAverageWindowSize(RemoteSegmentTransferTracker::updateUploadBytesMovingAverageWindowSize, updatedSize); } void updateUploadBytesPerSecMovingAverageWindowSize(int updatedSize) { - updateMovingAverageWindowSize(RemoteRefreshSegmentTracker::updateUploadBytesPerSecMovingAverageWindowSize, updatedSize); + updateMovingAverageWindowSize(RemoteSegmentTransferTracker::updateUploadBytesPerSecMovingAverageWindowSize, updatedSize); } void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) { - updateMovingAverageWindowSize(RemoteRefreshSegmentTracker::updateUploadTimeMsMovingAverageWindowSize, updatedSize); + updateMovingAverageWindowSize(RemoteSegmentTransferTracker::updateUploadTimeMsMovingAverageWindowSize, updatedSize); } - void updateMovingAverageWindowSize(BiConsumer biConsumer, int updatedSize) { + void 
updateMovingAverageWindowSize(BiConsumer biConsumer, int updatedSize) { trackerMap.values().forEach(tracker -> biConsumer.accept(tracker, updatedSize)); } @@ -158,7 +159,7 @@ private LagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) { * @param shardId shard id of the {@code IndexShard} currently being validated. * @return true if successfully validated that lag is acceptable. */ - abstract boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId); + abstract boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId); /** * Returns the name of the lag validator. @@ -167,7 +168,7 @@ private LagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) { */ abstract String name(); - abstract String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId); + abstract String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId); } /** @@ -184,7 +185,7 @@ private BytesLagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) } @Override - public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { + public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { if (pressureTracker.getRefreshSeqNoLag() <= 1) { return true; } @@ -198,7 +199,7 @@ public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId sha } @Override - public String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { + public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { double dynamicBytesLagThreshold = pressureTracker.getUploadBytesAverage() * pressureSettings.getBytesLagVarianceFactor(); return String.format( Locale.ROOT, @@ -230,7 +231,7 @@ private TimeLagValidator(RemoteRefreshSegmentPressureSettings pressureSettings) } @Override - public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { + public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { if (pressureTracker.getRefreshSeqNoLag() <= 1) { return true; } @@ -244,7 +245,7 @@ public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId sha } @Override - public String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { + public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { double dynamicTimeLagThreshold = pressureTracker.getUploadTimeMsAverage() * pressureSettings.getUploadTimeLagVarianceFactor(); return String.format( Locale.ROOT, @@ -276,14 +277,14 @@ private ConsecutiveFailureValidator(RemoteRefreshSegmentPressureSettings pressur } @Override - public boolean validate(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { + public boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { int failureStreakCount = pressureTracker.getConsecutiveFailureCount(); int minConsecutiveFailureThreshold = pressureSettings.getMinConsecutiveFailuresLimit(); return failureStreakCount <= minConsecutiveFailureThreshold; } @Override - public String rejectionMessage(RemoteRefreshSegmentTracker pressureTracker, ShardId shardId) { + public String rejectionMessage(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) { return String.format( Locale.ROOT, "rejected execution on primary shard:%s due to remote segments lagging behind local segments." 
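All three lag validators share one pattern: the shard's current lag is compared against a dynamic threshold derived from a moving average scaled by a configurable variance factor. Most of the validate() bodies are elided from this diff, so the following is only a sketch of that pattern for the bytes-lag case, assembled from the accessors that do appear above (getRefreshSeqNoLag(), getBytesLag(), getUploadBytesAverage(), getBytesLagVarianceFactor()); the shipped implementation differs in detail:

    // Sketch only: dynamic bytes-lag check in the style of BytesLagValidator.
    // Assumes enough upload samples have been recorded for the moving average to be meaningful.
    boolean validate(RemoteSegmentTransferTracker pressureTracker, ShardId shardId) {
        if (pressureTracker.getRefreshSeqNoLag() <= 1) {
            return true; // local and remote refreshes are (nearly) in step; nothing to reject
        }
        // threshold = average successful upload size * configured variance factor
        double dynamicBytesLagThreshold = pressureTracker.getUploadBytesAverage() * pressureSettings.getBytesLagVarianceFactor();
        return pressureTracker.getBytesLag() <= dynamicBytesLagThreshold;
    }

When a validator returns false, validateSegmentsUploadLag(...) increments the tracker's rejection count and throws an OpenSearchRejectedExecutionException carrying the validator's rejection message, as shown earlier in this file's diff.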
diff --git a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentTracker.java b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java similarity index 94% rename from server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentTracker.java rename to server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java index 332b0d1698800..cd5d461584f0f 100644 --- a/server/src/main/java/org/opensearch/index/remote/RemoteRefreshSegmentTracker.java +++ b/server/src/main/java/org/opensearch/index/remote/RemoteSegmentTransferTracker.java @@ -15,6 +15,7 @@ import org.opensearch.common.util.Streak; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.DirectoryFileTransferTracker; import java.io.IOException; import java.util.HashMap; @@ -30,7 +31,7 @@ * * @opensearch.internal */ -public class RemoteRefreshSegmentTracker { +public class RemoteSegmentTransferTracker { /** * ShardId for which this instance tracks the remote segment upload metadata. @@ -169,8 +170,14 @@ public class RemoteRefreshSegmentTracker { private final Object uploadTimeMsMutex = new Object(); - public RemoteRefreshSegmentTracker( + /** + * {@link org.opensearch.index.store.Store.StoreDirectory} level file transfer tracker, used to show download stats + */ + private final DirectoryFileTransferTracker directoryFileTransferTracker; + + public RemoteSegmentTransferTracker( ShardId shardId, + DirectoryFileTransferTracker directoryFileTransferTracker, int uploadBytesMovingAverageWindowSize, int uploadBytesPerSecMovingAverageWindowSize, int uploadTimeMsMovingAverageWindowSize @@ -188,6 +195,7 @@ public RemoteRefreshSegmentTracker( uploadTimeMsMovingAverageReference = new AtomicReference<>(new MovingAverage(uploadTimeMsMovingAverageWindowSize)); latestLocalFileNameLengthMap = new HashMap<>(); + this.directoryFileTransferTracker = directoryFileTransferTracker; } ShardId getShardId() { @@ -472,8 +480,12 @@ void updateUploadTimeMsMovingAverageWindowSize(int updatedSize) { } } - public RemoteRefreshSegmentTracker.Stats stats() { - return new RemoteRefreshSegmentTracker.Stats( + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { + return directoryFileTransferTracker; + } + + public RemoteSegmentTransferTracker.Stats stats() { + return new RemoteSegmentTransferTracker.Stats( shardId, localRefreshClockTimeMs, remoteRefreshClockTimeMs, @@ -492,7 +504,8 @@ public RemoteRefreshSegmentTracker.Stats stats() { uploadBytesMovingAverageReference.get().getAverage(), uploadBytesPerSecMovingAverageReference.get().getAverage(), uploadTimeMsMovingAverageReference.get().getAverage(), - getBytesLag() + getBytesLag(), + directoryFileTransferTracker.stats() ); } @@ -522,6 +535,7 @@ public static class Stats implements Writeable { public final double uploadBytesPerSecMovingAverage; public final double uploadTimeMovingAverage; public final long bytesLag; + public final DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats; public Stats( ShardId shardId, @@ -542,7 +556,8 @@ public Stats( double uploadBytesMovingAverage, double uploadBytesPerSecMovingAverage, double uploadTimeMovingAverage, - long bytesLag + long bytesLag, + DirectoryFileTransferTracker.Stats directoryFileTransferTrackerStats ) { this.shardId = shardId; this.localRefreshClockTimeMs = localRefreshClockTimeMs; @@ -563,6 +578,7 @@ public Stats( this.uploadBytesPerSecMovingAverage = 
uploadBytesPerSecMovingAverage; this.uploadTimeMovingAverage = uploadTimeMovingAverage; this.bytesLag = bytesLag; + this.directoryFileTransferTrackerStats = directoryFileTransferTrackerStats; } public Stats(StreamInput in) throws IOException { @@ -586,6 +602,7 @@ public Stats(StreamInput in) throws IOException { this.uploadBytesPerSecMovingAverage = in.readDouble(); this.uploadTimeMovingAverage = in.readDouble(); this.bytesLag = in.readLong(); + this.directoryFileTransferTrackerStats = in.readOptionalWriteable(DirectoryFileTransferTracker.Stats::new); } catch (IOException e) { throw e; } @@ -612,7 +629,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(uploadBytesPerSecMovingAverage); out.writeDouble(uploadTimeMovingAverage); out.writeLong(bytesLag); + out.writeOptionalWriteable(directoryFileTransferTrackerStats); } } - } diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 2385b906a7ae5..4a70ff04770d3 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -28,7 +28,7 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.index.engine.EngineException; import org.opensearch.index.engine.InternalEngine; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; @@ -87,7 +87,7 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL private final IndexShard indexShard; private final Directory storeDirectory; private final RemoteSegmentStoreDirectory remoteDirectory; - private final RemoteRefreshSegmentTracker segmentTracker; + private final RemoteSegmentTransferTracker segmentTracker; private final Map localSegmentChecksumMap; private long primaryTerm; private volatile Iterator backoffDelayIterator; @@ -104,7 +104,7 @@ public final class RemoteStoreRefreshListener extends CloseableRetryableRefreshL public RemoteStoreRefreshListener( IndexShard indexShard, SegmentReplicationCheckpointPublisher checkpointPublisher, - RemoteRefreshSegmentTracker segmentTracker + RemoteSegmentTransferTracker segmentTracker ) { super(indexShard.getThreadPool()); logger = Loggers.getLogger(getClass(), indexShard.shardId()); diff --git a/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java new file mode 100644 index 0000000000000..7e0e231d7bad9 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/DirectoryFileTransferTracker.java @@ -0,0 +1,195 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.store; + +import org.opensearch.common.util.MovingAverage; +import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.common.io.stream.Writeable; + +import java.io.IOException; + +/** + * Tracks the amount of bytes transferred between two {@link org.apache.lucene.store.Directory} instances + * + * @opensearch.internal + */ +public class DirectoryFileTransferTracker { + /** + * Cumulative size of files (in bytes) attempted to be transferred over from the source {@link org.apache.lucene.store.Directory} + */ + private volatile long transferredBytesStarted; + + /** + * Cumulative size of files (in bytes) that failed to transfer over from the source {@link org.apache.lucene.store.Directory} + */ + private volatile long transferredBytesFailed; + + /** + * Cumulative size of files (in bytes) successfully transferred over from the source {@link org.apache.lucene.store.Directory} + */ + private volatile long transferredBytesSucceeded; + + /** + * Timestamp (in milliseconds) of the last successful transfer from the source {@link org.apache.lucene.store.Directory} + */ + private volatile long lastTransferTimestampMs; + + /** + * Provides a moving average of the total size (in bytes) of the last N file transfers from the source {@link org.apache.lucene.store.Directory}. + * N is the window size + */ + private volatile MovingAverage transferredBytesMovingAverageReference; + + /** + * Size (in bytes) of the last file successfully transferred from the source {@link org.apache.lucene.store.Directory} + */ + private volatile long lastSuccessfulTransferInBytes; + + /** + * Provides a moving average of the transfer speed (in bytes/sec) of the last N segment file transfers from the source {@link org.apache.lucene.store.Directory}. + * N is the window size + */ + private volatile MovingAverage transferredBytesPerSecMovingAverageReference; + + private final int DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE = 20; + + public long getTransferredBytesStarted() { + return transferredBytesStarted; + } + + public void addTransferredBytesStarted(long size) { + transferredBytesStarted += size; + } + + public long getTransferredBytesFailed() { + return transferredBytesFailed; + } + + public void addTransferredBytesFailed(long size) { + transferredBytesFailed += size; + } + + public long getTransferredBytesSucceeded() { + return transferredBytesSucceeded; + } + + public void addTransferredBytesSucceeded(long size, long startTimeInMs) { + transferredBytesSucceeded += size; + updateLastSuccessfulTransferSize(size); + long currentTimeInMs = System.currentTimeMillis(); + updateLastTransferTimestampMs(currentTimeInMs); + long timeTakenInMS = Math.max(1, currentTimeInMs - startTimeInMs); // clamp to >= 1 ms to avoid division by zero + addTransferredBytesPerSec((size * 1_000L) / timeTakenInMS); // record transfer speed in bytes/sec + } + + public boolean isTransferredBytesPerSecAverageReady() { + return transferredBytesPerSecMovingAverageReference.isReady(); + } + + public double getTransferredBytesPerSecAverage() { + return transferredBytesPerSecMovingAverageReference.getAverage(); + } + + // Visible for testing + public void addTransferredBytesPerSec(long bytesPerSec) { + this.transferredBytesPerSecMovingAverageReference.record(bytesPerSec); + } + + public boolean isTransferredBytesAverageReady() { + return transferredBytesMovingAverageReference.isReady(); + } + + public double getTransferredBytesAverage() { + return transferredBytesMovingAverageReference.getAverage(); + } + + // Visible for testing + public void updateLastSuccessfulTransferSize(long size) { + lastSuccessfulTransferInBytes = size; +
this.transferredBytesMovingAverageReference.record(size); + } + + public long getLastTransferTimestampMs() { + return lastTransferTimestampMs; + } + + // Visible for testing + public void updateLastTransferTimestampMs(long transferTimestampInMs) { + this.lastTransferTimestampMs = transferTimestampInMs; + } + + public DirectoryFileTransferTracker() { + transferredBytesMovingAverageReference = new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE); + transferredBytesPerSecMovingAverageReference = new MovingAverage(DIRECTORY_FILES_TRANSFER_DEFAULT_WINDOW_SIZE); + } + + public DirectoryFileTransferTracker.Stats stats() { + return new Stats( + transferredBytesStarted, + transferredBytesFailed, + transferredBytesSucceeded, + lastTransferTimestampMs, + transferredBytesMovingAverageReference.getAverage(), + lastSuccessfulTransferInBytes, + transferredBytesPerSecMovingAverageReference.getAverage() + ); + } + + /** + * Represents the tracker's stats presentable to an API. + * + * @opensearch.internal + */ + public static class Stats implements Writeable { + public final long transferredBytesStarted; + public final long transferredBytesFailed; + public final long transferredBytesSucceeded; + public final long lastTransferTimestampMs; + public final double transferredBytesMovingAverage; + public final long lastSuccessfulTransferInBytes; + public final double transferredBytesPerSecMovingAverage; + + public Stats( + long transferredBytesStarted, + long transferredBytesFailed, + long transferredBytesSucceeded, + long lastTransferTimestampMs, + double transferredBytesMovingAverage, + long lastSuccessfulTransferInBytes, + double transferredBytesPerSecMovingAverage + ) { + this.transferredBytesStarted = transferredBytesStarted; + this.transferredBytesFailed = transferredBytesFailed; + this.transferredBytesSucceeded = transferredBytesSucceeded; + this.lastTransferTimestampMs = lastTransferTimestampMs; + this.transferredBytesMovingAverage = transferredBytesMovingAverage; + this.lastSuccessfulTransferInBytes = lastSuccessfulTransferInBytes; + this.transferredBytesPerSecMovingAverage = transferredBytesPerSecMovingAverage; + } + + public Stats(StreamInput in) throws IOException { + this.transferredBytesStarted = in.readLong(); + this.transferredBytesFailed = in.readLong(); + this.transferredBytesSucceeded = in.readLong(); + this.lastTransferTimestampMs = in.readLong(); + this.transferredBytesMovingAverage = in.readDouble(); + this.lastSuccessfulTransferInBytes = in.readLong(); + this.transferredBytesPerSecMovingAverage = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(transferredBytesStarted); + out.writeLong(transferredBytesFailed); + out.writeLong(transferredBytesSucceeded); + out.writeLong(lastTransferTimestampMs); + out.writeDouble(transferredBytesMovingAverage); + out.writeLong(lastSuccessfulTransferInBytes); + out.writeDouble(transferredBytesPerSecMovingAverage); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index 8967100d4faf0..a67b87f58110c 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -963,18 +963,24 @@ public void commitSegmentInfos(SegmentInfos latestSegmentInfos, long maxSeqNo, l } } + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { + return directory.getDirectoryFileTransferTracker(); + } + /** * A store directory * *
@opensearch.internal */ static final class StoreDirectory extends FilterDirectory { private final Logger deletesLogger; + public final DirectoryFileTransferTracker directoryFileTransferTracker; StoreDirectory(ByteSizeCachingDirectory delegateDirectory, Logger deletesLogger) { super(delegateDirectory); this.deletesLogger = deletesLogger; + this.directoryFileTransferTracker = new DirectoryFileTransferTracker(); } /** Estimate the cumulative size of all files in this directory in bytes. */ @@ -1012,6 +1018,52 @@ public Set getPendingDeletions() throws IOException { // to be removed once fixed in FilterDirectory. return unwrap(this).getPendingDeletions(); } + + public DirectoryFileTransferTracker getDirectoryFileTransferTracker() { + return directoryFileTransferTracker; + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) throws IOException { + long fileSize = from.fileLength(src); + beforeDownload(fileSize); + boolean success = false; + try { + long startTime = System.currentTimeMillis(); + super.copyFrom(from, src, dest, context); + success = true; + afterDownload(fileSize, startTime); + } finally { + if (!success) { + downloadFailed(fileSize); + } + } + } + + /** + * Updates the amount of bytes attempted for download + */ + private void beforeDownload(long fileSize) { + directoryFileTransferTracker.addTransferredBytesStarted(fileSize); + } + + /** + * Updates + * - The number of bytes successfully downloaded from the source store + * - The last successful download completion timestamp + * - The size of the last successfully downloaded file + * - Download speed (in bytes/sec) + */ + private void afterDownload(long fileSize, long startTimeInMs) { + directoryFileTransferTracker.addTransferredBytesSucceeded(fileSize, startTimeInMs); + } + + /** + * Updates the amount of bytes failed in download + */ + private void downloadFailed(long fileSize) { + directoryFileTransferTracker.addTransferredBytesFailed(fileSize); + } } /** diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java index a476b66719d3f..64dfda86c1af9 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsResponseTests.java @@ -10,10 +10,11 @@ import org.opensearch.core.action.support.DefaultShardOperationFailedException; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -23,7 +24,10 @@ import java.util.Map; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse; -import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createPressureTrackerStats; +import static
org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary; import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; public class RemoteStoreStatsResponseTests extends OpenSearchTestCase { @@ -43,11 +47,12 @@ public void tearDown() throws Exception { threadPool.shutdownNow(); } - public void testSerialization() throws Exception { - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = createPressureTrackerStats(shardId); - RemoteStoreStats stats = new RemoteStoreStats(pressureTrackerStats); + public void testSerializationForPrimary() throws Exception { + RemoteSegmentTransferTracker.Stats mockPrimaryTrackerStats = createStatsForNewPrimary(shardId); + ShardRouting primaryShardRouting = createShardRouting(shardId, true); + RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockPrimaryTrackerStats, primaryShardRouting); RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( - new RemoteStoreStats[] { stats }, + new RemoteStoreStats[] { primaryShardStats }, 1, 1, 0, @@ -58,15 +63,96 @@ public void testSerialization() throws Exception { statsResponse.toXContent(builder, EMPTY_PARAMS); Map jsonResponseObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) .v2(); + Map metadataShardsObject = (Map) jsonResponseObject.get("_shards"); + assertEquals(metadataShardsObject.get("total"), 1); + assertEquals(metadataShardsObject.get("successful"), 1); + assertEquals(metadataShardsObject.get("failed"), 0); + Map indicesObject = (Map) jsonResponseObject.get("indices"); + assertTrue(indicesObject.containsKey("index")); + Map shardsObject = (Map) ((Map) indicesObject.get("index")).get("shards"); + ArrayList> perShardNumberObject = (ArrayList>) shardsObject.get("0"); + assertEquals(perShardNumberObject.size(), 1); + Map perShardCopyObject = perShardNumberObject.get(0); + compareStatsResponse(perShardCopyObject, mockPrimaryTrackerStats, primaryShardRouting); + } + + public void testSerializationForBothPrimaryAndReplica() throws Exception { + RemoteSegmentTransferTracker.Stats mockPrimaryTrackerStats = createStatsForNewPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockReplicaTrackerStats = createStatsForNewReplica(shardId); + ShardRouting primaryShardRouting = createShardRouting(shardId, true); + ShardRouting replicaShardRouting = createShardRouting(shardId, false); + RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockPrimaryTrackerStats, primaryShardRouting); + RemoteStoreStats replicaShardStats = new RemoteStoreStats(mockReplicaTrackerStats, replicaShardRouting); + RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( + new RemoteStoreStats[] { primaryShardStats, replicaShardStats }, + 2, + 2, + 0, + new ArrayList() + ); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + statsResponse.toXContent(builder, EMPTY_PARAMS); + Map jsonResponseObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) + .v2(); + Map metadataShardsObject = (Map) jsonResponseObject.get("_shards"); + assertEquals(2, metadataShardsObject.get("total")); + 
assertEquals(2, metadataShardsObject.get("successful")); + assertEquals(0, metadataShardsObject.get("failed")); + Map indicesObject = (Map) jsonResponseObject.get("indices"); + assertTrue(indicesObject.containsKey("index")); + Map shardsObject = (Map) ((Map) indicesObject.get("index")).get("shards"); + ArrayList> perShardNumberObject = (ArrayList>) shardsObject.get("0"); + assertEquals(2, perShardNumberObject.size()); + perShardNumberObject.forEach(shardObject -> { + boolean isPrimary = (boolean) ((Map) shardObject.get(RemoteStoreStats.Fields.ROUTING)).get( + RemoteStoreStats.RoutingFields.PRIMARY + ); + if (isPrimary) { + compareStatsResponse(shardObject, mockPrimaryTrackerStats, primaryShardRouting); + } else { + compareStatsResponse(shardObject, mockReplicaTrackerStats, replicaShardRouting); + } + }); + } - ArrayList> statsObjectArray = (ArrayList>) jsonResponseObject.get("stats"); - assertEquals(statsObjectArray.size(), 1); - Map statsObject = statsObjectArray.get(0); - Map shardsObject = (Map) jsonResponseObject.get("_shards"); + public void testSerializationForBothRemoteStoreRestoredPrimaryAndReplica() throws Exception { + RemoteSegmentTransferTracker.Stats mockPrimaryTrackerStats = createStatsForRemoteStoreRestoredPrimary(shardId); + RemoteSegmentTransferTracker.Stats mockReplicaTrackerStats = createStatsForNewReplica(shardId); + ShardRouting primaryShardRouting = createShardRouting(shardId, true); + ShardRouting replicaShardRouting = createShardRouting(shardId, false); + RemoteStoreStats primaryShardStats = new RemoteStoreStats(mockPrimaryTrackerStats, primaryShardRouting); + RemoteStoreStats replicaShardStats = new RemoteStoreStats(mockReplicaTrackerStats, replicaShardRouting); + RemoteStoreStatsResponse statsResponse = new RemoteStoreStatsResponse( + new RemoteStoreStats[] { primaryShardStats, replicaShardStats }, + 2, + 2, + 0, + new ArrayList() + ); - assertEquals(shardsObject.get("total"), 1); - assertEquals(shardsObject.get("successful"), 1); - assertEquals(shardsObject.get("failed"), 0); - compareStatsResponse(statsObject, pressureTrackerStats); + XContentBuilder builder = XContentFactory.jsonBuilder(); + statsResponse.toXContent(builder, EMPTY_PARAMS); + Map jsonResponseObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()) + .v2(); + Map metadataShardsObject = (Map) jsonResponseObject.get("_shards"); + assertEquals(2, metadataShardsObject.get("total")); + assertEquals(2, metadataShardsObject.get("successful")); + assertEquals(0, metadataShardsObject.get("failed")); + Map indicesObject = (Map) jsonResponseObject.get("indices"); + assertTrue(indicesObject.containsKey("index")); + Map shardsObject = (Map) ((Map) indicesObject.get("index")).get("shards"); + ArrayList> perShardNumberObject = (ArrayList>) shardsObject.get("0"); + assertEquals(2, perShardNumberObject.size()); + perShardNumberObject.forEach(shardObject -> { + boolean isPrimary = (boolean) ((Map) shardObject.get(RemoteStoreStats.Fields.ROUTING)).get( + RemoteStoreStats.RoutingFields.PRIMARY + ); + if (isPrimary) { + compareStatsResponse(shardObject, mockPrimaryTrackerStats, primaryShardRouting); + } else { + compareStatsResponse(shardObject, mockReplicaTrackerStats, replicaShardRouting); + } + }); } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java index 747dc692b1d5d..0c081ee238e2d 100644 --- 
a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTestHelper.java @@ -8,80 +8,264 @@ package org.opensearch.action.admin.cluster.remotestore.stats; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.DirectoryFileTransferTracker; import java.util.Map; +import static org.junit.Assert.assertTrue; import static org.opensearch.test.OpenSearchTestCase.assertEquals; +import static org.opensearch.test.OpenSearchTestCase.randomAlphaOfLength; /** * Helper utilities for Remote Store stats tests */ public class RemoteStoreStatsTestHelper { - static RemoteRefreshSegmentTracker.Stats createPressureTrackerStats(ShardId shardId) { - return new RemoteRefreshSegmentTracker.Stats(shardId, 101, 102, 100, 3, 2, 10, 5, 5, 10, 5, 5, 3, 2, 5, 2, 3, 4, 9); + static RemoteSegmentTransferTracker.Stats createStatsForNewPrimary(ShardId shardId) { + return new RemoteSegmentTransferTracker.Stats( + shardId, + 101, + 102, + 100, + 0, + 10, + 2, + 10, + 5, + 5, + 0, + 0, + 0, + 5, + 5, + 5, + 0, + 0, + 0, + createZeroDirectoryFileTransferStats() + ); } - static void compareStatsResponse(Map statsObject, RemoteRefreshSegmentTracker.Stats pressureTrackerStats) { - assertEquals(statsObject.get(RemoteStoreStats.Fields.SHARD_ID), pressureTrackerStats.shardId.toString()); - assertEquals(statsObject.get(RemoteStoreStats.Fields.LOCAL_REFRESH_TIMESTAMP), (int) pressureTrackerStats.localRefreshClockTimeMs); - assertEquals( - statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_TIMESTAMP), - (int) pressureTrackerStats.remoteRefreshClockTimeMs - ); - assertEquals(statsObject.get(RemoteStoreStats.Fields.REFRESH_TIME_LAG_IN_MILLIS), (int) pressureTrackerStats.refreshTimeLagMs); - assertEquals( - statsObject.get(RemoteStoreStats.Fields.REFRESH_LAG), - (int) (pressureTrackerStats.localRefreshNumber - pressureTrackerStats.remoteRefreshNumber) + static RemoteSegmentTransferTracker.Stats createStatsForNewReplica(ShardId shardId) { + return new RemoteSegmentTransferTracker.Stats( + shardId, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + createSampleDirectoryFileTransferStats() ); - assertEquals(statsObject.get(RemoteStoreStats.Fields.BYTES_LAG), (int) pressureTrackerStats.bytesLag); + } - assertEquals(statsObject.get(RemoteStoreStats.Fields.BACKPRESSURE_REJECTION_COUNT), (int) pressureTrackerStats.rejectionCount); - assertEquals( - statsObject.get(RemoteStoreStats.Fields.CONSECUTIVE_FAILURE_COUNT), - (int) pressureTrackerStats.consecutiveFailuresCount + static RemoteSegmentTransferTracker.Stats createStatsForRemoteStoreRestoredPrimary(ShardId shardId) { + return new RemoteSegmentTransferTracker.Stats( + shardId, + 50, + 50, + 0, + 50, + 11, + 11, + 10, + 10, + 0, + 10, + 10, + 0, + 10, + 10, + 0, + 0, + 0, + 100, + createSampleDirectoryFileTransferStats() ); + } + static DirectoryFileTransferTracker.Stats createSampleDirectoryFileTransferStats() { + return new DirectoryFileTransferTracker.Stats(10, 0, 10, 12345, 5, 5, 5); + } + + static DirectoryFileTransferTracker.Stats createZeroDirectoryFileTransferStats() { + 
return new DirectoryFileTransferTracker.Stats(0, 0, 0, 0, 0, 0, 0); + } + + static ShardRouting createShardRouting(ShardId shardId, boolean isPrimary) { + return TestShardRouting.newShardRouting(shardId, randomAlphaOfLength(4), isPrimary, ShardRoutingState.STARTED); + } + + static void compareStatsResponse( + Map statsObject, + RemoteSegmentTransferTracker.Stats statsTracker, + ShardRouting routing + ) { assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_UPLOADS_IN_BYTES)).get(RemoteStoreStats.SubFields.STARTED), - (int) pressureTrackerStats.uploadBytesStarted - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_UPLOADS_IN_BYTES)).get(RemoteStoreStats.SubFields.SUCCEEDED), - (int) pressureTrackerStats.uploadBytesSucceeded - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_UPLOADS_IN_BYTES)).get(RemoteStoreStats.SubFields.FAILED), - (int) pressureTrackerStats.uploadBytesFailed - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_SIZE_IN_BYTES)).get(RemoteStoreStats.SubFields.MOVING_AVG), - pressureTrackerStats.uploadBytesMovingAverage - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_SIZE_IN_BYTES)).get(RemoteStoreStats.SubFields.LAST_SUCCESSFUL), - (int) pressureTrackerStats.lastSuccessfulRemoteRefreshBytes - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.UPLOAD_LATENCY_IN_BYTES_PER_SEC)).get(RemoteStoreStats.SubFields.MOVING_AVG), - pressureTrackerStats.uploadBytesPerSecMovingAverage - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_REMOTE_REFRESH)).get(RemoteStoreStats.SubFields.STARTED), - (int) pressureTrackerStats.totalUploadsStarted - ); - assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_REMOTE_REFRESH)).get(RemoteStoreStats.SubFields.SUCCEEDED), - (int) pressureTrackerStats.totalUploadsSucceeded + ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.NODE_ID), + routing.currentNodeId() ); assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.TOTAL_REMOTE_REFRESH)).get(RemoteStoreStats.SubFields.FAILED), - (int) pressureTrackerStats.totalUploadsFailed + ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.STATE), + routing.state().toString() ); assertEquals( - ((Map) statsObject.get(RemoteStoreStats.Fields.REMOTE_REFRESH_LATENCY_IN_MILLIS)).get(RemoteStoreStats.SubFields.MOVING_AVG), - pressureTrackerStats.uploadTimeMovingAverage + ((Map) statsObject.get(RemoteStoreStats.Fields.ROUTING)).get(RemoteStoreStats.RoutingFields.PRIMARY), + routing.primary() ); + + Map segment = ((Map) statsObject.get(RemoteStoreStats.Fields.SEGMENT)); + Map segmentDownloads = ((Map) segment.get(RemoteStoreStats.SubFields.DOWNLOAD)); + Map segmentUploads = ((Map) segment.get(RemoteStoreStats.SubFields.UPLOAD)); + + if (statsTracker.directoryFileTransferTrackerStats.transferredBytesStarted != 0) { + assertEquals( + segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.LAST_SYNC_TIMESTAMP), + (int) statsTracker.directoryFileTransferTrackerStats.lastTransferTimestampMs + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES)).get( + RemoteStoreStats.SubFields.STARTED + ), + (int) statsTracker.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES)).get( + 
RemoteStoreStats.SubFields.SUCCEEDED + ), + (int) statsTracker.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.TOTAL_DOWNLOADS_IN_BYTES)).get( + RemoteStoreStats.SubFields.FAILED + ), + (int) statsTracker.directoryFileTransferTrackerStats.transferredBytesFailed + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.LAST_SUCCESSFUL + ), + (int) statsTracker.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + statsTracker.directoryFileTransferTrackerStats.transferredBytesMovingAverage + ); + assertEquals( + ((Map) segmentDownloads.get(RemoteStoreStats.DownloadStatsFields.DOWNLOAD_SPEED_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + statsTracker.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage + ); + } else { + assertTrue(segmentDownloads.isEmpty()); + } + + if (statsTracker.totalUploadsStarted != 0) { + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.LOCAL_REFRESH_TIMESTAMP), + (int) statsTracker.localRefreshClockTimeMs + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_TIMESTAMP), + (int) statsTracker.remoteRefreshClockTimeMs + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.REFRESH_TIME_LAG_IN_MILLIS), + (int) statsTracker.refreshTimeLagMs + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.REFRESH_LAG), + (int) (statsTracker.localRefreshNumber - statsTracker.remoteRefreshNumber) + ); + assertEquals(segmentUploads.get(RemoteStoreStats.UploadStatsFields.BYTES_LAG), (int) statsTracker.bytesLag); + + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.BACKPRESSURE_REJECTION_COUNT), + (int) statsTracker.rejectionCount + ); + assertEquals( + segmentUploads.get(RemoteStoreStats.UploadStatsFields.CONSECUTIVE_FAILURE_COUNT), + (int) statsTracker.consecutiveFailuresCount + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS_IN_BYTES)).get( + RemoteStoreStats.SubFields.STARTED + ), + (int) statsTracker.uploadBytesStarted + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS_IN_BYTES)).get( + RemoteStoreStats.SubFields.SUCCEEDED + ), + (int) statsTracker.uploadBytesSucceeded + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_UPLOADS_IN_BYTES)).get( + RemoteStoreStats.SubFields.FAILED + ), + (int) statsTracker.uploadBytesFailed + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + statsTracker.uploadBytesMovingAverage + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_SIZE_IN_BYTES)).get( + RemoteStoreStats.SubFields.LAST_SUCCESSFUL + ), + (int) statsTracker.lastSuccessfulRemoteRefreshBytes + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.UPLOAD_LATENCY_IN_BYTES_PER_SEC)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + statsTracker.uploadBytesPerSecMovingAverage + ); + assertEquals( + ((Map) 
segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_SYNCS_TO_REMOTE)).get( + RemoteStoreStats.SubFields.STARTED + ), + (int) statsTracker.totalUploadsStarted + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_SYNCS_TO_REMOTE)).get( + RemoteStoreStats.SubFields.SUCCEEDED + ), + (int) statsTracker.totalUploadsSucceeded + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.TOTAL_SYNCS_TO_REMOTE)).get(RemoteStoreStats.SubFields.FAILED), + (int) statsTracker.totalUploadsFailed + ); + assertEquals( + ((Map) segmentUploads.get(RemoteStoreStats.UploadStatsFields.REMOTE_REFRESH_LATENCY_IN_MILLIS)).get( + RemoteStoreStats.SubFields.MOVING_AVG + ), + statsTracker.uploadTimeMovingAverage + ); + } else { + assertTrue(segmentUploads.isEmpty()); + } } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java index fc057b71b15f8..3597a5350e1fb 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/RemoteStoreStatsTests.java @@ -9,12 +9,13 @@ package org.opensearch.action.admin.cluster.remotestore.stats; import org.opensearch.core.common.bytes.BytesReference; +import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -24,7 +25,10 @@ import java.util.Map; import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.compareStatsResponse; -import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createPressureTrackerStats; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewReplica; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createShardRouting; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForNewPrimary; +import static org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsTestHelper.createStatsForRemoteStoreRestoredPrimary; import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; public class RemoteStoreStatsTests extends OpenSearchTestCase { @@ -44,43 +48,175 @@ public void tearDown() throws Exception { threadPool.shutdownNow(); } - public void testXContentBuilder() throws IOException { - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = createPressureTrackerStats(shardId); - RemoteStoreStats stats = new RemoteStoreStats(pressureTrackerStats); + public void testXContentBuilderWithPrimaryShard() throws IOException { + RemoteSegmentTransferTracker.Stats uploadStats = createStatsForNewPrimary(shardId); + ShardRouting routing = createShardRouting(shardId, true); + RemoteStoreStats stats = new RemoteStoreStats(uploadStats, 
routing); XContentBuilder builder = XContentFactory.jsonBuilder(); stats.toXContent(builder, EMPTY_PARAMS); Map jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); - compareStatsResponse(jsonObject, pressureTrackerStats); + compareStatsResponse(jsonObject, uploadStats, routing); } - public void testSerialization() throws Exception { - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = createPressureTrackerStats(shardId); - RemoteStoreStats stats = new RemoteStoreStats(pressureTrackerStats); + public void testXContentBuilderWithReplicaShard() throws IOException { + RemoteSegmentTransferTracker.Stats downloadStats = createStatsForNewReplica(shardId); + ShardRouting routing = createShardRouting(shardId, false); + RemoteStoreStats stats = new RemoteStoreStats(downloadStats, routing); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + stats.toXContent(builder, EMPTY_PARAMS); + Map jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + compareStatsResponse(jsonObject, downloadStats, routing); + } + + public void testXContentBuilderWithRemoteStoreRestoredShard() throws IOException { + RemoteSegmentTransferTracker.Stats remotestoreRestoredShardStats = createStatsForRemoteStoreRestoredPrimary(shardId); + ShardRouting routing = createShardRouting(shardId, true); + RemoteStoreStats stats = new RemoteStoreStats(remotestoreRestoredShardStats, routing); + + XContentBuilder builder = XContentFactory.jsonBuilder(); + stats.toXContent(builder, EMPTY_PARAMS); + Map jsonObject = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + compareStatsResponse(jsonObject, remotestoreRestoredShardStats, routing); + } + + public void testSerializationForPrimaryShard() throws Exception { + RemoteSegmentTransferTracker.Stats primaryShardStats = createStatsForNewPrimary(shardId); + RemoteStoreStats stats = new RemoteStoreStats(primaryShardStats, createShardRouting(shardId, true)); + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteSegmentTransferTracker.Stats deserializedStats = new RemoteStoreStats(in).getStats(); + assertEquals(stats.getStats().refreshTimeLagMs, deserializedStats.refreshTimeLagMs); + assertEquals(stats.getStats().localRefreshNumber, deserializedStats.localRefreshNumber); + assertEquals(stats.getStats().remoteRefreshNumber, deserializedStats.remoteRefreshNumber); + assertEquals(stats.getStats().uploadBytesStarted, deserializedStats.uploadBytesStarted); + assertEquals(stats.getStats().uploadBytesSucceeded, deserializedStats.uploadBytesSucceeded); + assertEquals(stats.getStats().uploadBytesFailed, deserializedStats.uploadBytesFailed); + assertEquals(stats.getStats().totalUploadsStarted, deserializedStats.totalUploadsStarted); + assertEquals(stats.getStats().totalUploadsFailed, deserializedStats.totalUploadsFailed); + assertEquals(stats.getStats().totalUploadsSucceeded, deserializedStats.totalUploadsSucceeded); + assertEquals(stats.getStats().rejectionCount, deserializedStats.rejectionCount); + assertEquals(stats.getStats().consecutiveFailuresCount, deserializedStats.consecutiveFailuresCount); + assertEquals(stats.getStats().uploadBytesMovingAverage, deserializedStats.uploadBytesMovingAverage, 0); + assertEquals(stats.getStats().uploadBytesPerSecMovingAverage, deserializedStats.uploadBytesPerSecMovingAverage, 0); + 
assertEquals(stats.getStats().uploadTimeMovingAverage, deserializedStats.uploadTimeMovingAverage, 0); + assertEquals(stats.getStats().bytesLag, deserializedStats.bytesLag); + assertEquals(0, deserializedStats.directoryFileTransferTrackerStats.transferredBytesStarted); + assertEquals(0, deserializedStats.directoryFileTransferTrackerStats.transferredBytesFailed); + assertEquals(0, deserializedStats.directoryFileTransferTrackerStats.transferredBytesSucceeded); + assertEquals(0, deserializedStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes); + assertEquals(0, deserializedStats.directoryFileTransferTrackerStats.lastTransferTimestampMs); + } + } + } + + public void testSerializationForReplicaShard() throws Exception { + RemoteSegmentTransferTracker.Stats replicaShardStats = createStatsForNewReplica(shardId); + RemoteStoreStats stats = new RemoteStoreStats(replicaShardStats, createShardRouting(shardId, false)); try (BytesStreamOutput out = new BytesStreamOutput()) { stats.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - RemoteStoreStats deserializedStats = new RemoteStoreStats(in); - assertEquals(deserializedStats.getStats().shardId.toString(), stats.getStats().shardId.toString()); - assertEquals(deserializedStats.getStats().refreshTimeLagMs, stats.getStats().refreshTimeLagMs); - assertEquals(deserializedStats.getStats().localRefreshNumber, stats.getStats().localRefreshNumber); - assertEquals(deserializedStats.getStats().remoteRefreshNumber, stats.getStats().remoteRefreshNumber); - assertEquals(deserializedStats.getStats().uploadBytesStarted, stats.getStats().uploadBytesStarted); - assertEquals(deserializedStats.getStats().uploadBytesSucceeded, stats.getStats().uploadBytesSucceeded); - assertEquals(deserializedStats.getStats().uploadBytesFailed, stats.getStats().uploadBytesFailed); - assertEquals(deserializedStats.getStats().totalUploadsStarted, stats.getStats().totalUploadsStarted); - assertEquals(deserializedStats.getStats().totalUploadsFailed, stats.getStats().totalUploadsFailed); - assertEquals(deserializedStats.getStats().totalUploadsSucceeded, stats.getStats().totalUploadsSucceeded); - assertEquals(deserializedStats.getStats().rejectionCount, stats.getStats().rejectionCount); - assertEquals(deserializedStats.getStats().consecutiveFailuresCount, stats.getStats().consecutiveFailuresCount); - assertEquals(deserializedStats.getStats().uploadBytesMovingAverage, stats.getStats().uploadBytesMovingAverage, 0); + RemoteSegmentTransferTracker.Stats deserializedStats = new RemoteStoreStats(in).getStats(); + assertEquals(0, deserializedStats.refreshTimeLagMs); + assertEquals(0, deserializedStats.localRefreshNumber); + assertEquals(0, deserializedStats.remoteRefreshNumber); + assertEquals(0, deserializedStats.uploadBytesStarted); + assertEquals(0, deserializedStats.uploadBytesSucceeded); + assertEquals(0, deserializedStats.uploadBytesFailed); + assertEquals(0, deserializedStats.totalUploadsStarted); + assertEquals(0, deserializedStats.totalUploadsFailed); + assertEquals(0, deserializedStats.totalUploadsSucceeded); + assertEquals(0, deserializedStats.rejectionCount); + assertEquals(0, deserializedStats.consecutiveFailuresCount); + assertEquals(0, deserializedStats.bytesLag); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesStarted, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesFailed, + 
deserializedStats.directoryFileTransferTrackerStats.transferredBytesFailed + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesSucceeded, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes, + deserializedStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.lastTransferTimestampMs, + deserializedStats.directoryFileTransferTrackerStats.lastTransferTimestampMs + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage, + 0 + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesMovingAverage, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesMovingAverage, + 0 + ); + } + } + } + + public void testSerializationForRemoteStoreRestoredPrimaryShard() throws Exception { + RemoteSegmentTransferTracker.Stats primaryShardStats = createStatsForRemoteStoreRestoredPrimary(shardId); + RemoteStoreStats stats = new RemoteStoreStats(primaryShardStats, createShardRouting(shardId, true)); + try (BytesStreamOutput out = new BytesStreamOutput()) { + stats.writeTo(out); + try (StreamInput in = out.bytes().streamInput()) { + RemoteSegmentTransferTracker.Stats deserializedStats = new RemoteStoreStats(in).getStats(); + assertEquals(stats.getStats().refreshTimeLagMs, deserializedStats.refreshTimeLagMs); + assertEquals(stats.getStats().localRefreshNumber, deserializedStats.localRefreshNumber); + assertEquals(stats.getStats().remoteRefreshNumber, deserializedStats.remoteRefreshNumber); + assertEquals(stats.getStats().uploadBytesStarted, deserializedStats.uploadBytesStarted); + assertEquals(stats.getStats().uploadBytesSucceeded, deserializedStats.uploadBytesSucceeded); + assertEquals(stats.getStats().uploadBytesFailed, deserializedStats.uploadBytesFailed); + assertEquals(stats.getStats().totalUploadsStarted, deserializedStats.totalUploadsStarted); + assertEquals(stats.getStats().totalUploadsFailed, deserializedStats.totalUploadsFailed); + assertEquals(stats.getStats().totalUploadsSucceeded, deserializedStats.totalUploadsSucceeded); + assertEquals(stats.getStats().rejectionCount, deserializedStats.rejectionCount); + assertEquals(stats.getStats().consecutiveFailuresCount, deserializedStats.consecutiveFailuresCount); + assertEquals(stats.getStats().uploadBytesMovingAverage, deserializedStats.uploadBytesMovingAverage, 0); + assertEquals(stats.getStats().uploadBytesPerSecMovingAverage, deserializedStats.uploadBytesPerSecMovingAverage, 0); + assertEquals(stats.getStats().uploadTimeMovingAverage, deserializedStats.uploadTimeMovingAverage, 0); + assertEquals(stats.getStats().bytesLag, deserializedStats.bytesLag); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesStarted, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesFailed, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesFailed + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesSucceeded, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + assertEquals( + 
stats.getStats().directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes, + deserializedStats.directoryFileTransferTrackerStats.lastSuccessfulTransferInBytes + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.lastTransferTimestampMs, + deserializedStats.directoryFileTransferTrackerStats.lastTransferTimestampMs + ); + assertEquals( + stats.getStats().directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage, + 0 + ); assertEquals( - deserializedStats.getStats().uploadBytesPerSecMovingAverage, - stats.getStats().uploadBytesPerSecMovingAverage, + stats.getStats().directoryFileTransferTrackerStats.transferredBytesMovingAverage, + deserializedStats.directoryFileTransferTrackerStats.transferredBytesMovingAverage, 0 ); - assertEquals(deserializedStats.getStats().uploadTimeMovingAverage, stats.getStats().uploadTimeMovingAverage, 0); - assertEquals(deserializedStats.getStats().bytesLag, stats.getStats().bytesLag); } } } diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java index 25e44884814a5..375b1b8ed7aba 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java @@ -29,7 +29,7 @@ import org.opensearch.index.IndexService; import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.indices.IndicesService; import org.opensearch.test.FeatureFlagSetter; @@ -86,7 +86,7 @@ public void setUp() throws Exception { Collections.emptySet() ); - when(pressureService.getRemoteRefreshSegmentTracker(any())).thenReturn(mock(RemoteRefreshSegmentTracker.class)); + when(pressureService.getRemoteRefreshSegmentTracker(any())).thenReturn(mock(RemoteSegmentTransferTracker.class)); when(indicesService.indexService(INDEX)).thenReturn(indexService); when(indexService.getIndexSettings()).thenReturn(new IndexSettings(remoteStoreIndexMetadata, Settings.EMPTY)); statsAction = new TransportRemoteStoreStatsAction( @@ -108,7 +108,7 @@ public void tearDown() throws Exception { clusterService.close(); } - public void testOnlyPrimaryShards() throws Exception { + public void testAllShardCopies() throws Exception { FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); RoutingTable routingTable = RoutingTable.builder().addAsNew(remoteStoreIndexMetadata).build(); Metadata metadata = Metadata.builder().put(remoteStoreIndexMetadata, false).build(); @@ -125,7 +125,7 @@ public void testOnlyPrimaryShards() throws Exception { new String[] { INDEX.getName() } ); - assertEquals(shardsIterator.size(), 2); + assertEquals(shardsIterator.size(), 4); } public void testOnlyLocalShards() throws Exception { @@ -153,10 +153,10 @@ public void testOnlyLocalShards() throws Exception { remoteStoreStatsRequest.local(true); ShardsIterator shardsIterator = statsAction.shards(clusterService.state(), remoteStoreStatsRequest, concreteIndices); - assertEquals(shardsIterator.size(), 1); + 
assertEquals(shardsIterator.size(), 2); } - public void testOnlyRemoteStoreEnabledShards() throws Exception { + public void testOnlyRemoteStoreEnabledShardCopies() throws Exception { FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); Index NEW_INDEX = new Index("newIndex", "newUUID"); IndexMetadata indexMetadataWithoutRemoteStore = IndexMetadata.builder(NEW_INDEX.getName()) @@ -189,6 +189,6 @@ public void testOnlyRemoteStoreEnabledShards() throws Exception { new String[] { INDEX.getName() } ); - assertEquals(shardsIterator.size(), 2); + assertEquals(shardsIterator.size(), 4); } } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java index 5ccacd4048596..7b180a71f3bab 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java @@ -16,6 +16,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.Store; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -99,7 +100,7 @@ public void testValidateSegmentUploadLag() { pressureService = new RemoteRefreshSegmentPressureService(clusterService, Settings.EMPTY); pressureService.afterIndexShardCreated(indexShard); - RemoteRefreshSegmentTracker pressureTracker = pressureService.getRemoteRefreshSegmentTracker(shardId); + RemoteSegmentTransferTracker pressureTracker = pressureService.getRemoteRefreshSegmentTracker(shardId); pressureTracker.updateLocalRefreshSeqNo(6); // 1. 
time lag more than dynamic threshold @@ -152,10 +153,11 @@ public void testValidateSegmentUploadLag() { private static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) { Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled)).build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + Store store = mock(Store.class); IndexShard indexShard = mock(IndexShard.class); when(indexShard.indexSettings()).thenReturn(indexSettings); when(indexShard.shardId()).thenReturn(shardId); + when(indexShard.store()).thenReturn(store); return indexShard; } - } diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentTrackerTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java similarity index 66% rename from server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentTrackerTests.java rename to server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java index badfeb0d67c05..208cea111f2e1 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteSegmentTransferTrackerTests.java @@ -14,6 +14,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.store.DirectoryFileTransferTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -24,7 +25,7 @@ import static org.mockito.Mockito.mock; -public class RemoteRefreshSegmentTrackerTests extends OpenSearchTestCase { +public class RemoteSegmentTransferTrackerTests extends OpenSearchTestCase { private RemoteRefreshSegmentPressureSettings pressureSettings; @@ -34,7 +35,9 @@ public class RemoteRefreshSegmentTrackerTests extends OpenSearchTestCase { private ShardId shardId; - private RemoteRefreshSegmentTracker pressureTracker; + private RemoteSegmentTransferTracker pressureTracker; + + private DirectoryFileTransferTracker directoryFileTransferTracker; @Override public void setUp() throws Exception { @@ -51,6 +54,7 @@ public void setUp() throws Exception { mock(RemoteRefreshSegmentPressureService.class) ); shardId = new ShardId("index", "uuid", 0); + directoryFileTransferTracker = new DirectoryFileTransferTracker(); } @Override @@ -60,8 +64,9 @@ public void tearDown() throws Exception { } public void testGetShardId() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -70,8 +75,9 @@ public void testGetShardId() { } public void testUpdateLocalRefreshSeqNo() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -82,8 +88,9 @@ public void testUpdateLocalRefreshSeqNo() { } public void testUpdateRemoteRefreshSeqNo() { - pressureTracker = new RemoteRefreshSegmentTracker( + 
pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -94,8 +101,9 @@ public void testUpdateRemoteRefreshSeqNo() { } public void testUpdateLocalRefreshTimeMs() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -106,8 +114,9 @@ public void testUpdateLocalRefreshTimeMs() { } public void testUpdateRemoteRefreshTimeMs() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -117,9 +126,23 @@ public void testUpdateRemoteRefreshTimeMs() { assertEquals(refreshTimeMs, pressureTracker.getRemoteRefreshTimeMs()); } + public void testLastDownloadTimestampMs() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + long currentTimeInMs = System.currentTimeMillis(); + pressureTracker.getDirectoryFileTransferTracker().updateLastTransferTimestampMs(currentTimeInMs); + assertEquals(currentTimeInMs, pressureTracker.getDirectoryFileTransferTracker().getLastTransferTimestampMs()); + } + public void testComputeSeqNoLagOnUpdate() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -133,8 +156,9 @@ public void testComputeSeqNoLagOnUpdate() { } public void testComputeTimeLagOnUpdate() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -150,8 +174,9 @@ public void testComputeTimeLagOnUpdate() { } public void testAddUploadBytesStarted() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -165,8 +190,9 @@ public void testAddUploadBytesStarted() { } public void testAddUploadBytesFailed() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), 
pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -180,8 +206,9 @@ public void testAddUploadBytesFailed() { } public void testAddUploadBytesSucceeded() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -194,9 +221,58 @@ public void testAddUploadBytesSucceeded() { assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getUploadBytesSucceeded()); } + public void testAddDownloadBytesStarted() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(bytesToAdd); + assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(moreBytesToAdd); + assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesStarted()); + } + + public void testAddDownloadBytesFailed() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(bytesToAdd); + assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesFailed(moreBytesToAdd); + assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesFailed()); + } + + public void testAddDownloadBytesSucceeded() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + long bytesToAdd = randomLongBetween(1000, 1000000); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(bytesToAdd, System.currentTimeMillis()); + assertEquals(bytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded()); + long moreBytesToAdd = randomLongBetween(1000, 10000); + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(moreBytesToAdd, System.currentTimeMillis()); + assertEquals(bytesToAdd + moreBytesToAdd, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesSucceeded()); + } + public void testGetInflightUploadBytes() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, 
pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -211,8 +287,9 @@ public void testGetInflightUploadBytes() { } public void testIncrementTotalUploadsStarted() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -224,8 +301,9 @@ public void testIncrementTotalUploadsStarted() { } public void testIncrementTotalUploadsFailed() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -237,8 +315,9 @@ public void testIncrementTotalUploadsFailed() { } public void testIncrementTotalUploadSucceeded() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -250,8 +329,9 @@ public void testIncrementTotalUploadSucceeded() { } public void testGetInflightUploads() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -267,8 +347,9 @@ public void testGetInflightUploads() { } public void testIncrementRejectionCount() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -280,8 +361,9 @@ public void testIncrementRejectionCount() { } public void testGetConsecutiveFailureCount() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -295,8 +377,9 @@ public void testGetConsecutiveFailureCount() { } public void testComputeBytesLag() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -324,8 +407,9 @@ public void testComputeBytesLag() { } public void testIsUploadBytesAverageReady() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, 
pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -351,8 +435,9 @@ public void testIsUploadBytesAverageReady() { } public void testIsUploadBytesPerSecAverageReady() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -378,8 +463,9 @@ public void testIsUploadBytesPerSecAverageReady() { } public void testIsUploadTimeMsAverageReady() { - pressureTracker = new RemoteRefreshSegmentTracker( + pressureTracker = new RemoteSegmentTransferTracker( shardId, + directoryFileTransferTracker, pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -404,12 +490,68 @@ public void testIsUploadTimeMsAverageReady() { assertEquals((double) sum / 20, pressureTracker.getUploadTimeMsAverage(), 0.0d); } + public void testIsDownloadBytesAverageReady() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); + + long sum = 0; + for (int i = 1; i < 20; i++) { + pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(i); + sum += i; + assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); + assertEquals((double) sum / i, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); + } + + pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(20); + sum += 20; + assertTrue(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesAverageReady()); + assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); + + pressureTracker.getDirectoryFileTransferTracker().updateLastSuccessfulTransferSize(100); + sum = sum + 100 - 1; + assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesAverage(), 0.0d); + } + + public void testIsDownloadBytesPerSecAverageReady() { + pressureTracker = new RemoteSegmentTransferTracker( + shardId, + directoryFileTransferTracker, + pressureSettings.getUploadBytesMovingAverageWindowSize(), + pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), + pressureSettings.getUploadTimeMovingAverageWindowSize() + ); + assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady()); + + long sum = 0; + for (int i = 1; i < 20; i++) { + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(i); + sum += i; + assertFalse(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady()); + assertEquals((double) sum / i, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); + } + + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(20); + sum += 20; + 
assertTrue(pressureTracker.getDirectoryFileTransferTracker().isTransferredBytesPerSecAverageReady()); + assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); + + pressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(100); + sum = sum + 100 - 1; + assertEquals((double) sum / 20, pressureTracker.getDirectoryFileTransferTracker().getTransferredBytesPerSecAverage(), 0.0d); + } + /** - * Tests whether RemoteRefreshSegmentTracker.Stats object generated correctly from RemoteRefreshSegmentTracker. + * Tests whether the RemoteSegmentTransferTracker.Stats object is generated correctly from RemoteSegmentTransferTracker. * */ public void testStatsObjectCreation() { pressureTracker = constructTracker(); - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = pressureTracker.stats(); + RemoteSegmentTransferTracker.Stats pressureTrackerStats = pressureTracker.stats(); assertEquals(pressureTracker.getShardId(), pressureTrackerStats.shardId); assertEquals(pressureTracker.getTimeMsLag(), (int) pressureTrackerStats.refreshTimeLagMs); assertEquals(pressureTracker.getLocalRefreshSeqNo(), (int) pressureTrackerStats.localRefreshNumber); @@ -429,16 +571,16 @@ public void testStatsObjectCreation() { } /** - * Tests whether RemoteRefreshSegmentTracker.Stats object serialize and deserialize is working fine. + * Tests whether the RemoteSegmentTransferTracker.Stats object serializes and deserializes correctly. * This comes into play during internode data transfer. * */ public void testStatsObjectCreationViaStream() throws IOException { pressureTracker = constructTracker(); - RemoteRefreshSegmentTracker.Stats pressureTrackerStats = pressureTracker.stats(); + RemoteSegmentTransferTracker.Stats pressureTrackerStats = pressureTracker.stats(); try (BytesStreamOutput out = new BytesStreamOutput()) { pressureTrackerStats.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { - RemoteRefreshSegmentTracker.Stats deserializedStats = new RemoteRefreshSegmentTracker.Stats(in); + RemoteSegmentTransferTracker.Stats deserializedStats = new RemoteSegmentTransferTracker.Stats(in); assertEquals(deserializedStats.shardId, pressureTrackerStats.shardId); assertEquals((int) deserializedStats.refreshTimeLagMs, (int) pressureTrackerStats.refreshTimeLagMs); assertEquals((int) deserializedStats.localRefreshNumber, (int) pressureTrackerStats.localRefreshNumber); @@ -459,13 +601,26 @@ public void testStatsObjectCreationViaStream() throws IOException { assertEquals((int) deserializedStats.totalUploadsStarted, (int) pressureTrackerStats.totalUploadsStarted); assertEquals((int) deserializedStats.totalUploadsSucceeded, (int) pressureTrackerStats.totalUploadsSucceeded); assertEquals((int) deserializedStats.totalUploadsFailed, (int) pressureTrackerStats.totalUploadsFailed); + assertEquals( + (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesStarted, + (int) pressureTrackerStats.directoryFileTransferTrackerStats.transferredBytesStarted + ); + assertEquals( + (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesSucceeded, + (int) pressureTrackerStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + ); + assertEquals( + (int) deserializedStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage, + (int) pressureTrackerStats.directoryFileTransferTrackerStats.transferredBytesPerSecMovingAverage + ); } } } - private RemoteRefreshSegmentTracker constructTracker() { -
RemoteRefreshSegmentTracker segmentPressureTracker = new RemoteRefreshSegmentTracker( + private RemoteSegmentTransferTracker constructTracker() { + RemoteSegmentTransferTracker segmentPressureTracker = new RemoteSegmentTransferTracker( shardId, + new DirectoryFileTransferTracker(), pressureSettings.getUploadBytesMovingAverageWindowSize(), pressureSettings.getUploadBytesPerSecMovingAverageWindowSize(), pressureSettings.getUploadTimeMovingAverageWindowSize() @@ -475,6 +630,9 @@ private RemoteRefreshSegmentTracker constructTracker() { segmentPressureTracker.addUploadBytes(99); segmentPressureTracker.updateRemoteRefreshTimeMs(System.nanoTime() / 1_000_000L + randomIntBetween(10, 100)); segmentPressureTracker.incrementRejectionCount(); + segmentPressureTracker.getDirectoryFileTransferTracker().addTransferredBytesStarted(10); + segmentPressureTracker.getDirectoryFileTransferTracker().addTransferredBytesSucceeded(10, System.currentTimeMillis()); + segmentPressureTracker.getDirectoryFileTransferTracker().addTransferredBytesPerSec(5); return segmentPressureTracker; } } diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index 7c119bfbbc573..f13f89c6e067c 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -26,7 +26,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; -import org.opensearch.index.remote.RemoteRefreshSegmentTracker; +import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; @@ -255,7 +255,7 @@ public void testRefreshSuccessOnFirstAttempt() throws Exception { assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteSegmentTransferTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 0); } @@ -276,7 +276,7 @@ public void testRefreshSuccessOnSecondAttempt() throws Exception { assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteSegmentTransferTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 1); } @@ -322,11 +322,11 @@ public void testRefreshSuccessOnThirdAttempt() throws Exception { assertBusy(() -> assertEquals(0, refreshCountLatch.getCount())); assertBusy(() -> assertEquals(0, successLatch.getCount())); RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + 
RemoteSegmentTransferTracker segmentTracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); assertNoLagAndTotalUploadsFailed(segmentTracker, 2); } - private void assertNoLagAndTotalUploadsFailed(RemoteRefreshSegmentTracker segmentTracker, long totalUploadsFailed) throws Exception { + private void assertNoLagAndTotalUploadsFailed(RemoteSegmentTransferTracker segmentTracker, long totalUploadsFailed) throws Exception { assertBusy(() -> { assertEquals(0, segmentTracker.getBytesLag()); assertEquals(0, segmentTracker.getRefreshSeqNoLag()); @@ -339,7 +339,7 @@ public void testTrackerData() throws Exception { Tuple tuple = mockIndexShardWithRetryAndScheduleRefresh(1); RemoteStoreRefreshListener listener = tuple.v1(); RemoteRefreshSegmentPressureService pressureService = tuple.v2(); - RemoteRefreshSegmentTracker tracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); + RemoteSegmentTransferTracker tracker = pressureService.getRemoteRefreshSegmentTracker(indexShard.shardId()); assertNoLag(tracker); indexDocs(100, randomIntBetween(100, 200)); indexShard.refresh("test"); @@ -347,7 +347,7 @@ public void testTrackerData() throws Exception { assertBusy(() -> assertNoLag(tracker)); } - private void assertNoLag(RemoteRefreshSegmentTracker tracker) { + private void assertNoLag(RemoteSegmentTransferTracker tracker) { assertEquals(0, tracker.getRefreshSeqNoLag()); assertEquals(0, tracker.getBytesLag()); assertEquals(0, tracker.getTimeMsLag()); From 66ea4b3d736d676880b776039e0b901707cf7e7c Mon Sep 17 00:00:00 2001 From: Varun Bansal Date: Tue, 1 Aug 2023 18:54:46 +0530 Subject: [PATCH 02/24] Restrict user overrides for remote store related index settings (#8812) Signed-off-by: Varun Bansal --- CHANGELOG.md | 1 + .../index/SegmentReplicationPressureIT.java | 4 +- ...emoteStoreMockRepositoryIntegTestCase.java | 6 +- ...ateRemoteIndexClusterDefaultDocRepIT.java} | 16 +- .../remotestore/CreateRemoteIndexIT.java | 149 +++-- .../remotestore/PrimaryTermValidationIT.java | 1 + .../remotestore/RemoteIndexRecoveryIT.java | 99 +++- .../remotestore/RemoteRestoreSnapshotIT.java | 530 ++++++++++++++++++ .../RemoteStoreBaseIntegTestCase.java | 46 +- .../ReplicaToPrimaryPromotionIT.java | 6 +- .../SegmentReplicationUsingRemoteStoreIT.java | 15 +- ...tReplicationWithRemoteStorePressureIT.java | 18 +- .../RemoteStoreMultipartFileCorruptionIT.java | 52 +- .../opensearch/snapshots/CloneSnapshotIT.java | 27 +- .../snapshots/DeleteSnapshotIT.java | 64 +-- .../RemoteIndexSnapshotStatusApiIT.java | 209 +++++++ .../snapshots/RestoreSnapshotIT.java | 505 ----------------- .../snapshots/SnapshotStatusApisIT.java | 99 +--- .../cluster/metadata/IndexMetadata.java | 44 +- .../metadata/MetadataCreateIndexService.java | 56 +- .../common/settings/ClusterSettings.java | 2 +- .../common/settings/IndexScopedSettings.java | 2 +- .../opensearch/indices/IndicesService.java | 2 +- .../TransportRemoteStoreStatsActionTests.java | 3 + .../MetadataCreateIndexServiceTests.java | 237 +++----- .../opensearch/index/IndexSettingsTests.java | 41 +- ...oteRefreshSegmentPressureServiceTests.java | 6 +- .../index/seqno/ReplicationTrackerTests.java | 55 +- .../index/shard/IndexShardTests.java | 4 +- .../index/translog/RemoteFSTranslogTests.java | 2 + .../BlobStoreRepositoryHelperTests.java | 140 +++++ .../BlobStoreRepositoryRemoteIndexTests.java | 371 ++++++++++++ .../blobstore/BlobStoreRepositoryTests.java | 376 +------------ .../AbstractSnapshotIntegTestCase.java | 5 +- 34 files changed, 1731 insertions(+), 1462 
deletions(-) rename server/src/internalClusterTest/java/org/opensearch/remotestore/{CreateRemoteIndexClusterDefaultDocRep.java => CreateRemoteIndexClusterDefaultDocRepIT.java} (82%) create mode 100644 server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java create mode 100644 server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java create mode 100644 server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java create mode 100644 server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e4a26caa2b10..cf101317bf81d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -48,6 +48,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Remote Segment Store Repository setting moved from `index.remote_store.repository` to `index.remote_store.segment.repository` and `cluster.remote_store.repository` to `cluster.remote_store.segment.repository` respectively for Index and Cluster level settings ([#8719](https://github.com/opensearch-project/OpenSearch/pull/8719)) - [Remote Store] Add support to restore only unassigned shards of an index ([#8792](https://github.com/opensearch-project/OpenSearch/pull/8792)) - Replace the deprecated IndexReader APIs with new storedFields() & termVectors() ([#7792](https://github.com/opensearch-project/OpenSearch/pull/7792)) +- [Remote Store] Restrict user override for remote store index level settings ([#8812](https://github.com/opensearch-project/OpenSearch/pull/8812)) ### Deprecated diff --git a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java index 60ff82e617dbd..cf73c370cce8f 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/SegmentReplicationPressureIT.java @@ -15,6 +15,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -30,6 +31,7 @@ import java.util.Collection; import java.util.Collections; import java.util.List; +import java.util.Objects; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -260,7 +262,7 @@ public void testFailStaleReplica() throws Exception { public void testWithDocumentReplicationEnabledIndex() throws Exception { assumeTrue( "Can't create DocRep index with remote store enabled. Skipping.", - indexSettings().getAsBoolean(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, false) == false + Objects.equals(featureFlagSettings().get(FeatureFlags.REMOTE_STORE, "false"), "false") ); Settings settings = Settings.builder().put(MAX_REPLICATION_TIME_SETTING.getKey(), TimeValue.timeValueMillis(500)).build(); // Starts a primary and replica node. 
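The test diffs that follow stop setting index.remote_store.* on each index and instead start nodes with cluster-level remote store defaults through a remoteStoreClusterSettings(...) helper on RemoteStoreBaseIntegTestCase. The helper's implementation is not part of the hunks shown here; the sketch below is inferred from its call sites (one-, two-, and three-argument forms appear in these tests) and from the cluster settings that CreateRemoteIndexIT previously applied per node, so treat the overload set and defaulting behavior as assumptions rather than the patch's actual code.

    // Hedged sketch: assumed shape of the helper on RemoteStoreBaseIntegTestCase, not shown in this patch.
    public static Settings remoteStoreClusterSettings(String segmentRepoName) {
        // Assumption: the single-repo form reuses one repository for both segments and translog.
        return remoteStoreClusterSettings(segmentRepoName, segmentRepoName);
    }

    public static Settings remoteStoreClusterSettings(String segmentRepoName, String translogRepoName) {
        // Cluster-level defaults replace the index-level settings that this change makes private.
        return Settings.builder()
            .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT)
            .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true)
            .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), segmentRepoName)
            .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), translogRepoName)
            .build();
    }
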
diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java index 709c027c3f347..5bfbbc11da77d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/AbstractRemoteStoreMockRepositoryIntegTestCase.java @@ -29,6 +29,7 @@ import java.util.Set; import java.util.stream.Collectors; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public abstract class AbstractRemoteStoreMockRepositoryIntegTestCase extends AbstractSnapshotIntegTestCase { @@ -46,7 +47,7 @@ protected Settings featureFlagSettings() { public void setup() { FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); FeatureFlagSetter.set(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL); - internalCluster().startClusterManagerOnlyNode(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REPOSITORY_NAME, TRANSLOG_REPOSITORY_NAME)); } @Override @@ -62,9 +63,6 @@ protected Settings remoteStoreIndexSettings(int numberOfReplicas) { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, TRANSLOG_REPOSITORY_NAME) .build(); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java similarity index 82% rename from server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java rename to server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java index 2abf4fc50ec69..32c02332e05b2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRep.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexClusterDefaultDocRepIT.java @@ -16,12 +16,16 @@ import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; +import java.util.Locale; + import static org.hamcrest.Matchers.containsString; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) -public class CreateRemoteIndexClusterDefaultDocRep extends CreateRemoteIndexIT { +public class CreateRemoteIndexClusterDefaultDocRepIT extends CreateRemoteIndexIT { @Override protected Settings nodeSettings(int nodeOriginal) { @@ -44,7 +48,15 @@ public void testDefaultRemoteStoreNoUserOverride() throws Exception { ); assertThat( exc.getMessage(), - 
containsString("Cannot enable [index.remote_store.enabled] when [index.replication.type] is DOCUMENT") + containsString( + String.format( + Locale.ROOT, + "To enable %s, %s should be set to %s", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REPLICATION_TYPE, + ReplicationType.SEGMENT + ) + ) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java index e52a12f66cff4..7683651e902b2 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/CreateRemoteIndexIT.java @@ -26,14 +26,10 @@ import static org.hamcrest.Matchers.containsString; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; -import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_SETTING_REPLICATION_TYPE; +import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) @@ -50,10 +46,7 @@ public void teardown() { protected Settings nodeSettings(int nodeOriginal) { Settings settings = super.nodeSettings(nodeOriginal); Settings.Builder builder = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") + .put(remoteStoreClusterSettings("my-segment-repo-1", "my-translog-repo-1")) .put(settings); return builder.build(); } @@ -111,19 +104,20 @@ public void testRemoteStoreDisabledByUser() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(SETTING_REMOTE_STORE_ENABLED, false) .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "false", - null, - null, - client().settings().get(CLUSTER_SETTING_REPLICATION_TYPE), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> 
client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "Validation Failed: 1: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED + ) + ) ); } @@ -161,8 +155,8 @@ public void testRemoteStoreEnabledByUserWithoutRemoteRepoIllegalArgumentExceptio containsString( String.format( Locale.ROOT, - "Setting %s should be provided with non-empty repository ID", - SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + "Validation Failed: 1: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED ) ) ); @@ -174,19 +168,21 @@ public void testReplicationTypeDocumentByUser() throws Exception { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - null, - null, - null, - ReplicationType.DOCUMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "To enable %s, %s should be set to %s", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REPLICATION_TYPE, + ReplicationType.SEGMENT + ) + ) ); } @@ -213,7 +209,7 @@ public void testRemoteStoreSegmentRepoWithoutRemoteEnabledAndSegmentReplicationI ); } - public void testRemoteStoreEnabledByUserWithRemoteRepo() throws Exception { + public void testRemoteStoreEnabledByUserWithRemoteRepoIllegalArgumentException() throws Exception { Settings settings = Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) @@ -222,19 +218,20 @@ public void testRemoteStoreEnabledByUserWithRemoteRepo() throws Exception { .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo") .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-custom-repo", - "my-translog-repo-1", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() + ); + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "Validation Failed: 1: private index setting [%s] can not be set explicitly;2: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY + ) + ) ); } @@ -270,41 +267,21 @@ public void testRemoteStoreOverrideTranslogRepoCorrectly() throws Exception { .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo") 
.put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "my-custom-repo") .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-custom-repo", - "my-custom-repo", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get() ); - } - - public void testRemoteStoreOverrideReplicationTypeIndexSettings() throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT) - .build(); - assertAcked(client().admin().indices().prepareCreate("test-idx-1").setSettings(settings).get()); - GetIndexResponse getIndexResponse = client().admin() - .indices() - .getIndex(new GetIndexRequest().indices("test-idx-1").includeDefaults(true)) - .get(); - Settings indexSettings = getIndexResponse.settings().get("test-idx-1"); - verifyRemoteStoreIndexSettings( - indexSettings, - null, - null, - null, - ReplicationType.DOCUMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL + assertThat( + exc.getMessage(), + containsString( + String.format( + Locale.ROOT, + "Validation Failed: 1: private index setting [%s] can not be set explicitly;2: private index setting [%s] can not be set explicitly;3: private index setting [%s] can not be set explicitly;", + SETTING_REMOTE_STORE_ENABLED, + SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, + SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY + ) + ) ); } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java index 9d63c9b528314..ee32c880257d1 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/PrimaryTermValidationIT.java @@ -61,6 +61,7 @@ public void testPrimaryTermValidation() throws Exception { .put(FollowersChecker.FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), "1s") .put(FollowersChecker.FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), 1) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_2_NAME, true)) .build(); internalCluster().startClusterManagerOnlyNode(clusterSettings); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java index 4f7961cec22d7..d92ac83544a25 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexRecoveryIT.java @@ -23,15 +23,21 @@ import java.nio.file.Path; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; 
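The CreateRemoteIndexIT changes above all assert the same new contract: index.remote_store.enabled, index.remote_store.segment.repository, and index.remote_store.translog.repository are now private index settings, so a create-index request that supplies any of them fails validation instead of being honored. A minimal sketch of what a caller sees after this change, in the test-client style used above ("demo-idx" and the exact settings combination are illustrative, not taken from the patch):

    // Illustrative only; the expected message shape comes from the CreateRemoteIndexIT assertions above.
    Settings userSettings = Settings.builder()
        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
        .put(SETTING_REMOTE_STORE_ENABLED, true) // now rejected as a private setting
        .build();
    IllegalArgumentException exc = expectThrows(
        IllegalArgumentException.class,
        () -> client().admin().indices().prepareCreate("demo-idx").setSettings(userSettings).get()
    );
    // exc.getMessage() contains:
    // "Validation Failed: 1: private index setting [index.remote_store.enabled] can not be set explicitly;"
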
@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class RemoteIndexRecoveryIT extends IndexRecoveryIT { - protected static final String REPOSITORY_NAME = "test-remore-store-repo"; + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; protected Path absolutePath; + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + } + @Override protected Settings featureFlagSettings() { return Settings.builder() @@ -57,9 +63,6 @@ public Settings indexSettings() { return Settings.builder() .put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); @@ -81,7 +84,91 @@ protected int numDocs() { } @Override - protected boolean shouldAssertOngoingRecoveryInRerouteRecovery() { - return false; + public void testUsesFileBasedRecoveryIfRetentionLeaseMissing() { + // Retention lease based tests are not applicable for remote store + } + + @Override + public void testPeerRecoveryTrimsLocalTranslog() { + // Peer recovery use case is not valid for remote enabled indices + } + + @Override + public void testHistoryRetention() { + // History retention not applicable for remote store + } + + @Override + public void testUsesFileBasedRecoveryIfOperationsBasedRecoveryWouldBeUnreasonable() { + // History retention not applicable for remote store + } + + @Override + public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() { + // History retention not applicable for remote store + } + + @Override + public void testRecoverLocallyUpToGlobalCheckpoint() { + // History retention not applicable for remote store + } + + @Override + public void testCancelNewShardRecoveryAndUsesExistingShardCopy() { + // History retention not applicable for remote store + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testReservesBytesDuringPeerRecoveryPhaseOne() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testAllocateEmptyPrimaryResetsGlobalCheckpoint() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDoesNotCopyOperationsInSafeCommit() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testRepeatedRecovery() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDisconnectsWhileRecovering() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testTransientErrorsDuringRecoveryAreRetried() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDoNotInfinitelyWaitForMapping() { + + } + + @AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testDisconnectsDuringRecovery() { + + } + +
@AwaitsFix(bugUrl = "https://github.com/opensearch-project/OpenSearch/issues/8919") + @Override + public void testReplicaRecovery() { + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java new file mode 100644 index 0000000000000..8c33bf36ad45d --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteRestoreSnapshotIT.java @@ -0,0 +1,530 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.remotestore; + +import org.junit.After; +import org.junit.Before; +import org.opensearch.action.DocWriteResponse; +import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.opensearch.action.admin.indices.get.GetIndexRequest; +import org.opensearch.action.admin.indices.get.GetIndexResponse; +import org.opensearch.action.delete.DeleteResponse; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.client.Client; +import org.opensearch.client.Requests; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.rest.RestStatus; +import org.opensearch.index.IndexSettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.snapshots.AbstractSnapshotIntegTestCase; +import org.opensearch.snapshots.SnapshotState; +import org.opensearch.test.InternalTestCluster; + +import java.io.IOException; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.concurrent.ExecutionException; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +public class RemoteRestoreSnapshotIT extends AbstractSnapshotIntegTestCase { + private static final String BASE_REMOTE_REPO = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; + private Path remoteRepoPath; + + @Before + public void setup() { + remoteRepoPath = randomRepoPath().toAbsolutePath(); + createRepository(BASE_REMOTE_REPO, "fs", remoteRepoPath); + } + + @After + public void teardown() { + assertAcked(clusterAdmin().prepareDeleteRepository(BASE_REMOTE_REPO)); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(FeatureFlags.REMOTE_STORE, "true") + .put(remoteStoreClusterSettings(BASE_REMOTE_REPO)) + .build(); + } + + private Settings.Builder getIndexSettings(int numOfShards, int numOfReplicas) { + Settings.Builder settingsBuilder = Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) + 
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s"); + return settingsBuilder; + } + + private void indexDocuments(Client client, String indexName, int numOfDocs) { + indexDocuments(client, indexName, 0, numOfDocs); + } + + private void indexDocuments(Client client, String indexName, int fromId, int toId) { + for (int i = fromId; i < toId; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + client.admin().indices().prepareFlush(indexName).get(); + } + + private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { + for (int i = 0; i < numOfDocs; i++) { + String id = Integer.toString(i); + logger.info("checking for index " + indexName + " with docId" + id); + assertTrue("doc with id" + id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists()); + } + } + + public void testRestoreOperationsShallowCopyEnabled() throws IOException, ExecutionException, InterruptedException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndexName1 = indexName1 + "-restored"; + String restoredIndexName1Seg = indexName1 + "-restored-seg"; + String restoredIndexName1Doc = indexName1 + "-restored-doc"; + String restoredIndexName2 = indexName2 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + CreateSnapshotResponse createSnapshotResponse2 = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse2.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + ); + 
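+        // Note: updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)) above
+        // re-registers the repository with shallow copy disabled, so snapshot1 is a shallow-copy snapshot
+        // while snapshot2 exercises the regular full-copy snapshot path for the same indices.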
assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); + ensureGreen(indexName1); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1, restoredIndexName2); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureRed(restoredIndexName1); + // Re-initialize client to make sure we are not using client from stopped node. + client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), + PlainActionFuture.newFuture() + ); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + + // restore index as seg rep enabled with remote store and remote translog disabled + RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Seg) + .get(); + assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Seg); + + GetIndexResponse getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); + assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); + 
ensureGreen(restoredIndexName1Seg); + assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); + + // restore index as doc rep based from shallow copy snapshot + RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REPLICATION_TYPE) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1Doc) + .get(); + assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1Doc); + + getIndexResponse = client.admin() + .indices() + .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) + .get(); + indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); + assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); + assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); + assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1Doc); + assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); + } + + public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String snapshotName1 = "test-restore-snapshot1"; + String snapshotName2 = "test-restore-snapshot2"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + logger.info("Snapshot Path [{}]", absolutePath1); + String restoredIndexName2 = indexName2 + "-restored"; + + boolean enableShallowCopy = randomBoolean(); + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, enableShallowCopy)); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + CreateSnapshotResponse createSnapshotResponse2 = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(true) + 
.setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse2.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); + assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); + ensureGreen(indexName1); + + assertAcked(client().admin().indices().prepareClose(indexName1)); + + RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndices(indexName1) + .get(); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) + .setWaitForCompletion(false) + .setIndices(indexName2) + .setRenamePattern(indexName2) + .setRenameReplacement(restoredIndexName2) + .get(); + assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); + assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); + ensureGreen(indexName1, restoredIndexName2); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); + assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + ensureRed(indexName1); + // Re-initialize client to make sure we are not using client from stopped node. 
+ client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(indexName1)); + client.admin() + .cluster() + .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(true), PlainActionFuture.newFuture()); + ensureYellowAndNoInitializingShards(indexName1); + ensureGreen(indexName1); + assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); + // indexing some new docs and validating + indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(indexName1); + assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); + } + + public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { + String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); + String primary = internalCluster().startDataOnlyNode(); + String indexName1 = "testindex1"; + String indexName2 = "testindex2"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath3 = randomRepoPath().toAbsolutePath(); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); + createRepository(remoteStoreRepo2Name, "fs", absolutePath3); + + Client client = client(); + Settings indexSettings = getIndexSettings(1, 0).build(); + createIndex(indexName1, indexSettings); + + Settings indexSettings2 = getIndexSettings(1, 0).build(); + createIndex(indexName2, indexSettings2); + + final int numDocsInIndex1 = 5; + final int numDocsInIndex2 = 6; + indexDocuments(client, indexName1, numDocsInIndex1); + indexDocuments(client, indexName2, numDocsInIndex2); + ensureGreen(indexName1, indexName2); + + internalCluster().startDataOnlyNode(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1, indexName2) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + Settings remoteStoreIndexSettings = Settings.builder() + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) + .build(); + // restore index as a remote store index with different remote store repo + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(false) + .setIndexSettings(remoteStoreIndexSettings) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); + + // deleting data for restoredIndexName1 and restoring from remote store. + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); + // Re-initialize client to make sure we are not using client from stopped node. 
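+ // restoredIndexName1 was restored with remoteStoreRepo2Name as its segment repository, so this remote store restore should read shard data from the second repository rather than from the snapshot repository.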
+ client = client(clusterManagerNode); + assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); + client.admin() + .cluster() + .restoreRemoteStore( + new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), + PlainActionFuture.newFuture() + ); + ensureYellowAndNoInitializingShards(restoredIndexName1); + ensureGreen(restoredIndexName1); + // indexing some new docs and validating + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + + public void testRestoreShallowSnapshotRepositoryOverriden() throws ExecutionException, InterruptedException { + String indexName1 = "testindex1"; + String snapshotRepoName = "test-restore-snapshot-repo"; + String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; + String snapshotName1 = "test-restore-snapshot1"; + Path absolutePath1 = randomRepoPath().toAbsolutePath(); + Path absolutePath2 = randomRepoPath().toAbsolutePath(); + String[] pathTokens = absolutePath1.toString().split("/"); + String basePath = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location = PathUtils.get(String.join("/", pathTokens)); + pathTokens = absolutePath2.toString().split("/"); + String basePath2 = pathTokens[pathTokens.length - 1]; + Arrays.copyOf(pathTokens, pathTokens.length - 1); + Path location2 = PathUtils.get(String.join("/", pathTokens)); + logger.info("Path 1 [{}]", absolutePath1); + logger.info("Path 2 [{}]", absolutePath2); + String restoredIndexName1 = indexName1 + "-restored"; + + createRepository(snapshotRepoName, "fs", getRepositorySettings(location, basePath, true)); + + Client client = client(); + Settings indexSettings = Settings.builder() + .put(super.indexSettings()) + .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build(); + createIndex(indexName1, indexSettings); + + int numDocsInIndex1 = randomIntBetween(2, 5); + indexDocuments(client, indexName1, numDocsInIndex1); + + ensureGreen(indexName1); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); + + createRepository(BASE_REMOTE_REPO, "fs", absolutePath2); + + RestoreSnapshotResponse restoreSnapshotResponse = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .get(); + + assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); + + ensureRed(restoredIndexName1); + + client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); + 
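+ // Registering the original remote store location (remoteRepoPath, assumed to be initialized elsewhere in this class) under a new repository name and passing it via setSourceRemoteStoreRepository keeps the shard data reachable even though BASE_REMOTE_REPO was re-created over a different path.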
createRepository(remoteStoreRepoNameUpdated, "fs", remoteRepoPath); + RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() + .cluster() + .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) + .setWaitForCompletion(true) + .setIndices(indexName1) + .setRenamePattern(indexName1) + .setRenameReplacement(restoredIndexName1) + .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) + .get(); + + assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); + + // indexing some new docs and validating + indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); + ensureGreen(restoredIndexName1); + assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); + } + +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 2887fbc56106c..4a85ff46d9025 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -29,11 +29,15 @@ import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; public class RemoteStoreBaseIntegTestCase extends OpenSearchIntegTestCase { - protected static final String REPOSITORY_NAME = "test-remore-store-repo"; - protected static final String REPOSITORY_2_NAME = "test-remore-store-repo-2"; + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + protected static final String REPOSITORY_2_NAME = "test-remote-store-repo-2"; protected static final int SHARD_COUNT = 1; protected static final int REPLICA_COUNT = 1; protected Path absolutePath; @@ -51,6 +55,14 @@ protected boolean addMockInternalEngine() { return false; } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_2_NAME, true)) + .build(); + } + @Override protected Settings featureFlagSettings() { return Settings.builder() @@ -64,21 +76,41 @@ public Settings indexSettings() { return defaultIndexSettings(); } - IndexResponse indexSingleDoc(String indexName) { + protected IndexResponse indexSingleDoc(String indexName) { return client().prepareIndex(indexName) .setId(UUIDs.randomBase64UUID()) .setSource(documentKeys.get(randomIntBetween(0, documentKeys.size() - 1)), randomAlphaOfLength(5)) .get(); } + public static Settings remoteStoreClusterSettings(String segmentRepoName) { + return remoteStoreClusterSettings(segmentRepoName, segmentRepoName); + } + + public static Settings remoteStoreClusterSettings( + String segmentRepoName, + String translogRepoName, + boolean randomizeSameRepoForRSSAndRTS + ) { + return remoteStoreClusterSettings( + segmentRepoName, + randomizeSameRepoForRSSAndRTS ? (randomBoolean() ? 
translogRepoName : segmentRepoName) : translogRepoName + ); + } + + public static Settings remoteStoreClusterSettings(String segmentRepoName, String translogRepoName) { + return Settings.builder() + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), segmentRepoName) + .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), translogRepoName) + .build(); + } + private Settings defaultIndexSettings() { - boolean sameRepoForRSSAndRTS = randomBoolean(); return Settings.builder() .put(super.indexSettings()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, sameRepoForRSSAndRTS ? REPOSITORY_NAME : REPOSITORY_2_NAME) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT) .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index 6764c50175e61..549b4985894a7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -39,11 +39,7 @@ public void setup() { @Override public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shard_count) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) - .build(); + return Settings.builder().put(super.indexSettings()).put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shard_count).build(); } public void testPromoteReplicaToPrimary() throws Exception { diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java index f298fac7c894e..6f76c21cc0411 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationUsingRemoteStoreIT.java @@ -10,7 +10,6 @@ import org.junit.After; import org.junit.Before; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.indices.replication.SegmentReplicationIT; @@ -18,6 +17,7 @@ import java.nio.file.Path; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** @@ -32,13 +32,12 @@ public class SegmentReplicationUsingRemoteStoreIT extends SegmentReplicationIT { private static final String REPOSITORY_NAME = "test-remote-store-repo"; @Override - public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - 
.put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) - .build(); + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + } + + protected boolean segmentReplicationWithRemoteEnabled() { + return true; } @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java index 0b64680033d84..38db7a7c7269e 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/SegmentReplicationWithRemoteStorePressureIT.java @@ -10,15 +10,14 @@ import org.junit.After; import org.junit.Before; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.SegmentReplicationPressureIT; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; /** @@ -32,14 +31,8 @@ public class SegmentReplicationWithRemoteStorePressureIT extends SegmentReplicat private static final String REPOSITORY_NAME = "test-remote-store-repo"; @Override - public Settings indexSettings() { - return Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .build(); + protected boolean segmentReplicationWithRemoteEnabled() { + return true; } @Override @@ -51,6 +44,11 @@ protected Settings featureFlagSettings() { .build(); } + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder().put(super.nodeSettings(nodeOrdinal)).put(remoteStoreClusterSettings(REPOSITORY_NAME)).build(); + } + @Before public void setup() { internalCluster().startClusterManagerOnlyNode(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java index 529e84d281476..b801c28983890 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/multipart/RemoteStoreMultipartFileCorruptionIT.java @@ -8,31 +8,22 @@ package org.opensearch.remotestore.multipart; -import org.junit.After; import org.junit.Before; -import org.opensearch.action.index.IndexResponse; import org.opensearch.action.support.IndicesOptions; import org.opensearch.cluster.metadata.IndexMetadata; -import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexModule; import org.opensearch.indices.replication.common.ReplicationType; import 
org.opensearch.plugins.Plugin; -import org.opensearch.remotestore.multipart.mocks.MockFsRepository; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.remotestore.multipart.mocks.MockFsRepositoryPlugin; -import org.opensearch.test.OpenSearchIntegTestCase; -import java.nio.file.Path; import java.util.Collection; import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; +public class RemoteStoreMultipartFileCorruptionIT extends RemoteStoreBaseIntegTestCase { -public class RemoteStoreMultipartFileCorruptionIT extends OpenSearchIntegTestCase { - - protected static final String REPOSITORY_NAME = "test-remore-store-repo"; private static final String INDEX_NAME = "remote-store-test-idx-1"; @Override @@ -40,34 +31,9 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { return Stream.concat(super.nodePlugins().stream(), Stream.of(MockFsRepositoryPlugin.class)).collect(Collectors.toList()); } - @Override - protected Settings featureFlagSettings() { - return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); - } - @Before public void setup() { - internalCluster().startClusterManagerOnlyNode(); - Path absolutePath = randomRepoPath().toAbsolutePath(); - putRepository(absolutePath); - } - - protected void putRepository(Path path) { - assertAcked( - clusterAdmin().preparePutRepository(REPOSITORY_NAME) - .setType(MockFsRepositoryPlugin.TYPE) - .setSettings( - Settings.builder() - .put("location", path) - // custom setting for MockFsRepositoryPlugin - .put(MockFsRepository.TRIGGER_DATA_INTEGRITY_FAILURE.getKey(), true) - ) - ); - } - - @After - public void teardown() { - assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_NAME)); + setupRepo(); } protected Settings remoteStoreIndexSettings() { @@ -78,26 +44,16 @@ protected Settings remoteStoreIndexSettings() { .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); } - private IndexResponse indexSingleDoc() { - return client().prepareIndex(INDEX_NAME) - .setId(UUIDs.randomBase64UUID()) - .setSource(randomAlphaOfLength(5), randomAlphaOfLength(5)) - .get(); - } - public void testLocalFileCorruptionDuringUpload() { internalCluster().startDataOnlyNodes(1); createIndex(INDEX_NAME, remoteStoreIndexSettings()); ensureYellowAndNoInitializingShards(INDEX_NAME); ensureGreen(INDEX_NAME); - indexSingleDoc(); + indexSingleDoc(INDEX_NAME); client().admin() .indices() diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java index 3de982f89ac80..d19cb513ed38d 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/CloneSnapshotIT.java @@ -54,6 +54,7 @@ import java.util.Collection; import java.util.List; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@
-162,7 +163,8 @@ public void testCloneSnapshotIndex() throws Exception { public void testCloneShallowSnapshotIndex() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); internalCluster().startDataOnlyNode(); final String snapshotRepoName = "snapshot-repo-name"; @@ -174,14 +176,13 @@ public void testCloneShallowSnapshotIndex() throws Exception { createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -209,7 +210,10 @@ public void testCloneShallowSnapshotIndex() throws Exception { public void testShallowCloneNameAvailability() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(LARGE_SNAPSHOT_POOL_SETTINGS); + final String remoteStoreRepoName = "remote-store-repo-name"; + internalCluster().startClusterManagerOnlyNode( + Settings.builder().put(LARGE_SNAPSHOT_POOL_SETTINGS).put(remoteStoreClusterSettings(remoteStoreRepoName)).build() + ); internalCluster().startDataOnlyNode(); final String shallowSnapshotRepoName = "shallow-snapshot-repo-name"; @@ -217,14 +221,13 @@ public void testShallowCloneNameAvailability() throws Exception { createRepository(shallowSnapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(shallowSnapshotRepoPath)); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -244,7 +247,8 @@ public void testShallowCloneNameAvailability() throws Exception { public void testCloneAfterRepoShallowSettingEnabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); internalCluster().startDataOnlyNode(); final String 
snapshotRepoName = "snapshot-repo-name"; @@ -252,14 +256,13 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { createRepository(snapshotRepoName, "fs", snapshotRepoPath); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -281,7 +284,8 @@ public void testCloneAfterRepoShallowSettingEnabled() throws Exception { public void testCloneAfterRepoShallowSettingDisabled() throws Exception { disableRepoConsistencyCheck("This test uses remote store repository"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + final String remoteStoreRepoName = "remote-store-repo-name"; + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(remoteStoreRepoName)); internalCluster().startDataOnlyNode(); final String snapshotRepoName = "snapshot-repo-name"; @@ -289,14 +293,13 @@ public void testCloneAfterRepoShallowSettingDisabled() throws Exception { createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index 2688449294f3d..b12fbdd2a9bd7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -25,15 +25,18 @@ import java.util.stream.Stream; import static org.hamcrest.Matchers.is; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) public class DeleteSnapshotIT extends AbstractSnapshotIntegTestCase { + private static final String REMOTE_REPO_NAME = "remote-store-repo-name"; + public void testDeleteSnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + 
internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); internalCluster().startDataOnlyNode(); final String snapshotRepoName = "snapshot-repo-name"; @@ -41,20 +44,19 @@ public void testDeleteSnapshot() throws Exception { createRepository(snapshotRepoName, "fs", snapshotRepoPath); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); final String snapshot = "snapshot"; createFullSnapshot(snapshotRepoName, snapshot); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 1); assertAcked(startDeleteSnapshot(snapshotRepoName, snapshot).get()); @@ -64,32 +66,31 @@ public void testDeleteSnapshot() throws Exception { public void testDeleteShallowCopySnapshot() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); internalCluster().startDataOnlyNode(); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); final String indexName = "index-1"; createIndexWithRandomDocs(indexName, randomIntBetween(5, 10)); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); final String shallowSnapshot = "shallow-snapshot"; createFullSnapshot(snapshotRepoName, shallowSnapshot); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 1); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 1); assertAcked(startDeleteSnapshot(snapshotRepoName, shallowSnapshot).get()); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 0); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); } // 
Deleting multiple shallow copy snapshots as part of single delete call with repo having only shallow copy snapshots. @@ -97,23 +98,22 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); internalCluster().startDataOnlyNode(); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); + final Path remoteStoreRepoPath = randomRepoPath(); + createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); + final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); final String testIndex = "index-test"; createIndexWithContent(testIndex); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -122,7 +122,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { List<String> shallowCopySnapshots = createNSnapshots(snapshotRepoName, totalShallowCopySnapshotsCount); List<String> snapshotsToBeDeleted = shallowCopySnapshots.subList(0, randomIntBetween(2, totalShallowCopySnapshotsCount)); int tobeDeletedSnapshotsCount = snapshotsToBeDeleted.size(); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalShallowCopySnapshotsCount); // Deleting subset of shallow copy snapshots assertAcked( @@ -132,7 +132,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase1() throws Exception { .get() ); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalShallowCopySnapshotsCount - tobeDeletedSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount - tobeDeletedSnapshotsCount); } @@ -144,7 +144,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); final String dataNode = internalCluster().startDataOnlyNode(); ensureStableCluster(2); final String clusterManagerNode = internalCluster().getClusterManagerName(); @@ -156,11 +156,10 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws
Exception { createIndexWithContent(testIndex); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -201,7 +200,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { int totalSnapshotsCount = totalFullCopySnapshotsCount + totalShallowCopySnapshotsCount; - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); // Deleting subset of shallow copy snapshots assertAcked( @@ -213,7 +212,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { totalSnapshotsCount -= tobeDeletedShallowCopySnapshotsCount; totalShallowCopySnapshotsCount -= tobeDeletedShallowCopySnapshotsCount; assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); // Deleting subset of full copy snapshots assertAcked( @@ -224,7 +223,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase2() throws Exception { ); totalSnapshotsCount -= tobeDeletedFullCopySnapshotsCount; assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); } // Deleting subset of shallow and full copy snapshots as part of single delete call and then deleting all snapshots in the repo. 
@@ -233,7 +232,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { disableRepoConsistencyCheck("Remote store repository is being used in the test"); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - internalCluster().startClusterManagerOnlyNode(); + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); internalCluster().startDataOnlyNode(); final Client clusterManagerClient = internalCluster().clusterManagerClient(); ensureStableCluster(2); @@ -245,11 +244,10 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { createIndexWithContent(testIndex); final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); @@ -268,7 +266,7 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { int totalSnapshotsCount = totalFullCopySnapshotsCount + totalShallowCopySnapshotsCount; - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); // Deleting subset of shallow copy snapshots and full copy snapshots assertAcked( @@ -283,12 +281,12 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { totalSnapshotsCount -= (tobeDeletedShallowCopySnapshotsCount + tobeDeletedFullCopySnapshotsCount); totalShallowCopySnapshotsCount -= tobeDeletedShallowCopySnapshotsCount; assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == totalSnapshotsCount); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == totalShallowCopySnapshotsCount); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == totalShallowCopySnapshotsCount); // Deleting all the remaining snapshots assertAcked(clusterManagerClient.admin().cluster().prepareDeleteSnapshot(snapshotRepoName, "*").get()); assert (getRepositoryData(snapshotRepoName).getSnapshotIds().size() == 0); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 0); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); } private List<String> createNSnapshots(String repoName, int count) { diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java new file mode 100644 index 0000000000000..b6a5188c99335 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -0,0 +1,209 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under
the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.snapshots; + +import org.opensearch.action.ActionFuture; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStage; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotIndexShardStatus; +import org.opensearch.action.admin.cluster.snapshots.status.SnapshotStatus; +import org.opensearch.cluster.SnapshotsInProgress; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.threadpool.ThreadPool; + +import java.nio.file.Path; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; + +public class RemoteIndexSnapshotStatusApiIT extends AbstractSnapshotIntegTestCase { + + @Override + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order + .put(FeatureFlags.REMOTE_STORE, "true") + .put(remoteStoreClusterSettings("remote-store-repo-name")) + .build(); + } + + public void testStatusAPICallForShallowCopySnapshot() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used for the test"); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + final String snapshotRepoName = "snapshot-repo-name"; + createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + final String snapshot = "snapshot"; + createFullSnapshot(snapshotRepoName, snapshot); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + + final SnapshotStatus snapshotStatus = 
getSnapshotStatus(snapshotRepoName, snapshot); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + + // Validating that the incremental file count and incremental file size is zero for shallow copy + final SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); + assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); + assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), greaterThan(0)); + assertThat(shallowSnapshotShardState.getStats().getTotalSize(), greaterThan(0L)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), is(0)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), is(0L)); + } + + public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used for the test"); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + final String snapshotRepoName = "snapshot-repo-name"; + createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + createFullSnapshot(snapshotRepoName, "test-snap-1"); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); + + SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-1"); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + + SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); + assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); + final int totalFileCount = shallowSnapshotShardState.getStats().getTotalFileCount(); + final long totalSize = shallowSnapshotShardState.getStats().getTotalSize(); + final int incrementalFileCount = shallowSnapshotShardState.getStats().getIncrementalFileCount(); + final long incrementalSize = shallowSnapshotShardState.getStats().getIncrementalSize(); + + createFullSnapshot(snapshotRepoName, "test-snap-2"); + assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 2); + + snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-2"); + assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); + shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); + assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), equalTo(totalFileCount)); + assertThat(shallowSnapshotShardState.getStats().getTotalSize(), equalTo(totalSize)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), equalTo(incrementalFileCount)); + assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), equalTo(incrementalSize)); + } + + public void testStatusAPICallInProgressShallowSnapshot() throws Exception { 
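+ // The "mock" snapshot repository is created with block_on_data so data nodes block mid-snapshot, keeping the snapshot in STARTED state long enough for the status API to observe it before unblockAllDataNodes lets it finish.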
+ disableRepoConsistencyCheck("Remote store repository is being used for the test"); + internalCluster().startClusterManagerOnlyNode(); + internalCluster().startDataOnlyNode(); + + final String snapshotRepoName = "snapshot-repo-name"; + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy().put("block_on_data", true)); + + final Path remoteStoreRepoPath = randomRepoPath(); + final String remoteStoreRepoName = "remote-store-repo-name"; + createRepository(remoteStoreRepoName, "mock", remoteStoreRepoPath); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); + } + refresh(); + + logger.info("--> snapshot"); + ActionFuture<CreateSnapshotResponse> createSnapshotResponseActionFuture = startFullSnapshot(snapshotRepoName, "test-snap"); + + logger.info("--> wait for data nodes to get blocked"); + awaitNumberOfSnapshotsInProgress(1); + assertEquals( + SnapshotsInProgress.State.STARTED, + client().admin() + .cluster() + .prepareSnapshotStatus(snapshotRepoName) + .setSnapshots("test-snap") + .get() + .getSnapshots() + .get(0) + .getState() + ); + + logger.info("--> unblock all data nodes"); + unblockAllDataNodes(snapshotRepoName); + + logger.info("--> wait for snapshot to finish"); + createSnapshotResponseActionFuture.actionGet(); + } + + private static SnapshotIndexShardStatus stateFirstShard(SnapshotStatus snapshotStatus, String indexName) { + return snapshotStatus.getIndices().get(indexName).getShards().get(0); + } + + private static SnapshotStatus getSnapshotStatus(String repoName, String snapshotName) { + try { + return client().admin().cluster().prepareSnapshotStatus(repoName).setSnapshots(snapshotName).get().getSnapshots().get(0); + } catch (SnapshotMissingException e) { + throw new AssertionError(e); + } + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java index 8677e61efeb46..0274ca874c7b9 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RestoreSnapshotIT.java @@ -33,45 +33,30 @@ package org.opensearch.snapshots; import org.opensearch.action.ActionFuture; -import org.opensearch.action.DocWriteResponse; -import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; -import org.opensearch.action.admin.indices.get.GetIndexRequest; -import org.opensearch.action.admin.indices.get.GetIndexResponse; import org.opensearch.action.admin.indices.settings.get.GetSettingsResponse; import org.opensearch.action.admin.indices.template.delete.DeleteIndexTemplateRequestBuilder; import org.opensearch.action.admin.indices.template.get.GetIndexTemplatesResponse; -import org.opensearch.action.delete.DeleteResponse; import org.opensearch.action.index.IndexRequestBuilder; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Client; -import org.opensearch.client.Requests; import
org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; -import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.core.rest.RestStatus; -import org.opensearch.index.IndexSettings; import org.opensearch.indices.InvalidIndexNameException; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.RepositoriesService; -import org.opensearch.test.InternalTestCluster; - -import java.io.IOException; import java.nio.file.Path; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Arrays; -import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -86,8 +71,6 @@ import static org.hamcrest.Matchers.nullValue; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; import static org.opensearch.index.IndexSettings.INDEX_REFRESH_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; import static org.opensearch.index.query.QueryBuilders.matchQuery; @@ -174,494 +157,6 @@ public void testParallelRestoreOperations() { assertThat(client.prepareGet(restoredIndexName2, docId2).get().isExists(), equalTo(true)); } - public void testRestoreRemoteStoreIndicesWithRemoteTranslog() throws IOException, ExecutionException, InterruptedException { - testRestoreOperationsShallowCopyEnabled(); - } - - public void testRestoreOperationsShallowCopyEnabled() throws IOException, ExecutionException, InterruptedException { - String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - String snapshotName2 = "test-restore-snapshot2"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - logger.info("Snapshot Path [{}]", absolutePath1); - logger.info("Remote Store Repo Path [{}]", absolutePath2); - String restoredIndexName1 = indexName1 + "-restored"; - String restoredIndexName1Seg = indexName1 + "-restored-seg"; - String restoredIndexName1Doc = indexName1 + "-restored-doc"; - String restoredIndexName2 = indexName2 + "-restored"; - - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, true)); - createRepository(remoteStoreRepoName, "fs", absolutePath2); - - Client client = client(); - Settings indexSettings = getIndexSettings(true, remoteStoreRepoName, 1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(false, null, 1, 0).build(); - createIndex(indexName2, 
indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - CreateSnapshotResponse createSnapshotResponse2 = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); - assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); - ensureGreen(indexName1); - - RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(false) - .setIndices(indexName2) - .setRenamePattern(indexName2) - .setRenameReplacement(restoredIndexName2) - .get(); - assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); - assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1, restoredIndexName2); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - ensureRed(restoredIndexName1); - // Re-initialize client to make sure we are not using client from stopped node. 
- client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); - client.admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), - PlainActionFuture.newFuture() - ); - ensureYellowAndNoInitializingShards(restoredIndexName1); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - - // restore index as seg rep enabled with remote store and remote translog disabled - RestoreSnapshotResponse restoreSnapshotResponse3 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Seg) - .get(); - assertEquals(restoreSnapshotResponse3.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Seg); - - GetIndexResponse getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Seg).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Seg); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertEquals(ReplicationType.SEGMENT.toString(), indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Seg, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Seg); - assertDocsPresentInIndex(client, restoredIndexName1Seg, numDocsInIndex1 + 2); - - // restore index as doc rep based from shallow copy snapshot - RestoreSnapshotResponse restoreSnapshotResponse4 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIgnoreIndexSettings(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, IndexMetadata.SETTING_REPLICATION_TYPE) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1Doc) - .get(); - assertEquals(restoreSnapshotResponse4.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1Doc); - - getIndexResponse = client.admin() - .indices() - .getIndex(new GetIndexRequest().indices(restoredIndexName1Doc).includeDefaults(true)) - .get(); - indexSettings = getIndexResponse.settings().get(restoredIndexName1Doc); - assertNull(indexSettings.get(SETTING_REMOTE_STORE_ENABLED)); - assertNull(indexSettings.get(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, null)); - assertNull(indexSettings.get(IndexMetadata.SETTING_REPLICATION_TYPE)); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1Doc, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1Doc); - assertDocsPresentInIndex(client, restoredIndexName1Doc, numDocsInIndex1 + 2); - } - - public void testRestoreInSameRemoteStoreEnabledIndex() throws IOException { - String clusterManagerNode = 
internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - String snapshotName2 = "test-restore-snapshot2"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - logger.info("Snapshot Path [{}]", absolutePath1); - logger.info("Remote Store Repo Path [{}]", absolutePath2); - String restoredIndexName2 = indexName2 + "-restored"; - - boolean enableShallowCopy = randomBoolean(); - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, enableShallowCopy)); - createRepository(remoteStoreRepoName, "fs", absolutePath2); - - Client client = client(); - Settings indexSettings = getIndexSettings(true, remoteStoreRepoName, 1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(false, null, 1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - updateRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - CreateSnapshotResponse createSnapshotResponse2 = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse2.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse2.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse2.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse2.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - DeleteResponse deleteResponse = client().prepareDelete(indexName1, "0").execute().actionGet(); - assertEquals(deleteResponse.getResult(), DocWriteResponse.Result.DELETED); - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + randomIntBetween(2, 5)); - ensureGreen(indexName1); - - assertAcked(client().admin().indices().prepareClose(indexName1)); - - RestoreSnapshotResponse restoreSnapshotResponse1 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndices(indexName1) - .get(); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName2) - .setWaitForCompletion(false) - .setIndices(indexName2) - .setRenamePattern(indexName2) - .setRenameReplacement(restoredIndexName2) - .get(); - 
assertEquals(restoreSnapshotResponse1.status(), RestStatus.ACCEPTED); - assertEquals(restoreSnapshotResponse2.status(), RestStatus.ACCEPTED); - ensureGreen(indexName1, restoredIndexName2); - assertDocsPresentInIndex(client, indexName1, numDocsInIndex1); - assertDocsPresentInIndex(client, restoredIndexName2, numDocsInIndex2); - - // deleting data for indexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - ensureRed(indexName1); - // Re-initialize client to make sure we are not using client from stopped node. - client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(indexName1)); - client.admin() - .cluster() - .restoreRemoteStore(new RestoreRemoteStoreRequest().indices(indexName1).restoreAllShards(true), PlainActionFuture.newFuture()); - ensureYellowAndNoInitializingShards(indexName1); - ensureGreen(indexName1); - assertDocsPresentInIndex(client(), indexName1, numDocsInIndex1); - // indexing some new docs and validating - indexDocuments(client, indexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(indexName1); - assertDocsPresentInIndex(client, indexName1, numDocsInIndex1 + 2); - } - - public void testRestoreShallowCopySnapshotWithDifferentRepo() throws IOException { - String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - String primary = internalCluster().startDataOnlyNode(); - String indexName1 = "testindex1"; - String indexName2 = "testindex2"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String remoteStoreRepo2Name = "test-rs-repo-2" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - Path absolutePath3 = randomRepoPath().toAbsolutePath(); - String restoredIndexName1 = indexName1 + "-restored"; - - createRepository(snapshotRepoName, "fs", getRepositorySettings(absolutePath1, false)); - createRepository(remoteStoreRepoName, "fs", absolutePath2); - createRepository(remoteStoreRepo2Name, "fs", absolutePath3); - - Client client = client(); - Settings indexSettings = getIndexSettings(true, remoteStoreRepoName, 1, 0).build(); - createIndex(indexName1, indexSettings); - - Settings indexSettings2 = getIndexSettings(false, null, 1, 0).build(); - createIndex(indexName2, indexSettings2); - - final int numDocsInIndex1 = 5; - final int numDocsInIndex2 = 6; - indexDocuments(client, indexName1, numDocsInIndex1); - indexDocuments(client, indexName2, numDocsInIndex2); - ensureGreen(indexName1, indexName2); - - internalCluster().startDataOnlyNode(); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1, indexName2) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - Settings remoteStoreIndexSettings = Settings.builder() - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo2Name) - .build(); - // restore index as a remote store index with
different remote store repo - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(false) - .setIndexSettings(remoteStoreIndexSettings) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - assertEquals(restoreSnapshotResponse.status(), RestStatus.ACCEPTED); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client(), restoredIndexName1, numDocsInIndex1); - - // deleting data for restoredIndexName1 and restoring from remote store. - internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primary)); - // Re-initialize client to make sure we are not using client from stopped node. - client = client(clusterManagerNode); - assertAcked(client.admin().indices().prepareClose(restoredIndexName1)); - client.admin() - .cluster() - .restoreRemoteStore( - new RestoreRemoteStoreRequest().indices(restoredIndexName1).restoreAllShards(true), - PlainActionFuture.newFuture() - ); - ensureYellowAndNoInitializingShards(restoredIndexName1); - ensureGreen(restoredIndexName1); - // indexing some new docs and validating - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - } - - private Settings.Builder getIndexSettings(boolean enableRemoteStore, String remoteStoreRepo, int numOfShards, int numOfReplicas) { - Settings.Builder settingsBuilder = Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numOfShards) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numOfReplicas); - if (enableRemoteStore) { - settingsBuilder.put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT); - } - return settingsBuilder; - } - - public void testRestoreShallowSnapshotRepositoryOverridden() throws ExecutionException, InterruptedException { - String indexName1 = "testindex1"; - String snapshotRepoName = "test-restore-snapshot-repo"; - String remoteStoreRepoName = "test-rs-repo" + TEST_REMOTE_STORE_REPO_SUFFIX; - String remoteStoreRepoNameUpdated = "test-rs-repo-updated" + TEST_REMOTE_STORE_REPO_SUFFIX; - String snapshotName1 = "test-restore-snapshot1"; - Path absolutePath1 = randomRepoPath().toAbsolutePath(); - Path absolutePath2 = randomRepoPath().toAbsolutePath(); - Path absolutePath3 = randomRepoPath().toAbsolutePath(); - String[] pathTokens = absolutePath1.toString().split("/"); - String basePath = pathTokens[pathTokens.length - 1]; - Arrays.copyOf(pathTokens, pathTokens.length - 1); - Path location = PathUtils.get(String.join("/", pathTokens)); - pathTokens = absolutePath2.toString().split("/"); - String basePath2 = pathTokens[pathTokens.length - 1]; - Arrays.copyOf(pathTokens, pathTokens.length - 1); - Path location2 = PathUtils.get(String.join("/", pathTokens)); - logger.info("Path 1 [{}]", absolutePath1); - logger.info("Path 2 [{}]", absolutePath2); - logger.info("Path 3 [{}]", absolutePath3); - String restoredIndexName1 = indexName1 + "-restored"; - - createRepository(snapshotRepoName, "fs",
getRepositorySettings(location, basePath, true)); - createRepository(remoteStoreRepoName, "fs", absolutePath3); - - Client client = client(); - Settings indexSettings = Settings.builder() - .put(super.indexSettings()) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepoName) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepoName) - .put(IndexSettings.INDEX_REFRESH_INTERVAL_SETTING.getKey(), "300s") - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - createIndex(indexName1, indexSettings); - - int numDocsInIndex1 = randomIntBetween(2, 5); - indexDocuments(client, indexName1, numDocsInIndex1); - - ensureGreen(indexName1); - - logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .get(); - assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); - assertThat( - createSnapshotResponse.getSnapshotInfo().successfulShards(), - equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) - ); - assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.SUCCESS)); - - createRepository(remoteStoreRepoName, "fs", absolutePath2); - - RestoreSnapshotResponse restoreSnapshotResponse = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .get(); - - assertTrue(restoreSnapshotResponse.getRestoreInfo().failedShards() > 0); - - ensureRed(restoredIndexName1); - - client().admin().indices().close(Requests.closeIndexRequest(restoredIndexName1)).get(); - createRepository(remoteStoreRepoNameUpdated, "fs", absolutePath3); - RestoreSnapshotResponse restoreSnapshotResponse2 = client.admin() - .cluster() - .prepareRestoreSnapshot(snapshotRepoName, snapshotName1) - .setWaitForCompletion(true) - .setIndices(indexName1) - .setRenamePattern(indexName1) - .setRenameReplacement(restoredIndexName1) - .setSourceRemoteStoreRepository(remoteStoreRepoNameUpdated) - .get(); - - assertTrue(restoreSnapshotResponse2.getRestoreInfo().failedShards() == 0); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1); - - // indexing some new docs and validating - indexDocuments(client, restoredIndexName1, numDocsInIndex1, numDocsInIndex1 + 2); - ensureGreen(restoredIndexName1); - assertDocsPresentInIndex(client, restoredIndexName1, numDocsInIndex1 + 2); - } - - private void indexDocuments(Client client, String indexName, int numOfDocs) { - indexDocuments(client, indexName, 0, numOfDocs); - } - - private void indexDocuments(Client client, String indexName, int fromId, int toId) { - for (int i = fromId; i < toId; i++) { - String id = Integer.toString(i); - client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); - } - client.admin().indices().prepareFlush(indexName).get(); - } - - private void assertDocsPresentInIndex(Client client, String indexName, int numOfDocs) { - for (int i = 0; i < numOfDocs; i++) { - String id = Integer.toString(i); - logger.info("checking for index " + indexName + " with docId " + id); - assertTrue("doc with id " +
id + " is not present for index " + indexName, client.prepareGet(indexName, id).get().isExists()); - } - } - public void testParallelRestoreOperationsFromSingleSnapshot() throws Exception { String indexName1 = "testindex1"; String indexName2 = "testindex2"; diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java index e47a2b94fc715..e02a5b95da400 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SnapshotStatusApisIT.java @@ -112,7 +112,7 @@ public void testStatusApiConsistency() { assertEquals(snStatus.getStats().getTime(), snapshotInfo.endTime() - snapshotInfo.startTime()); } - public void testStatusAPICallForShallowCopySnapshot() throws Exception { + public void testStatusAPICallForShallowCopySnapshot() { disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); internalCluster().startDataOnlyNode(); @@ -120,10 +120,6 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - final String indexName = "index-1"; createIndex(indexName); ensureGreen(); @@ -133,20 +129,8 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { } refresh(); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); - createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - final String snapshot = "snapshot"; createFullSnapshot(snapshotRepoName, snapshot); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); final SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, snapshot); assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); @@ -157,14 +141,6 @@ public void testStatusAPICallForShallowCopySnapshot() throws Exception { assertThat(snapshotShardState.getStats().getTotalSize(), greaterThan(0L)); assertThat(snapshotShardState.getStats().getIncrementalFileCount(), greaterThan(0)); assertThat(snapshotShardState.getStats().getIncrementalSize(), greaterThan(0L)); - - // Validating that the incremental file count and incremental file size is zero for shallow copy - final SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); - assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); - assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), greaterThan(0)); - assertThat(shallowSnapshotShardState.getStats().getTotalSize(), greaterThan(0L)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), is(0)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), is(0L)); } public void 
testStatusAPICallInProgressSnapshot() throws Exception { @@ -245,63 +221,6 @@ public void testExceptionOnMissingShardLevelSnapBlob() throws IOException { ); } - public void testStatusAPIStatsForBackToBackShallowSnapshot() throws Exception { - disableRepoConsistencyCheck("Remote store repository is being used for the test"); - internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - - final String snapshotRepoName = "snapshot-repo-name"; - createRepository(snapshotRepoName, "fs", snapshotRepoSettingsForShallowCopy()); - - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "fs", remoteStoreRepoPath); - - final String indexName = "index-1"; - createIndex(indexName); - ensureGreen(); - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); - createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - - createFullSnapshot(snapshotRepoName, "test-snap-1"); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 1); - - SnapshotStatus snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-1"); - assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); - - SnapshotIndexShardStatus shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); - assertThat(shallowSnapshotShardState.getStage(), is(SnapshotIndexShardStage.DONE)); - final int totalFileCount = shallowSnapshotShardState.getStats().getTotalFileCount(); - final long totalSize = shallowSnapshotShardState.getStats().getTotalSize(); - final int incrementalFileCount = shallowSnapshotShardState.getStats().getIncrementalFileCount(); - final long incrementalSize = shallowSnapshotShardState.getStats().getIncrementalSize(); - - createFullSnapshot(snapshotRepoName, "test-snap-2"); - assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, remoteStoreRepoName).length == 2); - - snapshotStatus = getSnapshotStatus(snapshotRepoName, "test-snap-2"); - assertThat(snapshotStatus.getState(), is(SnapshotsInProgress.State.SUCCESS)); - shallowSnapshotShardState = stateFirstShard(snapshotStatus, remoteStoreEnabledIndexName); - assertThat(shallowSnapshotShardState.getStats().getTotalFileCount(), equalTo(totalFileCount)); - assertThat(shallowSnapshotShardState.getStats().getTotalSize(), equalTo(totalSize)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalFileCount(), equalTo(incrementalFileCount)); - assertThat(shallowSnapshotShardState.getStats().getIncrementalSize(), equalTo(incrementalSize)); - } - public void testGetSnapshotsWithoutIndices() throws Exception { createRepository("test-repo", "fs"); @@ -441,17 +360,12 @@ public void testSnapshotStatusOnFailedSnapshot() throws Exception { } public void testStatusAPICallInProgressShallowSnapshot() throws Exception { - disableRepoConsistencyCheck("Remote store repository is being used for the test"); internalCluster().startClusterManagerOnlyNode(); 
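// The "mock" repository type used in this test can be told to block data-node writes, which is
// what keeps the snapshot observable while it is still in progress. A rough sketch of the
// pattern, assuming the helper methods used elsewhere in these tests:
//
//   createRepository("snapshot-repo-name", "mock",
//       snapshotRepoSettingsForShallowCopy().put("block_on_data", true));
//   ActionFuture<CreateSnapshotResponse> snapshotFuture = startFullSnapshot("snapshot-repo-name", "test-snap");
//   // ...assert via the status API that the snapshot is still in progress, then unblock the
//   // data nodes so the snapshot can complete and the future can be resolved.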
internalCluster().startDataOnlyNode(); final String snapshotRepoName = "snapshot-repo-name"; createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy().put("block_on_data", true)); - final Path remoteStoreRepoPath = randomRepoPath(); - final String remoteStoreRepoName = "remote-store-repo-name"; - createRepository(remoteStoreRepoName, "mock", remoteStoreRepoPath); - final String indexName = "index-1"; createIndex(indexName); ensureGreen(); @@ -461,17 +375,6 @@ public void testStatusAPICallInProgressShallowSnapshot() throws Exception { } refresh(); - final String remoteStoreEnabledIndexName = "remote-index-1"; - final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepoName); - createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); - ensureGreen(); - - logger.info("--> indexing some data"); - for (int i = 0; i < 100; i++) { - index(remoteStoreEnabledIndexName, "_doc", Integer.toString(i), "foo", "bar" + i); - } - refresh(); - logger.info("--> snapshot"); ActionFuture createSnapshotResponseActionFuture = startFullSnapshot(snapshotRepoName, "test-snap"); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index bcf9cbd0efef8..267abbbd6b6fe 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -82,6 +82,7 @@ import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.function.Function; @@ -285,6 +286,32 @@ public Iterator> settings() { SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT.toString(), ReplicationType::parseString, + new Setting.Validator<>() { + + @Override + public void validate(final ReplicationType value) {} + + @Override + public void validate(final ReplicationType value, final Map, Object> settings) { + final Object remoteStoreEnabled = settings.get(INDEX_REMOTE_STORE_ENABLED_SETTING); + if (ReplicationType.SEGMENT.equals(value) == false && Objects.equals(remoteStoreEnabled, true)) { + throw new IllegalArgumentException( + "To enable " + + INDEX_REMOTE_STORE_ENABLED_SETTING.getKey() + + ", " + + INDEX_REPLICATION_TYPE_SETTING.getKey() + + " should be set to " + + ReplicationType.SEGMENT + ); + } + } + + @Override + public Iterator> settings() { + final List> settings = List.of(INDEX_REMOTE_STORE_ENABLED_SETTING); + return settings.iterator(); + } + }, Property.IndexScope, Property.Final ); @@ -328,13 +355,14 @@ public Iterator> settings() { } }, Property.IndexScope, - Property.Final + Property.PrivateIndex, + Property.Dynamic ); /** * Used to specify remote store repository to use for this index. 
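 * A validator on this setting (below) rejects an empty repository ID and, through its
 * settings() iterator, ties validation to INDEX_REMOTE_STORE_ENABLED_SETTING. Illustrative
 * usage with placeholder values, mirroring the unit tests further down:
 *
 *   Settings indexSettings = Settings.builder()
 *       .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
 *       .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
 *       .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "repo1")
 *       .build();
 *   // INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(indexSettings) returns "repo1";
 *   // an empty repository ID, or remote store left disabled, fails validation instead.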
*/ - public static final Setting INDEX_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString( + public static final Setting INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING = Setting.simpleString( SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, new Setting.Validator<>() { @@ -345,10 +373,12 @@ public void validate(final String value) {} public void validate(final String value, final Map, Object> settings) { if (value == null || value.isEmpty()) { throw new IllegalArgumentException( - "Setting " + INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey() + " should be provided with non-empty repository ID" + "Setting " + + INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey() + + " should be provided with non-empty repository ID" ); } else { - validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_STORE_REPOSITORY_SETTING); + validateRemoteStoreSettingEnabled(settings, INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING); } } @@ -359,7 +389,8 @@ public Iterator> settings() { } }, Property.IndexScope, - Property.Final + Property.PrivateIndex, + Property.Dynamic ); private static void validateRemoteStoreSettingEnabled(final Map, Object> settings, Setting setting) { @@ -409,7 +440,8 @@ public Iterator> settings() { } }, Property.IndexScope, - Property.Final + Property.PrivateIndex, + Property.Dynamic ); public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index 728bac647d74a..db9964b1a2ff8 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -91,7 +91,6 @@ import org.opensearch.indices.InvalidIndexNameException; import org.opensearch.indices.ShardLimitValidator; import org.opensearch.indices.SystemIndices; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -105,7 +104,6 @@ import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; @@ -120,9 +118,6 @@ import static java.util.stream.Collectors.toList; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING; -import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_REPLICATION_TYPE_SETTING; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_CREATION_DATE; @@ -134,7 +129,7 @@ import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.Metadata.DEFAULT_REPLICA_COUNT_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; +import static 
org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; @@ -892,7 +887,7 @@ static Settings aggregateIndexSettings( indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID()); updateReplicationStrategy(indexSettingsBuilder, request.settings(), settings); - updateRemoteStoreSettings(indexSettingsBuilder, request.settings(), settings); + updateRemoteStoreSettings(indexSettingsBuilder, settings); if (sourceMetadata != null) { assert request.resizeType() != null; @@ -947,53 +942,16 @@ private static void updateReplicationStrategy(Settings.Builder settingsBuilder, /** * Updates index settings to enable remote store by default based on cluster level settings * @param settingsBuilder index settings builder to be updated with relevant settings - * @param requestSettings settings passed in during index create request * @param clusterSettings cluster level settings */ - private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings requestSettings, Settings clusterSettings) { - if (CLUSTER_REMOTE_STORE_ENABLED_SETTING.get(clusterSettings)) { - // Verify if we can create a remote store based index based on user provided settings - if (canCreateRemoteStoreIndex(requestSettings) == false) { - return; - } - - // Verify index has replication type as SEGMENT - if (ReplicationType.DOCUMENT.equals(ReplicationType.parseString(settingsBuilder.get(SETTING_REPLICATION_TYPE)))) { - throw new IllegalArgumentException( - "Cannot enable [" - + SETTING_REMOTE_STORE_ENABLED - + "] when [" - + SETTING_REPLICATION_TYPE - + "] is " - + ReplicationType.DOCUMENT - ); - } - - settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true); - String remoteStoreRepo; - if (Objects.equals(requestSettings.get(INDEX_REMOTE_STORE_ENABLED_SETTING.getKey()), "true")) { - remoteStoreRepo = requestSettings.get(INDEX_REMOTE_STORE_REPOSITORY_SETTING.getKey()); - } else { - remoteStoreRepo = CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.get(clusterSettings); - } - settingsBuilder.put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put( - SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, - requestSettings.get( - INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), - CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(clusterSettings) - ) - ); + private static void updateRemoteStoreSettings(Settings.Builder settingsBuilder, Settings clusterSettings) { + if (CLUSTER_REMOTE_STORE_ENABLED_SETTING.get(clusterSettings) == true) { + settingsBuilder.put(SETTING_REMOTE_STORE_ENABLED, true) + .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(clusterSettings)) + .put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.get(clusterSettings)); } } - private static boolean canCreateRemoteStoreIndex(Settings requestSettings) { - return (INDEX_REPLICATION_TYPE_SETTING.exists(requestSettings) == false - || INDEX_REPLICATION_TYPE_SETTING.get(requestSettings).equals(ReplicationType.SEGMENT)) - && (INDEX_REMOTE_STORE_ENABLED_SETTING.exists(requestSettings) == false - || INDEX_REMOTE_STORE_ENABLED_SETTING.get(requestSettings)); - } - public static void validateStoreTypeSettings(Settings settings) { // deprecate simplefs store type: if 
(IndexModule.Type.SIMPLEFS.match(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings))) { diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 9da9e1b14d307..802eb7bd01254 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -673,7 +673,7 @@ public void apply(Settings value, Settings current, Settings previous) { List.of(FeatureFlags.REMOTE_STORE), List.of( IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING, - IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING, + IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING ), List.of(FeatureFlags.CONCURRENT_SEGMENT_SEARCH), diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 3cc7c351fe1bf..be2b5f00bc0ec 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -235,7 +235,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { FeatureFlags.REMOTE_STORE, List.of( IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, - IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING, + IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_REPOSITORY_SETTING ), FeatureFlags.CONCURRENT_SEGMENT_SEARCH, diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 36b937e7df76a..7be824d95b421 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -256,7 +256,7 @@ public class IndicesService extends AbstractLifecycleComponent /** * Used to specify default repo to use for segment upload for remote store backed indices */ - public static final Setting CLUSTER_REMOTE_STORE_REPOSITORY_SETTING = Setting.simpleString( + public static final Setting CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING = Setting.simpleString( "cluster.remote_store.segment.repository", "", Property.NodeScope, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java index 375b1b8ed7aba..aa3e7ab1fb2c7 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/remotestore/stats/TransportRemoteStoreStatsActionTests.java @@ -32,6 +32,7 @@ import org.opensearch.index.remote.RemoteSegmentTransferTracker; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.indices.IndicesService; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.transport.MockTransport; import org.opensearch.transport.TransportService; @@ -45,6 +46,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; +import static 
org.opensearch.cluster.metadata.IndexMetadata.SETTING_REPLICATION_TYPE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_STORE_ENABLED; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY; @@ -70,6 +72,7 @@ public void setUp() throws Exception { remoteStoreIndexMetadata = IndexMetadata.builder(INDEX.getName()) .settings( settings(Version.CURRENT).put(SETTING_INDEX_UUID, INDEX.getUUID()) + .put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(SETTING_REMOTE_STORE_ENABLED, true) .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-test-repo") .build() diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java index e52237c8dba99..63c3511a97d2b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataCreateIndexServiceTests.java @@ -101,6 +101,7 @@ import java.util.Map; import java.util.Optional; import java.util.Set; +import java.util.Locale; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; @@ -113,7 +114,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; @@ -143,7 +143,7 @@ import static org.opensearch.cluster.metadata.MetadataCreateIndexService.resolveAndValidateAliases; import static org.opensearch.index.IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING; import static org.opensearch.index.IndexSettings.INDEX_SOFT_DELETES_SETTING; -import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_REPOSITORY_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; @@ -1191,40 +1191,11 @@ public void testvalidateIndexSettings() { threadPool.shutdown(); } - public void testRemoteStoreNoUserOverrideConflictingReplicationTypeIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - IllegalArgumentException exc = expectThrows( - IllegalArgumentException.class, - () -> aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ) - ); - assertThat( - exc.getMessage(), - containsString("Cannot enable [index.remote_store.enabled] when [index.replication.type] is DOCUMENT") - ); - } - public void 
testRemoteStoreNoUserOverrideExceptReplicationTypeSegmentIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.DOCUMENT) .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") + .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") .build(); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); @@ -1257,7 +1228,7 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { Settings settings = Settings.builder() .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") + .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") .build(); FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); @@ -1284,137 +1255,103 @@ public void testRemoteStoreNoUserOverrideIndexSettings() { } public void testRemoteStoreDisabledByUserIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); requestSettings.put(SETTING_REMOTE_STORE_ENABLED, false); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ); - verifyRemoteStoreIndexSettings( - indexSettings, - "false", - null, - null, - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL - ); + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + ); + + final List validationErrors = checkerService.getIndexSettingsValidationErrors( + requestSettings.build(), + true, + Optional.empty() + ); + assertThat(validationErrors.size(), is(1)); + assertThat( + validationErrors.get(0), + is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_STORE_ENABLED)) + ); + })); } public void testRemoteStoreOverrideSegmentRepoIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), 
"my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(SETTING_REMOTE_STORE_ENABLED, true) .put(SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-custom-repo"); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-custom-repo", - "my-translog-repo-1", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL - ); + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + ); + + final List validationErrors = checkerService.getIndexSettingsValidationErrors( + requestSettings.build(), + true, + Optional.empty() + ); + assertThat(validationErrors.size(), is(1)); + assertThat( + validationErrors.get(0), + is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_SEGMENT_STORE_REPOSITORY)) + ); + })); } public void testRemoteStoreOverrideTranslogRepoIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); final Settings.Builder requestSettings = Settings.builder(); requestSettings.put(SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "my-custom-repo"); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ); - verifyRemoteStoreIndexSettings( - indexSettings, - "true", - "my-segment-repo-1", - "my-custom-repo", - ReplicationType.SEGMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL - ); - } - - public void testRemoteStoreOverrideReplicationTypeIndexSettings() { - Settings settings = Settings.builder() - .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) - .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) - .put(CLUSTER_REMOTE_STORE_REPOSITORY_SETTING.getKey(), "my-segment-repo-1") - .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "my-translog-repo-1") - .build(); - 
FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + withTemporaryClusterService(((clusterService, threadPool) -> { + MetadataCreateIndexService checkerService = new MetadataCreateIndexService( + Settings.EMPTY, + clusterService, + null, + null, + null, + createTestShardLimitService(randomIntBetween(1, 1000), false, clusterService), + new Environment(Settings.builder().put("path.home", "dummy").build(), null), + IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, + threadPool, + null, + new SystemIndices(Collections.emptyMap()), + true, + new AwarenessReplicaBalance(Settings.EMPTY, clusterService.getClusterSettings()) + ); - request = new CreateIndexClusterStateUpdateRequest("create index", "test", "test"); - final Settings.Builder requestSettings = Settings.builder(); - requestSettings.put(SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT); - request.settings(requestSettings.build()); - Settings indexSettings = aggregateIndexSettings( - ClusterState.EMPTY_STATE, - request, - Settings.EMPTY, - null, - settings, - IndexScopedSettings.DEFAULT_SCOPED_SETTINGS, - randomShardLimitService(), - Collections.emptySet() - ); - verifyRemoteStoreIndexSettings( - indexSettings, - null, - null, - null, - ReplicationType.DOCUMENT.toString(), - IndexSettings.DEFAULT_REMOTE_TRANSLOG_BUFFER_INTERVAL - ); + final List validationErrors = checkerService.getIndexSettingsValidationErrors( + requestSettings.build(), + true, + Optional.empty() + ); + assertThat(validationErrors.size(), is(1)); + assertThat( + validationErrors.get(0), + is(String.format(Locale.ROOT, "expected [%s] to be private but it was not", SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY)) + ); + })); } public void testBuildIndexMetadata() { diff --git a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java index f91905fea9561..70f43c7ed52e3 100644 --- a/server/src/test/java/org/opensearch/index/IndexSettingsTests.java +++ b/server/src/test/java/org/opensearch/index/IndexSettingsTests.java @@ -779,6 +779,7 @@ public void testRemoteStoreExplicitSetting() { "index", Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build() ); @@ -795,22 +796,6 @@ public void testRemoteTranslogStoreDefaultSetting() { assertFalse(settings.isRemoteTranslogStoreEnabled()); } - public void testUpdateRemoteStoreFails() { - Set> remoteStoreSettingSet = new HashSet<>(); - remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING); - IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); - SettingsException error = expectThrows( - SettingsException.class, - () -> settings.updateSettings( - Settings.builder().put("index.remote_store.enabled", randomBoolean()).build(), - Settings.builder(), - Settings.builder(), - "index" - ) - ); - assertEquals(error.getMessage(), "final index setting [index.remote_store.enabled], not updateable"); - } - public void testEnablingRemoteStoreFailsWhenReplicationTypeIsDocument() { Settings indexSettings = Settings.builder() .put("index.replication.type", ReplicationType.DOCUMENT) @@ -846,6 +831,7 @@ public void testRemoteRepositoryExplicitSetting() { "index", Settings.builder() .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) 
.put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "repo1") .build() @@ -854,25 +840,6 @@ public void testRemoteRepositoryExplicitSetting() { assertEquals("repo1", settings.getRemoteStoreRepository()); } - public void testUpdateRemoteRepositoryFails() { - Set> remoteStoreSettingSet = new HashSet<>(); - remoteStoreSettingSet.add(IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING); - IndexScopedSettings settings = new IndexScopedSettings(Settings.EMPTY, remoteStoreSettingSet); - SettingsException error = expectThrows( - SettingsException.class, - () -> settings.updateSettings( - Settings.builder().put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, randomUnicodeOfLength(10)).build(), - Settings.builder(), - Settings.builder(), - "index" - ) - ); - assertEquals( - error.getMessage(), - String.format(Locale.ROOT, "final index setting [%s], not updateable", IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY) - ); - } - public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() { Settings indexSettings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) @@ -881,7 +848,7 @@ public void testSetRemoteRepositoryFailsWhenRemoteStoreIsNotEnabled() { .build(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + () -> IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(indexSettings) ); assertEquals( String.format( @@ -902,7 +869,7 @@ public void testSetRemoteRepositoryFailsWhenEmptyString() { .build(); IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING.get(indexSettings) + () -> IndexMetadata.INDEX_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.get(indexSettings) ); assertEquals( String.format( diff --git a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java index 7b180a71f3bab..8d444f5d10f26 100644 --- a/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java +++ b/server/src/test/java/org/opensearch/index/remote/RemoteRefreshSegmentPressureServiceTests.java @@ -16,6 +16,7 @@ import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.index.store.Store; import org.opensearch.test.IndexSettingsModule; import org.opensearch.test.OpenSearchTestCase; @@ -151,7 +152,10 @@ public void testValidateSegmentUploadLag() { } private static IndexShard createIndexShard(ShardId shardId, boolean remoteStoreEnabled) { - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled)).build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, String.valueOf(remoteStoreEnabled)) + .build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); Store store = mock(Store.class); IndexShard indexShard = mock(IndexShard.class); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 
03a6fc3df824d..5e88b892cc835 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -1293,7 +1293,10 @@ public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() { assertThat(allocations.size(), equalTo(active.size() + initializing.size())); final AllocationId primaryId = active.iterator().next(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -1368,7 +1371,10 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { assertThat(allocations.size(), equalTo(active.size() + initializing.size())); final AllocationId primaryId = active.iterator().next(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); @@ -1438,7 +1444,10 @@ public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { */ public void testUpdateGlobalCheckpointOnReplicaWithRemoteTranslogEnabled() { final AllocationId active = AllocationId.newInitializing(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(active, settings); final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE - 1); tracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); @@ -1460,7 +1469,10 @@ public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() throws Excep Set initializing = new HashSet<>(initializingWithCheckpoints.keySet()); final AllocationId primaryId = active.iterator().next(); final AllocationId replicaId = initializing.iterator().next(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); @@ -1485,7 +1497,10 @@ public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() assigned.putAll(active); assigned.putAll(initializing); AllocationId primaryId = active.keySet().iterator().next(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + 
.put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); @@ -1515,7 +1530,10 @@ public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() logger.info("active: {}, initializing: {}", active, initializing); AllocationId primaryId = active.keySet().iterator().next(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); @@ -1540,7 +1558,10 @@ public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManagerWithRemoteTrans final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); final AllocationId primaryId = active.keySet().iterator().next(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); @@ -1578,7 +1599,10 @@ public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTrans if (randomBoolean()) { allocations.putAll(initializingToBeRemoved); } - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); @@ -1624,7 +1648,10 @@ public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() final Set initializingIds = activeAndInitializingAllocationIds.v2(); AllocationId primaryId = activeAllocationIds.iterator().next(); IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(primaryId, settings); tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); tracker.activatePrimaryMode(NO_OPS_PERFORMED); @@ -1936,7 +1963,10 @@ public void testSegmentReplicationCheckpointTrackingInvalidAllocationIDs() { } public void 
testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOException { - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); final ShardId shardId = new ShardId("test", "_na_", 0); @@ -2115,7 +2145,10 @@ public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOExcept public void testIllegalStateExceptionIfUnknownAllocationIdWithRemoteTranslogEnabled() { final AllocationId active = AllocationId.newInitializing(); final AllocationId initializing = AllocationId.newInitializing(); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true").build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, "true") + .build(); final ReplicationTracker tracker = newTracker(active, settings); tracker.updateFromClusterManager( randomNonNegativeLong(), diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 1340ff1868a11..96fa53fbf0fc2 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -1260,6 +1260,7 @@ public void testGetChangesSnapshotThrowsAssertForRemoteStore() throws IOExceptio .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 2) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build(); final IndexMetadata.Builder indexMetadata = IndexMetadata.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); @@ -4839,12 +4840,13 @@ public void testTranslogFactoryForRemoteTranslogBackedReplicaShard() throws IOEx .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "seg-test") .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "txlog-test") .build(); final IndexShard replicaShard = newStartedShard(false, primarySettings, new NRTReplicationEngineFactory()); - assertEquals(replicaShard.getEngine().getClass(), InternalEngine.class); + assertEquals(replicaShard.getEngine().getClass(), NRTReplicationEngine.class); assertEquals(replicaShard.getEngine().config().getTranslogFactory().getClass(), InternalTranslogFactory.class); closeShards(replicaShard); } diff --git a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java b/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java index 349f78c3c2d7b..707a58535f21d 100644 --- a/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java +++ b/server/src/test/java/org/opensearch/index/translog/RemoteFSTranslogTests.java @@ -47,6 +47,7 @@ import org.opensearch.core.index.shard.ShardId; import 
org.opensearch.index.translog.transfer.BlobStoreTransferService; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.blobstore.BlobStoreRepository; import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.repositories.fs.FsRepository; @@ -182,6 +183,7 @@ private TranslogConfig getTranslogConfig(final Path path) { // only randomize between nog age retention and a long one, so failures will have a chance of reproducing .put(IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getKey(), randomBoolean() ? "-1ms" : "1h") .put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), randomIntBetween(-1, 2048) + "b") + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .build(); return getTranslogConfig(path, settings); diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java new file mode 100644 index 0000000000000..0dbc0372458b5 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryHelperTests.java @@ -0,0 +1,140 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.repositories.blobstore; + +import org.opensearch.action.support.master.AcknowledgedResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.xcontent.NamedXContentRegistry; +import org.opensearch.env.Environment; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.store.RemoteBufferedOutputDirectory; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.plugins.Plugin; +import org.opensearch.plugins.RepositoryPlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.fs.FsRepository; +import org.opensearch.test.OpenSearchIntegTestCase; +import org.opensearch.test.OpenSearchSingleNodeTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; + +import static org.hamcrest.Matchers.equalTo; + +public class BlobStoreRepositoryHelperTests extends OpenSearchSingleNodeTestCase { + + static final String REPO_TYPE = "fsLike"; + + protected Collection> getPlugins() { + return Arrays.asList(FsLikeRepoPlugin.class); + } + + protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException { + String indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreIndex) + .get() + .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + final RepositoriesService repositoriesService = 
getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository remoteStorerepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository); + BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); + BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); + try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { + return Arrays.stream(lockDirectory.listAll()).filter(lock -> lock.endsWith(".lock")).toArray(String[]::new); + } + } + + // the reason for this plug-in is to drop any assertSnapshotOrGenericThread as mostly all access in this test goes from test threads + public static class FsLikeRepoPlugin extends Plugin implements RepositoryPlugin { + + @Override + public Map getRepositories( + Environment env, + NamedXContentRegistry namedXContentRegistry, + ClusterService clusterService, + RecoverySettings recoverySettings + ) { + return Collections.singletonMap( + REPO_TYPE, + (metadata) -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we access blobStore on test/main threads + } + } + ); + } + } + + protected void createRepository(Client client, String repoName) { + AcknowledgedResponse putRepositoryResponse = client.admin() + .cluster() + .preparePutRepository(repoName) + .setType(REPO_TYPE) + .setSettings( + Settings.builder().put(node().settings()).put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + ) + .get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } + + protected void createRepository(Client client, String repoName, Settings repoSettings) { + AcknowledgedResponse putRepositoryResponse = client.admin() + .cluster() + .preparePutRepository(repoName) + .setType(REPO_TYPE) + .setSettings(repoSettings) + .get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } + + protected void updateRepository(Client client, String repoName, Settings repoSettings) { + createRepository(client, repoName, repoSettings); + } + + protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") + .put("index.refresh_interval", "300s") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) + .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + } + + protected void indexDocuments(Client client, String indexName) { + int numDocs = randomIntBetween(10, 20); + for (int i = 0; i < numDocs; i++) { + String id = Integer.toString(i); + client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); + } + client.admin().indices().prepareFlush(indexName).get(); + } + + protected IndexSettings getIndexSettings(String indexName) { + final IndicesService indicesService = getInstanceFromNode(IndicesService.class); + final IndexService indexService = indicesService.indexService(resolveIndex(indexName)); + return indexService.getIndexSettings(); + } + +} diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java 
b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java new file mode 100644 index 0000000000000..f25498b8c8368 --- /dev/null +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -0,0 +1,371 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.repositories.blobstore; + +import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.RepositoryMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.repositories.IndexId; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.RepositoryData; +import org.opensearch.snapshots.SnapshotId; +import org.opensearch.test.FeatureFlagSetter; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.stream.Collectors; + +import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.indices.IndicesService.CLUSTER_REPLICATION_TYPE_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_STORE_ENABLED_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING; +import static org.opensearch.indices.IndicesService.CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING; + +/** + * Tests for the {@link BlobStoreRepository} and its subclasses. 
+ */ +public class BlobStoreRepositoryRemoteIndexTests extends BlobStoreRepositoryHelperTests { + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .put(FeatureFlags.REMOTE_STORE, "true") + .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) + .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) + .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "test-rs-repo") + .put(CLUSTER_REMOTE_TRANSLOG_REPOSITORY_SETTING.getKey(), "test-rs-repo") + .build(); + } + + // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot + public void testRetrieveShallowCopySnapshotCase1() throws IOException { + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + logger.info("--> creating remote store repository"); + Settings remoteStoreRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); + + logger.info("--> creating an index and indexing documents"); + final String indexName = "test-idx"; + createIndex(indexName); + ensureGreen(); + indexDocuments(client, indexName); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create first snapshot"); + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 0) : "there should be no lock files present in directory, but found " + Arrays.toString(lockFiles); + logger.info("--> create remote index shallow snapshot"); + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); + assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); + + 
logger.info("--> create another normal snapshot"); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettings); + createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); + assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); + + logger.info("--> make sure the node's repository can resolve the snapshots"); + final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); + + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); + IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); + + List snapshotIds = repositoryData.getSnapshotIds() + .stream() + .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) + .collect(Collectors.toList()); + assertThat(snapshotIds, equalTo(originalSnapshots)); + + // shallow copy shard metadata - getRemoteStoreShallowCopyShardMetadata + RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( + snapshotId2, + indexId, + new ShardId(remoteStoreIndexName, indexId.getId(), 0) + ); + assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); + } + + public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + logger.info("--> creating remote store repository"); + Settings remoteStoreRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create remote index shallow snapshot"); + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") + 
.setWaitForCompletion(true) + .setIndices(remoteStoreIndexName) + .get(); + final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); + assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock"); + + final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); + IndexSettings indexSetting = getIndexSettings(remoteStoreIndexName); + IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); + RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( + snapshotId, + indexId, + new ShardId(remoteStoreIndexName, indexSetting.getUUID(), 0) + ); + assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); + assertEquals(shardShallowCopySnapshot.getIndexUUID(), indexSetting.getUUID()); + assertEquals(shardShallowCopySnapshot.getRepositoryBasePath(), ""); + } + + // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot + // -> remoteStoreShallowCopy Snapshot -> normal snapshot + public void testRetrieveShallowCopySnapshotCase2() throws IOException { + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + final Client client = client(); + final String snapshotRepositoryName = "test-repo"; + final String remoteStoreRepositoryName = "test-rs-repo"; + + logger.info("--> creating snapshot repository"); + Settings snapshotRepoSettings = Settings.builder() + .put(node().settings()) + .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) + .build(); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + + GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() + .cluster() + .prepareGetRepositories(snapshotRepositoryName) + .get(); + + RepositoryMetadata updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); + + assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); + + logger.info("--> creating remote store repository"); + createRepository(client, remoteStoreRepositoryName); + + logger.info("--> creating an index and indexing documents"); + final String indexName = "test-idx"; + createIndex(indexName); + ensureGreen(); + indexDocuments(client, indexName); + + logger.info("--> creating a remote store enabled index and indexing documents"); + final String remoteStoreIndexName = "test-rs-idx"; + Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); + createIndex(remoteStoreIndexName, indexSettings); + indexDocuments(client, remoteStoreIndexName); + + logger.info("--> create first remote index shallow snapshot"); + + Settings snapshotRepoSettingsForShallowCopy = Settings.builder() + .put(snapshotRepoSettings) + .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) + .build(); + updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); + + updatedGetRepositoriesResponse = 
client.admin().cluster().prepareGetRepositories(snapshotRepositoryName).get(); + + updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); + + assertTrue(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); + + CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); + assert lockFiles[0].endsWith(snapshotId1.getUUID() + ".lock"); + + logger.info("--> create second remote index shallow snapshot"); + createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); + List shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2); + for (SnapshotId snapshotId : shallowCopySnapshotIDs) { + assert lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID()); + } + logger.info("--> create third remote index shallow snapshot"); + createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 3); + shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); + for (SnapshotId snapshotId : shallowCopySnapshotIDs) { + assert lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()); + } + logger.info("--> create normal snapshot"); + createRepository(client, snapshotRepositoryName, snapshotRepoSettings); + createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-4") + .setWaitForCompletion(true) + .setIndices(indexName, remoteStoreIndexName) + .get(); + final SnapshotId snapshotId4 = createSnapshotResponse.getSnapshotInfo().snapshotId(); + + lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); + assert (lockFiles.length == 3) : "lock files are " + Arrays.toString(lockFiles); + shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); + for (SnapshotId snapshotId : shallowCopySnapshotIDs) { + assert lockFiles[0].contains(snapshotId.getUUID()) + || lockFiles[1].contains(snapshotId.getUUID()) + || lockFiles[2].contains(snapshotId.getUUID()); + } + + logger.info("--> make sure the node's repository can resolve the snapshots"); + final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3, snapshotId4); + + final RepositoriesService repositoriesService = 
getInstanceFromNode(RepositoriesService.class); + final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); + List snapshotIds = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) + .getSnapshotIds() + .stream() + .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) + .collect(Collectors.toList()); + assertThat(snapshotIds, equalTo(originalSnapshots)); + } + +} diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 893631ce4b564..f1253e377c819 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -33,31 +33,19 @@ package org.opensearch.repositories.blobstore; import org.opensearch.Version; -import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; -import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.UUIDs; -import org.opensearch.common.blobstore.BlobContainer; -import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.settings.Settings; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.common.util.FeatureFlags; -import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.env.Environment; -import org.opensearch.index.IndexModule; -import org.opensearch.index.IndexService; -import org.opensearch.index.IndexSettings; -import org.opensearch.index.snapshots.blobstore.RemoteStoreShardShallowCopySnapshot; -import org.opensearch.index.store.RemoteBufferedOutputDirectory; -import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.plugins.Plugin; import org.opensearch.plugins.RepositoryPlugin; import org.opensearch.repositories.IndexId; @@ -69,11 +57,8 @@ import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotState; -import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; -import org.opensearch.test.OpenSearchSingleNodeTestCase; -import java.io.IOException; import java.nio.file.Path; import java.util.Arrays; import java.util.Collection; @@ -90,7 +75,7 @@ /** * Tests for the {@link BlobStoreRepository} and its subclasses. 
*/ -public class BlobStoreRepositoryTests extends OpenSearchSingleNodeTestCase { +public class BlobStoreRepositoryTests extends BlobStoreRepositoryHelperTests { static final String REPO_TYPE = "fsLike"; @@ -181,367 +166,8 @@ public void testRetrieveSnapshots() throws Exception { assertThat(snapshotIds, equalTo(originalSnapshots)); } - private void createRepository(Client client, String repoName) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings( - Settings.builder().put(node().settings()).put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - ) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - } - - private void createRepository(Client client, String repoName, Settings repoSettings) { - AcknowledgedResponse putRepositoryResponse = client.admin() - .cluster() - .preparePutRepository(repoName) - .setType(REPO_TYPE) - .setSettings(repoSettings) - .get(); - assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); - } - - private void updateRepository(Client client, String repoName, Settings repoSettings) { - createRepository(client, repoName, repoSettings); - } - - private Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { - return Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") - .put("index.refresh_interval", "300s") - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1") - .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) - .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo) - .build(); - } - - private void indexDocuments(Client client, String indexName) { - int numDocs = randomIntBetween(10, 20); - for (int i = 0; i < numDocs; i++) { - String id = Integer.toString(i); - client.prepareIndex(indexName).setId(id).setSource("text", "sometext").get(); - } - client.admin().indices().prepareFlush(indexName).get(); - } - - private String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepository) throws IOException { - String indexUUID = client().admin() - .indices() - .prepareGetSettings(remoteStoreIndex) - .get() - .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository remoteStorerepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepository); - BlobPath shardLevelBlobPath = remoteStorerepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); - BlobContainer blobContainer = remoteStorerepository.blobStore().blobContainer(shardLevelBlobPath); - try (RemoteBufferedOutputDirectory lockDirectory = new RemoteBufferedOutputDirectory(blobContainer)) { - return Arrays.stream(lockDirectory.listAll()).filter(lock -> lock.endsWith(".lock")).toArray(String[]::new); - } - } - - // Validate Scenario Normal Snapshot -> remoteStoreShallowCopy Snapshot -> normal Snapshot - public void testRetrieveShallowCopySnapshotCase1() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Client client = client(); - final String snapshotRepositoryName = 
"test-repo"; - final String remoteStoreRepositoryName = "test-rs-repo"; - - logger.info("--> creating snapshot repository"); - - Settings snapshotRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - - logger.info("--> creating an index and indexing documents"); - final String indexName = "test-idx"; - createIndex(indexName); - ensureGreen(); - indexDocuments(client, indexName); - - logger.info("--> creating a remote store enabled index and indexing documents"); - final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); - createIndex(remoteStoreIndexName, indexSettings); - indexDocuments(client, remoteStoreIndexName); - - logger.info("--> create first snapshot"); - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 0) : "there should be no lock files present in directory, but found " + Arrays.toString(lockFiles); - logger.info("--> create remote index shallow snapshot"); - Settings snapshotRepoSettingsForShallowCopy = Settings.builder() - .put(snapshotRepoSettings) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) - .build(); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); - - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); - - logger.info("--> create another normal snapshot"); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettings); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId2.getUUID() + ".lock"); - - logger.info("--> make sure the node's repository can resolve the snapshots"); - final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); - 
- final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); - IndexId indexId = repositoryData.resolveIndexId(remoteStoreIndexName); - - List snapshotIds = repositoryData.getSnapshotIds() - .stream() - .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) - .collect(Collectors.toList()); - assertThat(snapshotIds, equalTo(originalSnapshots)); - - // shallow copy shard metadata - getRemoteStoreShallowCopyShardMetadata - RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( - snapshotId2, - indexId, - new ShardId(remoteStoreIndexName, indexId.getId(), 0) - ); - assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); - } - - public void testGetRemoteStoreShallowCopyShardMetadata() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Client client = client(); - final String snapshotRepositoryName = "test-repo"; - final String remoteStoreRepositoryName = "test-rs-repo"; - - logger.info("--> creating snapshot repository"); - - Settings snapshotRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - - logger.info("--> creating remote store repository"); - Settings remoteStoreRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, remoteStoreRepositoryName, remoteStoreRepoSettings); - - logger.info("--> creating a remote store enabled index and indexing documents"); - final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); - createIndex(remoteStoreIndexName, indexSettings); - indexDocuments(client, remoteStoreIndexName); - - logger.info("--> create remote index shallow snapshot"); - Settings snapshotRepoSettingsForShallowCopy = Settings.builder() - .put(snapshotRepoSettings) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), Boolean.TRUE) - .build(); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); - - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") - .setWaitForCompletion(true) - .setIndices(remoteStoreIndexName) - .get(); - final SnapshotId snapshotId = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - String[] lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "there should be only one lock file, but found " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId.getUUID() + ".lock"); - - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - RepositoryData repositoryData = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository); - IndexSettings indexSetting = getIndexSettings(remoteStoreIndexName); - IndexId indexId = 
repositoryData.resolveIndexId(remoteStoreIndexName); - RemoteStoreShardShallowCopySnapshot shardShallowCopySnapshot = repository.getRemoteStoreShallowCopyShardMetadata( - snapshotId, - indexId, - new ShardId(remoteStoreIndexName, indexSetting.getUUID(), 0) - ); - assertEquals(shardShallowCopySnapshot.getRemoteStoreRepository(), remoteStoreRepositoryName); - assertEquals(shardShallowCopySnapshot.getIndexUUID(), indexSetting.getUUID()); - assertEquals(shardShallowCopySnapshot.getRepositoryBasePath(), ""); - } - - private IndexSettings getIndexSettings(String indexName) { - final IndicesService indicesService = getInstanceFromNode(IndicesService.class); - final IndexService indexService = indicesService.indexService(resolveIndex(indexName)); - return indexService.getIndexSettings(); - } - // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot // -> remoteStoreShallowCopy Snapshot -> normal snapshot - public void testRetrieveShallowCopySnapshotCase2() throws IOException { - FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); - final Client client = client(); - final String snapshotRepositoryName = "test-repo"; - final String remoteStoreRepositoryName = "test-rs-repo"; - - logger.info("--> creating snapshot repository"); - Settings snapshotRepoSettings = Settings.builder() - .put(node().settings()) - .put("location", OpenSearchIntegTestCase.randomRepoPath(node().settings())) - .build(); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - - GetRepositoriesResponse updatedGetRepositoriesResponse = client.admin() - .cluster() - .prepareGetRepositories(snapshotRepositoryName) - .get(); - - RepositoryMetadata updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); - - assertFalse(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); - - logger.info("--> creating remote store repository"); - createRepository(client, remoteStoreRepositoryName); - - logger.info("--> creating an index and indexing documents"); - final String indexName = "test-idx"; - createIndex(indexName); - ensureGreen(); - indexDocuments(client, indexName); - - logger.info("--> creating a remote store enabled index and indexing documents"); - final String remoteStoreIndexName = "test-rs-idx"; - Settings indexSettings = getRemoteStoreBackedIndexSettings(remoteStoreRepositoryName); - createIndex(remoteStoreIndexName, indexSettings); - indexDocuments(client, remoteStoreIndexName); - - logger.info("--> create first remote index shallow snapshot"); - - Settings snapshotRepoSettingsForShallowCopy = Settings.builder() - .put(snapshotRepoSettings) - .put(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), true) - .build(); - updateRepository(client, snapshotRepositoryName, snapshotRepoSettingsForShallowCopy); - - updatedGetRepositoriesResponse = client.admin().cluster().prepareGetRepositories(snapshotRepositoryName).get(); - - updatedRepositoryMetadata = updatedGetRepositoriesResponse.repositories().get(0); - - assertTrue(updatedRepositoryMetadata.settings().getAsBoolean(BlobStoreRepository.REMOTE_STORE_INDEX_SHALLOW_COPY.getKey(), false)); - - CreateSnapshotResponse createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-1") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - String[] lockFiles = 
getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); - assert lockFiles[0].endsWith(snapshotId1.getUUID() + ".lock"); - - logger.info("--> create second remote index shallow snapshot"); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-2") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); - List shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2); - for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) || lockFiles[1].contains(snapshotId.getUUID()); - } - logger.info("--> create third remote index shallow snapshot"); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-3") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId3 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3); - shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); - for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); - } - logger.info("--> create normal snapshot"); - createRepository(client, snapshotRepositoryName, snapshotRepoSettings); - createSnapshotResponse = client.admin() - .cluster() - .prepareCreateSnapshot(snapshotRepositoryName, "test-snap-4") - .setWaitForCompletion(true) - .setIndices(indexName, remoteStoreIndexName) - .get(); - final SnapshotId snapshotId4 = createSnapshotResponse.getSnapshotInfo().snapshotId(); - - lockFiles = getLockFilesInRemoteStore(remoteStoreIndexName, remoteStoreRepositoryName); - assert (lockFiles.length == 3) : "lock files are " + Arrays.toString(lockFiles); - shallowCopySnapshotIDs = Arrays.asList(snapshotId1, snapshotId2, snapshotId3); - for (SnapshotId snapshotId : shallowCopySnapshotIDs) { - assert lockFiles[0].contains(snapshotId.getUUID()) - || lockFiles[1].contains(snapshotId.getUUID()) - || lockFiles[2].contains(snapshotId.getUUID()); - } - - logger.info("--> make sure the node's repository can resolve the snapshots"); - final List originalSnapshots = Arrays.asList(snapshotId1, snapshotId2, snapshotId3, snapshotId4); - - final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class); - final BlobStoreRepository repository = (BlobStoreRepository) repositoriesService.repository(snapshotRepositoryName); - List snapshotIds = OpenSearchBlobStoreRepositoryIntegTestCase.getRepositoryData(repository) - .getSnapshotIds() - .stream() - .sorted((s1, s2) -> s1.getName().compareTo(s2.getName())) - .collect(Collectors.toList()); - assertThat(snapshotIds, equalTo(originalSnapshots)); - } - public void testReadAndWriteSnapshotsThroughIndexFile() throws Exception { final BlobStoreRepository repository = setupRepo(); final long pendingGeneration = repository.metadata.pendingGeneration(); diff --git 
a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index a169fb871ca53..2c6f4f7b15e5d 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -523,7 +523,7 @@ protected void indexRandomDocs(String index, int numdocs) throws InterruptedExce assertDocCount(index, numdocs); } - protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { + protected Settings getRemoteStoreBackedIndexSettings() { return Settings.builder() .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1") .put("index.refresh_interval", "300s") @@ -531,9 +531,6 @@ protected Settings getRemoteStoreBackedIndexSettings(String remoteStoreRepo) { .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.FS.getSettingsKey()) .put(IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING.getKey(), false) .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStoreRepo) - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStoreRepo) .build(); } From f2ca769143d1c589806d5f9adc65e0545eedd753 Mon Sep 17 00:00:00 2001 From: Ticheng Lin <51488860+ticheng-aws@users.noreply.github.com> Date: Tue, 1 Aug 2023 07:26:37 -0700 Subject: [PATCH 03/24] Update version guards for the aggregation profiler with concurrent aggregation (#9017) Signed-off-by: Ticheng Lin --- .../java/org/opensearch/search/profile/ProfileResult.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java index d96db1d2dd8da..62b247735dfdb 100644 --- a/server/src/main/java/org/opensearch/search/profile/ProfileResult.java +++ b/server/src/main/java/org/opensearch/search/profile/ProfileResult.java @@ -134,7 +134,7 @@ public ProfileResult(StreamInput in) throws IOException { breakdown = in.readMap(StreamInput::readString, StreamInput::readLong); debug = in.readMap(StreamInput::readString, StreamInput::readGenericValue); children = in.readList(ProfileResult::new); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { this.maxSliceNodeTime = in.readOptionalLong(); this.minSliceNodeTime = in.readOptionalLong(); this.avgSliceNodeTime = in.readOptionalLong(); @@ -153,7 +153,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(breakdown, StreamOutput::writeString, StreamOutput::writeLong); out.writeMap(debug, StreamOutput::writeString, StreamOutput::writeGenericValue); out.writeList(children); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeOptionalLong(maxSliceNodeTime); out.writeOptionalLong(minSliceNodeTime); out.writeOptionalLong(avgSliceNodeTime); From 0596bd4444dc2893353af95a8edbe373a5214ffe Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 11:27:23 -0400 Subject: [PATCH 04/24] Bump com.maxmind.geoip2:geoip2 from 4.0.1 to 4.1.0 in /modules/ingest-geoip (#8998) * Bump com.maxmind.geoip2:geoip2 in /modules/ingest-geoip Bumps
[com.maxmind.geoip2:geoip2](https://github.com/maxmind/GeoIP2-java) from 4.0.1 to 4.1.0. - [Release notes](https://github.com/maxmind/GeoIP2-java/releases) - [Changelog](https://github.com/maxmind/GeoIP2-java/blob/main/CHANGELOG.md) - [Commits](https://github.com/maxmind/GeoIP2-java/compare/v4.0.1...v4.1.0) --- updated-dependencies: - dependency-name: com.maxmind.geoip2:geoip2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Update changelog Signed-off-by: dependabot[bot] --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- CHANGELOG.md | 3 ++- modules/ingest-geoip/build.gradle | 2 +- modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 | 1 - modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 create mode 100644 modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index cf101317bf81d..28080050b2e22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.gradle.enterprise` from 3.13.3 to 3.14.1 ([#8996](https://github.com/opensearch-project/OpenSearch/pull/8996)) - Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) - Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) +- Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) ### Changed - Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) @@ -117,4 +118,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x \ No newline at end of file diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index 35b7de8f83164..e126cf37e33a2 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -39,7 +39,7 @@ opensearchplugin { } dependencies { - api('com.maxmind.geoip2:geoip2:4.0.1') + api('com.maxmind.geoip2:geoip2:4.1.0') // geoip2 dependencies: api('com.maxmind.db:maxmind-db:3.0.0') api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") diff --git a/modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 deleted file mode 100644 index 0722ebf08e137..0000000000000 --- a/modules/ingest-geoip/licenses/geoip2-4.0.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f2a9b0ebd91b73a409a526b4d939f5ab8f4a1a87 \ No newline at end of file diff --git a/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 b/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 new file mode 100644 index 0000000000000..0d124299e4cfb --- /dev/null +++ b/modules/ingest-geoip/licenses/geoip2-4.1.0.jar.sha1 @@ -0,0 +1 @@ +b6b356cc91863409ba3475a148ee11a3a6d6aa4b \ No 
newline at end of file From 86ce02e2edbbe46797fc95f0cbc7bac338ba2a5a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 13:18:09 -0400 Subject: [PATCH 05/24] Bump org.apache.commons:commons-lang3 from 3.12.0 to 3.13.0 in /plugins/repository-hdfs (#8997) * Bump org.apache.commons:commons-lang3 in /plugins/repository-hdfs Bumps org.apache.commons:commons-lang3 from 3.12.0 to 3.13.0. --- updated-dependencies: - dependency-name: org.apache.commons:commons-lang3 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] * Add CHANGELOG.md entry Signed-off-by: Andriy Redko --------- Signed-off-by: dependabot[bot] Signed-off-by: Andriy Redko Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] Co-authored-by: Andriy Redko --- CHANGELOG.md | 3 ++- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 | 1 - plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 | 1 + 4 files changed, 4 insertions(+), 3 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index 28080050b2e22..ddedd5d59ffb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -97,6 +97,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 ([#8995](https://github.com/opensearch-project/OpenSearch/pull/8995)) - Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) - Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) +- Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) ### Changed - Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) @@ -118,4 +119,4 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.x...HEAD -[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x \ No newline at end of file +[Unreleased 2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.10...2.x diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6626bfccc6662..1fdb3d2fb41e2 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -76,7 +76,7 @@ dependencies { api 'org.apache.commons:commons-compress:1.23.0' api 'org.apache.commons:commons-configuration2:2.9.0' api 'commons-io:commons-io:2.13.0' - api 'org.apache.commons:commons-lang3:3.12.0' + api 'org.apache.commons:commons-lang3:3.13.0' implementation 'com.google.re2j:re2j:1.7' api 'javax.servlet:servlet-api:2.5' api "org.slf4j:slf4j-api:${versions.slf4j}" diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 deleted file mode 100644 index 9273d8c01aaba..0000000000000 --- 
a/plugins/repository-hdfs/licenses/commons-lang3-3.12.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c6842c86792ff03b9f1d1fe2aab8dc23aa6c6f0e \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 b/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 new file mode 100644 index 0000000000000..d0c2f2486ee1f --- /dev/null +++ b/plugins/repository-hdfs/licenses/commons-lang3-3.13.0.jar.sha1 @@ -0,0 +1 @@ +b7263237aa89c1f99b327197c41d0669707a462e \ No newline at end of file
From 22893b12add9f12717f42aa9e718ee16ea442114 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 1 Aug 2023 12:38:06 -0500 Subject: [PATCH 06/24] [Refactor] Recyclers to Common Library (#9028) This commit refactors the common recyclers logic from the server module to the core library to make the base functionality available to concrete implementations across the codebase. This is done to support cloud native or serverless implementations. Until JPMS is enabled, the classes remain marked for internal use only. Determining which classes will be exported will dictate if/when these doc labels are switched to API. Signed-off-by: Nicholas Walter Knize ---
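[Note] The classes moved here implement OpenSearch's object-recycling pattern: Recycler#obtain() hands out a pooled instance wrapped in a closeable Recycler.V handle, and closing the handle returns the instance to the pool (DequeRecycler), optionally behind a lock (Recyclers.locked) or striped across several pools for concurrency (Recyclers.concurrent). The sketch below is a minimal, self-contained stand-in that mirrors only the shape of that API — the ByteArrayRecycler name, the pool limit, and the 1 KB buffer size are illustrative assumptions, not the actual org.opensearch.common.recycler types.

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;

/**
 * Minimal sketch of the recycler pattern: obtain() reuses a pooled object
 * when one is available (allocating otherwise), and close() on the returned
 * handle resets the object and pushes it back for reuse, up to a size limit.
 */
public class DequeRecyclerDemo {
    /** Closeable handle around a pooled value, shaped like Recycler.V. */
    interface V<T> extends AutoCloseable {
        T v();
        boolean isRecycled();   // true if the value came from the pool
        @Override
        void close();           // returns the value to the pool
    }

    static class ByteArrayRecycler {
        private final Deque<byte[]> pool = new ArrayDeque<>();
        private final int maxPoolSize;

        ByteArrayRecycler(int maxPoolSize) {
            this.maxPoolSize = maxPoolSize;
        }

        V<byte[]> obtain() {
            final byte[] pooled = pool.pollFirst();                 // reuse if available
            final byte[] value = pooled != null ? pooled : new byte[1024];
            final boolean recycled = pooled != null;
            return new V<byte[]>() {
                public byte[] v() { return value; }
                public boolean isRecycled() { return recycled; }
                public void close() {
                    if (pool.size() < maxPoolSize) {
                        Arrays.fill(value, (byte) 0);               // recycle = reset state
                        pool.addFirst(value);
                    }                                               // over the limit: let GC take it
                }
            };
        }
    }

    public static void main(String[] args) {
        ByteArrayRecycler recycler = new ByteArrayRecycler(8);
        try (V<byte[]> first = recycler.obtain()) {
            System.out.println("first handle recycled?  " + first.isRecycled());  // false: fresh allocation
        }
        try (V<byte[]> second = recycler.obtain()) {
            System.out.println("second handle recycled? " + second.isRecycled()); // true: reused from pool
        }
    }
}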
.../opensearch/common/recycler/AbstractRecycler.java | 0 .../opensearch/common/recycler/AbstractRecyclerC.java | 0 .../common/recycler/ConcurrentDequeRecycler.java | 0 .../org/opensearch/common/recycler/DequeRecycler.java | 0 .../org/opensearch/common/recycler/FilterRecycler.java | 0 .../org/opensearch/common/recycler/NoneRecycler.java | 0 .../java/org/opensearch/common/recycler/Recycler.java | 0 .../java/org/opensearch/common/recycler/Recyclers.java | 4 +++- .../org/opensearch/common/recycler/package-info.java | 10 ++++++++++ .../common/util/concurrent/ConcurrentCollections.java | 0 .../common/util/concurrent/ConcurrentHashMapLong.java | 0 .../common/util/concurrent/ConcurrentMapLong.java | 0 .../common/recycler/AbstractRecyclerTestCase.java | 0 .../common/recycler/ConcurrentRecyclerTests.java | 0 .../common/recycler/LockedRecyclerTests.java | 0 .../opensearch/common/recycler/NoneRecyclerTests.java | 0 .../opensearch/common/recycler/QueueRecyclerTests.java | 0 17 files changed, 13 insertions(+), 1 deletion(-) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/DequeRecycler.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/FilterRecycler.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/NoneRecycler.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/Recycler.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/recycler/Recyclers.java (98%) create mode 100644 libs/common/src/main/java/org/opensearch/common/recycler/package-info.java rename {server => libs/common}/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java (100%)
diff --git a/server/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecycler.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java b/libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java rename to libs/common/src/main/java/org/opensearch/common/recycler/AbstractRecyclerC.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/ConcurrentDequeRecycler.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/DequeRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/DequeRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/DequeRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/DequeRecycler.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/FilterRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/FilterRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/FilterRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/FilterRecycler.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/NoneRecycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/NoneRecycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/NoneRecycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/NoneRecycler.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/Recycler.java b/libs/common/src/main/java/org/opensearch/common/recycler/Recycler.java similarity index 100% rename from server/src/main/java/org/opensearch/common/recycler/Recycler.java rename to libs/common/src/main/java/org/opensearch/common/recycler/Recycler.java
diff --git a/server/src/main/java/org/opensearch/common/recycler/Recyclers.java b/libs/common/src/main/java/org/opensearch/common/recycler/Recyclers.java similarity index 98% rename from server/src/main/java/org/opensearch/common/recycler/Recyclers.java rename to libs/common/src/main/java/org/opensearch/common/recycler/Recyclers.java index 4cbb80509d6a1..52587144369f1 100644 --- a/server/src/main/java/org/opensearch/common/recycler/Recyclers.java +++ 
b/libs/common/src/main/java/org/opensearch/common/recycler/Recyclers.java @@ -75,6 +75,8 @@ public static <T> Recycler.Factory<T> dequeFactory(final Recycler.C<T> c, final /** * Wrap the provided recycler so that calls to {@link Recycler#obtain()} and {@link Recycler.V#close()} are protected by * a lock. + * + * @opensearch.internal */ public static <T> Recycler<T> locked(final Recycler<T> recycler) { return new FilterRecycler<T>() { @@ -140,7 +142,7 @@ public static <T> Recycler<T> concurrent(final Recycler.Factory<T> factory, fina private final Recycler<T>[] recyclers; { - @SuppressWarnings("unchecked") + @SuppressWarnings({ "rawtypes", "unchecked" }) final Recycler<T>[] recyclers = new Recycler[concurrencyLevel]; this.recyclers = recyclers; for (int i = 0; i < concurrencyLevel; ++i) {
diff --git a/libs/common/src/main/java/org/opensearch/common/recycler/package-info.java b/libs/common/src/main/java/org/opensearch/common/recycler/package-info.java new file mode 100644 index 0000000000000..fec3c5d5e52d3 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/recycler/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common Recycler functionality for recycling objects */ +package org.opensearch.common.recycler;
diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java b/libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java similarity index 100% rename from server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java rename to libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentCollections.java
diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java b/libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java similarity index 100% rename from server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java rename to libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentHashMapLong.java
diff --git a/server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java b/libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java similarity index 100% rename from server/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java rename to libs/common/src/main/java/org/opensearch/common/util/concurrent/ConcurrentMapLong.java
diff --git a/server/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java b/libs/common/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java rename to libs/common/src/test/java/org/opensearch/common/recycler/AbstractRecyclerTestCase.java
diff --git a/server/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/ConcurrentRecyclerTests.java
diff --git a/server/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java 
b/libs/common/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/LockedRecyclerTests.java diff --git a/server/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/NoneRecyclerTests.java diff --git a/server/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java b/libs/common/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java rename to libs/common/src/test/java/org/opensearch/common/recycler/QueueRecyclerTests.java From b17396a17579a5023f8b3bf1ac2c2d8a48838479 Mon Sep 17 00:00:00 2001 From: Andriy Redko Date: Tue, 1 Aug 2023 14:40:35 -0400 Subject: [PATCH 07/24] Bump netty from 4.1.94.Final to 4.1.96.Final (#9030) Signed-off-by: Andriy Redko --- CHANGELOG.md | 1 + buildSrc/version.properties | 2 +- .../licenses/netty-buffer-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-buffer-4.1.96.Final.jar.sha1 | 1 + .../transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 | 1 - .../transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-common-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-common-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-handler-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-handler-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-resolver-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-resolver-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.96.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.94.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-dns-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-dns-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-socks-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-socks-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.94.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.96.Final.jar.sha1 | 1 + .../repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 | 1 - .../repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 | 1 + 
.../licenses/netty-codec-http-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-http2-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-http2-4.1.96.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 | 1 + .../repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 | 1 - .../repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.96.Final.jar.sha1 | 1 + .../netty-transport-classes-epoll-4.1.94.Final.jar.sha1 | 1 - .../netty-transport-classes-epoll-4.1.96.Final.jar.sha1 | 1 + .../netty-transport-native-unix-common-4.1.94.Final.jar.sha1 | 1 - .../netty-transport-native-unix-common-4.1.96.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-codec-http-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-codec-http-4.1.96.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 | 1 + .../transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 | 1 - .../transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 | 1 + .../licenses/netty-transport-4.1.94.Final.jar.sha1 | 1 - .../licenses/netty-transport-4.1.96.Final.jar.sha1 | 1 + 68 files changed, 35 insertions(+), 34 deletions(-) delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 create mode 100644 
modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 delete mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 create mode 100644 plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 
delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 diff --git a/CHANGELOG.md b/CHANGELOG.md index ddedd5d59ffb2..a37976462b38e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -98,6 +98,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Bump `com.google.cloud:google-cloud-core-http` from 2.21.0 to 2.21.1 ([#8999](https://github.com/opensearch-project/OpenSearch/pull/8999)) - Bump `com.maxmind.geoip2:geoip2` from 4.0.1 to 4.1.0 ([#8998](https://github.com/opensearch-project/OpenSearch/pull/8998)) - Bump `org.apache.commons:commons-lang3` from 3.12.0 to 3.13.0 in /plugins/repository-hdfs ([#8997](https://github.com/opensearch-project/OpenSearch/pull/8997)) +- Bump `netty` from 4.1.94.Final to 4.1.96.Final ([#9030](https://github.com/opensearch-project/OpenSearch/pull/9030)) ### Changed - Perform aggregation postCollection in ContextIndexSearcher after searching leaves ([#8303](https://github.com/opensearch-project/OpenSearch/pull/8303)) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index e4a9293c59b8f..53fcd63944d6a 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -28,7 +28,7 @@ jakarta_annotation = 1.3.5 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.5.0 -netty = 4.1.94.Final +netty = 4.1.96.Final joda = 2.12.2 # client dependencies diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 deleted file mode 100644 index 05b1c2a4d614e..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..7abdb33dc79a2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +4b80fffbe77485b457bf844289bf1801f61b9e91 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 deleted file mode 100644 index baa7e25f1ac49..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 new file mode 100644 index 
0000000000000..8fdb32be1de0b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 deleted file mode 100644 index 8c018be2565e5..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..dfb0cf39463e2 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +a4d0d95df5026965c454902ef3d6d84b81f89626 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 deleted file mode 100644 index e73026b412972..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..2fc787ee65197 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index b787338551ede..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..85b5f52749671 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +d10c167623cbc471753f950846df241d1021655c \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 deleted file mode 100644 index b08e85ba7adf8..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..fe4f48c68e78b --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +7840d7523d709e02961b647546f9d9dde1699306 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 deleted file mode 100644 index 4c9e4dda2b852..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-resolver-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..9e93f013226cd --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +0e51db5568a881e0f9b013b35617c597dc32f130 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 deleted file mode 100644 index ed7760b8e15d1..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..707285d3d29c3 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +dbd15ca244be28e1a98ed29b9d755edbfa737e02 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index 43bc960a347a1..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..e911c47d5ab1a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +daf8578cade63a01525ee9d70371fa78e6e91094 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 deleted file mode 100644 index 670bd4c98a044..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9180660dc8479e1594b60b02fc27404af0ea43a6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..42d5e60ce9d45 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-dns-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +afd90dc0e164be74b4a3e1a899890557fce98567 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 deleted file mode 100644 index e73026b412972..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..2fc787ee65197 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 deleted file mode 100644 index de2c4d00aef09..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b9192c7cda295d75f236a13a0b1f5a008f05d516 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..8e959bdac5079 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +f53c52dbddaa4a02a51430405792d3f30a89b147 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 deleted file mode 100644 index a2db8bece8f6f..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -26ba9d30b8f7b095155b9ac63378d6d9386d85c3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..d410208dada90 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +dcabd63f4aaec2b4cad7588bfdd4cd2c82287e38 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 deleted file mode 100644 index 2fa927b3b77ba..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -25bbe90e10685ce63c32bd0db56574cffffa28de \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..5041cf5473505 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +0095023cc667af76578c9be326a6d54e3e1de52c \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index 43bc960a347a1..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..e911c47d5ab1a --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ 
+daf8578cade63a01525ee9d70371fa78e6e91094 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 deleted file mode 100644 index 6766770f61e78..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a7df0424eed81818157f22613f36b72487ceb34 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..32ced5451cfb6 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +2145ec747511965e4a57099767654cf9083ce8a7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 deleted file mode 100644 index 05b1c2a4d614e..0000000000000 --- a/plugins/repository-s3/licenses/netty-buffer-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..7abdb33dc79a2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-buffer-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +4b80fffbe77485b457bf844289bf1801f61b9e91 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 deleted file mode 100644 index baa7e25f1ac49..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..8fdb32be1de0b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 deleted file mode 100644 index 8c018be2565e5..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..dfb0cf39463e2 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +a4d0d95df5026965c454902ef3d6d84b81f89626 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 deleted file mode 100644 index e73026b412972..0000000000000 --- a/plugins/repository-s3/licenses/netty-codec-http2-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f651595784d6cca4cbca6a8ad74c48fceed6cea8 \ No newline at end of file diff --git 
a/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..2fc787ee65197 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-codec-http2-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +cc8baf4ff67c1bcc0cde60bc5c2bb9447d92d9e6 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index b787338551ede..0000000000000 --- a/plugins/repository-s3/licenses/netty-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..85b5f52749671 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-common-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +d10c167623cbc471753f950846df241d1021655c \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 deleted file mode 100644 index b08e85ba7adf8..0000000000000 --- a/plugins/repository-s3/licenses/netty-handler-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..fe4f48c68e78b --- /dev/null +++ b/plugins/repository-s3/licenses/netty-handler-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +7840d7523d709e02961b647546f9d9dde1699306 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 deleted file mode 100644 index 4c9e4dda2b852..0000000000000 --- a/plugins/repository-s3/licenses/netty-resolver-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..9e93f013226cd --- /dev/null +++ b/plugins/repository-s3/licenses/netty-resolver-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +0e51db5568a881e0f9b013b35617c597dc32f130 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 deleted file mode 100644 index ed7760b8e15d1..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..707285d3d29c3 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +dbd15ca244be28e1a98ed29b9d755edbfa737e02 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 
b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 deleted file mode 100644 index 72a392ea2917d..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -240e36cd5c2ffaf655913f8857f2d58b26394679 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..58564d9da4b27 --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-classes-epoll-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +b0369501645f6e71f89ff7f77b5c5f52510a2e31 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index 43bc960a347a1..0000000000000 --- a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3fa5f9d04b6b782d869d6e0657d896eeadca5866 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..e911c47d5ab1a --- /dev/null +++ b/plugins/repository-s3/licenses/netty-transport-native-unix-common-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +daf8578cade63a01525ee9d70371fa78e6e91094 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 deleted file mode 100644 index 05b1c2a4d614e..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -eec248b26f16e888688e5bb37b7eeda76b78d2f7 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..7abdb33dc79a2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +4b80fffbe77485b457bf844289bf1801f61b9e91 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 deleted file mode 100644 index baa7e25f1ac49..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c70ef20ca338558147887df60f46341bc47f6900 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..8fdb32be1de0b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +9cfe430f8b14e7ba86969d8e1126aa0aae4d18f0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 deleted file mode 100644 index 8c018be2565e5..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9e5404764092c1f6305ad5719078f46ab228d587 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..dfb0cf39463e2 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +a4d0d95df5026965c454902ef3d6d84b81f89626 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 deleted file mode 100644 index b787338551ede..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad4ecf779ebc794cd351f57792f56ea01387b868 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..85b5f52749671 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +d10c167623cbc471753f950846df241d1021655c \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 deleted file mode 100644 index b08e85ba7adf8..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd9121ce24d6d3f2898946d04b0ef3ec548b00b4 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..fe4f48c68e78b --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +7840d7523d709e02961b647546f9d9dde1699306 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 deleted file mode 100644 index 4c9e4dda2b852..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e96f649e8e9dcb29a1f8e95328b99c9eb6cf76c2 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..9e93f013226cd --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +0e51db5568a881e0f9b013b35617c597dc32f130 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 deleted file mode 100644 index ed7760b8e15d1..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.94.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ec783a737f96991a87b1d5794e2f9eb2024d708a \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 new file mode 100644 index 0000000000000..707285d3d29c3 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.96.Final.jar.sha1 @@ -0,0 +1 @@ +dbd15ca244be28e1a98ed29b9d755edbfa737e02 \ No newline at end of file From cc641eb30ea6cde1708a7d8edac5764ce03d0a20 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 1 Aug 2023 15:12:03 -0500 Subject: [PATCH 
08/24] [BWC] Change BWC version check for MediaType (#9035) This commit changes the bwc version check for serializing MediaTypes over the transport wire after backporting changes to 2.x. Signed-off-by: Nicholas Walter Knize --- .../admin/cluster/storedscripts/PutStoredScriptRequest.java | 4 ++-- .../main/java/org/opensearch/action/index/IndexRequest.java | 4 ++-- .../java/org/opensearch/action/ingest/PutPipelineRequest.java | 4 ++-- .../org/opensearch/action/ingest/SimulatePipelineRequest.java | 4 ++-- .../opensearch/action/search/PutSearchPipelineRequest.java | 4 ++-- .../org/opensearch/action/termvectors/TermVectorsRequest.java | 4 ++-- .../org/opensearch/extensions/rest/ExtensionRestRequest.java | 4 ++-- .../org/opensearch/index/query/MoreLikeThisQueryBuilder.java | 4 ++-- .../java/org/opensearch/ingest/PipelineConfiguration.java | 4 ++-- .../org/opensearch/search/pipeline/PipelineConfiguration.java | 4 ++-- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java index 761373a001ffe..8b328bc3879dd 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/storedscripts/PutStoredScriptRequest.java @@ -68,7 +68,7 @@ public PutStoredScriptRequest(StreamInput in) throws IOException { super(in); id = in.readOptionalString(); content = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -152,7 +152,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(id); out.writeBytesReference(content); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index ac4c8436aab5a..584bee0caaf2e 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -159,7 +159,7 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { contentType = in.readMediaType(); } else { contentType = in.readEnum(XContentType.class); @@ -670,7 +670,7 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(autoGeneratedTimestamp); if (contentType != null) { out.writeBoolean(true); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { contentType.writeTo(out); } else { out.writeEnum((XContentType) contentType); diff --git a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java index 7a88f817c70bf..f764e4b23860a 100644 --- a/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java +++ 
b/server/src/main/java/org/opensearch/action/ingest/PutPipelineRequest.java @@ -70,7 +70,7 @@ public PutPipelineRequest(StreamInput in) throws IOException { super(in); id = in.readString(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -101,7 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java index 1ac441a1afe64..4837cfdd492b4 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulatePipelineRequest.java @@ -85,7 +85,7 @@ public SimulatePipelineRequest(BytesReference source, MediaType mediaType) { id = in.readOptionalString(); verbose = in.readBoolean(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(id); out.writeBoolean(verbose); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java index d32aab0c8a561..d0484b3a69a1e 100644 --- a/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java +++ b/server/src/main/java/org/opensearch/action/search/PutSearchPipelineRequest.java @@ -47,7 +47,7 @@ public PutSearchPipelineRequest(StreamInput in) throws IOException { super(in); id = in.readString(); source = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -76,7 +76,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBytesReference(source); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index 57cc4698cefce..f15a039fd9305 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -186,7 +186,7 @@ public TermVectorsRequest() {} if (in.readBoolean()) { doc = in.readBytesReference(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); 
} else { mediaType = in.readEnum(XContentType.class); @@ -538,7 +538,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeBytesReference(doc); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java index e6df6e964a31b..8b13061def8d0 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java +++ b/server/src/main/java/org/opensearch/extensions/rest/ExtensionRestRequest.java @@ -104,7 +104,7 @@ public ExtensionRestRequest(StreamInput in) throws IOException { params = in.readMap(StreamInput::readString, StreamInput::readString); headers = in.readMap(StreamInput::readString, StreamInput::readStringList); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -125,7 +125,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeStringCollection); out.writeBoolean(mediaType != null); if (mediaType != null) { - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java index 7287634ecfacb..13ed1859d3622 100644 --- a/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MoreLikeThisQueryBuilder.java @@ -235,7 +235,7 @@ public Item(@Nullable String index, XContentBuilder doc) { } if (in.readBoolean()) { doc = (BytesReference) in.readGenericValue(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType = in.readMediaType(); } else { mediaType = in.readEnum(XContentType.class); @@ -260,7 +260,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(doc != null); if (doc != null) { out.writeGenericValue(doc); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java b/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java index 04892e4653065..3b918592053c1 100644 --- a/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/ingest/PipelineConfiguration.java @@ -141,7 +141,7 @@ public static PipelineConfiguration readFrom(StreamInput in) throws IOException return new PipelineConfiguration( in.readString(), in.readBytesReference(), - in.getVersion().onOrAfter(Version.V_3_0_0) ? in.readMediaType() : in.readEnum(XContentType.class) + in.getVersion().onOrAfter(Version.V_2_10_0) ? 
in.readMediaType() : in.readEnum(XContentType.class) ); } @@ -158,7 +158,7 @@ public String toString() { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(config); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); diff --git a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java index b4f6549c83390..33686139039b9 100644 --- a/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java +++ b/server/src/main/java/org/opensearch/search/pipeline/PipelineConfiguration.java @@ -120,7 +120,7 @@ public static PipelineConfiguration readFrom(StreamInput in) throws IOException return new PipelineConfiguration( in.readString(), in.readBytesReference(), - in.getVersion().onOrAfter(Version.V_3_0_0) ? in.readMediaType() : in.readEnum(XContentType.class) + in.getVersion().onOrAfter(Version.V_2_10_0) ? in.readMediaType() : in.readEnum(XContentType.class) ); } @@ -137,7 +137,7 @@ public String toString() { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBytesReference(config); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { mediaType.writeTo(out); } else { out.writeEnum((XContentType) mediaType); From 85f918e3fe3e78b7e004b277645f799e06d95593 Mon Sep 17 00:00:00 2001 From: Sayali Gaikawad <61760125+gaiksaya@users.noreply.github.com> Date: Tue, 1 Aug 2023 15:27:20 -0700 Subject: [PATCH 09/24] Add workflow to check compatibility (#8486) Signed-off-by: Sayali Gaikawad --- .github/workflows/check-compatibility.yml | 35 +++++++++++++++++++ .../gradle/CheckCompatibilityTask.groovy | 14 ++++++-- 2 files changed, 46 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/check-compatibility.yml diff --git a/.github/workflows/check-compatibility.yml b/.github/workflows/check-compatibility.yml new file mode 100644 index 0000000000000..b208fe38a581f --- /dev/null +++ b/.github/workflows/check-compatibility.yml @@ -0,0 +1,35 @@ +--- +name: Check Compatibility + +on: + pull_request_target + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Run compatibility task + run: ./gradlew checkCompatibility | tee $HOME/gradlew-check.out + + - name: Get results + run: | + echo 'Compatibility status:' > ${{ github.workspace }}/results.txt && echo '```' >> ${{ github.workspace }}/results.txt + grep -e 'Compatible components' -e 'Incompatible components' -e 'Components skipped' -A 2 -B 3 $HOME/gradlew-check.out >> "${{ github.workspace }}/results.txt" + echo '```' >> ${{ github.workspace }}/results.txt + + - name: GitHub App token + id: github_app_token + uses: tibdex/github-app-token@v1.6.0 + with: + app_id: ${{ secrets.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + installation_id: 22958780 + + - name: Add comment on the PR + uses: peter-evans/create-or-update-comment@v3 + with: + token: ${{ steps.github_app_token.outputs.token }} + issue-number: ${{ github.event.number }} + body-path: "${{ github.workspace }}/results.txt" diff --git a/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy b/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy index ee6446fec6d57..b95bb3be22f8b 100644 --- 
a/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy +++ b/buildSrc/src/main/groovy/org/opensearch/gradle/CheckCompatibilityTask.groovy @@ -40,8 +40,8 @@ class CheckCompatibilityTask extends DefaultTask { @TaskAction void checkCompatibility() { - logger.info("Checking compatibility for: $repositoryUrls for $ref") repositoryUrls.parallelStream().forEach { repositoryUrl -> + logger.lifecycle("Checking compatibility for: $repositoryUrl with ref: $ref") def tempDir = File.createTempDir() try { if (cloneAndCheckout(repositoryUrl, tempDir)) { @@ -81,8 +81,16 @@ class CheckCompatibilityTask extends DefaultTask { protected static List getRepoUrls() { def json = new JsonSlurper().parse(REPO_URL.toURL()) - def labels = json.projects.values() - return labels as List + def repository = json.projects.values() + def repoUrls = replaceSshWithHttps(repository as List) + return repoUrls + } + + protected static replaceSshWithHttps(List repoList) { + repoList.replaceAll { element -> + element.replace("git@github.com:", "https://github.com/") + } + return repoList } protected boolean cloneAndCheckout(repoUrl, directory) { From f9158f3fe5395159fc12bbfb66fe9c23f62f9529 Mon Sep 17 00:00:00 2001 From: Kunal Kotwani Date: Tue, 1 Aug 2023 15:38:32 -0700 Subject: [PATCH 10/24] Update BwC version for file cache safeguards (#9038) Signed-off-by: Kunal Kotwani --- server/src/main/java/org/opensearch/cluster/ClusterInfo.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java index 7b1a2f0f12b69..1513dc4e5acf7 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterInfo.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterInfo.java @@ -110,7 +110,7 @@ public ClusterInfo(StreamInput in) throws IOException { this.shardSizes = Collections.unmodifiableMap(sizeMap); this.routingToDataPath = Collections.unmodifiableMap(routingMap); this.reservedSpace = Collections.unmodifiableMap(reservedSpaceMap); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { this.nodeFileCacheStats = in.readMap(StreamInput::readString, FileCacheStats::new); } else { this.nodeFileCacheStats = Map.of(); @@ -124,7 +124,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> out.writeLong(v == null ? -1 : v)); out.writeMap(this.routingToDataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString); out.writeMap(this.reservedSpace, (o, v) -> v.writeTo(o), (o, v) -> v.writeTo(o)); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { out.writeMap(this.nodeFileCacheStats, StreamOutput::writeString, (o, v) -> v.writeTo(o)); } } From 11c84264ca079e0999008ef0a75ab1e2d10e222b Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Tue, 1 Aug 2023 19:57:33 -0500 Subject: [PATCH 11/24] [BWC] Change BWC version check for MediaType in PercolateQueryBuilder (#9053) This changes the version check for MediaType in PercolateQueryBuilder for BWC. 
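As in the earlier MediaType hunks, the gate here drops from Version.V_3_0_0 to Version.V_2_10_0, presumably because the MediaType serialization was backported to the 2.10 line; once 2.10 nodes can emit and parse the new form, the check on main has to match or a mixed-version cluster would mis-read the stream. The invariant to preserve is that the read and write paths branch on the same constant. A minimal sketch of the paired checks, using only the StreamInput/StreamOutput calls visible in these patches (the mediaType field stands in for whichever request is being serialized):

    // Read side: only 2.10+ senders use the MediaType serialization.
    if (in.getVersion().onOrAfter(Version.V_2_10_0)) {
        mediaType = in.readMediaType();
    } else {
        mediaType = in.readEnum(XContentType.class); // legacy enum form
    }

    // Write side: must mirror the exact same version constant.
    if (out.getVersion().onOrAfter(Version.V_2_10_0)) {
        mediaType.writeTo(out);
    } else {
        out.writeEnum((XContentType) mediaType);
    }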
Signed-off-by: Nicholas Walter Knize --- .../java/org/opensearch/percolator/PercolateQueryBuilder.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java index 08d9a4855c473..9f49843c37ea5 100644 --- a/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/opensearch/percolator/PercolateQueryBuilder.java @@ -254,7 +254,7 @@ protected PercolateQueryBuilder(String field, Supplier documentS } documents = in.readList(StreamInput::readBytesReference); if (documents.isEmpty() == false) { - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_10_0)) { documentXContentType = in.readMediaType(); } else { documentXContentType = in.readEnum(XContentType.class); @@ -304,7 +304,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBytesReference(document); } if (documents.isEmpty() == false) { - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_10_0)) { documentXContentType.writeTo(out); } else { out.writeEnum((XContentType) documentXContentType); From 57eb105a6a156e3a313efd58e267d772ac178eea Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Tue, 1 Aug 2023 23:17:39 -0700 Subject: [PATCH 12/24] [Segment Replication] Refactor RemoteStoreReplicationSource (#8767) * [Segment Replication] Refactor remote replication source Signed-off-by: Suraj Singh * Unit test updates Signed-off-by: Suraj Singh * Self review Signed-off-by: Suraj Singh * Self review Signed-off-by: Suraj Singh * Segregate shard level tests for node to node and remote store segment replication Signed-off-by: Suraj Singh * Fix failing unit tests Signed-off-by: Suraj Singh * Fix failing UT Signed-off-by: Suraj Singh * Fix failing UT Signed-off-by: Suraj Singh * Address review comments Signed-off-by: Suraj Singh * Fix more unit tests Signed-off-by: Suraj Singh * Improve RemoteStoreReplicationSourceTests, remove unnecessary mocks and use actual failures for failure/exception use cases Signed-off-by: Suraj Singh * Spotless check fix Signed-off-by: Suraj Singh * Address review comments Signed-off-by: Suraj Singh * Ignore files already in store while computing segment file diff with primary Signed-off-by: Suraj Singh * Spotless fix Signed-off-by: Suraj Singh * Fix failing UT Signed-off-by: Suraj Singh * Spotless fix Signed-off-by: Suraj Singh * Move read/writes from IndexInput/Output to RemoteSegmentMetadata Signed-off-by: Suraj Singh * Address review comment Signed-off-by: Suraj Singh * Update recovery flow to perform commits during recovery Signed-off-by: Suraj Singh * Remove unnecessary char Signed-off-by: Suraj Singh * Address review comments Signed-off-by: Suraj Singh * Update comment nit-pick Signed-off-by: Suraj Singh * Remove deletion logic causing read issues due to deleted segments_N Signed-off-by: Suraj Singh * Spotless fix Signed-off-by: Suraj Singh * Fix unit tests Signed-off-by: Suraj Singh --------- Signed-off-by: Suraj Singh --- .../replication/SegmentReplicationBaseIT.java | 20 - .../remotestore/RemoteStoreStatsIT.java | 8 +- .../RemoteIndexSnapshotStatusApiIT.java | 1 + .../opensearch/index/shard/IndexShard.java | 54 +- .../shard/RemoteStoreRefreshListener.java | 9 +- .../opensearch/index/shard/StoreRecovery.java | 2 +-
.../store/RemoteSegmentStoreDirectory.java | 13 +- .../org/opensearch/index/store/Store.java | 10 +- .../metadata/RemoteSegmentMetadata.java | 53 +- .../recovery/PeerRecoveryTargetService.java | 2 +- .../replication/GetSegmentFilesResponse.java | 4 + .../RemoteStoreReplicationSource.java | 50 +- .../replication/SegmentReplicationTarget.java | 55 +- .../RemoteStoreRefreshListenerTests.java | 2 +- ...overyWithRemoteTranslogOnPrimaryTests.java | 37 +- .../SegmentReplicationIndexShardTests.java | 896 ++---------------- ...licationWithNodeToNodeIndexShardTests.java | 697 ++++++++++++++ ...tReplicationWithRemoteIndexShardTests.java | 133 ++- .../RemoteSegmentStoreDirectoryTests.java | 47 +- .../RemoteSegmentMetadataHandlerTests.java | 28 +- ...teStorePeerRecoverySourceHandlerTests.java | 29 +- .../RemoteStoreReplicationSourceTests.java | 199 ++-- ...enSearchIndexLevelReplicationTestCase.java | 6 +- .../index/shard/IndexShardTestCase.java | 64 +- 24 files changed, 1291 insertions(+), 1128 deletions(-) create mode 100644 server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java index 64c6ebbb33482..cfb2e11c8c429 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationBaseIT.java @@ -8,7 +8,6 @@ package org.opensearch.indices.replication; -import org.opensearch.action.admin.indices.replication.SegmentReplicationStatsResponse; import org.opensearch.action.search.SearchResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -24,7 +23,6 @@ import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.IndexModule; import org.opensearch.index.IndexService; -import org.opensearch.index.SegmentReplicationPerGroupStats; import org.opensearch.index.SegmentReplicationShardStats; import org.opensearch.index.engine.Engine; import org.opensearch.index.shard.IndexShard; @@ -134,24 +132,6 @@ protected void waitForSearchableDocs(long docCount, String... 
nodes) throws Exce waitForSearchableDocs(docCount, Arrays.stream(nodes).collect(Collectors.toList())); } - protected void waitForSegmentReplication(String node) throws Exception { - assertBusy(() -> { - SegmentReplicationStatsResponse segmentReplicationStatsResponse = client(node).admin() - .indices() - .prepareSegmentReplicationStats(INDEX_NAME) - .setDetailed(true) - .execute() - .actionGet(); - final SegmentReplicationPerGroupStats perGroupStats = segmentReplicationStatsResponse.getReplicationStats() - .get(INDEX_NAME) - .get(0); - assertEquals( - perGroupStats.getReplicaStats().stream().findFirst().get().getCurrentReplicationState().getStage(), - SegmentReplicationState.Stage.DONE - ); - }, 1, TimeUnit.MINUTES); - } - protected void verifyStoreContent() throws Exception { assertBusy(() -> { final ClusterState clusterState = getClusterState(); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 840e3a07ed255..1c7f14701b3e7 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -272,12 +272,12 @@ public void testDownloadStatsCorrectnessSinglePrimarySingleReplica() throws Exce assertTrue( replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted > 0 && primaryStats.uploadBytesStarted - - zeroStatePrimaryStats.uploadBytesStarted == replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted + - zeroStatePrimaryStats.uploadBytesStarted >= replicaStats.directoryFileTransferTrackerStats.transferredBytesStarted ); assertTrue( replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded > 0 && primaryStats.uploadBytesSucceeded - - zeroStatePrimaryStats.uploadBytesSucceeded == replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded + - zeroStatePrimaryStats.uploadBytesSucceeded >= replicaStats.directoryFileTransferTrackerStats.transferredBytesSucceeded ); // Assert zero failures assertEquals(0, primaryStats.uploadBytesFailed); @@ -369,8 +369,8 @@ public void testDownloadStatsCorrectnessSinglePrimaryMultipleReplicaShards() thr assertEquals(0, uploadsFailed); assertEquals(0, uploadBytesFailed); for (int j = 0; j < response.getSuccessfulShards() - 1; j++) { - assertEquals(uploadBytesStarted - zeroStatePrimaryStats.uploadBytesStarted, (long) downloadBytesStarted.get(j)); - assertEquals(uploadBytesSucceeded - zeroStatePrimaryStats.uploadBytesSucceeded, (long) downloadBytesSucceeded.get(j)); + assertTrue(uploadBytesStarted - zeroStatePrimaryStats.uploadBytesStarted > downloadBytesStarted.get(j)); + assertTrue(uploadBytesSucceeded - zeroStatePrimaryStats.uploadBytesSucceeded > downloadBytesSucceeded.get(j)); assertEquals(0, (long) downloadBytesFailed.get(j)); } }, 60, TimeUnit.SECONDS); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java index b6a5188c99335..d17410d8921ed 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/RemoteIndexSnapshotStatusApiIT.java @@ -57,6 +57,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .put(super.nodeSettings(nodeOrdinal)) 
.put(ThreadPool.ESTIMATED_TIME_INTERVAL_SETTING.getKey(), 0) // We have tests that check by-timestamp order .put(FeatureFlags.REMOTE_STORE, "true") + .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") .put(remoteStoreClusterSettings("remote-store-repo-name")) .build(); } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index bb5088866edb6..2b85193275a13 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -199,9 +199,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.Comparator; import java.util.EnumSet; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -1988,7 +1986,7 @@ private long recoverLocallyUpToGlobalCheckpoint() { final Optional safeCommit; final long globalCheckpoint; try { - final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(TRANSLOG_UUID_KEY); globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); safeCommit = store.findSafeIndexCommit(globalCheckpoint); } catch (org.apache.lucene.index.IndexNotFoundException e) { @@ -2088,7 +2086,7 @@ private long recoverLocallyUptoLastCommit() { try { seqNo = Long.parseLong(store.readLastCommittedSegmentsInfo().getUserData().get(MAX_SEQ_NO)); } catch (org.apache.lucene.index.IndexNotFoundException e) { - logger.error("skip local recovery as no index commit found", e); + logger.error("skip local recovery as no index commit found"); return UNASSIGNED_SEQ_NO; } catch (Exception e) { logger.error("skip local recovery as failed to find the safe commit", e); @@ -2242,7 +2240,7 @@ private void loadGlobalCheckpointToReplicationTracker() throws IOException { // we have to set it before we open an engine and recover from the translog because // acquiring a snapshot from the translog causes a sync which causes the global checkpoint to be pulled in, // and an engine can be forced to close in ctor which also causes the global checkpoint to be pulled in. 
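// Context for the hunk below: on startup the shard recovers its global checkpoint
// by reading the translog UUID out of the last commit's user data and then asking
// the translog for the checkpoint recorded under that UUID, i.e.:
//
//     String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(TRANSLOG_UUID_KEY);
//     long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID);
//
// The change below is cosmetic, switching to the statically imported TRANSLOG_UUID_KEY.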
- final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(Translog.TRANSLOG_UUID_KEY); + final String translogUUID = store.readLastCommittedSegmentsInfo().getUserData().get(TRANSLOG_UUID_KEY); final long globalCheckpoint = Translog.readGlobalCheckpoint(translogConfig.getTranslogPath(), translogUUID); replicationTracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "read from translog checkpoint"); } @@ -2326,7 +2324,7 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b assert currentEngineReference.get() == null : "engine is running"; verifyNotClosed(); if (indexSettings.isRemoteStoreEnabled() && syncFromRemote) { - syncSegmentsFromRemoteSegmentStore(false, true, true); + syncSegmentsFromRemoteSegmentStore(false, true); } if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { if (syncFromRemote) { @@ -4555,7 +4553,7 @@ public void close() throws IOException { }; IOUtils.close(currentEngineReference.getAndSet(readOnlyEngine)); if (indexSettings.isRemoteStoreEnabled()) { - syncSegmentsFromRemoteSegmentStore(false, true, true); + syncSegmentsFromRemoteSegmentStore(false, true); } if (indexSettings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { syncRemoteTranslogAndUpdateGlobalCheckpoint(); @@ -4616,13 +4614,11 @@ public void syncTranslogFilesFromRemoteTranslog() throws IOException { * Downloads segments from remote segment store. * @param overrideLocal flag to override local segment files with those in remote store * @param refreshLevelSegmentSync last refresh checkpoint is used if true, commit checkpoint otherwise - * @param shouldCommit if the shard requires committing the changes after sync from remote. * @throws IOException if exception occurs while reading segments from remote store */ - public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean refreshLevelSegmentSync, boolean shouldCommit) - throws IOException { + public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean refreshLevelSegmentSync) throws IOException { assert indexSettings.isRemoteStoreEnabled(); - logger.info("Downloading segments from remote segment store"); + logger.trace("Downloading segments from remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = getRemoteDirectory(); // We need to call RemoteSegmentStoreDirectory.init() in order to get latest metadata of the files that // are uploaded to the remote segment store. @@ -4647,7 +4643,6 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re } else { storeDirectory = store.directory(); } - Set localSegmentFiles = Sets.newHashSet(storeDirectory.listAll()); copySegmentFiles(storeDirectory, remoteDirectory, null, uploadedSegments, overrideLocal); if (refreshLevelSegmentSync && remoteSegmentMetadata != null) { @@ -4661,37 +4656,8 @@ public void syncSegmentsFromRemoteSegmentStore(boolean overrideLocal, boolean re indexInput, remoteSegmentMetadata.getGeneration() ); - // Replicas never need a local commit - if (shouldCommit) { - if (this.shardRouting.primary()) { - long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY)); - // Following code block makes sure to use SegmentInfosSnapshot in the remote store if generation differs - // with local filesystem. If local filesystem already has segments_N+2 and infosSnapshot has generation N, - // after commit, there would be 2 files that would be created segments_N+1 and segments_N+2. 
With the - // policy of preserving only the latest commit, we will delete segments_N+1 which in fact is the part of the - // latest commit. - Optional localMaxSegmentInfos = localSegmentFiles.stream() - .filter(file -> file.startsWith(IndexFileNames.SEGMENTS)) - .max(Comparator.comparingLong(SegmentInfos::generationFromSegmentsFileName)); - if (localMaxSegmentInfos.isPresent() - && infosSnapshot.getGeneration() < SegmentInfos.generationFromSegmentsFileName(localMaxSegmentInfos.get()) - - 1) { - // If remote translog is not enabled, local translog will be created with different UUID. - // This fails in Store.trimUnsafeCommits() as translog UUID of checkpoint and SegmentInfos needs - // to be same. Following code block make sure to have the same UUID. - if (indexSettings.isRemoteTranslogStoreEnabled() == false) { - SegmentInfos localSegmentInfos = store.readLastCommittedSegmentsInfo(); - Map userData = new HashMap<>(infosSnapshot.getUserData()); - userData.put(TRANSLOG_UUID_KEY, localSegmentInfos.userData.get(TRANSLOG_UUID_KEY)); - infosSnapshot.setUserData(userData, false); - } - storeDirectory.deleteFile(localMaxSegmentInfos.get()); - } - store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); - } - } else { - finalizeReplication(infosSnapshot); - } + long processedLocalCheckpoint = Long.parseLong(infosSnapshot.getUserData().get(LOCAL_CHECKPOINT_KEY)); + store.commitSegmentInfos(infosSnapshot, processedLocalCheckpoint, processedLocalCheckpoint); } } } catch (IOException e) { @@ -4716,7 +4682,7 @@ public void syncSegmentsFromGivenRemoteSegmentStore( long primaryTerm, long commitGeneration ) throws IOException { - logger.info("Downloading segments from given remote segment store"); + logger.trace("Downloading segments from given remote segment store"); RemoteSegmentStoreDirectory remoteDirectory = null; if (remoteStore != null) { remoteDirectory = getRemoteDirectory(); diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 4a70ff04770d3..8dd0c8b9d4405 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -220,7 +220,7 @@ private synchronized boolean syncSegments() { public void onResponse(Void unused) { try { // Start metadata file upload - uploadMetadata(localSegmentsPostRefresh, segmentInfos); + uploadMetadata(localSegmentsPostRefresh, segmentInfos, checkpoint); clearStaleFilesFromLocalSegmentChecksumMap(localSegmentsPostRefresh); onSuccessfulSegmentsSync( refreshTimeMs, @@ -327,7 +327,8 @@ private boolean isRefreshAfterCommit() throws IOException { && !remoteDirectory.containsFile(lastCommittedLocalSegmentFileName, getChecksumOfLocalFile(lastCommittedLocalSegmentFileName))); } - void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos segmentInfos) throws IOException { + void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos segmentInfos, ReplicationCheckpoint replicationCheckpoint) + throws IOException { final long maxSeqNo = ((InternalEngine) indexShard.getEngine()).currentOngoingRefreshCheckpoint(); SegmentInfos segmentInfosSnapshot = segmentInfos.clone(); Map userData = segmentInfosSnapshot.getUserData(); @@ -344,8 +345,8 @@ void uploadMetadata(Collection localSegmentsPostRefresh, SegmentInfos se localSegmentsPostRefresh, segmentInfosSnapshot, storeDirectory, - 
indexShard.getOperationPrimaryTerm(), - translogFileGeneration + translogFileGeneration, + replicationCheckpoint ); } } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 5897fa7d513d7..2c8a186a6ed53 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -530,7 +530,7 @@ private void recoverFromRemoteStore(IndexShard indexShard) throws IndexShardReco remoteStore.incRef(); try { // Download segments from remote segment store - indexShard.syncSegmentsFromRemoteSegmentStore(true, true, true); + indexShard.syncSegmentsFromRemoteSegmentStore(true, true); if (store.directory().listAll().length == 0) { store.createEmpty(indexShard.indexSettings().getIndexVersionCreated().luceneVersion); diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java index 8ee267cb67e68..8dfdb3e2c8e06 100644 --- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java +++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java @@ -44,6 +44,7 @@ import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; import java.io.FileNotFoundException; @@ -603,19 +604,20 @@ public boolean containsFile(String localFilename, String checksum) { * @param segmentFiles segment files that are part of the shard at the time of the latest refresh * @param segmentInfosSnapshot SegmentInfos bytes to store as part of metadata file * @param storeDirectory instance of local directory to temporarily create metadata file before upload - * @param primaryTerm primary term to be used in the name of metadata file + * @param translogGeneration translog generation + * @param replicationCheckpoint ReplicationCheckpoint of primary shard * @throws IOException in case of I/O error while uploading the metadata file */ public void uploadMetadata( Collection segmentFiles, SegmentInfos segmentInfosSnapshot, Directory storeDirectory, - long primaryTerm, - long translogGeneration + long translogGeneration, + ReplicationCheckpoint replicationCheckpoint ) throws IOException { synchronized (this) { String metadataFilename = MetadataFilenameUtils.getMetadataFilename( - primaryTerm, + replicationCheckpoint.getPrimaryTerm(), segmentInfosSnapshot.getGeneration(), translogGeneration, metadataUploadCounter.incrementAndGet(), @@ -646,8 +648,7 @@ public void uploadMetadata( new RemoteSegmentMetadata( RemoteSegmentMetadata.fromMapOfStrings(uploadedSegments), segmentInfoSnapshotByteArray, - primaryTerm, - segmentInfosSnapshot.getGeneration() + replicationCheckpoint ) ); } diff --git a/server/src/main/java/org/opensearch/index/store/Store.java b/server/src/main/java/org/opensearch/index/store/Store.java index a67b87f58110c..921deae41946a 100644 --- a/server/src/main/java/org/opensearch/index/store/Store.java +++ b/server/src/main/java/org/opensearch/index/store/Store.java @@ -845,22 +845,24 @@ private void cleanupFiles(Collection filesToConsiderForCleanup, String r * @param tmpToFileName Map of temporary replication file to 
actual file name * @param infosBytes bytes[] of SegmentInfos supposed to be sent over by primary excluding segment_N file * @param segmentsGen segment generation number - * @param consumer consumer for generated SegmentInfos + * @param finalizeConsumer consumer for action on passed in SegmentInfos + * @param renameConsumer consumer for action on temporary copied over files * @throws IOException Exception while reading store and building segment infos */ public void buildInfosFromBytes( Map tmpToFileName, byte[] infosBytes, long segmentsGen, - CheckedConsumer consumer + CheckedConsumer finalizeConsumer, + CheckedConsumer, IOException> renameConsumer ) throws IOException { metadataLock.writeLock().lock(); try { final List values = new ArrayList<>(tmpToFileName.values()); incRefFileDeleter(values); try { - renameTempFilesSafe(tmpToFileName); - consumer.accept(buildSegmentInfos(infosBytes, segmentsGen)); + renameConsumer.accept(tmpToFileName); + finalizeConsumer.accept(buildSegmentInfos(infosBytes, segmentsGen)); } finally { decRefFileDeleter(values); } diff --git a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java index 9a479346ff711..15703a2c02b13 100644 --- a/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java +++ b/server/src/main/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadata.java @@ -14,7 +14,10 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.IndexOutput; +import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.store.RemoteSegmentStoreDirectory; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; /** * Metadata object for Remote Segment @@ -38,19 +41,16 @@ public class RemoteSegmentMetadata { private final byte[] segmentInfosBytes; - private final long primaryTerm; - private final long generation; + private final ReplicationCheckpoint replicationCheckpoint; public RemoteSegmentMetadata( Map metadata, byte[] segmentInfosBytes, - long primaryTerm, - long generation + ReplicationCheckpoint replicationCheckpoint ) { this.metadata = metadata; this.segmentInfosBytes = segmentInfosBytes; - this.generation = generation; - this.primaryTerm = primaryTerm; + this.replicationCheckpoint = replicationCheckpoint; } /** @@ -66,11 +66,15 @@ public byte[] getSegmentInfosBytes() { } public long getGeneration() { - return generation; + return replicationCheckpoint.getSegmentsGen(); } public long getPrimaryTerm() { - return primaryTerm; + return replicationCheckpoint.getPrimaryTerm(); + } + + public ReplicationCheckpoint getReplicationCheckpoint() { + return replicationCheckpoint; } /** @@ -99,19 +103,42 @@ public static Map f public void write(IndexOutput out) throws IOException { out.writeMapOfStrings(toMapOfStrings()); - out.writeLong(generation); - out.writeLong(primaryTerm); + writeCheckpointToIndexOutput(replicationCheckpoint, out); out.writeLong(segmentInfosBytes.length); out.writeBytes(segmentInfosBytes, segmentInfosBytes.length); } public static RemoteSegmentMetadata read(IndexInput indexInput) throws IOException { Map metadata = indexInput.readMapOfStrings(); - long generation = indexInput.readLong(); - long primaryTerm = indexInput.readLong(); + ReplicationCheckpoint replicationCheckpoint = readCheckpointFromIndexInput(indexInput); int byteArraySize = (int) indexInput.readLong(); byte[] 
segmentInfosBytes = new byte[byteArraySize]; indexInput.readBytes(segmentInfosBytes, 0, byteArraySize); - return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, primaryTerm, generation); + return new RemoteSegmentMetadata(RemoteSegmentMetadata.fromMapOfStrings(metadata), segmentInfosBytes, replicationCheckpoint); + } + + public static void writeCheckpointToIndexOutput(ReplicationCheckpoint replicationCheckpoint, IndexOutput out) throws IOException { + ShardId shardId = replicationCheckpoint.getShardId(); + // Write ShardId + out.writeString(shardId.getIndex().getName()); + out.writeString(shardId.getIndex().getUUID()); + out.writeVInt(shardId.getId()); + // Write remaining checkpoint fields + out.writeLong(replicationCheckpoint.getPrimaryTerm()); + out.writeLong(replicationCheckpoint.getSegmentsGen()); + out.writeLong(replicationCheckpoint.getSegmentInfosVersion()); + out.writeLong(replicationCheckpoint.getLength()); + out.writeString(replicationCheckpoint.getCodec()); + } + + private static ReplicationCheckpoint readCheckpointFromIndexInput(IndexInput in) throws IOException { + return new ReplicationCheckpoint( + new ShardId(new Index(in.readString(), in.readString()), in.readVInt()), + in.readLong(), + in.readLong(), + in.readLong(), + in.readLong(), + in.readString() + ); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 0ba57a9ee7f65..386b2e0e8192d 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -245,7 +245,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi indexShard.prepareForIndexRecovery(); final boolean hasRemoteSegmentStore = indexShard.indexSettings().isRemoteStoreEnabled(); if (hasRemoteSegmentStore) { - indexShard.syncSegmentsFromRemoteSegmentStore(false, false, true); + indexShard.syncSegmentsFromRemoteSegmentStore(false, false); } final boolean hasRemoteTranslog = recoveryTarget.state().getPrimary() == false && indexShard.isRemoteTranslogEnabled(); final boolean hasNoTranslog = indexShard.indexSettings().isRemoteSnapshot(); diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java index 89d50a17464a6..33a84833f2418 100644 --- a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java @@ -33,6 +33,10 @@ public GetSegmentFilesResponse(StreamInput out) throws IOException { out.readList(StoreFileMetadata::new); } + public List getFiles() { + return files; + } + @Override public void writeTo(StreamOutput out) throws IOException { out.writeCollection(files); diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java index c5be7635782af..7f444d0031533 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteStoreReplicationSource.java @@ -10,7 +10,9 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; 
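// For reference, the RemoteSegmentMetadata hunks above change the metadata file
// layout to: the uploaded-segments string map, the embedded ReplicationCheckpoint
// (index name, index UUID, shard id, primary term, segments generation,
// SegmentInfos version, length, codec), then the SegmentInfos byte count and raw
// bytes; read(...) consumes the fields in exactly that order. A sketch of a
// round-trip check under those assumptions (test-style; assumes
// ReplicationCheckpoint implements value equality):
//
//     try (Directory dir = new ByteBuffersDirectory()) {
//         try (IndexOutput out = dir.createOutput("md_rt", IOContext.DEFAULT)) {
//             metadata.write(out);
//         }
//         try (IndexInput in = dir.openInput("md_rt", IOContext.DEFAULT)) {
//             RemoteSegmentMetadata copy = RemoteSegmentMetadata.read(in);
//             assert copy.getReplicationCheckpoint().equals(metadata.getReplicationCheckpoint());
//         }
//     }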
+import org.apache.lucene.store.Directory; import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; import org.apache.lucene.util.Version; import org.opensearch.action.ActionListener; import org.opensearch.index.shard.IndexShard; @@ -21,6 +23,8 @@ import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; @@ -33,12 +37,16 @@ */ public class RemoteStoreReplicationSource implements SegmentReplicationSource { - private static final Logger logger = LogManager.getLogger(PrimaryShardReplicationSource.class); + private static final Logger logger = LogManager.getLogger(RemoteStoreReplicationSource.class); private final IndexShard indexShard; + private final RemoteSegmentStoreDirectory remoteDirectory; public RemoteStoreReplicationSource(IndexShard indexShard) { this.indexShard = indexShard; + FilterDirectory remoteStoreDirectory = (FilterDirectory) indexShard.remoteStore().directory(); + FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); + this.remoteDirectory = (RemoteSegmentStoreDirectory) byteSizeCachingStoreDirectory.getDelegate(); } @Override @@ -47,15 +55,11 @@ public void getCheckpointMetadata( ReplicationCheckpoint checkpoint, ActionListener listener ) { - FilterDirectory remoteStoreDirectory = (FilterDirectory) indexShard.remoteStore().directory(); - FilterDirectory byteSizeCachingStoreDirectory = (FilterDirectory) remoteStoreDirectory.getDelegate(); - RemoteSegmentStoreDirectory remoteDirectory = (RemoteSegmentStoreDirectory) byteSizeCachingStoreDirectory.getDelegate(); - Map metadataMap; // TODO: Need to figure out a way to pass this information for segment metadata via remote store. final Version version = indexShard.getSegmentInfosSnapshot().get().getCommitLuceneVersion(); try { - RemoteSegmentMetadata mdFile = remoteDirectory.readLatestMetadataFile(); + RemoteSegmentMetadata mdFile = remoteDirectory.init(); // During initial recovery flow, the remote store might not have metadata as primary hasn't uploaded anything yet. if (mdFile == null && indexShard.state().equals(IndexShardState.STARTED) == false) { listener.onResponse(new CheckpointInfoResponse(checkpoint, Collections.emptyMap(), null)); @@ -77,8 +81,7 @@ public void getCheckpointMetadata( ) ) ); - // TODO: GET current checkpoint from remote store. 
- listener.onResponse(new CheckpointInfoResponse(checkpoint, metadataMap, null)); + listener.onResponse(new CheckpointInfoResponse(mdFile.getReplicationCheckpoint(), metadataMap, mdFile.getSegmentInfosBytes())); } catch (Exception e) { listener.onFailure(e); } @@ -93,8 +96,33 @@ public void getSegmentFiles( ActionListener listener ) { try { - indexShard.syncSegmentsFromRemoteSegmentStore(false, true, false); - listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + if (filesToFetch.isEmpty()) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + return; + } + logger.trace("Downloading segments files from remote store {}", filesToFetch); + + RemoteSegmentMetadata remoteSegmentMetadata = remoteDirectory.readLatestMetadataFile(); + List downloadedSegments = new ArrayList<>(); + Collection directoryFiles = List.of(indexShard.store().directory().listAll()); + if (remoteSegmentMetadata != null) { + try { + indexShard.store().incRef(); + indexShard.remoteStore().incRef(); + final Directory storeDirectory = indexShard.store().directory(); + for (StoreFileMetadata fileMetadata : filesToFetch) { + String file = fileMetadata.name(); + assert directoryFiles.contains(file) == false : "Local store already contains the file " + file; + storeDirectory.copyFrom(remoteDirectory, file, file, IOContext.DEFAULT); + downloadedSegments.add(fileMetadata); + } + logger.trace("Downloaded segments from remote store {}", downloadedSegments); + } finally { + indexShard.store().decRef(); + indexShard.remoteStore().decRef(); + } + } + listener.onResponse(new GetSegmentFilesResponse(downloadedSegments)); } catch (Exception e) { listener.onFailure(e); } @@ -102,6 +130,6 @@ public void getSegmentFiles( @Override public String getDescription() { - return "remote store"; + return "RemoteStoreReplicationSource"; } } diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index 2e0f5a8c0ad1f..c22701dfc94ce 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -13,10 +13,6 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.BufferedChecksumIndexInput; -import org.apache.lucene.store.ByteBuffersDataInput; -import org.apache.lucene.store.ByteBuffersIndexInput; -import org.apache.lucene.store.ChecksumIndexInput; import org.opensearch.OpenSearchCorruptionException; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; @@ -36,10 +32,10 @@ import org.opensearch.indices.replication.common.ReplicationTarget; import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.stream.Collectors; /** * Represents the target of a replication event. 
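// The getSegmentFiles(...) rewrite above copies missing segment files straight from
// the remote segment store instead of streaming them through the peer file-chunk
// path. Its essential shape, condensed from the diff (error handling elided; the
// assert in the diff guards against downloading a file the local store already has):
//
//     try {
//         indexShard.store().incRef();        // pin the local store for the copy
//         indexShard.remoteStore().incRef();  // and the remote directory as well
//         Directory local = indexShard.store().directory();
//         for (StoreFileMetadata md : filesToFetch) {
//             local.copyFrom(remoteDirectory, md.name(), md.name(), IOContext.DEFAULT);
//         }
//     } finally {
//         indexShard.store().decRef();
//         indexShard.remoteStore().decRef();
//     }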
@@ -55,10 +51,6 @@ public class SegmentReplicationTarget extends ReplicationTarget { public final static String REPLICATION_PREFIX = "replication."; - public ReplicationCheckpoint getCheckpoint() { - return this.checkpoint; - } - public SegmentReplicationTarget(IndexShard indexShard, SegmentReplicationSource source, ReplicationListener listener) { super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); this.checkpoint = indexShard.getLatestReplicationCheckpoint(); @@ -117,6 +109,10 @@ public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOEx return false; } + public ReplicationCheckpoint getCheckpoint() { + return this.checkpoint; + } + @Override public void writeFileChunk( StoreFileMetadata metadata, @@ -162,7 +158,7 @@ public void startReplication(ActionListener listener) { }, listener::onFailure); getFilesListener.whenComplete(response -> { - finalizeReplication(checkpointInfoListener.result()); + finalizeReplication(checkpointInfoListener.result(), getFilesListener.result()); listener.onResponse(null); }, listener::onFailure); } @@ -193,23 +189,34 @@ private List getFiles(CheckpointInfoResponse checkpointInfo) return diff.missing; } - private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) throws OpenSearchCorruptionException { - // TODO: Refactor the logic so that finalize doesn't have to be invoked for remote store as source - if (source instanceof RemoteStoreReplicationSource) { - state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); - return; - } + private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse, GetSegmentFilesResponse getSegmentFilesResponse) + throws OpenSearchCorruptionException { cancellableThreads.checkForCancel(); state.setStage(SegmentReplicationState.Stage.FINALIZE_REPLICATION); + // Handle empty SegmentInfos bytes for recovering replicas + if (checkpointInfoResponse.getInfosBytes() == null) { + return; + } Store store = null; try { store = store(); store.incRef(); + Map tempFileNames; + if (this.indexShard.indexSettings().isRemoteStoreEnabled() == true) { + tempFileNames = getSegmentFilesResponse.getFiles() + .stream() + .collect(Collectors.toMap(StoreFileMetadata::name, StoreFileMetadata::name)); + } else { + tempFileNames = multiFileWriter.getTempFileNames(); + } store.buildInfosFromBytes( - multiFileWriter.getTempFileNames(), + tempFileNames, checkpointInfoResponse.getInfosBytes(), checkpointInfoResponse.getCheckpoint().getSegmentsGen(), - indexShard::finalizeReplication + indexShard::finalizeReplication, + this.indexShard.indexSettings().isRemoteStoreEnabled() == true + ? (files) -> {} + : (files) -> indexShard.store().renameTempFilesSafe(files) ); } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) { // this is a fatal exception at this stage. @@ -247,16 +254,6 @@ private void finalizeReplication(CheckpointInfoResponse checkpointInfoResponse) } } - /** - * This method formats our byte[] containing the primary's SegmentInfos into lucene's {@link ChecksumIndexInput} that can be - * passed to SegmentInfos.readCommit - */ - private ChecksumIndexInput toIndexInput(byte[] input) { - return new BufferedChecksumIndexInput( - new ByteBuffersIndexInput(new ByteBuffersDataInput(Arrays.asList(ByteBuffer.wrap(input))), "SegmentInfos") - ); - } - /** * Trigger a cancellation, this method will not close the target a subsequent call to #fail is required from target service. 
*/ diff --git a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java index f13f89c6e067c..66938eec10513 100644 --- a/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteStoreRefreshListenerTests.java @@ -413,7 +413,7 @@ private Tuple m when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); // Mock indexShard.getOperationPrimaryTerm() - when(shard.getOperationPrimaryTerm()).thenReturn(indexShard.getOperationPrimaryTerm()); + when(shard.getLatestReplicationCheckpoint()).thenReturn(indexShard.getLatestReplicationCheckpoint()); // Mock indexShard.routingEntry().primary() when(shard.routingEntry()).thenReturn(indexShard.routingEntry()); diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 690c7955ff338..20b3dfc0f93a6 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -9,22 +9,27 @@ package org.opensearch.index.shard; import org.junit.Assert; +import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.store.Store; import org.opensearch.index.translog.WriteOnlyTranslogManager; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.nio.file.Path; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; @@ -39,15 +44,24 @@ public class ReplicaRecoveryWithRemoteTranslogOnPrimaryTests extends OpenSearchI .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); - public void testStartSequenceForReplicaRecovery() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + @Before + public void setup() { + // Todo: Remove feature flag once remote store integration with segrep goes GA + FeatureFlags.initializeFeatureFlags( + Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() + ); + } + public void testStartSequenceForReplicaRecovery() throws Exception { + final Path remoteDir = createTempDir(); + final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; + try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) { shards.startPrimary(); final IndexShard primary = shards.getPrimary(); int numDocs = 
shards.indexDocs(randomIntBetween(10, 100)); shards.flush(); - final IndexShard replica = shards.addReplica(); + final IndexShard replica = shards.addReplica(remoteDir); shards.startAll(); allowShardFailures(); @@ -63,6 +77,14 @@ public void testStartSequenceForReplicaRecovery() throws Exception { int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); shards.flush(); + final ShardRouting replicaRouting2 = newShardRouting( + replicaRouting.shardId(), + replicaRouting.currentNodeId(), + false, + ShardRoutingState.INITIALIZING, + RecoverySource.PeerRecoverySource.INSTANCE + ); + Store remoteStore = createRemoteStore(remoteDir, replicaRouting2, newIndexMetadata); IndexShard newReplicaShard = newShard( newShardRouting( replicaRouting.shardId(), @@ -80,7 +102,7 @@ public void testStartSequenceForReplicaRecovery() throws Exception { replica.getGlobalCheckpointSyncer(), replica.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER, - null + remoteStore ); shards.addReplica(newReplicaShard); AtomicBoolean assertDone = new AtomicBoolean(false); @@ -103,7 +125,6 @@ public IndexShard indexShard() { return idxShard; } }); - shards.flush(); replicateSegments(primary, shards.getReplicas()); shards.assertAllEqual(numDocs + moreDocs); @@ -111,7 +132,9 @@ public IndexShard indexShard() { } public void testNoTranslogHistoryTransferred() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + final Path remoteDir = createTempDir(); + final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; + try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) { // Step1 - Start primary, index docs, flush, index more docs, check translog in primary as expected shards.startPrimary(); @@ -123,7 +146,7 @@ public void testNoTranslogHistoryTransferred() throws Exception { assertEquals(numDocs + moreDocs, getTranslog(primary).totalOperations()); // Step 2 - Start replica, recovery happens, check docs recovered till last flush - final IndexShard replica = shards.addReplica(); + final IndexShard replica = shards.addReplica(remoteDir); shards.startAll(); assertEquals(docIdAndSeqNosAfterFlush, getDocIdAndSeqNos(replica)); assertDocCount(replica, numDocs); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 9107606326150..12b7341349442 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -16,7 +16,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.admin.indices.forcemerge.ForceMergeRequest; -import org.opensearch.action.delete.DeleteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; @@ -31,9 +30,7 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.common.lease.Releasable; import org.opensearch.index.IndexSettings; -import org.opensearch.index.engine.DocIdSeqNoAndSource; import org.opensearch.index.engine.Engine; -import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngine; 
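// Test-wiring note for the recovery-test hunks above: a replica that recovers with
// a remote store needs its own Store handle over the shared remote directory, and
// the helper building it (createRemoteStore, an IndexShardTestCase change listed in
// this patch's diffstat) takes a routing that marks the shard as an initializing,
// peer-recovering replica. Condensed, with unrelated newShard(...) arguments elided:
//
//     ShardRouting routing = newShardRouting(shardId, nodeId, false,
//         ShardRoutingState.INITIALIZING, RecoverySource.PeerRecoverySource.INSTANCE);
//     Store remoteStore = createRemoteStore(remoteDir, routing, indexMetadata);
//     IndexShard replica = newShard(/* routing, syncers, listeners, ... */ remoteStore);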
import org.opensearch.index.engine.NRTReplicationEngineFactory; @@ -44,7 +41,6 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.Translog; -import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.CheckpointInfoResponse; @@ -67,22 +63,15 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import static java.util.Arrays.asList; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; import static org.mockito.Mockito.any; @@ -100,6 +89,41 @@ public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelRepli .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .build(); + protected ReplicationGroup getReplicationGroup(int numberOfReplicas) throws IOException { + return createGroup(numberOfReplicas, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory()); + } + + protected ReplicationGroup getReplicationGroup(int numberOfReplicas, String indexMapping) throws IOException { + return createGroup(numberOfReplicas, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory()); + } + + protected Settings getIndexSettings() { + return settings; + } + + /** + * Validates the happy path of segment replication, where docs indexed on the primary are replicated to replica shards. Assertions + * are made on doc count on both primary and replica. + * @throws Exception + */ + public void testReplication() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory());) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + + // index and replicate segments to replica.
+ int numDocs = randomIntBetween(10, 20); + shards.indexDocs(numDocs); + primaryShard.refresh("test"); + flushShard(primaryShard); + replicateSegments(primaryShard, List.of(replicaShard)); + + // Assertions + shards.assertAllEqual(numDocs); + } + } + /** * Test that latestReplicationCheckpoint returns null only for docrep enabled indices */ @@ -114,7 +138,7 @@ public void testReplicationCheckpointNullForDocRep() throws IOException { * Test that latestReplicationCheckpoint returns ReplicationCheckpoint for segrep enabled indices */ public void testReplicationCheckpointNotNullForSegRep() throws IOException { - final IndexShard indexShard = newStartedShard(randomBoolean(), settings, new NRTReplicationEngineFactory()); + final IndexShard indexShard = newStartedShard(randomBoolean(), getIndexSettings(), new NRTReplicationEngineFactory()); final ReplicationCheckpoint replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); assertNotNull(replicationCheckpoint); closeShards(indexShard); @@ -127,7 +151,7 @@ public void testNRTReplicasDoNotAcceptRefreshListeners() throws IOException { } public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory())) { shards.startAll(); final IndexShard primary = shards.getPrimary(); final IndexShard replica = shards.getReplicas().get(0); @@ -149,7 +173,7 @@ public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { assertEquals(1, primary.getLatestReplicationCheckpoint().compareTo(replica.getLatestReplicationCheckpoint())); // index and copy segments to replica. - int numDocs = randomIntBetween(10, 100); + int numDocs = randomIntBetween(10, 20); shards.indexDocs(numDocs); primary.refresh("test"); replicateSegments(primary, List.of(replica)); @@ -172,6 +196,51 @@ public void testSegmentInfosAndReplicationCheckpointTuple() throws Exception { } } + public void testPrimaryRelocationWithSegRepFailure() throws Exception { + final IndexShard primarySource = newStartedShard(true, getIndexSettings()); + int totalOps = randomInt(10); + for (int i = 0; i < totalOps; i++) { + indexDoc(primarySource, "_doc", Integer.toString(i)); + } + IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); + final IndexShard primaryTarget = newShard( + primarySource.routingEntry().getTargetRelocatingShard(), + getIndexSettings(), + new NRTReplicationEngineFactory() + ); + updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); + + Function, List> replicatePrimaryFunction = (shardList) -> { + try { + throw new IOException("Expected failure"); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + Exception e = expectThrows( + Exception.class, + () -> recoverReplica( + primaryTarget, + primarySource, + (primary, sourceNode) -> new RecoveryTarget(primary, sourceNode, new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + throw new AssertionError("recovery must fail"); + } + + @Override + public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { + assertEquals(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), "Expected failure"); + } + }), + true, + true, + replicatePrimaryFunction + ) + ); + closeShards(primarySource, primaryTarget); + } + 
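A minimal sketch of how the protected hooks introduced above are meant to be consumed: a sibling subclass overrides getIndexSettings() so that every inherited test runs against a different replication path. The class name below and the "temp-fs" repository wiring are illustrative assumptions, not part of this patch; the setting keys are the same ones used by the remote-store test removed elsewhere in this file.

import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.settings.Settings;
import org.opensearch.indices.replication.common.ReplicationType;

// Hypothetical subclass; layers remote-store settings over the segment replication default
// so the shared suite in SegmentReplicationIndexShardTests exercises the remote-store path.
public class SegmentReplicationWithRemoteStoreIndexShardTests extends SegmentReplicationIndexShardTests {

    @Override
    protected Settings getIndexSettings() {
        return Settings.builder()
            .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT)
            .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true)
            .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs")
            .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs")
            .build();
    }
}

With an override like this in place, inherited tests such as testReplication() and testPrimaryRelocationWithSegRepFailure() run unchanged against the alternate settings, which is why the suite's helpers and tests are widened from private to protected in this patch.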
private void assertReplicationCheckpoint(IndexShard shard, SegmentInfos segmentInfos, ReplicationCheckpoint checkpoint) throws IOException { assertNotNull(segmentInfos); @@ -180,7 +249,7 @@ private void assertReplicationCheckpoint(IndexShard shard, SegmentInfos segmentI } public void testIsSegmentReplicationAllowed_WrongEngineType() throws IOException { - final IndexShard indexShard = newShard(false, settings, new InternalEngineFactory()); + final IndexShard indexShard = newShard(false, getIndexSettings(), new InternalEngineFactory()); assertFalse(indexShard.isSegmentReplicationAllowed()); closeShards(indexShard); } @@ -193,13 +262,13 @@ public void testIsSegmentReplicationAllowed_WrongEngineType() throws IOException */ public void testSegmentReplication_With_ReaderClosedConcurrently() throws Exception { String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(1, settings, mappings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primaryShard = shards.getPrimary(); final IndexShard replicaShard = shards.getReplicas().get(0); // Step 1. Ingest numDocs documents & replicate to replica shard - final int numDocs = randomIntBetween(100, 200); + final int numDocs = randomIntBetween(10, 20); logger.info("--> Inserting documents {}", numDocs); for (int i = 0; i < numDocs; i++) { shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); @@ -242,13 +311,13 @@ public void testSegmentReplication_With_ReaderClosedConcurrently() throws Except */ public void testSegmentReplication_With_EngineClosedConcurrently() throws Exception { String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(1, settings, mappings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primaryShard = shards.getPrimary(); final IndexShard replicaShard = shards.getReplicas().get(0); // Step 1. Ingest numDocs documents - final int numDocs = randomIntBetween(100, 200); + final int numDocs = randomIntBetween(10, 20); logger.info("--> Inserting documents {}", numDocs); for (int i = 0; i < numDocs; i++) { shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); @@ -284,137 +353,9 @@ public void testSegmentReplication_With_EngineClosedConcurrently() throws Except } } - /** - * Verifies that commits on replica engine resulting from engine or reader close does not cleanup the temporary - * replication files from ongoing round of segment replication - */ - public void testTemporaryFilesNotCleanup() throws Exception { - String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(1, settings, mappings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primaryShard = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - // Step 1. 
Ingest numDocs documents, commit to create commit point on primary & replicate - final int numDocs = randomIntBetween(100, 200); - logger.info("--> Inserting documents {}", numDocs); - for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); - replicateSegments(primaryShard, shards.getReplicas()); - shards.assertAllEqual(numDocs); - - // Step 2. Ingest numDocs documents again to create a new commit on primary - logger.info("--> Ingest {} docs again", numDocs); - for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); - - // Step 3. Copy segment files to replica shard but prevent commit - final CountDownLatch countDownLatch = new CountDownLatch(1); - Map primaryMetadata; - try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { - final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); - primaryMetadata = primaryShard.store().getSegmentMetadataMap(primarySegmentInfos); - } - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final IndicesService indicesService = mock(IndicesService.class); - when(indicesService.getShardOrNull(replica.shardId)).thenReturn(replica); - final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( - threadPool, - new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), - mock(TransportService.class), - sourceFactory, - indicesService, - clusterService - ); - final Consumer runnablePostGetFiles = (indexShard) -> { - try { - Collection temporaryFiles = Stream.of(indexShard.store().directory().listAll()) - .filter(name -> name.startsWith(SegmentReplicationTarget.REPLICATION_PREFIX)) - .collect(Collectors.toList()); - - // Step 4. Perform a commit on replica shard. - NRTReplicationEngine engine = (NRTReplicationEngine) indexShard.getEngine(); - engine.updateSegments(engine.getSegmentInfosSnapshot().get()); - - // Step 5. Validate temporary files are not deleted from store. 
- Collection replicaStoreFiles = List.of(indexShard.store().directory().listAll()); - assertTrue(replicaStoreFiles.containsAll(temporaryFiles)); - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - SegmentReplicationSource segmentReplicationSource = getSegmentReplicationSource( - primaryShard, - (repId) -> targetService.get(repId), - runnablePostGetFiles - ); - when(sourceFactory.get(any())).thenReturn(segmentReplicationSource); - targetService.startReplication(replica, getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch)); - countDownLatch.await(30, TimeUnit.SECONDS); - assertEquals("Replication failed", 0, countDownLatch.getCount()); - shards.assertAllEqual(numDocs); - } - } - - public void testSegmentReplication_Index_Update_Delete() throws Exception { - String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; - try (ReplicationGroup shards = createGroup(2, settings, mappings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard primaryShard = shards.getPrimary(); - - final int numDocs = randomIntBetween(100, 200); - for (int i = 0; i < numDocs; i++) { - shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); - } - - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.refresh("Test"); - replicateSegments(primaryShard, shards.getReplicas()); - - shards.assertAllEqual(numDocs); - - for (int i = 0; i < numDocs; i++) { - // randomly update docs. - if (randomBoolean()) { - shards.index( - new IndexRequest(index.getName()).id(String.valueOf(i)).source("{ \"foo\" : \"baz\" }", XContentType.JSON) - ); - } - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.refresh("Test"); - replicateSegments(primaryShard, shards.getReplicas()); - shards.assertAllEqual(numDocs); - - final List docs = getDocIdAndSeqNos(primaryShard); - for (IndexShard shard : shards.getReplicas()) { - assertEquals(getDocIdAndSeqNos(shard), docs); - } - for (int i = 0; i < numDocs; i++) { - // randomly delete. 
- if (randomBoolean()) { - shards.delete(new DeleteRequest(index.getName()).id(String.valueOf(i))); - } - } - assertEqualTranslogOperations(shards, primaryShard); - primaryShard.refresh("Test"); - replicateSegments(primaryShard, shards.getReplicas()); - final List docsAfterDelete = getDocIdAndSeqNos(primaryShard); - for (IndexShard shard : shards.getReplicas()) { - assertEquals(getDocIdAndSeqNos(shard), docsAfterDelete); - } - } - } - public void testIgnoreShardIdle() throws Exception { Settings updatedSettings = Settings.builder() - .put(settings) + .put(getIndexSettings()) .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO) .build(); try (ReplicationGroup shards = createGroup(1, updatedSettings, new NRTReplicationEngineFactory())) { @@ -464,7 +405,7 @@ public void testShardIdle_Docrep() throws Exception { public void testShardIdleWithNoReplicas() throws Exception { Settings updatedSettings = Settings.builder() - .put(settings) + .put(getIndexSettings()) .put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO) .build(); try (ReplicationGroup shards = createGroup(0, updatedSettings, new NRTReplicationEngineFactory())) { @@ -544,132 +485,10 @@ public void testRejectCheckpointOnShardRoutingPrimary() throws IOException { closeShards(primaryShard); } - public void testReplicaReceivesGenIncrease() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - final int numDocs = randomIntBetween(10, 100); - shards.indexDocs(numDocs); - assertEquals(numDocs, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(numDocs, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(numDocs, primary.translogStats().getUncommittedOperations()); - assertEquals(numDocs, replica.translogStats().getUncommittedOperations()); - flushShard(primary, true); - replicateSegments(primary, shards.getReplicas()); - assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(0, primary.translogStats().getUncommittedOperations()); - assertEquals(0, replica.translogStats().getUncommittedOperations()); - - final int additionalDocs = shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); - - final int totalDocs = numDocs + additionalDocs; - primary.refresh("test"); - replicateSegments(primary, shards.getReplicas()); - assertEquals(additionalDocs, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(additionalDocs, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(additionalDocs, primary.translogStats().getUncommittedOperations()); - assertEquals(additionalDocs, replica.translogStats().getUncommittedOperations()); - flushShard(primary, true); - replicateSegments(primary, shards.getReplicas()); - - assertEqualCommittedSegments(primary, replica); - assertDocCount(primary, totalDocs); - assertDocCount(replica, totalDocs); - assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); - assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(0, primary.translogStats().getUncommittedOperations()); - assertEquals(0, replica.translogStats().getUncommittedOperations()); - } - } - - public void testPrimaryRelocation() throws Exception { - final IndexShard primarySource = newStartedShard(true, settings); - 
int totalOps = randomInt(10); - for (int i = 0; i < totalOps; i++) { - indexDoc(primarySource, "_doc", Integer.toString(i)); - } - IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); - final IndexShard primaryTarget = newShard( - primarySource.routingEntry().getTargetRelocatingShard(), - settings, - new NRTReplicationEngineFactory() - ); - updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); - - Function, List> replicatePrimaryFunction = (shardList) -> { - try { - assert shardList.size() >= 2; - final IndexShard primary = shardList.get(0); - return replicateSegments(primary, shardList.subList(1, shardList.size())); - } catch (IOException | InterruptedException e) { - throw new RuntimeException(e); - } - }; - recoverReplica(primaryTarget, primarySource, true, replicatePrimaryFunction); - - // check that local checkpoint of new primary is properly tracked after primary relocation - assertThat(primaryTarget.getLocalCheckpoint(), equalTo(totalOps - 1L)); - assertThat( - primaryTarget.getReplicationTracker() - .getTrackedLocalCheckpointForShard(primaryTarget.routingEntry().allocationId().getId()) - .getLocalCheckpoint(), - equalTo(totalOps - 1L) - ); - assertDocCount(primaryTarget, totalOps); - closeShards(primarySource, primaryTarget); - } - - public void testPrimaryRelocationWithSegRepFailure() throws Exception { - final IndexShard primarySource = newStartedShard(true, settings); - int totalOps = randomInt(10); - for (int i = 0; i < totalOps; i++) { - indexDoc(primarySource, "_doc", Integer.toString(i)); - } - IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); - final IndexShard primaryTarget = newShard( - primarySource.routingEntry().getTargetRelocatingShard(), - settings, - new NRTReplicationEngineFactory() - ); - updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata()); - - Function, List> replicatePrimaryFunction = (shardList) -> { - try { - throw new IOException("Expected failure"); - } catch (IOException e) { - throw new RuntimeException(e); - } - }; - Exception e = expectThrows( - Exception.class, - () -> recoverReplica( - primaryTarget, - primarySource, - (primary, sourceNode) -> new RecoveryTarget(primary, sourceNode, new ReplicationListener() { - @Override - public void onDone(ReplicationState state) { - throw new AssertionError("recovery must fail"); - } - - @Override - public void onFailure(ReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - assertEquals(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), "Expected failure"); - } - }), - true, - true, - replicatePrimaryFunction - ) - ); - closeShards(primarySource, primaryTarget); - } - // Todo: Remove this test when there is a better mechanism to test a functionality passing in different replication // strategy. 
public void testLockingBeforeAndAfterRelocated() throws Exception { - final IndexShard shard = newStartedShard(true, settings); + final IndexShard shard = newStartedShard(true, getIndexSettings()); final ShardRouting routing = ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"); IndexShardTestCase.updateRoutingEntry(shard, routing); CountDownLatch latch = new CountDownLatch(1); @@ -702,7 +521,7 @@ public void testLockingBeforeAndAfterRelocated() throws Exception { // Todo: Remove this test when there is a better mechanism to test a functionality passing in different replication // strategy. public void testDelayedOperationsBeforeAndAfterRelocated() throws Exception { - final IndexShard shard = newStartedShard(true, settings); + final IndexShard shard = newStartedShard(true, getIndexSettings()); final ShardRouting routing = ShardRoutingHelper.relocate(shard.routingEntry(), "other_node"); IndexShardTestCase.updateRoutingEntry(shard, routing); final CountDownLatch startRecovery = new CountDownLatch(1); @@ -776,428 +595,8 @@ public void onFailure(Exception e) { closeShards(shard); } - public void testReplicaReceivesLowerGeneration() throws Exception { - // when a replica gets incoming segments that are lower than what it currently has on disk. - - // start 3 nodes Gens: P [2], R [2], R[2] - // index some docs and flush twice, push to only 1 replica. - // State Gens: P [4], R-1 [3], R-2 [2] - // Promote R-2 as the new primary and demote the old primary. - // State Gens: R[4], R-1 [3], P [4] - *commit on close of NRTEngine, xlog replayed and commit made. - // index docs on new primary and flush - // replicate to all. - // Expected result: State Gens: P[4], R-1 [4], R-2 [4] - try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard primary = shards.getPrimary(); - final IndexShard replica_1 = shards.getReplicas().get(0); - final IndexShard replica_2 = shards.getReplicas().get(1); - int numDocs = randomIntBetween(10, 100); - shards.indexDocs(numDocs); - flushShard(primary, false); - replicateSegments(primary, List.of(replica_1)); - numDocs = randomIntBetween(numDocs + 1, numDocs + 10); - shards.indexDocs(numDocs); - flushShard(primary, false); - replicateSegments(primary, List.of(replica_1)); - - assertEqualCommittedSegments(primary, replica_1); - - shards.promoteReplicaToPrimary(replica_2).get(); - primary.close("demoted", false, false); - primary.store().close(); - IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId()); - shards.recoverReplica(oldPrimary); - - numDocs = randomIntBetween(numDocs + 1, numDocs + 10); - shards.indexDocs(numDocs); - flushShard(replica_2, false); - replicateSegments(replica_2, shards.getReplicas()); - assertEqualCommittedSegments(replica_2, oldPrimary, replica_1); - } - } - - public void testReplicaRestarts() throws Exception { - try (ReplicationGroup shards = createGroup(3, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. - final int numDocs = shards.indexDocs(randomInt(10)); - - // refresh and copy the segments over. - if (randomBoolean()) { - flushShard(primary); - } - primary.refresh("Test"); - replicateSegments(primary, shards.getReplicas()); - - // at this point both shards should have numDocs persisted and searchable. 
- assertDocCounts(primary, numDocs, numDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, numDocs, numDocs); - } - - final int i1 = randomInt(5); - for (int i = 0; i < i1; i++) { - shards.indexDocs(randomInt(10)); - - // randomly resetart a replica - final IndexShard replicaToRestart = getRandomReplica(shards); - replicaToRestart.close("restart", false, false); - replicaToRestart.store().close(); - shards.removeReplica(replicaToRestart); - final IndexShard newReplica = shards.addReplicaWithExistingPath( - replicaToRestart.shardPath(), - replicaToRestart.routingEntry().currentNodeId() - ); - shards.recoverReplica(newReplica); - - // refresh and push segments to our other replicas. - if (randomBoolean()) { - failAndPromoteRandomReplica(shards); - } - flushShard(shards.getPrimary()); - replicateSegments(shards.getPrimary(), shards.getReplicas()); - } - primary = shards.getPrimary(); - - // refresh and push segments to our other replica. - flushShard(primary); - replicateSegments(primary, shards.getReplicas()); - - for (IndexShard shard : shards) { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - } - final List docsAfterReplication = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterReplication)); - } - } - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshRefresh() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(false, false); - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshCommit() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(false, true); - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitRefresh() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(true, false); - } - - public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitCommit() throws Exception { - testNRTReplicaWithRemoteStorePromotedAsPrimary(true, true); - } - - private void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushFirst, boolean performFlushSecond) throws Exception { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) - .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs") - .build(); - - try (ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir())) { - shards.startAll(); - IndexShard oldPrimary = shards.getPrimary(); - final IndexShard nextPrimary = shards.getReplicas().get(0); - - // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. - final int numDocs = shards.indexDocs(randomInt(10)); - - // refresh but do not copy the segments over. - if (performFlushFirst) { - flushShard(oldPrimary, true); - } else { - oldPrimary.refresh("Test"); - } - // replicateSegments(primary, shards.getReplicas()); - - // at this point both shards should have numDocs persisted and searchable. - assertDocCounts(oldPrimary, numDocs, numDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, numDocs, 0); - } - - // 2. Create ops that are in the replica's xlog, not in the index. - // index some more into both but don't replicate. 
replica will have only numDocs searchable, but should have totalDocs - // persisted. - final int additonalDocs = shards.indexDocs(randomInt(10)); - final int totalDocs = numDocs + additonalDocs; - - if (performFlushSecond) { - flushShard(oldPrimary, true); - } else { - oldPrimary.refresh("Test"); - } - assertDocCounts(oldPrimary, totalDocs, totalDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, totalDocs, 0); - } - assertTrue(nextPrimary.translogStats().estimatedNumberOfOperations() >= additonalDocs); - assertTrue(nextPrimary.translogStats().getUncommittedOperations() >= additonalDocs); - - int prevOperationCount = nextPrimary.translogStats().estimatedNumberOfOperations(); - - // promote the replica - shards.promoteReplicaToPrimary(nextPrimary).get(); - - // close oldPrimary. - oldPrimary.close("demoted", false, false); - oldPrimary.store().close(); - - assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); - assertDocCounts(nextPrimary, totalDocs, totalDocs); - - // As we are downloading segments from remote segment store on failover, there should not be - // any operations replayed from translog - assertEquals(prevOperationCount, nextPrimary.translogStats().estimatedNumberOfOperations()); - - // refresh and push segments to our other replica. - nextPrimary.refresh("test"); - - for (IndexShard shard : shards) { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - } - final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); - } - } - } - - public void testNRTReplicaPromotedAsPrimary() throws Exception { - try (ReplicationGroup shards = createGroup(2, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard oldPrimary = shards.getPrimary(); - final IndexShard nextPrimary = shards.getReplicas().get(0); - final IndexShard replica = shards.getReplicas().get(1); - - // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. - final int numDocs = shards.indexDocs(randomInt(10)); - - // refresh and copy the segments over. - oldPrimary.refresh("Test"); - replicateSegments(oldPrimary, shards.getReplicas()); - - // at this point both shards should have numDocs persisted and searchable. - assertDocCounts(oldPrimary, numDocs, numDocs); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, numDocs, numDocs); - } - assertEqualTranslogOperations(shards, oldPrimary); - - // 2. Create ops that are in the replica's xlog, not in the index. - // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs - // persisted. 
- final int additonalDocs = shards.indexDocs(randomInt(10)); - final int totalDocs = numDocs + additonalDocs; - - assertDocCounts(oldPrimary, totalDocs, totalDocs); - assertEqualTranslogOperations(shards, oldPrimary); - for (IndexShard shard : shards.getReplicas()) { - assertDocCounts(shard, totalDocs, numDocs); - } - assertEquals(totalDocs, oldPrimary.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, oldPrimary.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, replica.translogStats().estimatedNumberOfOperations()); - assertEquals(totalDocs, nextPrimary.translogStats().getUncommittedOperations()); - assertEquals(totalDocs, replica.translogStats().getUncommittedOperations()); - - // promote the replica - shards.syncGlobalCheckpoint(); - shards.promoteReplicaToPrimary(nextPrimary); - - // close and start the oldPrimary as a replica. - oldPrimary.close("demoted", false, false); - oldPrimary.store().close(); - oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); - shards.recoverReplica(oldPrimary); - - assertEquals(NRTReplicationEngine.class, oldPrimary.getEngine().getClass()); - assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass()); - assertDocCounts(nextPrimary, totalDocs, totalDocs); - assertEquals(0, nextPrimary.translogStats().estimatedNumberOfOperations()); - - // refresh and push segments to our other replica. - nextPrimary.refresh("test"); - replicateSegments(nextPrimary, asList(replica)); - - for (IndexShard shard : shards) { - assertConsistentHistoryBetweenTranslogAndLucene(shard); - } - final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); - } - } - } - - public void testReplicaPromotedWhileReplicating() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - final IndexShard oldPrimary = shards.getPrimary(); - final IndexShard nextPrimary = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - oldPrimary.refresh("Test"); - shards.syncGlobalCheckpoint(); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - resolveCheckpointInfoResponseListener(listener, oldPrimary); - ShardRouting oldRouting = nextPrimary.shardRouting; - try { - shards.promoteReplicaToPrimary(nextPrimary); - } catch (IOException e) { - Assert.fail("Promotion should not fail"); - } - targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - ActionListener listener - ) { - listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(nextPrimary, targetService); - // wait for replica to finish being 
promoted, and assert doc counts. - final CountDownLatch latch = new CountDownLatch(1); - nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { - @Override - public void onResponse(Releasable releasable) { - latch.countDown(); - } - - @Override - public void onFailure(Exception e) { - throw new AssertionError(e); - } - }, ThreadPool.Names.GENERIC, ""); - latch.await(); - assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); - nextPrimary.refresh("test"); - - oldPrimary.close("demoted", false, false); - oldPrimary.store().close(); - IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); - shards.recoverReplica(newReplica); - - assertDocCount(nextPrimary, numDocs); - assertDocCount(newReplica, numDocs); - - nextPrimary.refresh("test"); - replicateSegments(nextPrimary, shards.getReplicas()); - final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); - for (IndexShard shard : shards.getReplicas()) { - assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); - } - } - } - - public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - // trigger a cancellation by closing the replica. 
- targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - resolveCheckpointInfoResponseListener(listener, primary); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - ActionListener listener - ) { - Assert.fail("Should not be reached"); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); - - shards.removeReplica(replica); - closeShards(replica); - } - } - - public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - resolveCheckpointInfoResponseListener(listener, primary); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - ActionListener listener - ) { - // randomly resolve the listener, indicating the source has resolved. - listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); - targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); - - shards.removeReplica(replica); - closeShards(replica); - } - } - public void testCloseShardDuringFinalize() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primary = shards.getPrimary(); final IndexShard replica = shards.getReplicas().get(0); @@ -1211,60 +610,8 @@ public void testCloseShardDuringFinalize() throws Exception { } } - public void testCloseShardWhileGettingCheckpoint() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - - ActionListener listener; - - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - // set the listener, we will only fail it once we cancel the source. - this.listener = listener; - // shard is closing while we are copying files. 
- targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - ActionListener listener - ) { - Assert.fail("Unreachable"); - } - - @Override - public void cancel() { - // simulate listener resolving, but only after we have issued a cancel from beforeIndexShardClosed . - final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); - listener.onFailure(exception); - } - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); - - shards.removeReplica(replica); - closeShards(replica); - } - } - public void testBeforeIndexShardClosedWhileCopyingFiles() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { shards.startAll(); IndexShard primary = shards.getPrimary(); final IndexShard replica = shards.getReplicas().get(0); @@ -1315,45 +662,7 @@ public void cancel() { } } - public void testPrimaryCancelsExecution() throws Exception { - try (ReplicationGroup shards = createGroup(1, settings, new NRTReplicationEngineFactory())) { - shards.startAll(); - IndexShard primary = shards.getPrimary(); - final IndexShard replica = shards.getReplicas().get(0); - - final int numDocs = shards.indexDocs(randomInt(10)); - primary.refresh("Test"); - - final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); - final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); - SegmentReplicationSource source = new TestReplicationSource() { - @Override - public void getCheckpointMetadata( - long replicationId, - ReplicationCheckpoint checkpoint, - ActionListener listener - ) { - listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); - } - - @Override - public void getSegmentFiles( - long replicationId, - ReplicationCheckpoint checkpoint, - List filesToFetch, - IndexShard indexShard, - ActionListener listener - ) {} - }; - when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); - - shards.removeReplica(replica); - closeShards(replica); - } - } - - private SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { + protected SegmentReplicationTargetService newTargetService(SegmentReplicationSourceFactory sourceFactory) { return new SegmentReplicationTargetService( threadPool, new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), @@ -1368,14 +677,15 @@ private SegmentReplicationTargetService newTargetService(SegmentReplicationSourc * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. 
*/ - private void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCount, int expectedSearchableDocCount) throws IOException { + protected void assertDocCounts(IndexShard indexShard, int expectedPersistedDocCount, int expectedSearchableDocCount) + throws IOException { assertDocCount(indexShard, expectedSearchableDocCount); // assigned seqNos start at 0, so assert max & local seqNos are 1 less than our persisted doc count. assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getMaxSeqNo()); assertEquals(expectedPersistedDocCount - 1, indexShard.seqNoStats().getLocalCheckpoint()); } - private void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { + protected void resolveCheckpointInfoResponseListener(ActionListener listener, IndexShard primary) { try { final CopyState copyState = new CopyState( ReplicationCheckpoint.empty(primary.shardId, primary.getLatestReplicationCheckpoint().getCodec()), @@ -1390,7 +700,7 @@ private void resolveCheckpointInfoResponseListener(ActionListener operations = new ArrayList<>(); Translog.Operation op; diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java new file mode 100644 index 0000000000000..69846fbbe1dd4 --- /dev/null +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithNodeToNodeIndexShardTests.java @@ -0,0 +1,697 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.shard; + +import org.apache.lucene.index.SegmentInfos; +import org.junit.Assert; +import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.indices.flush.FlushRequest; +import org.opensearch.action.delete.DeleteRequest; +import org.opensearch.action.index.IndexRequest; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.xcontent.XContentType; +import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.InternalEngine; +import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; +import org.opensearch.index.replication.TestReplicationSource; +import org.opensearch.index.store.StoreFileMetadata; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.CheckpointInfoResponse; +import org.opensearch.indices.replication.GetSegmentFilesResponse; +import org.opensearch.indices.replication.SegmentReplicationSource; +import org.opensearch.indices.replication.SegmentReplicationSourceFactory; +import org.opensearch.indices.replication.SegmentReplicationTarget; +import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import 
java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.equalTo; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SegmentReplicationWithNodeToNodeIndexShardTests extends SegmentReplicationIndexShardTests { + + public void testReplicaClosesWhileReplicating_AfterGetSegmentFiles() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + ActionListener listener + ) { + // randomly resolve the listener, indicating the source has resolved. + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaClosesWhileReplicating_AfterGetCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + // trigger a cancellation by closing the replica. 
+ targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + resolveCheckpointInfoResponseListener(listener, primary); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + ActionListener listener + ) { + Assert.fail("Should not be reached"); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testCloseShardWhileGettingCheckpoint() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + + ActionListener listener; + + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + // set the listener, we will only fail it once we cancel the source. + this.listener = listener; + // shard is closing while we are copying files. + targetService.beforeIndexShardClosed(replica.shardId, replica, Settings.EMPTY); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + ActionListener listener + ) { + Assert.fail("Unreachable"); + } + + @Override + public void cancel() { + // simulate listener resolving, but only after we have issued a cancel from beforeIndexShardClosed . 
+ final RuntimeException exception = new CancellableThreads.ExecutionCancelledException("retryable action was cancelled"); + listener.onFailure(exception); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testPrimaryCancelsExecution() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + primary.refresh("Test"); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + listener.onFailure(new CancellableThreads.ExecutionCancelledException("Cancelled")); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + ActionListener listener + ) {} + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(replica, targetService); + + shards.removeReplica(replica); + closeShards(replica); + } + } + + public void testReplicaPromotedWhileReplicating() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard oldPrimary = shards.getPrimary(); + final IndexShard nextPrimary = shards.getReplicas().get(0); + + final int numDocs = shards.indexDocs(randomInt(10)); + oldPrimary.refresh("Test"); + shards.syncGlobalCheckpoint(); + + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final SegmentReplicationTargetService targetService = newTargetService(sourceFactory); + SegmentReplicationSource source = new TestReplicationSource() { + @Override + public void getCheckpointMetadata( + long replicationId, + ReplicationCheckpoint checkpoint, + ActionListener listener + ) { + resolveCheckpointInfoResponseListener(listener, oldPrimary); + ShardRouting oldRouting = nextPrimary.shardRouting; + try { + shards.promoteReplicaToPrimary(nextPrimary); + } catch (IOException e) { + Assert.fail("Promotion should not fail"); + } + targetService.shardRoutingChanged(nextPrimary, oldRouting, nextPrimary.shardRouting); + } + + @Override + public void getSegmentFiles( + long replicationId, + ReplicationCheckpoint checkpoint, + List filesToFetch, + IndexShard indexShard, + ActionListener listener + ) { + listener.onResponse(new GetSegmentFilesResponse(Collections.emptyList())); + } + }; + when(sourceFactory.get(any())).thenReturn(source); + startReplicationAndAssertCancellation(nextPrimary, targetService); + // wait for replica to finish being promoted, and assert doc counts. 
+ final CountDownLatch latch = new CountDownLatch(1); + nextPrimary.acquirePrimaryOperationPermit(new ActionListener<>() { + @Override + public void onResponse(Releasable releasable) { + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + }, ThreadPool.Names.GENERIC, ""); + latch.await(); + assertEquals(nextPrimary.getEngine().getClass(), InternalEngine.class); + nextPrimary.refresh("test"); + + oldPrimary.close("demoted", false, false); + oldPrimary.store().close(); + IndexShard newReplica = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId()); + shards.recoverReplica(newReplica); + + assertDocCount(nextPrimary, numDocs); + assertDocCount(newReplica, numDocs); + + nextPrimary.refresh("test"); + replicateSegments(nextPrimary, shards.getReplicas()); + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + public void testReplicaReceivesGenIncrease() throws Exception { + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primary = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + final int numDocs = randomIntBetween(10, 100); + shards.indexDocs(numDocs); + assertEquals(numDocs, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(numDocs, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(numDocs, primary.translogStats().getUncommittedOperations()); + assertEquals(numDocs, replica.translogStats().getUncommittedOperations()); + flushShard(primary, true); + replicateSegments(primary, shards.getReplicas()); + assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(0, primary.translogStats().getUncommittedOperations()); + assertEquals(0, replica.translogStats().getUncommittedOperations()); + + final int additionalDocs = shards.indexDocs(randomIntBetween(numDocs + 1, numDocs + 10)); + + final int totalDocs = numDocs + additionalDocs; + primary.refresh("test"); + replicateSegments(primary, shards.getReplicas()); + assertEquals(additionalDocs, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(additionalDocs, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(additionalDocs, primary.translogStats().getUncommittedOperations()); + assertEquals(additionalDocs, replica.translogStats().getUncommittedOperations()); + flushShard(primary, true); + replicateSegments(primary, shards.getReplicas()); + + assertEqualCommittedSegments(primary, replica); + assertDocCount(primary, totalDocs); + assertDocCount(replica, totalDocs); + assertEquals(0, primary.translogStats().estimatedNumberOfOperations()); + assertEquals(0, replica.translogStats().estimatedNumberOfOperations()); + assertEquals(0, primary.translogStats().getUncommittedOperations()); + assertEquals(0, replica.translogStats().getUncommittedOperations()); + } + } + + /** + * Verifies that commits on replica engine resulting from engine or reader close does not cleanup the temporary + * replication files from ongoing round of segment replication + * @throws Exception + */ + public void testTemporaryFilesNotCleanup() throws Exception { + String mappings = "{ \"" + 
MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; + try (ReplicationGroup shards = createGroup(1, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primaryShard = shards.getPrimary(); + final IndexShard replica = shards.getReplicas().get(0); + + // Step 1. Ingest numDocs documents, commit to create commit point on primary & replicate + final int numDocs = randomIntBetween(100, 200); + logger.info("--> Inserting documents {}", numDocs); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + // Step 2. Ingest numDocs documents again to create a new commit on primary + logger.info("--> Ingest {} docs again", numDocs); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.flush(new FlushRequest().waitIfOngoing(true).force(true)); + + // Step 3. Copy segment files to replica shard but prevent commit + final CountDownLatch countDownLatch = new CountDownLatch(1); + Map primaryMetadata; + try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { + final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); + primaryMetadata = primaryShard.store().getSegmentMetadataMap(primarySegmentInfos); + } + final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class); + final IndicesService indicesService = mock(IndicesService.class); + when(indicesService.getShardOrNull(replica.shardId)).thenReturn(replica); + final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + mock(TransportService.class), + sourceFactory, + indicesService, + clusterService + ); + final Consumer runnablePostGetFiles = (indexShard) -> { + try { + Collection temporaryFiles = Stream.of(indexShard.store().directory().listAll()) + .filter(name -> name.startsWith(SegmentReplicationTarget.REPLICATION_PREFIX)) + .collect(Collectors.toList()); + + // Step 4. Perform a commit on replica shard. + NRTReplicationEngine engine = (NRTReplicationEngine) indexShard.getEngine(); + engine.updateSegments(engine.getSegmentInfosSnapshot().get()); + + // Step 5. Validate temporary files are not deleted from store. 
+                    Collection<String> replicaStoreFiles = List.of(indexShard.store().directory().listAll());
+                    assertTrue(replicaStoreFiles.containsAll(temporaryFiles));
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            };
+            SegmentReplicationSource segmentReplicationSource = getSegmentReplicationSource(
+                primaryShard,
+                (repId) -> targetService.get(repId),
+                runnablePostGetFiles
+            );
+            when(sourceFactory.get(any())).thenReturn(segmentReplicationSource);
+            targetService.startReplication(replica, getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch));
+            countDownLatch.await(30, TimeUnit.SECONDS);
+            assertEquals("Replication failed", 0, countDownLatch.getCount());
+            shards.assertAllEqual(numDocs);
+        }
+    }
+
+    // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store
+    public void testReplicaReceivesLowerGeneration() throws Exception {
+        // Covers the case when a replica gets incoming segments that are lower than what it currently has on disk.
+
+        // start 3 nodes Gens: P [2], R [2], R[2]
+        // index some docs and flush twice, push to only 1 replica.
+        // State Gens: P [4], R-1 [3], R-2 [2]
+        // Promote R-2 as the new primary and demote the old primary.
+        // State Gens: R[4], R-1 [3], P [4] - *commit on close of NRTEngine, xlog replayed and commit made.
+        // index docs on new primary and flush
+        // replicate to all.
+        // Expected result: State Gens: P[4], R-1 [4], R-2 [4]
+        try (ReplicationGroup shards = createGroup(2, getIndexSettings(), new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            final IndexShard primary = shards.getPrimary();
+            final IndexShard replica_1 = shards.getReplicas().get(0);
+            final IndexShard replica_2 = shards.getReplicas().get(1);
+            int numDocs = randomIntBetween(10, 100);
+            shards.indexDocs(numDocs);
+            flushShard(primary, false);
+            replicateSegments(primary, List.of(replica_1));
+            numDocs = randomIntBetween(numDocs + 1, numDocs + 10);
+            shards.indexDocs(numDocs);
+            flushShard(primary, false);
+            replicateSegments(primary, List.of(replica_1));
+
+            assertEqualCommittedSegments(primary, replica_1);
+
+            shards.promoteReplicaToPrimary(replica_2).get();
+            primary.close("demoted", false, false);
+            primary.store().close();
+            IndexShard oldPrimary = shards.addReplicaWithExistingPath(primary.shardPath(), primary.routingEntry().currentNodeId());
+            shards.recoverReplica(oldPrimary);
+
+            numDocs = randomIntBetween(numDocs + 1, numDocs + 10);
+            shards.indexDocs(numDocs);
+            flushShard(replica_2, false);
+            replicateSegments(replica_2, shards.getReplicas());
+            assertEqualCommittedSegments(replica_2, oldPrimary, replica_1);
+        }
+    }
+
+    // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store
+    public void testPrimaryRelocation() throws Exception {
+        final IndexShard primarySource = newStartedShard(true, getIndexSettings());
+        int totalOps = randomInt(10);
+        for (int i = 0; i < totalOps; i++) {
+            indexDoc(primarySource, "_doc", Integer.toString(i));
+        }
+        IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1));
+        final IndexShard primaryTarget = newShard(
+            primarySource.routingEntry().getTargetRelocatingShard(),
+            getIndexSettings(),
+            new NRTReplicationEngineFactory()
+        );
+        updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetadata());
+
+        Function<List<IndexShard>, List<SegmentReplicationTarget>> replicatePrimaryFunction = (shardList) -> {
+            try {
+                assert shardList.size() >= 2;
+                final IndexShard primary = shardList.get(0);
+                return replicateSegments(primary, shardList.subList(1, shardList.size()));
+            } catch (IOException | InterruptedException e) {
+                throw new RuntimeException(e);
+            }
+        };
+        recoverReplica(primaryTarget, primarySource, true, replicatePrimaryFunction);
+
+        // check that local checkpoint of new primary is properly tracked after primary relocation
+        assertThat(primaryTarget.getLocalCheckpoint(), equalTo(totalOps - 1L));
+        assertThat(
+            primaryTarget.getReplicationTracker()
+                .getTrackedLocalCheckpointForShard(primaryTarget.routingEntry().allocationId().getId())
+                .getLocalCheckpoint(),
+            equalTo(totalOps - 1L)
+        );
+        assertDocCount(primaryTarget, totalOps);
+        closeShards(primarySource, primaryTarget);
+    }
+
+    // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store
+    public void testNRTReplicaPromotedAsPrimary() throws Exception {
+        try (ReplicationGroup shards = createGroup(2, getIndexSettings(), new NRTReplicationEngineFactory())) {
+            shards.startAll();
+            IndexShard oldPrimary = shards.getPrimary();
+            final IndexShard nextPrimary = shards.getReplicas().get(0);
+            final IndexShard replica = shards.getReplicas().get(1);
+
+            // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point.
+            final int numDocs = shards.indexDocs(randomInt(10));
+
+            // refresh and copy the segments over.
+            oldPrimary.refresh("Test");
+            replicateSegments(oldPrimary, shards.getReplicas());
+
+            // at this point both shards should have numDocs persisted and searchable.
+            assertDocCounts(oldPrimary, numDocs, numDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, numDocs, numDocs);
+            }
+            assertEqualTranslogOperations(shards, oldPrimary);
+
+            // 2. Create ops that are in the replica's xlog, not in the index.
+            // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs
+            // persisted.
+            final int additionalDocs = shards.indexDocs(randomInt(10));
+            final int totalDocs = numDocs + additionalDocs;
+
+            assertDocCounts(oldPrimary, totalDocs, totalDocs);
+            assertEqualTranslogOperations(shards, oldPrimary);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, totalDocs, numDocs);
+            }
+            assertEquals(totalDocs, oldPrimary.translogStats().estimatedNumberOfOperations());
+            assertEquals(totalDocs, nextPrimary.translogStats().estimatedNumberOfOperations());
+            assertEquals(totalDocs, replica.translogStats().estimatedNumberOfOperations());
+            assertEquals(totalDocs, nextPrimary.translogStats().getUncommittedOperations());
+            assertEquals(totalDocs, replica.translogStats().getUncommittedOperations());
+
+            // promote the replica
+            shards.syncGlobalCheckpoint();
+            shards.promoteReplicaToPrimary(nextPrimary);
+
+            // close and start the oldPrimary as a replica.
+            oldPrimary.close("demoted", false, false);
+            oldPrimary.store().close();
+            oldPrimary = shards.addReplicaWithExistingPath(oldPrimary.shardPath(), oldPrimary.routingEntry().currentNodeId());
+            shards.recoverReplica(oldPrimary);
+
+            assertEquals(NRTReplicationEngine.class, oldPrimary.getEngine().getClass());
+            assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass());
+            assertDocCounts(nextPrimary, totalDocs, totalDocs);
+            assertEquals(0, nextPrimary.translogStats().estimatedNumberOfOperations());
+
+            // refresh and push segments to our other replica.
+ nextPrimary.refresh("test"); + replicateSegments(nextPrimary, asList(replica)); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } + } + + // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store + public void testReplicaRestarts() throws Exception { + try (ReplicationGroup shards = createGroup(3, getIndexSettings(), new NRTReplicationEngineFactory())) { + shards.startAll(); + IndexShard primary = shards.getPrimary(); + // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point. + final int numDocs = shards.indexDocs(randomInt(10)); + logger.info("--> Index {} documents on primary", numDocs); + + // refresh and copy the segments over. + if (randomBoolean()) { + flushShard(primary); + } + primary.refresh("Test"); + logger.info("--> Replicate segments"); + replicateSegments(primary, shards.getReplicas()); + + // at this point both shards should have numDocs persisted and searchable. + logger.info("--> Verify doc count"); + assertDocCounts(primary, numDocs, numDocs); + for (IndexShard shard : shards.getReplicas()) { + assertDocCounts(shard, numDocs, numDocs); + } + + final int i1 = randomInt(5); + logger.info("--> Index {} more docs", i1); + for (int i = 0; i < i1; i++) { + shards.indexDocs(randomInt(10)); + + // randomly restart a replica + final IndexShard replicaToRestart = getRandomReplica(shards); + logger.info("--> Restarting replica {}", replicaToRestart.shardId); + replicaToRestart.close("restart", false, false); + replicaToRestart.store().close(); + shards.removeReplica(replicaToRestart); + final IndexShard newReplica = shards.addReplicaWithExistingPath( + replicaToRestart.shardPath(), + replicaToRestart.routingEntry().currentNodeId() + ); + logger.info("--> Recover newReplica {}", newReplica.shardId); + shards.recoverReplica(newReplica); + + // refresh and push segments to our other replicas. + if (randomBoolean()) { + failAndPromoteRandomReplica(shards); + } + flushShard(shards.getPrimary()); + replicateSegments(shards.getPrimary(), shards.getReplicas()); + } + primary = shards.getPrimary(); + + // refresh and push segments to our other replica. 
+ flushShard(primary); + replicateSegments(primary, shards.getReplicas()); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List docsAfterReplication = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterReplication)); + } + } + } + + // Todo: Move this test to SegmentReplicationIndexShardTests so that it runs for both node-node & remote store + public void testSegmentReplication_Index_Update_Delete() throws Exception { + String mappings = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": { \"properties\": { \"foo\": { \"type\": \"keyword\"} }}}"; + try (ReplicationGroup shards = createGroup(2, getIndexSettings(), mappings, new NRTReplicationEngineFactory())) { + shards.startAll(); + final IndexShard primaryShard = shards.getPrimary(); + + final int numDocs = randomIntBetween(100, 200); + for (int i = 0; i < numDocs; i++) { + shards.index(new IndexRequest(index.getName()).id(String.valueOf(i)).source("{\"foo\": \"bar\"}", XContentType.JSON)); + } + + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + + shards.assertAllEqual(numDocs); + + for (int i = 0; i < numDocs; i++) { + // randomly update docs. + if (randomBoolean()) { + shards.index( + new IndexRequest(index.getName()).id(String.valueOf(i)).source("{ \"foo\" : \"baz\" }", XContentType.JSON) + ); + } + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + shards.assertAllEqual(numDocs); + + final List docs = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docs); + } + for (int i = 0; i < numDocs; i++) { + // randomly delete. 
+ if (randomBoolean()) { + shards.delete(new DeleteRequest(index.getName()).id(String.valueOf(i))); + } + } + assertEqualTranslogOperations(shards, primaryShard); + primaryShard.refresh("Test"); + replicateSegments(primaryShard, shards.getReplicas()); + final List docsAfterDelete = getDocIdAndSeqNos(primaryShard); + for (IndexShard shard : shards.getReplicas()) { + assertEquals(getDocIdAndSeqNos(shard), docsAfterDelete); + } + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java index a67b60d6128d1..b15d8b66fca55 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java @@ -8,36 +8,131 @@ package org.opensearch.index.shard; +import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.engine.DocIdSeqNoAndSource; +import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.NRTReplicationEngineFactory; -import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; +import java.util.List; -public class SegmentReplicationWithRemoteIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { +import static org.hamcrest.Matchers.equalTo; + +public class SegmentReplicationWithRemoteIndexShardTests extends SegmentReplicationIndexShardTests { + + private static final String REPOSITORY_NAME = "temp-fs"; private static final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs") + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, REPOSITORY_NAME) + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, REPOSITORY_NAME) .build(); - public void testReplicaSyncingFromRemoteStore() throws IOException { - ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); - final IndexShard primaryShard = shards.getPrimary(); - final IndexShard replicaShard = shards.getReplicas().get(0); - shards.startPrimary(); - shards.startAll(); - indexDoc(primaryShard, "_doc", "1"); - indexDoc(primaryShard, "_doc", "2"); - primaryShard.refresh("test"); - assertDocs(primaryShard, "1", "2"); - flushShard(primaryShard); - - replicaShard.syncSegmentsFromRemoteSegmentStore(true, true, false); - assertDocs(replicaShard, "1", "2"); - closeShards(primaryShard, replicaShard); + @Before + public void setup() { + // Todo: Remove feature flag once remote store integration with segrep goes GA + FeatureFlags.initializeFeatureFlags( + Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() + ); + } + + protected Settings getIndexSettings() { + return settings; + } + + protected ReplicationGroup getReplicationGroup(int numberOfReplicas) throws IOException { + return createGroup(numberOfReplicas, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); + 
    }
+
+    public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshRefresh() throws Exception {
+        testNRTReplicaWithRemoteStorePromotedAsPrimary(false, false);
+    }
+
+    public void testNRTReplicaWithRemoteStorePromotedAsPrimaryRefreshCommit() throws Exception {
+        testNRTReplicaWithRemoteStorePromotedAsPrimary(false, true);
+    }
+
+    public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitRefresh() throws Exception {
+        testNRTReplicaWithRemoteStorePromotedAsPrimary(true, false);
+    }
+
+    public void testNRTReplicaWithRemoteStorePromotedAsPrimaryCommitCommit() throws Exception {
+        testNRTReplicaWithRemoteStorePromotedAsPrimary(true, true);
+    }
+
+    public void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushFirst, boolean performFlushSecond) throws Exception {
+        try (
+            ReplicationGroup shards = createGroup(1, getIndexSettings(), indexMapping, new NRTReplicationEngineFactory(), createTempDir())
+        ) {
+            shards.startAll();
+            IndexShard oldPrimary = shards.getPrimary();
+            final IndexShard nextPrimary = shards.getReplicas().get(0);
+
+            // 1. Create ops that are in the index and xlog of both shards but not yet part of a commit point.
+            final int numDocs = shards.indexDocs(randomInt(10));
+
+            // refresh but do not copy the segments over.
+            if (performFlushFirst) {
+                flushShard(oldPrimary, true);
+            } else {
+                oldPrimary.refresh("Test");
+            }
+            // replicateSegments(primary, shards.getReplicas());
+
+            // at this point both shards should have numDocs persisted and searchable.
+            assertDocCounts(oldPrimary, numDocs, numDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, numDocs, 0);
+            }
+
+            // 2. Create ops that are in the replica's xlog, not in the index.
+            // index some more into both but don't replicate. replica will have only numDocs searchable, but should have totalDocs
+            // persisted.
+            final int additionalDocs = shards.indexDocs(randomInt(10));
+            final int totalDocs = numDocs + additionalDocs;
+
+            if (performFlushSecond) {
+                flushShard(oldPrimary, true);
+            } else {
+                oldPrimary.refresh("Test");
+            }
+            assertDocCounts(oldPrimary, totalDocs, totalDocs);
+            for (IndexShard shard : shards.getReplicas()) {
+                assertDocCounts(shard, totalDocs, 0);
+            }
+            assertTrue(nextPrimary.translogStats().estimatedNumberOfOperations() >= additionalDocs);
+            assertTrue(nextPrimary.translogStats().getUncommittedOperations() >= additionalDocs);
+
+            int prevOperationCount = nextPrimary.translogStats().estimatedNumberOfOperations();
+
+            // promote the replica
+            shards.promoteReplicaToPrimary(nextPrimary).get();
+
+            // close oldPrimary.
+            oldPrimary.close("demoted", false, false);
+            oldPrimary.store().close();
+
+            assertEquals(InternalEngine.class, nextPrimary.getEngine().getClass());
+            assertDocCounts(nextPrimary, totalDocs, totalDocs);
+
+            // As we are downloading segments from remote segment store on failover, there should not be
+            // any operations replayed from translog
+            assertEquals(prevOperationCount, nextPrimary.translogStats().estimatedNumberOfOperations());
+
+            // refresh the new primary and verify the replication group is consistent.
+ nextPrimary.refresh("test"); + + for (IndexShard shard : shards) { + assertConsistentHistoryBetweenTranslogAndLucene(shard); + } + final List docsAfterRecovery = getDocIdAndSeqNos(shards.getPrimary()); + for (IndexShard shard : shards.getReplicas()) { + assertThat(shard.routingEntry().toString(), getDocIdAndSeqNos(shard), equalTo(docsAfterRecovery)); + } + } } } diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 7c765cf5df0be..3b2e33388925a 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -43,6 +43,8 @@ import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadata; import org.opensearch.index.store.remote.metadata.RemoteSegmentMetadataHandler; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; @@ -55,8 +57,6 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.HashMap; -import java.util.Collection; import java.util.concurrent.ExecutorService; import static org.mockito.Mockito.mock; @@ -101,7 +101,10 @@ public void setup() throws IOException { ); testUploadTracker = new TestUploadListener(); - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); ExecutorService executorService = OpenSearchExecutors.newDirectExecutorService(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); @@ -260,7 +263,7 @@ private Map getDummyMetadata(String prefix, int commitGeneration * @return ByteArrayIndexInput: metadata file bytes with header and footer * @throws IOException IOException */ - private ByteArrayIndexInput createMetadataFileBytes(Map segmentFilesMap, long generation, long primaryTerm) + private ByteArrayIndexInput createMetadataFileBytes(Map segmentFilesMap, ReplicationCheckpoint replicationCheckpoint) throws IOException { ByteBuffersDataOutput byteBuffersIndexOutput = new ByteBuffersDataOutput(); segmentInfos.write(new ByteBuffersIndexOutput(byteBuffersIndexOutput, "", "")); @@ -270,8 +273,7 @@ private ByteArrayIndexInput createMetadataFileBytes(Map segmentF OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("segment metadata", "metadata output stream", output, 4096); CodecUtil.writeHeader(indexOutput, RemoteSegmentMetadata.METADATA_CODEC, RemoteSegmentMetadata.CURRENT_VERSION); indexOutput.writeMapOfStrings(segmentFilesMap); - indexOutput.writeLong(generation); - indexOutput.writeLong(primaryTerm); + RemoteSegmentMetadata.writeCheckpointToIndexOutput(replicationCheckpoint, indexOutput); indexOutput.writeLong(byteArray.length); indexOutput.writeBytes(byteArray, byteArray.length); CodecUtil.writeFooter(indexOutput); @@ -309,13 +311,13 @@ private Map> populateMetadata() throws IOException { ); when(remoteMetadataDirectory.openInput(metadataFilename, 
IOContext.DEFAULT)).thenAnswer( - I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename), 23, 12) + I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename), indexShard.getLatestReplicationCheckpoint()) ); when(remoteMetadataDirectory.openInput(metadataFilename2, IOContext.DEFAULT)).thenAnswer( - I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename2), 13, 12) + I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename2), indexShard.getLatestReplicationCheckpoint()) ); when(remoteMetadataDirectory.openInput(metadataFilename3, IOContext.DEFAULT)).thenAnswer( - I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename3), 38, 10) + I -> createMetadataFileBytes(metadataFilenameContentMapping.get(metadataFilename3), indexShard.getLatestReplicationCheckpoint()) ); return metadataFilenameContentMapping; @@ -651,7 +653,9 @@ public void testContainsFile() throws IOException { metadata.put("_0.cfe", "_0.cfe::_0.cfe__" + UUIDs.base64UUID() + "::1234::512::" + Version.LATEST.major); metadata.put("_0.cfs", "_0.cfs::_0.cfs__" + UUIDs.base64UUID() + "::2345::1024::" + Version.LATEST.major); - when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn(createMetadataFileBytes(metadata, 1, 5)); + when(remoteMetadataDirectory.openInput(metadataFilename, IOContext.DEFAULT)).thenReturn( + createMetadataFileBytes(metadata, indexShard.getLatestReplicationCheckpoint()) + ); remoteSegmentStoreDirectory.init(); @@ -676,12 +680,19 @@ public void testContainsFile() throws IOException { public void testUploadMetadataEmpty() throws IOException { Directory storeDirectory = mock(Directory.class); IndexOutput indexOutput = mock(IndexOutput.class); - when(storeDirectory.createOutput(startsWith("metadata__12__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput); + final long primaryTerm = indexShard.getOperationPrimaryTerm(); + when(storeDirectory.createOutput(startsWith("metadata__" + primaryTerm + "__o"), eq(IOContext.DEFAULT))).thenReturn(indexOutput); Collection segmentFiles = List.of("_s1.si", "_s1.cfe", "_s3.cfs"); assertThrows( NoSuchFileException.class, - () -> remoteSegmentStoreDirectory.uploadMetadata(segmentFiles, segmentInfos, storeDirectory, 12L, 34L) + () -> remoteSegmentStoreDirectory.uploadMetadata( + segmentFiles, + segmentInfos, + storeDirectory, + 34L, + indexShard.getLatestReplicationCheckpoint() + ) ); } @@ -689,7 +700,7 @@ public void testUploadMetadataNonEmpty() throws IOException { indexDocs(142364, 5); flushShard(indexShard, true); SegmentInfos segInfos = indexShard.store().readLastCommittedSegmentsInfo(); - long primaryTerm = 12; + long primaryTerm = indexShard.getLatestReplicationCheckpoint().getPrimaryTerm(); String primaryTermLong = RemoteStoreUtils.invertLong(primaryTerm); long generation = segInfos.getGeneration(); String generationLong = RemoteStoreUtils.invertLong(generation); @@ -706,7 +717,7 @@ public void testUploadMetadataNonEmpty() throws IOException { getDummyMetadata("_0", (int) generation) ); when(remoteMetadataDirectory.openInput(latestMetadataFileName, IOContext.DEFAULT)).thenReturn( - createMetadataFileBytes(metadataFilenameContentMapping.get(latestMetadataFileName), generation, primaryTerm) + createMetadataFileBytes(metadataFilenameContentMapping.get(latestMetadataFileName), indexShard.getLatestReplicationCheckpoint()) ); remoteSegmentStoreDirectory.init(); @@ -717,7 +728,13 @@ public void testUploadMetadataNonEmpty() throws 
IOException { when(storeDirectory.createOutput(startsWith("metadata__" + primaryTermLong + "__" + generationLong), eq(IOContext.DEFAULT))) .thenReturn(indexOutput); - remoteSegmentStoreDirectory.uploadMetadata(segInfos.files(true), segInfos, storeDirectory, primaryTerm, generation); + remoteSegmentStoreDirectory.uploadMetadata( + segInfos.files(true), + segInfos, + storeDirectory, + generation, + indexShard.getLatestReplicationCheckpoint() + ); verify(remoteMetadataDirectory).copyFrom( eq(storeDirectory), diff --git a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java index 2fee77ab563c0..d0136f04afd75 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/metadata/RemoteSegmentMetadataHandlerTests.java @@ -25,6 +25,8 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardTestCase; import org.opensearch.index.store.Store; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.common.ReplicationType; import java.io.IOException; import java.util.HashMap; @@ -38,16 +40,23 @@ public class RemoteSegmentMetadataHandlerTests extends IndexShardTestCase { private IndexShard indexShard; private SegmentInfos segmentInfos; + private ReplicationCheckpoint replicationCheckpoint; + @Before public void setup() throws IOException { remoteSegmentMetadataHandler = new RemoteSegmentMetadataHandler(); - Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT).build(); + Settings indexSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, org.opensearch.Version.CURRENT) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) + .build(); indexShard = newStartedShard(false, indexSettings, new NRTReplicationEngineFactory()); try (Store store = indexShard.store()) { segmentInfos = store.readLastCommittedSegmentsInfo(); } + replicationCheckpoint = indexShard.getLatestReplicationCheckpoint(); } @After @@ -61,8 +70,7 @@ public void testReadContentNoSegmentInfos() throws IOException { OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("dummy bytes", "dummy stream", output, 4096); Map expectedOutput = getDummyData(); indexOutput.writeMapOfStrings(expectedOutput); - indexOutput.writeLong(1234); - indexOutput.writeLong(1234); + RemoteSegmentMetadata.writeCheckpointToIndexOutput(replicationCheckpoint, indexOutput); indexOutput.writeLong(0); indexOutput.writeBytes(new byte[0], 0); indexOutput.close(); @@ -70,7 +78,7 @@ public void testReadContentNoSegmentInfos() throws IOException { new ByteArrayIndexInput("dummy bytes", BytesReference.toBytes(output.bytes())) ); assertEquals(expectedOutput, metadata.toMapOfStrings()); - assertEquals(1234, metadata.getGeneration()); + assertEquals(replicationCheckpoint.getSegmentsGen(), metadata.getGeneration()); } public void testReadContentWithSegmentInfos() throws IOException { @@ -78,8 +86,7 @@ public void testReadContentWithSegmentInfos() throws IOException { OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput("dummy bytes", "dummy stream", output, 4096); Map expectedOutput = getDummyData(); 
indexOutput.writeMapOfStrings(expectedOutput); - indexOutput.writeLong(1234); - indexOutput.writeLong(1234); + RemoteSegmentMetadata.writeCheckpointToIndexOutput(replicationCheckpoint, indexOutput); ByteBuffersIndexOutput segmentInfosOutput = new ByteBuffersIndexOutput(new ByteBuffersDataOutput(), "test", "resource"); segmentInfos.write(segmentInfosOutput); byte[] segmentInfosBytes = segmentInfosOutput.toArrayCopy(); @@ -90,7 +97,7 @@ public void testReadContentWithSegmentInfos() throws IOException { new ByteArrayIndexInput("dummy bytes", BytesReference.toBytes(output.bytes())) ); assertEquals(expectedOutput, metadata.toMapOfStrings()); - assertEquals(1234, metadata.getGeneration()); + assertEquals(replicationCheckpoint.getSegmentsGen(), metadata.getGeneration()); assertArrayEquals(segmentInfosBytes, metadata.getSegmentInfosBytes()); } @@ -106,8 +113,7 @@ public void testWriteContent() throws IOException { RemoteSegmentMetadata remoteSegmentMetadata = new RemoteSegmentMetadata( RemoteSegmentMetadata.fromMapOfStrings(expectedOutput), segmentInfosBytes, - 1234, - 1234 + indexShard.getLatestReplicationCheckpoint() ); remoteSegmentMetadataHandler.writeContent(indexOutput, remoteSegmentMetadata); indexOutput.close(); @@ -116,8 +122,8 @@ public void testWriteContent() throws IOException { new ByteArrayIndexInput("dummy bytes", BytesReference.toBytes(output.bytes())) ); assertEquals(expectedOutput, metadata.toMapOfStrings()); - assertEquals(1234, metadata.getGeneration()); - assertEquals(1234, metadata.getPrimaryTerm()); + assertEquals(replicationCheckpoint.getSegmentsGen(), metadata.getGeneration()); + assertEquals(replicationCheckpoint.getPrimaryTerm(), metadata.getPrimaryTerm()); assertArrayEquals(segmentInfosBytes, metadata.getSegmentInfosBytes()); } diff --git a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java index 8135d9cd3718e..40182a85608ea 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RemoteStorePeerRecoverySourceHandlerTests.java @@ -8,15 +8,20 @@ package org.opensearch.indices.recovery; +import org.junit.Before; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.seqno.ReplicationTracker; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.replication.common.ReplicationType; +import java.nio.file.Path; + public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLevelReplicationTestCase { private static final Settings settings = Settings.builder() @@ -26,23 +31,36 @@ public class RemoteStorePeerRecoverySourceHandlerTests extends OpenSearchIndexLe .put(IndexSettings.INDEX_REMOTE_TRANSLOG_BUFFER_INTERVAL_SETTING.getKey(), "100ms") .build(); + @Before + public void setup() { + // Todo: Remove feature flag once remote store integration with segrep goes GA + FeatureFlags.initializeFeatureFlags( + Settings.builder().put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL_SETTING.getKey(), "true").build() + ); + } + public 
void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { - try (ReplicationGroup shards = createGroup(0, settings, new NRTReplicationEngineFactory())) { + final Path remoteDir = createTempDir(); + final String indexMapping = "{ \"" + MapperService.SINGLE_MAPPING_NAME + "\": {} }"; + try (ReplicationGroup shards = createGroup(0, settings, indexMapping, new NRTReplicationEngineFactory(), remoteDir)) { // Step1 - Start primary, index docs and flush shards.startPrimary(); final IndexShard primary = shards.getPrimary(); - int numDocs = shards.indexDocs(randomIntBetween(10, 100)); + int numDocs = shards.indexDocs(randomIntBetween(10, 20)); + logger.info("--> Index numDocs {} and flush", numDocs); shards.flush(); // Step 2 - Start replica for recovery to happen, check both has same number of docs - final IndexShard replica1 = shards.addReplica(); + final IndexShard replica1 = shards.addReplica(remoteDir); + logger.info("--> Added and started replica {}", replica1.routingEntry()); shards.startAll(); assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); // Step 3 - Index more docs, run segment replication, check both have same number of docs - int moreDocs = shards.indexDocs(randomIntBetween(10, 100)); + int moreDocs = shards.indexDocs(randomIntBetween(10, 20)); primary.refresh("test"); + logger.info("--> Index more docs {} and replicate segments", moreDocs); replicateSegments(primary, shards.getReplicas()); assertEquals(getDocIdAndSeqNos(primary), getDocIdAndSeqNos(replica1)); @@ -55,7 +73,8 @@ public void testReplicaShardRecoveryUptoLastFlushedCommit() throws Exception { assertFalse(primary.getRetentionLeases().contains(ReplicationTracker.getPeerRecoveryRetentionLeaseId(replica1.routingEntry()))); // Step 6 - Start new replica, recovery happens, and check that new replica has all docs - final IndexShard replica2 = shards.addReplica(); + final IndexShard replica2 = shards.addReplica(remoteDir); + logger.info("--> Added and started replica {}", replica2.routingEntry()); shards.startAll(); shards.assertAllEqual(numDocs + moreDocs); diff --git a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java index 04b5aa58ea485..9204f48ba5bdd 100644 --- a/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/RemoteStoreReplicationSourceTests.java @@ -8,43 +8,38 @@ package org.opensearch.indices.replication; -import org.apache.lucene.codecs.Codec; import org.apache.lucene.store.FilterDirectory; -import org.mockito.Mockito; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.settings.Settings; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; import org.opensearch.index.shard.RemoteStoreRefreshListenerTests; import org.opensearch.index.store.RemoteSegmentStoreDirectory; import org.opensearch.index.store.Store; +import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import 
java.io.IOException; import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; public class RemoteStoreReplicationSourceTests extends OpenSearchIndexLevelReplicationTestCase { - - private static final long PRIMARY_TERM = 1L; - private static final long SEGMENTS_GEN = 2L; - private static final long VERSION = 4L; private static final long REPLICATION_ID = 123L; private RemoteStoreReplicationSource replicationSource; - private IndexShard indexShard; - - private IndexShard mockShard; - - private Store remoteStore; + private IndexShard primaryShard; + private IndexShard replicaShard; private final Settings settings = Settings.builder() .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "my-repo") @@ -55,146 +50,110 @@ public class RemoteStoreReplicationSourceTests extends OpenSearchIndexLevelRepli @Override public void setUp() throws Exception { super.setUp(); - - indexShard = newStartedShard(true, settings, new InternalEngineFactory()); - - indexDoc(indexShard, "_doc", "1"); - indexDoc(indexShard, "_doc", "2"); - indexShard.refresh("test"); - - // mock shard - mockShard = mock(IndexShard.class); - Store store = mock(Store.class); - when(mockShard.store()).thenReturn(store); - when(store.directory()).thenReturn(indexShard.store().directory()); - remoteStore = mock(Store.class); - when(mockShard.remoteStore()).thenReturn(remoteStore); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); - FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( - new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) - ); - when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); - replicationSource = new RemoteStoreReplicationSource(mockShard); + primaryShard = newStartedShard(true, settings, new InternalEngineFactory()); + indexDoc(primaryShard, "_doc", "1"); + indexDoc(primaryShard, "_doc", "2"); + primaryShard.refresh("test"); + replicaShard = newStartedShard(false, settings, new NRTReplicationEngineFactory()); } @Override public void tearDown() throws Exception { - closeShards(indexShard); + closeShards(primaryShard, replicaShard); super.tearDown(); } public void testGetCheckpointMetadata() throws ExecutionException, InterruptedException { - when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot()); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture res = PlainActionFuture.newFuture(); + replicationSource = new RemoteStoreReplicationSource(primaryShard); replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); CheckpointInfoResponse response = res.get(); assert (response.getCheckpoint().equals(checkpoint)); - assert (!response.getMetadataMap().isEmpty()); + assert (response.getMetadataMap().isEmpty() == false); } public void testGetCheckpointMetadataFailure() { - final 
ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - + IndexShard mockShard = mock(IndexShard.class); + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); when(mockShard.getSegmentInfosSnapshot()).thenThrow(new RuntimeException("test")); - assertThrows(RuntimeException.class, () -> { + replicationSource = new RemoteStoreReplicationSource(mockShard); final PlainActionFuture res = PlainActionFuture.newFuture(); replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); res.get(); }); } - public void testGetCheckpointMetadataEmpty() throws ExecutionException, InterruptedException, IOException { - when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot()); - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - IndexShard emptyIndexShard = null; - try { - emptyIndexShard = newStartedShard( - true, - settings, - new InternalEngineFactory() - ); - RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = - (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) emptyIndexShard.remoteStore().directory()).getDelegate()) - .getDelegate(); - FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory( - new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory) - ); - when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); + public void testGetSegmentFiles() throws ExecutionException, InterruptedException, IOException { + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); + List filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); + final PlainActionFuture res = PlainActionFuture.newFuture(); + replicationSource = new RemoteStoreReplicationSource(primaryShard); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, replicaShard, res); + GetSegmentFilesResponse response = res.get(); + assertEquals(response.files.size(), filesToFetch.size()); + assertTrue(response.files.containsAll(filesToFetch)); + closeShards(replicaShard); + } - final PlainActionFuture res = PlainActionFuture.newFuture(); - when(mockShard.state()).thenReturn(IndexShardState.RECOVERING); - // Recovering shard should just do a noop and return empty metadata map. - replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); - CheckpointInfoResponse response = res.get(); - assert (response.getCheckpoint().equals(checkpoint)); - assert (response.getMetadataMap().isEmpty()); - - when(mockShard.state()).thenReturn(IndexShardState.STARTED); - // Started shard should fail with assertion error. 
- expectThrows(AssertionError.class, () -> { - final PlainActionFuture res2 = PlainActionFuture.newFuture(); - replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res2); - }); - } finally { - closeShards(emptyIndexShard); + public void testGetSegmentFilesAlreadyExists() throws IOException, InterruptedException { + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); + List filesToFetch = primaryShard.getSegmentMetadataMap().values().stream().collect(Collectors.toList()); + CountDownLatch latch = new CountDownLatch(1); + try { + final PlainActionFuture res = PlainActionFuture.newFuture(); + replicationSource = new RemoteStoreReplicationSource(primaryShard); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, filesToFetch, primaryShard, res); + res.get(); + } catch (AssertionError | ExecutionException ex) { + latch.countDown(); + assertTrue(ex instanceof AssertionError); + assertTrue(ex.getMessage().startsWith("Local store already contains the file")); } + latch.await(); } - public void testGetSegmentFiles() throws ExecutionException, InterruptedException { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - + public void testGetSegmentFilesReturnEmptyResponse() throws ExecutionException, InterruptedException { + final ReplicationCheckpoint checkpoint = primaryShard.getLatestReplicationCheckpoint(); final PlainActionFuture res = PlainActionFuture.newFuture(); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), indexShard, res); + replicationSource = new RemoteStoreReplicationSource(primaryShard); + replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), primaryShard, res); GetSegmentFilesResponse response = res.get(); assert (response.files.isEmpty()); - assertEquals("remote store", replicationSource.getDescription()); - } - public void testGetSegmentFilesFailure() throws IOException { - final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint( - indexShard.shardId(), - PRIMARY_TERM, - SEGMENTS_GEN, - VERSION, - Codec.getDefault().getName() - ); - Mockito.doThrow(new RuntimeException("testing")) - .when(mockShard) - .syncSegmentsFromRemoteSegmentStore(Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyBoolean()); - assertThrows(ExecutionException.class, () -> { - final PlainActionFuture res = PlainActionFuture.newFuture(); - replicationSource.getSegmentFiles(REPLICATION_ID, checkpoint, Collections.emptyList(), mockShard, res); - res.get(10, TimeUnit.SECONDS); + public void testGetCheckpointMetadataEmpty() throws ExecutionException, InterruptedException, IOException { + IndexShard mockShard = mock(IndexShard.class); + // Build mockShard to return replicaShard directory so that empty metadata file is returned. + buildIndexShardBehavior(mockShard, replicaShard); + replicationSource = new RemoteStoreReplicationSource(mockShard); + + // Mock replica shard state to RECOVERING so that getCheckpointInfo return empty map + final ReplicationCheckpoint checkpoint = replicaShard.getLatestReplicationCheckpoint(); + final PlainActionFuture res = PlainActionFuture.newFuture(); + when(mockShard.state()).thenReturn(IndexShardState.RECOVERING); + replicationSource = new RemoteStoreReplicationSource(mockShard); + // Recovering shard should just do a noop and return empty metadata map. 
+ replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res); + CheckpointInfoResponse response = res.get(); + assert (response.getCheckpoint().equals(checkpoint)); + assert (response.getMetadataMap().isEmpty()); + + // Started shard should fail with assertion error. + when(mockShard.state()).thenReturn(IndexShardState.STARTED); + expectThrows(AssertionError.class, () -> { + final PlainActionFuture res2 = PlainActionFuture.newFuture(); + replicationSource.getCheckpointMetadata(REPLICATION_ID, checkpoint, res2); }); } + + private void buildIndexShardBehavior(IndexShard mockShard, IndexShard indexShard) { + when(mockShard.getSegmentInfosSnapshot()).thenReturn(indexShard.getSegmentInfosSnapshot()); + Store remoteStore = mock(Store.class); + when(mockShard.remoteStore()).thenReturn(remoteStore); + RemoteSegmentStoreDirectory remoteSegmentStoreDirectory = (RemoteSegmentStoreDirectory) ((FilterDirectory) ((FilterDirectory) indexShard.remoteStore().directory()).getDelegate()).getDelegate(); + FilterDirectory remoteStoreFilterDirectory = new RemoteStoreRefreshListenerTests.TestFilterDirectory(new RemoteStoreRefreshListenerTests.TestFilterDirectory(remoteSegmentStoreDirectory)); + when(remoteStore.directory()).thenReturn(remoteStoreFilterDirectory); + } } diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index f3c98ce4f9f03..278847e56e65f 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -149,7 +149,11 @@ protected ReplicationGroup createGroup(int replicas, Settings settings, EngineFa protected ReplicationGroup createGroup(int replicas, Settings settings, String mappings, EngineFactory engineFactory) throws IOException { - return createGroup(replicas, settings, mappings, engineFactory, null); + Path remotePath = null; + if ("true".equals(settings.get(IndexMetadata.SETTING_REMOTE_STORE_ENABLED))) { + remotePath = createTempDir(); + } + return createGroup(replicas, settings, mappings, engineFactory, remotePath); } protected ReplicationGroup createGroup(int replicas, Settings settings, String mappings, EngineFactory engineFactory, Path remotePath) diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 7a492dbebd836..66e5459cfea3b 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -1326,9 +1326,27 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) { }; } + private SegmentReplicationTargetService getSegmentReplicationTargetService( + TransportService transportService, + IndicesService indicesService, + ClusterService clusterService, + SegmentReplicationSourceFactory sourceFactory + ) { + return new SegmentReplicationTargetService( + threadPool, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)), + transportService, + sourceFactory, + indicesService, + clusterService + ); + } + /** * Segment Replication specific test method - Creates a {@link SegmentReplicationTargetService} to perform 
replications that has
-     * been configured to return the given primaryShard's current segments.
+     * been configured to return the given primaryShard's current segments. In order to do so, it mimics the replication
+     * source (to avoid transport calls) and simply copies over the segment files from the primary's store to the replica's as part of
+     * get_files calls.
      *
      * @param primaryShard {@link IndexShard} - The source primary shard in segment replication.
      * @param target {@link IndexShard} - The target replica shard in segment replication.
@@ -1339,7 +1357,7 @@ public static Engine.Warmer createTestWarmer(IndexSettings indexSettings) {
      * which are desired right after files are copied. e.g. To work with temp files
      * @return Returns SegmentReplicationTargetService
      */
-    public final SegmentReplicationTargetService prepareForReplication(
+    private SegmentReplicationTargetService prepareForReplication(
         IndexShard primaryShard,
         IndexShard target,
         TransportService transportService,
@@ -1347,22 +1365,28 @@ public final SegmentReplicationTargetService prepareForReplication(
         ClusterService clusterService,
         Consumer<IndexShard> postGetFilesRunnable
     ) {
-        final SegmentReplicationSourceFactory sourceFactory = mock(SegmentReplicationSourceFactory.class);
-        final SegmentReplicationTargetService targetService = new SegmentReplicationTargetService(
-            threadPool,
-            new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
-            transportService,
-            sourceFactory,
-            indicesService,
-            clusterService
-        );
-        final SegmentReplicationSource replicationSource = getSegmentReplicationSource(
-            primaryShard,
-            (repId) -> targetService.get(repId),
-            postGetFilesRunnable
-        );
-        when(sourceFactory.get(any())).thenReturn(replicationSource);
-        when(indicesService.getShardOrNull(any())).thenReturn(target);
+
+        SegmentReplicationSourceFactory sourceFactory = null;
+        SegmentReplicationTargetService targetService;
+        if (primaryShard.indexSettings.isRemoteStoreEnabled()) {
+            RecoverySettings recoverySettings = new RecoverySettings(
+                Settings.EMPTY,
+                new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)
+            );
+            sourceFactory = new SegmentReplicationSourceFactory(transportService, recoverySettings, clusterService);
+            targetService = getSegmentReplicationTargetService(transportService, indicesService, clusterService, sourceFactory);
+        } else {
+            sourceFactory = mock(SegmentReplicationSourceFactory.class);
+            targetService = getSegmentReplicationTargetService(transportService, indicesService, clusterService, sourceFactory);
+            final SegmentReplicationSource replicationSource = getSegmentReplicationSource(
+                primaryShard,
+                (repId) -> targetService.get(repId),
+                postGetFilesRunnable
+            );
+            when(sourceFactory.get(any())).thenReturn(replicationSource);
+            // This is needed for force segment sync call. Remote store uses a different recovery mechanism
+            when(indicesService.getShardOrNull(any())).thenReturn(target);
+        }
         return targetService;
     }
@@ -1502,9 +1526,11 @@ public void getSegmentFiles(
      * @param replicaShards - Replicas that will be updated.
      * @return {@link List} List of target components orchestrating replication.
*/ - public final List replicateSegments(IndexShard primaryShard, List replicaShards) + protected final List replicateSegments(IndexShard primaryShard, List replicaShards) throws IOException, InterruptedException { + // Latch to block test execution until replica catches up final CountDownLatch countDownLatch = new CountDownLatch(replicaShards.size()); + // Get primary metadata to verify with replica's, used to ensure replica catches up Map primaryMetadata; try (final GatedCloseable segmentInfosSnapshot = primaryShard.getSegmentInfosSnapshot()) { final SegmentInfos primarySegmentInfos = segmentInfosSnapshot.get(); From 82b5f3c7f95db84d4c71ed9aa9c968334da7a926 Mon Sep 17 00:00:00 2001 From: Sarthak Aggarwal Date: Wed, 2 Aug 2023 13:54:07 +0530 Subject: [PATCH 13/24] Disallowing compression level for lz4 and best_compression codec (#8737) * Disallowing compression level for lz4 and best_compression codec * setting up codec settings interface for validation Signed-off-by: Sarthak Aggarwal --- CHANGELOG.md | 1 + .../index/codec/CodecCompressionLevelIT.java | 178 ++++++++++++++++++ .../opensearch/index/codec/CodecSettings.java | 21 +++ .../index/codec/customcodecs/ZstdCodec.java | 10 +- .../codec/customcodecs/ZstdNoDictCodec.java | 10 +- .../opensearch/index/engine/EngineConfig.java | 51 ++++- .../opensearch/index/codec/CodecTests.java | 71 ++++++- 7 files changed, 332 insertions(+), 10 deletions(-) create mode 100644 server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java create mode 100644 server/src/main/java/org/opensearch/index/codec/CodecSettings.java diff --git a/CHANGELOG.md b/CHANGELOG.md index a37976462b38e..e29bbd2da4db5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Start replication checkpointTimers on primary before segments upload to remote store. ([#8221]()https://github.com/opensearch-project/OpenSearch/pull/8221) - [distribution/archives] [Linux] [x64] Provide the variant of the distributions bundled with JRE ([#8195]()https://github.com/opensearch-project/OpenSearch/pull/8195) - Add configuration for file cache size to max remote data ratio to prevent oversubscription of file cache ([#8606](https://github.com/opensearch-project/OpenSearch/pull/8606)) +- Disallow compression level to be set for default and best_compression index codecs ([#8737]()https://github.com/opensearch-project/OpenSearch/pull/8737) ### Dependencies - Bump `org.apache.logging.log4j:log4j-core` from 2.17.1 to 2.20.0 ([#8307](https://github.com/opensearch-project/OpenSearch/pull/8307)) diff --git a/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java b/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java new file mode 100644 index 0000000000000..5f3e53f1454fc --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/index/codec/CodecCompressionLevelIT.java @@ -0,0 +1,178 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.codec; + +import org.apache.logging.log4j.core.util.Throwables; +import org.opensearch.action.admin.indices.settings.put.UpdateSettingsRequest; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.util.concurrent.ExecutionException; + +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class CodecCompressionLevelIT extends OpenSearchIntegTestCase { + + public void testLuceneCodecsCreateIndexWithCompressionLevel() { + + internalCluster().ensureAtLeastNumDataNodes(1); + final String index = "test-index"; + + // creating index + assertThrows( + IllegalArgumentException.class, + () -> createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .put("index.codec.compression_level", randomIntBetween(1, 6)) + .build() + ) + ); + + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .build() + ); + ensureGreen(index); + } + + public void testZStandardCodecsCreateIndexWithCompressionLevel() { + + internalCluster().ensureAtLeastNumDataNodes(1); + final String index = "test-index"; + + // creating index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) + .put("index.codec.compression_level", randomIntBetween(1, 6)) + .build() + ); + + ensureGreen(index); + } + + public void testZStandardToLuceneCodecsWithCompressionLevel() throws ExecutionException, InterruptedException { + + internalCluster().ensureAtLeastNumDataNodes(1); + final String index = "test-index"; + + // creating index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) + .put("index.codec.compression_level", randomIntBetween(1, 6)) + .build() + ); + ensureGreen(index); + + assertAcked(client().admin().indices().prepareClose(index)); + + Throwable executionException = expectThrows( + ExecutionException.class, + () -> client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(index).settings( + Settings.builder().put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + ) + ) + .get() + ); + + Throwable rootCause = Throwables.getRootCause(executionException); + assertEquals(IllegalArgumentException.class, rootCause.getClass()); + assertTrue(rootCause.getMessage().startsWith("Compression level cannot be set")); + + assertAcked( + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(index).settings( + Settings.builder() + .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .put("index.codec.compression_level", (String) null) + ) + ) + .get() + ); + + 
assertAcked(client().admin().indices().prepareOpen(index)); + ensureGreen(index); + } + + public void testLuceneToZStandardCodecsWithCompressionLevel() throws ExecutionException, InterruptedException { + + internalCluster().ensureAtLeastNumDataNodes(1); + final String index = "test-index"; + + // creating index + createIndex( + index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .build() + ); + ensureGreen(index); + + assertAcked(client().admin().indices().prepareClose(index)); + + Throwable executionException = expectThrows( + ExecutionException.class, + () -> client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(index).settings( + Settings.builder() + .put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .put("index.codec.compression_level", randomIntBetween(1, 6)) + ) + ) + .get() + ); + + Throwable rootCause = Throwables.getRootCause(executionException); + assertEquals(IllegalArgumentException.class, rootCause.getClass()); + assertTrue(rootCause.getMessage().startsWith("Compression level cannot be set")); + + assertAcked( + client().admin() + .indices() + .updateSettings( + new UpdateSettingsRequest(index).settings( + Settings.builder() + .put("index.codec", randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) + .put("index.codec.compression_level", randomIntBetween(1, 6)) + ) + ) + .get() + ); + + assertAcked(client().admin().indices().prepareOpen(index)); + ensureGreen(index); + } + +} diff --git a/server/src/main/java/org/opensearch/index/codec/CodecSettings.java b/server/src/main/java/org/opensearch/index/codec/CodecSettings.java new file mode 100644 index 0000000000000..2d371dfc190db --- /dev/null +++ b/server/src/main/java/org/opensearch/index/codec/CodecSettings.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.codec; + +import org.apache.lucene.codecs.Codec; +import org.opensearch.common.settings.Setting; + +/** + * This {@link CodecSettings} allows us to manage the settings with {@link Codec}. + * + * @opensearch.internal + */ +public interface CodecSettings { + boolean supports(Setting<?> setting); +} diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java index 04c110fceacdf..042f7eaa29e53 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdCodec.java @@ -9,12 +9,15 @@ package org.opensearch.index.codec.customcodecs; import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.index.codec.CodecSettings; +import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; /** * ZstdCodec provides ZSTD compressor using the zstd-jni library. */ -public class ZstdCodec extends Lucene95CustomCodec { +public class ZstdCodec extends Lucene95CustomCodec implements CodecSettings { /** * Creates a new ZstdCodec instance with the default compression level.
@@ -41,4 +44,9 @@ public ZstdCodec(MapperService mapperService, Logger logger, int compressionLeve public String toString() { return getClass().getSimpleName(); } + + @Override + public boolean supports(Setting<?> setting) { + return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); + } } diff --git a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java index 134f9a14422ad..a7e8e0e42ee68 100644 --- a/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/customcodecs/ZstdNoDictCodec.java @@ -9,12 +9,15 @@ package org.opensearch.index.codec.customcodecs; import org.apache.logging.log4j.Logger; +import org.opensearch.common.settings.Setting; +import org.opensearch.index.codec.CodecSettings; +import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; /** * ZstdNoDictCodec provides ZSTD compressor without dictionary support. */ -public class ZstdNoDictCodec extends Lucene95CustomCodec { +public class ZstdNoDictCodec extends Lucene95CustomCodec implements CodecSettings { /** * Creates a new ZstdNoDictCodec instance with the default compression level. @@ -41,4 +44,9 @@ public ZstdNoDictCodec(MapperService mapperService, Logger logger, int compressi public String toString() { return getClass().getSimpleName(); } + + @Override + public boolean supports(Setting<?> setting) { + return setting.equals(EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING); + } } diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 7900e63a95c39..03669eaac0070 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -48,6 +48,7 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.index.IndexSettings; import org.opensearch.index.codec.CodecService; +import org.opensearch.index.codec.CodecSettings; import org.opensearch.index.mapper.ParsedDocument; import org.opensearch.index.seqno.RetentionLeases; import org.opensearch.core.index.shard.ShardId; @@ -63,6 +64,7 @@ import java.util.Comparator; import java.util.List; import java.util.Objects; +import java.util.Set; import java.util.function.BooleanSupplier; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -148,13 +150,52 @@ public Supplier<RetentionLeases> retentionLeasesSupplier() { * Compression Level gives a trade-off between compression ratio and speed. The higher compression level results in higher compression ratio but slower compression and decompression speeds. * This setting is not realtime updateable.
*/ - public static final Setting<Integer> INDEX_CODEC_COMPRESSION_LEVEL_SETTING = Setting.intSetting( + + public static final Setting<Integer> INDEX_CODEC_COMPRESSION_LEVEL_SETTING = new Setting<>( "index.codec.compression_level", - 3, - 1, - 6, + Integer.toString(3), + new Setting.IntegerParser(1, 6, "index.codec.compression_level", false), Property.IndexScope - ); + ) { + @Override + public Set<SettingDependency> getSettingsDependencies(String key) { + return Set.of(new SettingDependency() { + @Override + public Setting<String> getSetting() { + return INDEX_CODEC_SETTING; + } + + @Override + public void validate(String key, Object value, Object dependency) { + if (!(dependency instanceof String)) { + throw new IllegalArgumentException("Codec should be of string type."); + } + doValidateCodecSettings((String) dependency); + } + }); + } + }; + + private static void doValidateCodecSettings(final String codec) { + switch (codec) { + case "zstd": + case "zstd_no_dict": + return; + case "best_compression": + case "lucene_default": + case "default": + break; + default: + if (Codec.availableCodecs().contains(codec)) { + Codec luceneCodec = Codec.forName(codec); + if (luceneCodec instanceof CodecSettings + && ((CodecSettings) luceneCodec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)) { + return; + } + } + } + throw new IllegalArgumentException("Compression level cannot be set for the " + codec + " codec."); + } /** * Configures an index to optimize documents with auto generated ids for append only. If this setting is updated from false diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index b0d904392407c..0eeeae9e8e59e 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -43,12 +43,14 @@ import org.apache.lucene.index.SegmentReader; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.util.LuceneTestCase.SuppressCodecs; +import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; import org.opensearch.index.analysis.IndexAnalyzers; import org.opensearch.index.codec.customcodecs.Lucene95CustomCodec; import org.opensearch.index.codec.customcodecs.Lucene95CustomStoredFieldsFormat; +import org.opensearch.index.engine.EngineConfig; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.indices.mapper.MapperRegistry; @@ -60,6 +62,7 @@ import java.util.Collections; import static org.hamcrest.Matchers.instanceOf; +import static org.opensearch.index.engine.EngineConfig.INDEX_CODEC_COMPRESSION_LEVEL_SETTING; @SuppressCodecs("*") // we test against default codec so never get a random one here!
public class CodecTests extends OpenSearchTestCase { @@ -96,7 +99,7 @@ public void testZstdNoDict() throws Exception { public void testZstdWithCompressionLevel() throws Exception { int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel).codec("zstd"); + Codec codec = createCodecService(randomCompressionLevel, "zstd").codec("zstd"); assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD, codec); Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); @@ -104,12 +107,73 @@ public void testZstdWithCompressionLevel() throws Exception { public void testZstdNoDictWithCompressionLevel() throws Exception { int randomCompressionLevel = randomIntBetween(1, 6); - Codec codec = createCodecService(randomCompressionLevel).codec("zstd_no_dict"); + Codec codec = createCodecService(randomCompressionLevel, "zstd_no_dict").codec("zstd_no_dict"); assertStoredFieldsCompressionEquals(Lucene95CustomCodec.Mode.ZSTD_NO_DICT, codec); Lucene95CustomStoredFieldsFormat storedFieldsFormat = (Lucene95CustomStoredFieldsFormat) codec.storedFieldsFormat(); assertEquals(randomCompressionLevel, storedFieldsFormat.getCompressionLevel()); } + public void testBestCompressionWithCompressionLevel() { + final Settings zstdSettings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(CodecService.ZSTD_CODEC, CodecService.ZSTD_NO_DICT_CODEC)) + .build(); + + // able to validate zstd + final IndexScopedSettings zstdIndexScopedSettings = new IndexScopedSettings( + zstdSettings, + IndexScopedSettings.BUILT_IN_INDEX_SETTINGS + ); + zstdIndexScopedSettings.validate(zstdSettings, true); + + final Settings settings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)) + .build(); + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexScopedSettings.validate(settings, true)); + assertTrue(e.getMessage().startsWith("Compression level cannot be set")); + } + + public void testLuceneCodecsWithCompressionLevel() { + String codecName = randomFrom(Codec.availableCodecs()); + Codec codec = Codec.forName(codecName); + + final Settings customCodecSettings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), "Lucene95CustomCodec") + .build(); + + final IndexScopedSettings customCodecIndexScopedSettings = new IndexScopedSettings( + customCodecSettings, + IndexScopedSettings.BUILT_IN_INDEX_SETTINGS + ); + customCodecIndexScopedSettings.validate(customCodecSettings, true); + + final Settings settings = Settings.builder() + .put(INDEX_CODEC_COMPRESSION_LEVEL_SETTING.getKey(), randomIntBetween(1, 6)) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) + .build(); + final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS); + + if (!(codec instanceof CodecSettings && ((CodecSettings) 
codec).supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING))) { + final IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> indexScopedSettings.validate(settings, true) + ); + assertTrue(e.getMessage().startsWith("Compression level cannot be set")); + } + } + + public void testZstandardCompressionLevelSupport() throws Exception { + CodecService codecService = createCodecService(false); + CodecSettings zstdCodec = (CodecSettings) codecService.codec("zstd"); + CodecSettings zstdNoDictCodec = (CodecSettings) codecService.codec("zstd_no_dict"); + assertTrue(zstdCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); + assertTrue(zstdNoDictCodec.supports(INDEX_CODEC_COMPRESSION_LEVEL_SETTING)); + } + public void testDefaultMapperServiceNull() throws Exception { Codec codec = createCodecService(true).codec("default"); assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); @@ -165,9 +229,10 @@ private CodecService createCodecService(boolean isMapperServiceNull) throws IOEx return buildCodecService(nodeSettings); } - private CodecService createCodecService(int randomCompressionLevel) throws IOException { + private CodecService createCodecService(int randomCompressionLevel, String codec) throws IOException { Settings nodeSettings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .put("index.codec", codec) .put("index.codec.compression_level", randomCompressionLevel) .build(); return buildCodecService(nodeSettings); From 97205280d198c115ab33d6cbd4524b56269a0c2f Mon Sep 17 00:00:00 2001 From: Sarthak Aggarwal Date: Wed, 2 Aug 2023 17:40:27 +0530 Subject: [PATCH 14/24] readding codecs in the tests (#8987) Signed-off-by: Sarthak Aggarwal --- .../java/org/opensearch/upgrades/IndexingIT.java | 9 ++++++++- .../indices/replication/SegmentReplicationIT.java | 9 +++++---- .../opensearch/test/OpenSearchIntegTestCase.java | 15 ++++++++++++++- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index b60ee09d39048..a03d299b32274 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -43,17 +43,20 @@ import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.EngineConfig; import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; import java.net.URISyntaxException; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; import static org.opensearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; +import static org.opensearch.test.OpenSearchIntegTestCase.CODECS; /** * Basic test that indexed documents survive the rolling restart. 
See @@ -267,7 +270,11 @@ public void testIndexingWithSegRep() throws Exception { .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put( EngineConfig.INDEX_CODEC_SETTING.getKey(), - randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC, CodecService.LUCENE_DEFAULT_CODEC) + randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + }) ) .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms"); createIndex(indexName, settings.build()); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 3ab1a2a8564c5..08186bf3f9362 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -201,10 +201,11 @@ public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { final String nodeB = internalCluster().startDataOnlyNode(); final Settings settings = Settings.builder() .put(indexSettings()) - .put( - EngineConfig.INDEX_CODEC_SETTING.getKey(), - randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC, CodecService.LUCENE_DEFAULT_CODEC) - ) + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), randomFrom(new ArrayList<>(CODECS) { + { + add(CodecService.LUCENE_DEFAULT_CODEC); + } + })) .build(); createIndex(INDEX_NAME, settings); ensureGreen(INDEX_NAME); diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 5bba700f53dc4..5e79bec91bd90 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -38,6 +38,7 @@ import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.hc.core5.http.HttpHost; +import org.apache.lucene.codecs.Codec; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -132,6 +133,7 @@ import org.opensearch.index.MockEngineFactoryPlugin; import org.opensearch.index.codec.CodecService; import org.opensearch.index.engine.Segment; +import org.opensearch.index.mapper.CompletionFieldMapper; import org.opensearch.index.mapper.MockFieldFilterPlugin; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; @@ -271,6 +273,17 @@ public abstract class OpenSearchIntegTestCase extends OpenSearchTestCase { */ public static final String SYSPROP_THIRDPARTY = "tests.thirdparty"; + /** + * The lucene_default {@link Codec} is not added to the list as it internally maps to Asserting {@link Codec}. + * The override to fetch the {@link CompletionFieldMapper.CompletionFieldType} postings format is not available for this codec. + */ + public static final List<String> CODECS = List.of( + CodecService.DEFAULT_CODEC, + CodecService.BEST_COMPRESSION_CODEC, + CodecService.ZSTD_CODEC, + CodecService.ZSTD_NO_DICT_CODEC + ); + /** * Annotation for third-party integration tests. *

@@ -427,7 +440,7 @@ protected void randomIndexTemplate() { // otherwise, use it, it has assertions and so on that can find bugs. SuppressCodecs annotation = getClass().getAnnotation(SuppressCodecs.class); if (annotation != null && annotation.value().length == 1 && "*".equals(annotation.value()[0])) { - randomSettingsBuilder.put("index.codec", randomFrom(CodecService.DEFAULT_CODEC, CodecService.BEST_COMPRESSION_CODEC)); + randomSettingsBuilder.put("index.codec", randomFrom(CODECS)); } else { randomSettingsBuilder.put("index.codec", CodecService.LUCENE_DEFAULT_CODEC); } From d916f9c1027f5b2ccff971a66921b7c26db1688f Mon Sep 17 00:00:00 2001 From: Stefano Maglione Date: Wed, 2 Aug 2023 18:11:09 +0200 Subject: [PATCH 15/24] Added featureflag (#8988) Added the featureFlagSettings method to OpenSearchSingleNodeTestCase Signed-off-by: Stefano Maglione --- .../BlobStoreRepositoryRemoteIndexTests.java | 5 ++++- .../blobstore/BlobStoreRepositoryTests.java | 4 ++-- .../opensearch/search/SearchServiceTests.java | 7 ++++++- .../test/OpenSearchSingleNodeTestCase.java | 18 ++++++++++++++++++ 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java index f25498b8c8368..06fd2aaa22039 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryRemoteIndexTests.java @@ -64,12 +64,15 @@ * Tests for the {@link BlobStoreRepository} and its subclasses. */ public class BlobStoreRepositoryRemoteIndexTests extends BlobStoreRepositoryHelperTests { + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); + } @Override protected Settings nodeSettings() { return Settings.builder() .put(super.nodeSettings()) - .put(FeatureFlags.REMOTE_STORE, "true") .put(CLUSTER_REPLICATION_TYPE_SETTING.getKey(), ReplicationType.SEGMENT) .put(CLUSTER_REMOTE_STORE_ENABLED_SETTING.getKey(), true) .put(CLUSTER_REMOTE_SEGMENT_STORE_REPOSITORY_SETTING.getKey(), "test-rs-repo") diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index f1253e377c819..26082f2456867 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -106,8 +106,8 @@ protected void assertSnapshotOrGenericThread() { } @Override - protected Settings nodeSettings() { - return Settings.builder().put(super.nodeSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.REMOTE_STORE, "true").build(); } public void testRetrieveSnapshots() throws Exception { diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 2371c5812814a..876a2d15cad7e 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -225,9 +225,14 @@ public void 
onQueryPhase(SearchContext context, long tookInNanos) { } } + @Override + protected Settings featureFlagSettings() { + return Settings.builder().put(super.featureFlagSettings()).put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, "true").build(); + } + @Override protected Settings nodeSettings() { - return Settings.builder().put("search.default_search_timeout", "5s").put(FeatureFlags.CONCURRENT_SEGMENT_SEARCH, true).build(); + return Settings.builder().put("search.default_search_timeout", "5s").build(); } public void testClearOnClose() { diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java index 1d7c04227b208..63b486e32ff5b 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchSingleNodeTestCase.java @@ -45,6 +45,8 @@ import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.allocation.DiskThresholdSettings; import org.opensearch.common.Priority; +import org.opensearch.common.settings.FeatureFlagSettings; +import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; @@ -248,6 +250,7 @@ private Node newNode() { .put(FeatureFlags.TELEMETRY_SETTING.getKey(), true) .put(TelemetrySettings.TRACER_ENABLED_SETTING.getKey(), true) .put(nodeSettings()) // allow test cases to provide their own settings or override these + .put(featureFlagSettings()) .build(); Collection<Class<? extends Plugin>> plugins = getPlugins(); @@ -414,4 +417,19 @@ protected boolean forbidPrivateIndexSettings() { return true; } + /** + * Setting all feature flag settings at base IT, which can be overridden later by individual + * IT classes. + * + * @return Feature flag settings. + */ + protected Settings featureFlagSettings() { + Settings.Builder featureSettings = Settings.builder(); + for (Setting builtInFlag : FeatureFlagSettings.BUILT_IN_FEATURE_FLAGS) { + featureSettings.put(builtInFlag.getKey(), builtInFlag.getDefaultRaw(Settings.EMPTY)); + } + featureSettings.put(FeatureFlags.TELEMETRY_SETTING.getKey(), true); + return featureSettings.build(); + } + } From 8afb22a525f4728e90826eabaa74109a9e745182 Mon Sep 17 00:00:00 2001 From: Nick Knize Date: Wed, 2 Aug 2023 15:01:24 -0500 Subject: [PATCH 16/24] [Refactor] Network and Transport common classes to Libraries (#9073) This commit refactors the following network and transport classes to the opensearch common and core libraries respectively: * o.o.common.network.Cidrs -> :libs:opensearch-common * o.o.common.network.InetAddresses -> :libs:opensearch-common * o.o.common.network.NetworkAddress -> :libs:opensearch-common * o.o.common.transport.NetworkExceptionHelper -> :libs:opensearch-common * o.o.common.transport.PortsRange -> :libs:opensearch-common * o.o.common.transport.TransportAddress -> :libs:opensearch-core * o.o.common.transport.BoundTransportAddress -> :libs:opensearch-core * o.o.transport.TransportMessage -> :libs:opensearch-core * o.o.transport.TransportResponse -> :libs:opensearch-core The purpose is to reduce the change surface area of the core APIs to minimize impact to downstream consumers while moving toward establishing a formal API for cloud native or serverless implementations.
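For downstream code, these moves should surface only as import-path changes; the rename entries in the diffstat report 94-100% similarity, so class names, constructors, and behavior are expected to be unchanged. As a minimal, hypothetical sketch of a consumer compiled against the new locations (the SeedAddressExample class below is illustrative and not part of this change):

    // Previously: import org.opensearch.common.transport.TransportAddress;
    import org.opensearch.core.common.transport.TransportAddress;

    import java.net.InetAddress;

    public final class SeedAddressExample {
        // Builds a loopback seed address using the relocated TransportAddress;
        // only the package prefix differs from the pre-refactor code.
        public static TransportAddress localSeed(int port) {
            return new TransportAddress(InetAddress.getLoopbackAddress(), port);
        }
    }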
Signed-off-by: Nicholas Walter Knize --- .../benchmark/routing/allocation/Allocators.java | 2 +- .../client/tasks/CancelTasksResponseTests.java | 2 +- .../main/java/org/opensearch/common/network/Cidrs.java | 0 .../org/opensearch/common/network/InetAddresses.java | 0 .../org/opensearch/common/network/NetworkAddress.java | 0 .../org/opensearch/common/network/package-info.java | 10 ++++++++++ .../common/transport/NetworkExceptionHelper.java | 0 .../org/opensearch/common/transport/PortsRange.java | 0 .../org/opensearch/common/transport/package-info.java | 2 +- .../java/org/opensearch/common/network/CidrsTests.java | 0 .../opensearch/common/network/InetAddressesTests.java | 0 .../opensearch/common/network/NetworkAddressTests.java | 0 .../core}/common/transport/BoundTransportAddress.java | 4 ++-- .../core}/common/transport/TransportAddress.java | 4 ++-- .../opensearch/core/common/transport/package-info.java | 10 ++++++++++ .../opensearch/core}/transport/TransportMessage.java | 4 ++-- .../opensearch/core}/transport/TransportResponse.java | 2 +- .../org/opensearch/core/transport/package-info.java | 10 ++++++++++ .../index/reindex/ReindexFromRemoteWithAuthTests.java | 2 +- .../java/org/opensearch/index/reindex/RetryTests.java | 2 +- .../java/org/opensearch/http/netty4/Netty4Http2IT.java | 2 +- .../http/netty4/Netty4HttpRequestSizeLimitIT.java | 2 +- .../org/opensearch/http/netty4/Netty4PipeliningIT.java | 2 +- .../netty4/Netty4TransportMultiPortIntegrationIT.java | 4 ++-- .../netty4/Netty4TransportPublishAddressIT.java | 4 ++-- .../opensearch/http/netty4/Netty4BadRequestTests.java | 2 +- .../http/netty4/Netty4HttpServerPipeliningTests.java | 2 +- .../http/netty4/Netty4HttpServerTransportTests.java | 2 +- .../netty4/Netty4SizeHeaderFrameDecoderTests.java | 2 +- .../transport/netty4/SimpleNetty4TransportTests.java | 2 +- .../classic/AbstractAzureComputeServiceTestCase.java | 2 +- .../azure/classic/AzureSeedHostsProvider.java | 2 +- .../discovery/ec2/AwsEc2SeedHostsProvider.java | 2 +- .../opensearch/discovery/ec2/Ec2DiscoveryTests.java | 2 +- .../org/opensearch/discovery/ec2/Ec2RetriesTests.java | 2 +- .../opensearch/discovery/gce/GceSeedHostsProvider.java | 2 +- .../opensearch/discovery/gce/GceDiscoveryTests.java | 2 +- .../java/org/opensearch/http/nio/NioPipeliningIT.java | 2 +- .../http/nio/NioHttpServerTransportTests.java | 2 +- .../transport/nio/SimpleNioTransportTests.java | 2 +- .../TransportReplicationActionRetryOnClosedNodeIT.java | 2 +- .../action/ActionListenerResponseHandler.java | 2 +- .../java/org/opensearch/action/ActionResponse.java | 2 +- .../action/admin/cluster/stats/ClusterStatsNodes.java | 2 +- .../action/search/ClearScrollController.java | 2 +- .../org/opensearch/action/search/DeletePitInfo.java | 2 +- .../action/search/SearchTransportService.java | 2 +- .../action/search/UpdatePitContextResponse.java | 2 +- .../action/support/ChannelActionListener.java | 2 +- .../support/broadcast/BroadcastShardResponse.java | 2 +- .../broadcast/node/TransportBroadcastByNodeAction.java | 2 +- .../action/support/nodes/BaseNodeResponse.java | 2 +- .../action/support/tasks/TransportTasksAction.java | 2 +- .../main/java/org/opensearch/bootstrap/Bootstrap.java | 2 +- .../java/org/opensearch/bootstrap/BootstrapChecks.java | 4 ++-- .../opensearch/cluster/ClusterSettingsResponse.java | 2 +- .../cluster/action/index/NodeMappingRefreshAction.java | 2 +- .../cluster/action/shard/ShardStateAction.java | 2 +- .../coordination/ClusterFormationFailureHelper.java | 2 +- 
.../opensearch/cluster/coordination/Coordinator.java | 4 ++-- .../cluster/coordination/FollowersChecker.java | 2 +- .../opensearch/cluster/coordination/JoinHelper.java | 4 ++-- .../opensearch/cluster/coordination/LeaderChecker.java | 4 ++-- .../opensearch/cluster/coordination/PeersResponse.java | 2 +- .../cluster/coordination/PreVoteResponse.java | 2 +- .../opensearch/cluster/coordination/Publication.java | 2 +- .../coordination/PublicationTransportHandler.java | 2 +- .../cluster/coordination/PublishWithJoinResponse.java | 2 +- .../org/opensearch/cluster/node/DiscoveryNode.java | 2 +- .../opensearch/cluster/node/DiscoveryNodeFilters.java | 2 +- .../org/opensearch/cluster/node/DiscoveryNodes.java | 2 +- .../java/org/opensearch/discovery/DiscoveryModule.java | 2 +- .../discovery/FileBasedSeedHostsProvider.java | 2 +- .../HandshakingTransportAddressConnector.java | 2 +- .../discovery/InitializeExtensionResponse.java | 2 +- .../main/java/org/opensearch/discovery/PeerFinder.java | 2 +- .../org/opensearch/discovery/SeedHostsProvider.java | 2 +- .../org/opensearch/discovery/SeedHostsResolver.java | 2 +- .../discovery/SettingsBasedSeedHostsProvider.java | 2 +- .../opensearch/env/EnvironmentSettingsResponse.java | 2 +- .../opensearch/extensions/AcknowledgedResponse.java | 2 +- .../AddSettingsUpdateConsumerRequestHandler.java | 2 +- .../opensearch/extensions/DiscoveryExtensionNode.java | 2 +- .../extensions/ExtensionDependencyResponse.java | 2 +- .../org/opensearch/extensions/ExtensionsManager.java | 4 ++-- .../action/ExtensionTransportActionsHandler.java | 2 +- .../extensions/rest/RestActionsRequestHandler.java | 2 +- .../rest/RestExecuteOnExtensionResponse.java | 2 +- .../settings/CustomSettingsRequestHandler.java | 2 +- .../gateway/LocalAllocateDangledIndices.java | 2 +- .../opensearch/http/AbstractHttpServerTransport.java | 4 ++-- server/src/main/java/org/opensearch/http/HttpInfo.java | 4 ++-- .../java/org/opensearch/http/HttpServerTransport.java | 2 +- .../org/opensearch/index/IndicesModuleResponse.java | 2 +- .../indices/recovery/PeerRecoveryTargetService.java | 2 +- .../opensearch/indices/recovery/RecoveryResponse.java | 2 +- .../recovery/RecoveryTranslogOperationsResponse.java | 2 +- .../indices/recovery/RemoteRecoveryTargetHandler.java | 2 +- .../indices/recovery/RetryableTransportClient.java | 2 +- .../indices/replication/CheckpointInfoResponse.java | 2 +- .../indices/replication/GetSegmentFilesResponse.java | 2 +- .../replication/RemoteSegmentFileChunkWriter.java | 2 +- .../replication/SegmentReplicationSourceService.java | 2 +- .../replication/SegmentReplicationTargetService.java | 2 +- .../indices/replication/common/ReplicationTarget.java | 2 +- .../org/opensearch/indices/store/IndicesStore.java | 2 +- server/src/main/java/org/opensearch/node/Node.java | 4 ++-- .../org/opensearch/node/NodeValidationException.java | 2 +- .../java/org/opensearch/plugins/DiscoveryPlugin.java | 3 ++- .../repositories/VerifyNodeRepositoryAction.java | 2 +- .../opensearch/rest/action/cat/RestNodesAction.java | 2 +- .../java/org/opensearch/search/SearchPhaseResult.java | 2 +- .../org/opensearch/tasks/TaskCancellationService.java | 2 +- .../opensearch/transport/ActionTransportException.java | 2 +- .../transport/EmptyTransportResponseHandler.java | 1 + .../java/org/opensearch/transport/InboundHandler.java | 3 ++- .../java/org/opensearch/transport/OutboundHandler.java | 3 ++- .../org/opensearch/transport/PlainTransportFuture.java | 1 + .../opensearch/transport/ProxyConnectionStrategy.java | 2 +- 
.../opensearch/transport/RemoteTransportException.java | 2 +- .../opensearch/transport/SniffConnectionStrategy.java | 2 +- .../org/opensearch/transport/TaskTransportChannel.java | 1 + .../java/org/opensearch/transport/TcpTransport.java | 4 ++-- .../org/opensearch/transport/TcpTransportChannel.java | 1 + .../main/java/org/opensearch/transport/Transport.java | 5 +++-- .../org/opensearch/transport/TransportActionProxy.java | 1 + .../org/opensearch/transport/TransportChannel.java | 1 + .../org/opensearch/transport/TransportHandshaker.java | 1 + .../java/org/opensearch/transport/TransportInfo.java | 4 ++-- .../org/opensearch/transport/TransportInterceptor.java | 1 + .../opensearch/transport/TransportMessageListener.java | 1 + .../org/opensearch/transport/TransportRequest.java | 1 + .../opensearch/transport/TransportResponseHandler.java | 1 + .../org/opensearch/transport/TransportService.java | 5 +++-- .../org/opensearch/ExceptionSerializationTests.java | 2 +- .../admin/cluster/node/tasks/TaskManagerTestCase.java | 2 +- .../admin/cluster/node/tasks/TestTaskPlugin.java | 2 +- .../cluster/reroute/ClusterRerouteResponseTests.java | 2 +- .../shards/ClusterSearchShardsResponseTests.java | 2 +- .../TransportVerifyShardBeforeCloseActionTests.java | 2 +- .../action/bulk/TransportShardBulkActionTests.java | 2 +- .../action/search/ClearScrollControllerTests.java | 2 +- .../action/search/TransportSearchActionTests.java | 2 +- .../node/TransportBroadcastByNodeActionTests.java | 2 +- .../support/replication/ReplicationOperationTests.java | 2 +- .../replication/TransportReplicationActionTests.java | 2 +- .../TransportWriteActionForIndexingPressureTests.java | 2 +- .../support/replication/TransportWriteActionTests.java | 2 +- .../org/opensearch/bootstrap/BootstrapChecksTests.java | 4 ++-- .../java/org/opensearch/cluster/ClusterStateTests.java | 2 +- .../cluster/NodeConnectionsServiceTests.java | 4 ++-- .../cluster/action/shard/ShardStateActionTests.java | 2 +- .../ClusterFormationFailureHelperTests.java | 2 +- .../cluster/coordination/CoordinationStateTests.java | 2 +- .../cluster/coordination/FollowersCheckerTests.java | 4 ++-- .../cluster/coordination/JoinHelperTests.java | 2 +- .../cluster/coordination/LeaderCheckerTests.java | 4 ++-- .../opensearch/cluster/coordination/NodeJoinTests.java | 2 +- .../cluster/coordination/PublicationTests.java | 2 +- .../cluster/node/DiscoveryNodeFiltersTests.java | 2 +- .../opensearch/cluster/node/DiscoveryNodeTests.java | 2 +- .../opensearch/cluster/node/DiscoveryNodesTests.java | 2 +- .../opensearch/cluster/routing/RoutingNodeTests.java | 2 +- .../opensearch/common/network/NetworkModuleTests.java | 2 +- .../common/transport/BoundTransportAddressTests.java | 2 ++ .../discovery/FileBasedSeedHostsProviderTests.java | 4 ++-- .../HandshakingTransportAddressConnectorTests.java | 2 +- .../discovery/InitializeExtensionRequestTests.java | 2 +- .../java/org/opensearch/discovery/PeerFinderTests.java | 2 +- .../opensearch/discovery/SeedHostsResolverTests.java | 4 ++-- .../discovery/SettingsBasedSeedHostsProviderTests.java | 2 +- .../extensions/DiscoveryExtensionNodeTests.java | 2 +- .../opensearch/extensions/ExtensionsManagerTests.java | 4 ++-- .../action/ExtensionTransportActionsHandlerTests.java | 2 +- .../rest/RestSendToExtensionActionTests.java | 2 +- .../org/opensearch/gateway/GatewayServiceTests.java | 2 +- .../http/AbstractHttpServerTransportTests.java | 2 +- .../test/java/org/opensearch/http/HttpInfoTests.java | 4 ++-- .../SegmentReplicationSourceServiceTests.java | 2 +- 
.../SegmentReplicationTargetServiceTests.java | 2 +- .../src/test/java/org/opensearch/node/NodeTests.java | 2 +- .../opensearch/node/ResponseCollectorServiceTests.java | 2 +- .../opensearch/nodesinfo/NodeInfoStreamingTests.java | 4 ++-- .../org/opensearch/rest/BytesRestResponseTests.java | 2 +- .../java/org/opensearch/rest/RestControllerTests.java | 4 ++-- .../rest/action/RestBuilderListenerTests.java | 4 ++-- .../opensearch/snapshots/SnapshotResiliencyTests.java | 2 +- .../transport/ClusterConnectionManagerTests.java | 2 +- .../org/opensearch/transport/InboundDecoderTests.java | 1 + .../org/opensearch/transport/OutboundHandlerTests.java | 3 ++- .../transport/ProxyConnectionStrategyTests.java | 2 +- .../transport/RemoteClusterConnectionTests.java | 2 +- .../transport/RemoteConnectionManagerTests.java | 2 +- .../transport/SniffConnectionStrategyTests.java | 2 +- .../org/opensearch/transport/TcpTransportTests.java | 2 +- .../transport/TransportActionProxyTests.java | 1 + .../opensearch/transport/TransportHandshakerTests.java | 1 + .../org/opensearch/transport/TransportInfoTests.java | 4 ++-- .../TransportServiceDeserializationFailureTests.java | 1 + .../coordination/AbstractCoordinatorTestCase.java | 2 +- .../src/main/java/org/opensearch/node/MockNode.java | 2 +- .../java/org/opensearch/test/ExternalTestCluster.java | 2 +- .../java/org/opensearch/test/MockHttpTransport.java | 4 ++-- .../org/opensearch/test/OpenSearchIntegTestCase.java | 2 +- .../java/org/opensearch/test/OpenSearchTestCase.java | 2 +- .../test/disruption/DisruptableMockTransport.java | 6 +++--- .../org/opensearch/test/transport/FakeTransport.java | 4 ++-- .../org/opensearch/test/transport/MockTransport.java | 4 ++-- .../test/transport/MockTransportService.java | 4 ++-- .../test/transport/StubbableConnectionManager.java | 2 +- .../opensearch/test/transport/StubbableTransport.java | 4 ++-- .../transport/AbstractSimpleTransportTestCase.java | 5 +++-- .../java/org/opensearch/transport/TestResponse.java | 1 + .../org/opensearch/transport/TestTransportChannel.java | 1 + .../test/disruption/DisruptableMockTransportTests.java | 6 +++--- .../test/disruption/NetworkDisruptionIT.java | 2 +- .../transport/nio/SimpleMockNioTransportTests.java | 2 +- 217 files changed, 283 insertions(+), 227 deletions(-) rename {server => libs/common}/src/main/java/org/opensearch/common/network/Cidrs.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/network/InetAddresses.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/network/NetworkAddress.java (100%) create mode 100644 libs/common/src/main/java/org/opensearch/common/network/package-info.java rename {server => libs/common}/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/transport/PortsRange.java (100%) rename {server => libs/common}/src/main/java/org/opensearch/common/transport/package-info.java (79%) rename {server => libs/common}/src/test/java/org/opensearch/common/network/CidrsTests.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/network/InetAddressesTests.java (100%) rename {server => libs/common}/src/test/java/org/opensearch/common/network/NetworkAddressTests.java (100%) rename {server/src/main/java/org/opensearch => libs/core/src/main/java/org/opensearch/core}/common/transport/BoundTransportAddress.java (98%) rename {server/src/main/java/org/opensearch => 
libs/core/src/main/java/org/opensearch/core}/common/transport/TransportAddress.java (99%) create mode 100644 libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java rename {server/src/main/java/org/opensearch => libs/core/src/main/java/org/opensearch/core}/transport/TransportMessage.java (94%) rename {server/src/main/java/org/opensearch => libs/core/src/main/java/org/opensearch/core}/transport/TransportResponse.java (98%) create mode 100644 libs/core/src/main/java/org/opensearch/core/transport/package-info.java diff --git a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java index d700b9dab2cf3..25a7bf3d98f0a 100644 --- a/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java +++ b/benchmarks/src/main/java/org/opensearch/benchmark/routing/allocation/Allocators.java @@ -45,7 +45,7 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.set.Sets; import org.opensearch.gateway.GatewayAllocator; import org.opensearch.snapshots.EmptySnapshotsInfoService; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java index e6411b615df07..0e9bce7ce0b58 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/tasks/CancelTasksResponseTests.java @@ -40,7 +40,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; import org.opensearch.common.xcontent.XContentType; diff --git a/server/src/main/java/org/opensearch/common/network/Cidrs.java b/libs/common/src/main/java/org/opensearch/common/network/Cidrs.java similarity index 100% rename from server/src/main/java/org/opensearch/common/network/Cidrs.java rename to libs/common/src/main/java/org/opensearch/common/network/Cidrs.java diff --git a/server/src/main/java/org/opensearch/common/network/InetAddresses.java b/libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java similarity index 100% rename from server/src/main/java/org/opensearch/common/network/InetAddresses.java rename to libs/common/src/main/java/org/opensearch/common/network/InetAddresses.java diff --git a/server/src/main/java/org/opensearch/common/network/NetworkAddress.java b/libs/common/src/main/java/org/opensearch/common/network/NetworkAddress.java similarity index 100% rename from server/src/main/java/org/opensearch/common/network/NetworkAddress.java rename to libs/common/src/main/java/org/opensearch/common/network/NetworkAddress.java diff --git a/libs/common/src/main/java/org/opensearch/common/network/package-info.java b/libs/common/src/main/java/org/opensearch/common/network/package-info.java new file mode 100644 index 
0000000000000..92e4eac5bde42 --- /dev/null +++ b/libs/common/src/main/java/org/opensearch/common/network/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** common network layer classes used across the code base */ +package org.opensearch.common.network; diff --git a/server/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java b/libs/common/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java similarity index 100% rename from server/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java rename to libs/common/src/main/java/org/opensearch/common/transport/NetworkExceptionHelper.java diff --git a/server/src/main/java/org/opensearch/common/transport/PortsRange.java b/libs/common/src/main/java/org/opensearch/common/transport/PortsRange.java similarity index 100% rename from server/src/main/java/org/opensearch/common/transport/PortsRange.java rename to libs/common/src/main/java/org/opensearch/common/transport/PortsRange.java diff --git a/server/src/main/java/org/opensearch/common/transport/package-info.java b/libs/common/src/main/java/org/opensearch/common/transport/package-info.java similarity index 79% rename from server/src/main/java/org/opensearch/common/transport/package-info.java rename to libs/common/src/main/java/org/opensearch/common/transport/package-info.java index abb8dfbb4e4f0..7d28ac6c60a14 100644 --- a/server/src/main/java/org/opensearch/common/transport/package-info.java +++ b/libs/common/src/main/java/org/opensearch/common/transport/package-info.java @@ -6,5 +6,5 @@ * compatible open source license. */ -/** Base Transport utility package. 
*/ +/** common transport layer classes used across the code base */ package org.opensearch.common.transport; diff --git a/server/src/test/java/org/opensearch/common/network/CidrsTests.java b/libs/common/src/test/java/org/opensearch/common/network/CidrsTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/network/CidrsTests.java rename to libs/common/src/test/java/org/opensearch/common/network/CidrsTests.java diff --git a/server/src/test/java/org/opensearch/common/network/InetAddressesTests.java b/libs/common/src/test/java/org/opensearch/common/network/InetAddressesTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/network/InetAddressesTests.java rename to libs/common/src/test/java/org/opensearch/common/network/InetAddressesTests.java diff --git a/server/src/test/java/org/opensearch/common/network/NetworkAddressTests.java b/libs/common/src/test/java/org/opensearch/common/network/NetworkAddressTests.java similarity index 100% rename from server/src/test/java/org/opensearch/common/network/NetworkAddressTests.java rename to libs/common/src/test/java/org/opensearch/common/network/NetworkAddressTests.java diff --git a/server/src/main/java/org/opensearch/common/transport/BoundTransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java similarity index 98% rename from server/src/main/java/org/opensearch/common/transport/BoundTransportAddress.java rename to libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java index 3a9c337f2d950..8908a172395f2 100644 --- a/server/src/main/java/org/opensearch/common/transport/BoundTransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/BoundTransportAddress.java @@ -30,12 +30,12 @@ * GitHub history for details. */ -package org.opensearch.common.transport; +package org.opensearch.core.common.transport; +import org.opensearch.common.network.InetAddresses; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.network.InetAddresses; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/common/transport/TransportAddress.java b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java similarity index 99% rename from server/src/main/java/org/opensearch/common/transport/TransportAddress.java rename to libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java index 737e8f3496143..1a853877ed0b9 100644 --- a/server/src/main/java/org/opensearch/common/transport/TransportAddress.java +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/TransportAddress.java @@ -30,12 +30,12 @@ * GitHub history for details. 
*/ -package org.opensearch.common.transport; +package org.opensearch.core.common.transport; +import org.opensearch.common.network.NetworkAddress; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.network.NetworkAddress; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java b/libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java new file mode 100644 index 0000000000000..21d2abfce958a --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/common/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Common / Base Transport classes used to implement the OpenSearch transport layer */ +package org.opensearch.core.common.transport; diff --git a/server/src/main/java/org/opensearch/transport/TransportMessage.java b/libs/core/src/main/java/org/opensearch/core/transport/TransportMessage.java similarity index 94% rename from server/src/main/java/org/opensearch/transport/TransportMessage.java rename to libs/core/src/main/java/org/opensearch/core/transport/TransportMessage.java index 78216047d530e..941babda40aa3 100644 --- a/server/src/main/java/org/opensearch/transport/TransportMessage.java +++ b/libs/core/src/main/java/org/opensearch/core/transport/TransportMessage.java @@ -30,11 +30,11 @@ * GitHub history for details. */ -package org.opensearch.transport; +package org.opensearch.core.transport; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; /** * Message over the transport interface diff --git a/server/src/main/java/org/opensearch/transport/TransportResponse.java b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java similarity index 98% rename from server/src/main/java/org/opensearch/transport/TransportResponse.java rename to libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java index 73713fa1447a8..038069e93a51b 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponse.java +++ b/libs/core/src/main/java/org/opensearch/core/transport/TransportResponse.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package org.opensearch.transport; +package org.opensearch.core.transport; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; diff --git a/libs/core/src/main/java/org/opensearch/core/transport/package-info.java b/libs/core/src/main/java/org/opensearch/core/transport/package-info.java new file mode 100644 index 0000000000000..91db839f40305 --- /dev/null +++ b/libs/core/src/main/java/org/opensearch/core/transport/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+/** Core Transport Layer classes used across the OpenSearch core */
+package org.opensearch.core.transport;
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java
index 97f43b9439408..03d6fafccfea3 100644
--- a/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java
+++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/ReindexFromRemoteWithAuthTests.java
@@ -50,7 +50,7 @@
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.common.util.concurrent.ThreadContext;
 import org.opensearch.core.xcontent.NamedXContentRegistry;
 import org.opensearch.env.Environment;
diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java
index e239018e0ce31..89eb8fc7e15a3 100644
--- a/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java
+++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/RetryTests.java
@@ -42,7 +42,7 @@
 import org.opensearch.client.Client;
 import org.opensearch.core.common.bytes.BytesArray;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException;
 import org.opensearch.http.HttpInfo;
 import org.opensearch.index.query.QueryBuilders;
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
index c066f3edf6900..baa306aa0624b 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4Http2IT.java
@@ -13,7 +13,7 @@
 import io.netty.util.ReferenceCounted;
 import org.opensearch.OpenSearchNetty4IntegTestCase;
 import org.opensearch.common.collect.Tuple;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.http.HttpServerTransport;
 import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
 import org.opensearch.test.OpenSearchIntegTestCase.Scope;
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
index 95440e28686e7..d01f72ac88c9d 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4HttpRequestSizeLimitIT.java
@@ -38,7 +38,7 @@
 import org.opensearch.OpenSearchNetty4IntegTestCase;
 import org.opensearch.common.collect.Tuple;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.common.unit.ByteSizeUnit;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.http.HttpServerTransport;
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java
index 96193b0ecb954..d891284f53205 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/http/netty4/Netty4PipeliningIT.java
@@ -35,7 +35,7 @@
 import io.netty.handler.codec.http.FullHttpResponse;
 import io.netty.util.ReferenceCounted;
 import org.opensearch.OpenSearchNetty4IntegTestCase;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.http.HttpServerTransport;
 import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
 import org.opensearch.test.OpenSearchIntegTestCase.Scope;
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java
index 3ff3938d23f65..4004d3d1a029d 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportMultiPortIntegrationIT.java
@@ -36,8 +36,8 @@
 import org.opensearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.opensearch.common.network.NetworkAddress;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.BoundTransportAddress;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope;
 import org.opensearch.test.OpenSearchIntegTestCase.Scope;
 import org.opensearch.test.junit.annotations.Network;
diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java
index e6604abf126da..4722cdb66be18 100644
--- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java
+++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/transport/netty4/Netty4TransportPublishAddressIT.java
@@ -38,8 +38,8 @@
 import org.opensearch.common.network.NetworkModule;
 import org.opensearch.common.network.NetworkUtils;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.BoundTransportAddress;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.BoundTransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.test.OpenSearchIntegTestCase;
 import org.opensearch.transport.Netty4ModulePlugin;
 import org.opensearch.transport.TransportInfo;
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java
index 5fcfc4ee5e151..1b175d7991e32 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4BadRequestTests.java
@@ -38,7 +38,7 @@
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.common.util.MockBigArrays;
 import org.opensearch.common.util.MockPageCacheRecycler;
 import org.opensearch.common.util.concurrent.ThreadContext;
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java
index 6c8cf69afb148..ca8dfb616e313 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerPipeliningTests.java
@@ -45,7 +45,7 @@
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.common.util.MockBigArrays;
 import org.opensearch.common.util.MockPageCacheRecycler;
 import org.opensearch.http.HttpPipelinedRequest;
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
index d23edfda829f9..af4ded2255c9c 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/http/netty4/Netty4HttpServerTransportTests.java
@@ -64,7 +64,7 @@
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Setting;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.core.common.unit.ByteSizeValue;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.common.util.MockBigArrays;
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java
index 5c1c5970a7cfb..db7347bf99345 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/Netty4SizeHeaderFrameDecoderTests.java
@@ -36,7 +36,7 @@
 import org.opensearch.core.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.common.util.MockPageCacheRecycler;
 import org.opensearch.common.util.PageCacheRecycler;
 import org.opensearch.core.indices.breaker.NoneCircuitBreakerService;
diff --git a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java
index ee2bf12a4246f..27bfaef15ca86 100644
--- a/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java
+++ b/modules/transport-netty4/src/test/java/org/opensearch/transport/netty4/SimpleNetty4TransportTests.java
@@ -39,7 +39,7 @@
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.ClusterSettings;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.common.util.PageCacheRecycler;
 import org.opensearch.common.util.io.IOUtils;
 import org.opensearch.common.util.net.NetUtils;
diff --git a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java
index f95f358532bac..d5d6aae23f344 100644
--- a/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java
+++ b/plugins/discovery-azure-classic/src/internalClusterTest/java/org/opensearch/cloud/azure/classic/AbstractAzureComputeServiceTestCase.java
@@ -47,7 +47,7 @@
 import org.opensearch.common.network.NetworkAddress;
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.discovery.azure.classic.AzureSeedHostsProvider;
 import org.opensearch.plugin.discovery.azure.classic.AzureDiscoveryPlugin;
 import org.opensearch.plugins.Plugin;
diff --git a/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java b/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java
index e2bc180876a17..89970fdfc322e 100644
--- a/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java
+++ b/plugins/discovery-azure-classic/src/main/java/org/opensearch/discovery/azure/classic/AzureSeedHostsProvider.java
@@ -48,7 +48,7 @@
 import org.opensearch.common.network.NetworkAddress;
 import org.opensearch.common.network.NetworkService;
 import org.opensearch.common.settings.Settings;
-import org.opensearch.common.transport.TransportAddress;
+import org.opensearch.core.common.transport.TransportAddress;
 import org.opensearch.common.unit.TimeValue;
 import org.opensearch.core.common.Strings;
 import org.opensearch.discovery.SeedHostsProvider;
diff --git a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java
index 4afdff7d2c272..0724dbe4543b8 100644
--- a/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java
+++ 
b/plugins/discovery-ec2/src/main/java/org/opensearch/discovery/ec2/AwsEc2SeedHostsProvider.java @@ -47,7 +47,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.SingleObjectCache; import org.opensearch.discovery.SeedHostsProvider; diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java index afa35f63ae4dc..07400f2126fe5 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2DiscoveryTests.java @@ -44,7 +44,7 @@ import org.opensearch.common.io.Streams; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.test.transport.MockTransportService; diff --git a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java index ea10d03576d94..0f7a86bf76622 100644 --- a/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java +++ b/plugins/discovery-ec2/src/test/java/org/opensearch/discovery/ec2/Ec2RetriesTests.java @@ -41,7 +41,7 @@ import org.opensearch.common.io.Streams; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.discovery.SeedHostsProvider; import org.opensearch.discovery.SeedHostsResolver; diff --git a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java index dfd60f52730a6..3295273c83598 100644 --- a/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java +++ b/plugins/discovery-gce/src/main/java/org/opensearch/discovery/gce/GceSeedHostsProvider.java @@ -46,7 +46,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; import org.opensearch.discovery.SeedHostsProvider; diff --git a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java index 2ca1234bb8a04..c63085deb466f 100644 --- a/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java +++ 
b/plugins/discovery-gce/src/test/java/org/opensearch/discovery/gce/GceDiscoveryTests.java @@ -37,7 +37,7 @@ import org.opensearch.cloud.gce.GceMetadataService; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; diff --git a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java index ac06bf03ed8cd..9afb8e37cd9a9 100644 --- a/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java +++ b/plugins/transport-nio/src/internalClusterTest/java/org/opensearch/http/nio/NioPipeliningIT.java @@ -34,7 +34,7 @@ import io.netty.handler.codec.http.FullHttpResponse; import org.opensearch.NioIntegTestCase; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase.ClusterScope; import org.opensearch.test.OpenSearchIntegTestCase.Scope; diff --git a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java index 4d0db18d433ec..22bda4881c322 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/http/nio/NioHttpServerTransportTests.java @@ -53,7 +53,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.MockBigArrays; diff --git a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java index d0e779edded7d..d7f603031ac17 100644 --- a/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java +++ b/plugins/transport-nio/src/test/java/org/opensearch/transport/nio/SimpleNioTransportTests.java @@ -39,7 +39,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.util.net.NetUtils; diff --git a/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java b/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java index 9f60e65eca297..7d663dd70edd6 100644 --- 
a/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/support/replication/TransportReplicationActionRetryOnClosedNodeIT.java @@ -60,7 +60,7 @@ import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java index 7899324a3301e..2f376d81fa202 100644 --- a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java @@ -37,7 +37,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/action/ActionResponse.java b/server/src/main/java/org/opensearch/action/ActionResponse.java index fd13971433d8b..e1d4da760b35b 100644 --- a/server/src/main/java/org/opensearch/action/ActionResponse.java +++ b/server/src/main/java/org/opensearch/action/ActionResponse.java @@ -33,7 +33,7 @@ package org.opensearch.action; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java index bddb3fb746eb1..6dd7e09aeae0d 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/ClusterStatsNodes.java @@ -41,7 +41,7 @@ import org.opensearch.common.metrics.OperationStats; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.common.Strings; diff --git a/server/src/main/java/org/opensearch/action/search/ClearScrollController.java b/server/src/main/java/org/opensearch/action/search/ClearScrollController.java index eb0fa49a94050..c258b111fa1c6 100644 --- a/server/src/main/java/org/opensearch/action/search/ClearScrollController.java +++ b/server/src/main/java/org/opensearch/action/search/ClearScrollController.java @@ -41,7 +41,7 @@ import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.common.Strings; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.util.ArrayList; import java.util.Collection; diff --git 
a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java index 1e616ab5ca16e..b33f8a46c8f7a 100644 --- a/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java +++ b/server/src/main/java/org/opensearch/action/search/DeletePitInfo.java @@ -15,7 +15,7 @@ import org.opensearch.core.xcontent.ConstructingObjectParser; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java index 5a280818640ed..37ffca6cac5f2 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/opensearch/action/search/SearchTransportService.java @@ -65,7 +65,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java index da39aed20ef8e..1db8fc48c28bc 100644 --- a/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java +++ b/server/src/main/java/org/opensearch/action/search/UpdatePitContextResponse.java @@ -10,7 +10,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java b/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java index 5b0475093d3c2..07851345241bd 100644 --- a/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java +++ b/server/src/main/java/org/opensearch/action/support/ChannelActionListener.java @@ -35,7 +35,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; /** * Listener for transport channel actions diff --git a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java index 39c524448bc5d..9603f886366f2 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/BroadcastShardResponse.java @@ -35,7 +35,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.index.shard.ShardId; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git 
a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index bf71134ab7b88..1b7822ee5a440 100644 --- a/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -63,7 +63,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import org.opensearch.transport.TransportRequestOptions; diff --git a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java index 4a94f790c3443..8a4e12567b515 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeResponse.java @@ -35,7 +35,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java b/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java index bfd207e6f969f..e06858ab1a201 100644 --- a/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java +++ b/server/src/main/java/org/opensearch/action/support/tasks/TransportTasksAction.java @@ -57,7 +57,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java index 2a23b501a8a0e..9383260d1bd73 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java +++ b/server/src/main/java/org/opensearch/bootstrap/Bootstrap.java @@ -55,7 +55,7 @@ import org.opensearch.common.settings.SecureSettings; import org.opensearch.core.common.settings.SecureString; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.util.io.IOUtils; import org.opensearch.env.Environment; import org.opensearch.monitor.jvm.JvmInfo; diff --git a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java index c27c149947444..f9661e71d60e6 100644 --- a/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java +++ b/server/src/main/java/org/opensearch/bootstrap/BootstrapChecks.java @@ -42,8 +42,8 @@ import 
org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Setting; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.env.Environment; import org.opensearch.index.IndexModule; diff --git a/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java index ee5c8c00dfaf4..408344f476fe2 100644 --- a/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java +++ b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java @@ -12,7 +12,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java index e6781fc22e1a7..47e4e59dadd3f 100644 --- a/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/index/NodeMappingRefreshAction.java @@ -48,7 +48,7 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java index 9d9b6c52f6b25..30dcb5fd08954 100644 --- a/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/opensearch/cluster/action/shard/ShardStateAction.java @@ -74,7 +74,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestDeduplicator; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java index 9b51e56dce966..05a5ac862a5b1 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -40,7 +40,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.gateway.GatewayMetaState; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1c38e68c43466..b57da128ce852 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -67,7 +67,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.ListenableFuture; @@ -86,7 +86,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.Scheduler; import org.opensearch.threadpool.ThreadPool.Names; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java index 08008152cfcd6..94d2e11ab591e 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/FollowersChecker.java @@ -55,7 +55,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportRequestOptions.Type; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 42f09f95a7f56..f923176efa5e5 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -67,8 +67,8 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java index f43abf0080575..009a2121a5886 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/LeaderChecker.java @@ -56,8 +56,8 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; import org.opensearch.transport.TransportRequestOptions.Type; -import 
org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java index b68f689ef63fd..8a70c71d53fdd 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PeersResponse.java @@ -35,7 +35,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java index c8186441db449..9c683f7de0878 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PreVoteResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java index 429890e7420de..6ffca828ecb06 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Publication.java @@ -43,7 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.unit.TimeValue; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java index 60c931a601561..64c3b93e0e0be 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublicationTransportHandler.java @@ -51,7 +51,7 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java b/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java index f99ba82be5514..f6350c5558a82 100644 --- 
a/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/PublishWithJoinResponse.java @@ -33,7 +33,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Optional; diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index d6ba0199d193c..f44085a232127 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -39,7 +39,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.Node; diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java index 4fd2905495961..2f0fb120311d9 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeFilters.java @@ -37,7 +37,7 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.Strings; import java.util.HashMap; diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java index 060c7c5eb8d1a..e84bbc7a203f2 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodes.java @@ -40,7 +40,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.regex.Regex; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.set.Sets; import org.opensearch.core.common.Strings; diff --git a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java index cf2f7b47288fd..13d9c6d97e079 100644 --- a/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/opensearch/discovery/DiscoveryModule.java @@ -49,7 +49,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.NodeHealthService; import org.opensearch.plugins.DiscoveryPlugin; diff --git 
a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java index 94f36ba0a546e..3159733336057 100644 --- a/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/FileBasedSeedHostsProvider.java @@ -35,7 +35,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; import java.nio.file.Files; diff --git a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java index 90ca19e9369f4..80ce094785755 100644 --- a/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java +++ b/server/src/main/java/org/opensearch/discovery/HandshakingTransportAddressConnector.java @@ -43,7 +43,7 @@ import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.io.IOUtils; diff --git a/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java b/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java index f56ffc84a7909..33ca1edcf3330 100644 --- a/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java +++ b/server/src/main/java/org/opensearch/discovery/InitializeExtensionResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index 96556d1cd71ed..f470342d826f8 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -44,7 +44,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.threadpool.ThreadPool.Names; diff --git a/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java index 1a4b5a3182dbe..89dfd9310e895 100644 --- a/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SeedHostsProvider.java @@ -32,7 +32,7 @@ package org.opensearch.discovery; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.util.List; diff --git 
a/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java b/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java index cef7853011b82..43b685052c9a8 100644 --- a/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java +++ b/server/src/main/java/org/opensearch/discovery/SeedHostsResolver.java @@ -38,7 +38,7 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.concurrent.AbstractRunnable; diff --git a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java index 2dcd819e727f2..9785d5b21078e 100644 --- a/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java +++ b/server/src/main/java/org/opensearch/discovery/SettingsBasedSeedHostsProvider.java @@ -37,7 +37,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.transport.TransportService; import java.util.List; diff --git a/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java b/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java index ce3aa0556744b..1b87011ff8c75 100644 --- a/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java +++ b/server/src/main/java/org/opensearch/env/EnvironmentSettingsResponse.java @@ -11,7 +11,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java index 7c7e3e78798e8..32ad108b728b7 100644 --- a/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java +++ b/server/src/main/java/org/opensearch/extensions/AcknowledgedResponse.java @@ -10,7 +10,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java index 67c56b7f458ff..4cac2f3b1562c 100644 --- a/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/AddSettingsUpdateConsumerRequestHandler.java @@ -17,7 +17,7 @@ import org.opensearch.common.settings.SettingsException; import org.opensearch.common.settings.SettingsModule; import org.opensearch.common.settings.WriteableSetting; -import 
org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; /** diff --git a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java index ac1dfe5309ffa..a888f99ff11ed 100644 --- a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java +++ b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java @@ -15,7 +15,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContentFragment; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java b/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java index d9531c0cc2894..5fb084caf89f3 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionDependencyResponse.java @@ -15,7 +15,7 @@ import java.util.Objects; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; /** * The response for getting the Extension Dependency. diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java index 468de4238f879..c6ebe295976ce 100644 --- a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -32,7 +32,7 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.Strings; import org.opensearch.core.common.io.stream.StreamInput; @@ -52,7 +52,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import org.opensearch.env.EnvironmentSettingsResponse; diff --git a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java index 22502509634c6..19fe43e9a6d61 100644 --- a/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java +++ b/server/src/main/java/org/opensearch/extensions/action/ExtensionTransportActionsHandler.java @@ -22,7 +22,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ActionNotFoundTransportException; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import 
org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java index d890c1b85bb81..563bfc5a5c9da 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestActionsRequestHandler.java @@ -13,7 +13,7 @@ import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.rest.RestController; import org.opensearch.rest.RestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java index 63ae6ce93af22..c0c53f4e97b31 100644 --- a/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java +++ b/server/src/main/java/org/opensearch/extensions/rest/RestExecuteOnExtensionResponse.java @@ -12,7 +12,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.rest.RestResponse; import org.opensearch.core.rest.RestStatus; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java b/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java index 980dcf67c3128..a9070e77c2942 100644 --- a/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java +++ b/server/src/main/java/org/opensearch/extensions/settings/CustomSettingsRequestHandler.java @@ -11,7 +11,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.SettingsModule; import org.opensearch.extensions.AcknowledgedResponse; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.util.ArrayList; import java.util.List; diff --git a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java index 5ee369d6b9402..5d6843a9684e1 100644 --- a/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/opensearch/gateway/LocalAllocateDangledIndices.java @@ -59,7 +59,7 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java index 0ba49be01d193..bedeedb9c5c6f 100644 --- a/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/AbstractHttpServerTransport.java @@ -43,10 +43,10 @@ import 
org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.transport.NetworkExceptionHelper; import org.opensearch.common.transport.PortsRange; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ThreadContext; diff --git a/server/src/main/java/org/opensearch/http/HttpInfo.java b/server/src/main/java/org/opensearch/http/HttpInfo.java index 24c29b8dc7444..35eadc5a5de9e 100644 --- a/server/src/main/java/org/opensearch/http/HttpInfo.java +++ b/server/src/main/java/org/opensearch/http/HttpInfo.java @@ -36,8 +36,8 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ReportingService; diff --git a/server/src/main/java/org/opensearch/http/HttpServerTransport.java b/server/src/main/java/org/opensearch/http/HttpServerTransport.java index 6549f0786fcda..7bd16a286e33b 100644 --- a/server/src/main/java/org/opensearch/http/HttpServerTransport.java +++ b/server/src/main/java/org/opensearch/http/HttpServerTransport.java @@ -33,7 +33,7 @@ package org.opensearch.http; import org.opensearch.common.lifecycle.LifecycleComponent; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.node.ReportingService; import org.opensearch.rest.RestChannel; diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java index 67f2c686dbf8b..3d2340ed35a8c 100644 --- a/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java +++ b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java @@ -10,7 +10,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 386b2e0e8192d..569977afd3ac5 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -74,7 +74,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import 
org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java index 8af69b1786e38..c1203a9f4939f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java index b623d382b415f..9ea9f1d48a494 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsResponse.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java index cdc62350b4aa5..35585def29365 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -48,7 +48,7 @@ import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.transport.EmptyTransportResponseHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.List; diff --git a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java index 4f1fb42a421f6..7de2d0e2fb3d4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RetryableTransportClient.java @@ -26,7 +26,7 @@ import org.opensearch.transport.SendRequestTransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java index 0155883f34552..72ee7203e5e98 100644 --- a/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/CheckpointInfoResponse.java @@ -12,7 +12,7 @@ import 
org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Map; diff --git a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java index 33a84833f2418..7409e09310737 100644 --- a/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java +++ b/server/src/main/java/org/opensearch/indices/replication/GetSegmentFilesResponse.java @@ -11,7 +11,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.index.store.StoreFileMetadata; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.List; diff --git a/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java b/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java index ed171927c4600..d96f9544e5734 100644 --- a/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java +++ b/server/src/main/java/org/opensearch/indices/replication/RemoteSegmentFileChunkWriter.java @@ -20,7 +20,7 @@ import org.opensearch.indices.recovery.RetryableTransportClient; import org.opensearch.indices.recovery.FileChunkWriter; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.atomic.AtomicLong; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java index 8bb2a61e32e2d..090e3f96a8f65 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationSourceService.java @@ -34,7 +34,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index 7c35c4f07598e..b41c9e09add45 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -41,7 +41,7 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportRequestHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.Map; diff --git 
a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java index 0c96a87715014..0e06791768745 100644 --- a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -28,7 +28,7 @@ import org.opensearch.indices.recovery.FileChunkRequest; import org.opensearch.indices.recovery.RecoveryTransportRequest; import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/server/src/main/java/org/opensearch/indices/store/IndicesStore.java b/server/src/main/java/org/opensearch/indices/store/IndicesStore.java index eaaf5198fba94..95ecc1359e5d1 100644 --- a/server/src/main/java/org/opensearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/opensearch/indices/store/IndicesStore.java @@ -69,7 +69,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index d8b51b0184a66..60c95a04a042f 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -133,8 +133,8 @@ import org.opensearch.common.settings.SettingUpgrader; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; diff --git a/server/src/main/java/org/opensearch/node/NodeValidationException.java b/server/src/main/java/org/opensearch/node/NodeValidationException.java index ef1500f1e4ede..b316288b7cf06 100644 --- a/server/src/main/java/org/opensearch/node/NodeValidationException.java +++ b/server/src/main/java/org/opensearch/node/NodeValidationException.java @@ -32,7 +32,7 @@ package org.opensearch.node; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import java.util.List; diff --git a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java index 89433b2a3b67d..bca72942bd70e 100644 --- a/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java +++ b/server/src/main/java/org/opensearch/plugins/DiscoveryPlugin.java @@ -37,6 +37,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.SeedHostsProvider; import 
org.opensearch.transport.TransportService; @@ -86,7 +87,7 @@ default NetworkService.CustomNameResolver getCustomNameResolver(Settings setting * (see {@link org.opensearch.discovery.DiscoveryModule#DISCOVERY_SEED_PROVIDERS_SETTING}), and * the value is a supplier to construct the host provider when it is selected for use. * - * @param transportService Use to form the {@link org.opensearch.common.transport.TransportAddress} portion + * @param transportService Use to form the {@link TransportAddress} portion * of a {@link org.opensearch.cluster.node.DiscoveryNode} * @param networkService Use to find the publish host address of the current node */ diff --git a/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java index ff5ffdbfe1e3e..1f0a5060962c6 100644 --- a/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/opensearch/repositories/VerifyNodeRepositoryAction.java @@ -48,7 +48,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java index b54c8955283a2..e29898624386c 100644 --- a/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java +++ b/server/src/main/java/org/opensearch/rest/action/cat/RestNodesAction.java @@ -48,7 +48,7 @@ import org.opensearch.common.Table; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.http.HttpInfo; import org.opensearch.index.cache.query.QueryCacheStats; diff --git a/server/src/main/java/org/opensearch/search/SearchPhaseResult.java b/server/src/main/java/org/opensearch/search/SearchPhaseResult.java index 1b4cebbe91a3e..9aace832cf72f 100644 --- a/server/src/main/java/org/opensearch/search/SearchPhaseResult.java +++ b/server/src/main/java/org/opensearch/search/SearchPhaseResult.java @@ -39,7 +39,7 @@ import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.query.QuerySearchResult; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java index 7ff27fa1096dc..a24cdd02bbd30 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskCancellationService.java @@ -49,7 +49,7 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; 
import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/ActionTransportException.java b/server/src/main/java/org/opensearch/transport/ActionTransportException.java index 97e9a986db7f4..fb5dd2c75dc75 100644 --- a/server/src/main/java/org/opensearch/transport/ActionTransportException.java +++ b/server/src/main/java/org/opensearch/transport/ActionTransportException.java @@ -34,7 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java index 3b97a81faf192..1691b427ffca1 100644 --- a/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/EmptyTransportResponseHandler.java @@ -33,6 +33,7 @@ package org.opensearch.transport; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; /** diff --git a/server/src/main/java/org/opensearch/transport/InboundHandler.java b/server/src/main/java/org/opensearch/transport/InboundHandler.java index bb04f149d39a9..12eaabb0a74eb 100644 --- a/server/src/main/java/org/opensearch/transport/InboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/InboundHandler.java @@ -41,10 +41,11 @@ import org.opensearch.core.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import java.io.EOFException; diff --git a/server/src/main/java/org/opensearch/transport/OutboundHandler.java b/server/src/main/java/org/opensearch/transport/OutboundHandler.java index 3e493267242fb..c67a6df8f90f6 100644 --- a/server/src/main/java/org/opensearch/transport/OutboundHandler.java +++ b/server/src/main/java/org/opensearch/transport/OutboundHandler.java @@ -44,12 +44,13 @@ import org.opensearch.common.io.stream.ReleasableBytesStreamOutput; import org.opensearch.common.network.CloseableChannel; import org.opensearch.common.transport.NetworkExceptionHelper; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.lease.Releasable; import org.opensearch.common.lease.Releasables; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java b/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java index 53d71c135a8dd..19123aba7d413 100644 --- 
a/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java +++ b/server/src/main/java/org/opensearch/transport/PlainTransportFuture.java @@ -36,6 +36,7 @@ import org.opensearch.OpenSearchTimeoutException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.util.concurrent.BaseFuture; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.concurrent.ExecutionException; diff --git a/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java index b1eefb9fac245..fcaad01e78a50 100644 --- a/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/ProxyConnectionStrategy.java @@ -43,7 +43,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/transport/RemoteTransportException.java b/server/src/main/java/org/opensearch/transport/RemoteTransportException.java index 041a70795b8de..de3d6bb9d775e 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteTransportException.java +++ b/server/src/main/java/org/opensearch/transport/RemoteTransportException.java @@ -34,7 +34,7 @@ import org.opensearch.OpenSearchWrapperException; import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java index 5e00704b3baaf..c95a65fadf323 100644 --- a/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/opensearch/transport/SniffConnectionStrategy.java @@ -50,7 +50,7 @@ import org.opensearch.core.common.io.stream.Writeable; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.common.Strings; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java index 4dceee4c48d4d..052611317f174 100644 --- a/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TaskTransportChannel.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/TcpTransport.java b/server/src/main/java/org/opensearch/transport/TcpTransport.java index 8733cb5fe7e8e..b99e750366018 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransport.java +++ 
b/server/src/main/java/org/opensearch/transport/TcpTransport.java @@ -54,9 +54,9 @@ import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.transport.PortsRange; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; diff --git a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java index e25003648794d..00702d08902a9 100644 --- a/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TcpTransportChannel.java @@ -34,6 +34,7 @@ import org.opensearch.Version; import org.opensearch.common.lease.Releasable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.search.query.QuerySearchResult; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/Transport.java b/server/src/main/java/org/opensearch/transport/Transport.java index 3bf855f847685..26bae24f70f26 100644 --- a/server/src/main/java/org/opensearch/transport/Transport.java +++ b/server/src/main/java/org/opensearch/transport/Transport.java @@ -37,11 +37,12 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.collect.MapBuilder; import org.opensearch.common.lifecycle.LifecycleComponent; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.core.transport.TransportResponse; import java.io.Closeable; import java.io.IOException; diff --git a/server/src/main/java/org/opensearch/transport/TransportActionProxy.java b/server/src/main/java/org/opensearch/transport/TransportActionProxy.java index 8ad6010800ad8..a61aec8a34e20 100644 --- a/server/src/main/java/org/opensearch/transport/TransportActionProxy.java +++ b/server/src/main/java/org/opensearch/transport/TransportActionProxy.java @@ -35,6 +35,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/main/java/org/opensearch/transport/TransportChannel.java b/server/src/main/java/org/opensearch/transport/TransportChannel.java index b660db029c0b2..3c582127f28e8 100644 --- a/server/src/main/java/org/opensearch/transport/TransportChannel.java +++ b/server/src/main/java/org/opensearch/transport/TransportChannel.java @@ -36,6 +36,7 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.Version; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; 
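Every hunk in this stretch makes the same mechanical substitution: `BoundTransportAddress`, `TransportAddress`, and `TransportResponse` move out of `org.opensearch.common.transport` / `org.opensearch.transport` into the `org.opensearch.core.*` namespace, with no behavioral change. As a minimal, illustrative sketch of what consuming code looks like after the move (the class `RelocatedTransportTypesExample` is hypothetical and not part of this patch; the constructor shapes and `publishAddress()` accessor are as used in the test files these hunks touch):

import org.opensearch.core.common.transport.BoundTransportAddress;
import org.opensearch.core.common.transport.TransportAddress;

import java.net.InetAddress;

public class RelocatedTransportTypesExample {
    public static void main(String[] args) {
        // A publish address on the loopback interface, port 9300.
        TransportAddress publish = new TransportAddress(InetAddress.getLoopbackAddress(), 9300);
        // BoundTransportAddress couples the bound addresses with the publish address;
        // only the import line changes in this patch, the API surface stays the same.
        BoundTransportAddress bound = new BoundTransportAddress(new TransportAddress[] { publish }, publish);
        System.out.println(bound.publishAddress());
    }
}

In other words, callers recompile against the new package but keep the same call sites, which is why every hunk here is a one-line import swap.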
diff --git a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java index 7b64b328469ad..8ce4f6887cbd9 100644 --- a/server/src/main/java/org/opensearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/opensearch/transport/TransportHandshaker.java @@ -40,6 +40,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.metrics.CounterMetric; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.threadpool.ThreadPool; import java.io.EOFException; diff --git a/server/src/main/java/org/opensearch/transport/TransportInfo.java b/server/src/main/java/org/opensearch/transport/TransportInfo.java index 0ee8672e8df8c..308abb2bc28f4 100644 --- a/server/src/main/java/org/opensearch/transport/TransportInfo.java +++ b/server/src/main/java/org/opensearch/transport/TransportInfo.java @@ -37,8 +37,8 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.network.InetAddresses; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.node.ReportingService; diff --git a/server/src/main/java/org/opensearch/transport/TransportInterceptor.java b/server/src/main/java/org/opensearch/transport/TransportInterceptor.java index f4b003cae4864..9ee2db6d39893 100644 --- a/server/src/main/java/org/opensearch/transport/TransportInterceptor.java +++ b/server/src/main/java/org/opensearch/transport/TransportInterceptor.java @@ -34,6 +34,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.Writeable.Reader; +import org.opensearch.core.transport.TransportResponse; /** * This interface allows plugins to intercept requests on both the sender and the receiver side. 
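The `TransportInterceptor` hunk above only touches an import, but it is the interface's generics that force the recompile: `AsyncSender.sendRequest` is bounded by `TransportResponse`, so any interceptor implementation now resolves against the relocated class. A hedged sketch of a pass-through interceptor (the class `PassThroughInterceptor` is hypothetical, and the `AsyncSender` signature is assumed to be unchanged by this series):

import org.opensearch.core.transport.TransportResponse;
import org.opensearch.transport.Transport;
import org.opensearch.transport.TransportInterceptor;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportRequestOptions;
import org.opensearch.transport.TransportResponseHandler;

public class PassThroughInterceptor implements TransportInterceptor {
    @Override
    public AsyncSender interceptSender(AsyncSender sender) {
        return new AsyncSender() {
            @Override
            public <T extends TransportResponse> void sendRequest(
                Transport.Connection connection,
                String action,
                TransportRequest request,
                TransportRequestOptions options,
                TransportResponseHandler<T> handler
            ) {
                // Forward unchanged; a real interceptor could wrap the handler here.
                sender.sendRequest(connection, action, request, options, handler);
            }
        };
    }
}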
diff --git a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java index 8a7612d3bd99a..dfcd7acce3706 100644 --- a/server/src/main/java/org/opensearch/transport/TransportMessageListener.java +++ b/server/src/main/java/org/opensearch/transport/TransportMessageListener.java @@ -32,6 +32,7 @@ package org.opensearch.transport; import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.core.transport.TransportResponse; /** * Listens for transport messages diff --git a/server/src/main/java/org/opensearch/transport/TransportRequest.java b/server/src/main/java/org/opensearch/transport/TransportRequest.java index 95b038303f530..3eb465718763e 100644 --- a/server/src/main/java/org/opensearch/transport/TransportRequest.java +++ b/server/src/main/java/org/opensearch/transport/TransportRequest.java @@ -34,6 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportMessage; import org.opensearch.tasks.TaskAwareRequest; import org.opensearch.tasks.TaskId; diff --git a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java index 674a62fb75b7d..0b39983cc3bee 100644 --- a/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java +++ b/server/src/main/java/org/opensearch/transport/TransportResponseHandler.java @@ -34,6 +34,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.Writeable; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.function.Function; diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index 3b59c99c03d3b..61900fa1f0014 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -52,8 +52,8 @@ import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ThreadContext; @@ -61,6 +61,7 @@ import org.opensearch.common.lease.Releasable; import org.opensearch.core.common.Strings; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.node.NodeClosedException; import org.opensearch.node.ReportingService; import org.opensearch.tasks.Task; diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 629fe9ebd4b99..920f3026871a0 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -72,7 +72,7 @@ import org.opensearch.core.common.io.stream.NotSerializableExceptionWrapper; import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.util.CancellableThreadsTests; import org.opensearch.common.util.set.Sets; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index e0676cc9ddbdd..1b8d3a2c276fb 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -53,7 +53,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.lease.Releasable; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index b46d4fcfea2c9..6b4317d6ec934 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -73,7 +73,7 @@ import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index 5571eb020b9e0..a2998b4bf6be7 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -45,7 +45,7 @@ import org.opensearch.cluster.routing.allocation.decider.Decision; import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.json.JsonXContent; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java index a09a580de1475..ee4f22c6af93b 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/shards/ClusterSearchShardsResponseTests.java @@ -42,7 +42,7 @@ import 
org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.Strings; import org.opensearch.index.query.RandomQueryBuilder; import org.opensearch.core.index.shard.ShardId; diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 55e4efd8b10df..86e87de364a51 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -70,7 +70,7 @@ import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.util.Collections; diff --git a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java index c0c35e8c22f4d..ebdc7dcbfd6ac 100644 --- a/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/TransportShardBulkActionTests.java @@ -92,7 +92,7 @@ import org.opensearch.threadpool.ThreadPool.Names; import org.opensearch.transport.TestTransportChannel; import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java b/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java index e3c7d4741d3ae..55e597dab04dc 100644 --- a/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java +++ b/server/src/test/java/org/opensearch/action/search/ClearScrollControllerTests.java @@ -46,7 +46,7 @@ import org.opensearch.test.VersionUtils; import org.opensearch.transport.NodeNotConnectedException; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java index 413fff98de44c..05df501faae82 100644 --- a/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/opensearch/action/search/TransportSearchActionTests.java @@ -54,7 +54,7 @@ import org.opensearch.common.SetOnce; import org.opensearch.common.collect.Tuple; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.Strings; import org.opensearch.core.index.Index; import 
org.opensearch.index.query.InnerHitBuilder; diff --git a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java index 23654c02f0901..5c328c279fabb 100644 --- a/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/broadcast/node/TransportBroadcastByNodeActionTests.java @@ -73,7 +73,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.ReceiveTimeoutTransportException; import org.opensearch.transport.TestTransportChannel; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 21d14fb31059d..10d5a87e8200a 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -51,7 +51,7 @@ import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.core.common.breaker.CircuitBreaker; import org.opensearch.core.common.breaker.CircuitBreakingException; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.core.concurrency.OpenSearchRejectedExecutionException; diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 5af358cae18a8..7d47238b8e5ab 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -102,7 +102,7 @@ import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java index c978031103ff2..4a7500f16197d 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionForIndexingPressureTests.java @@ -41,7 +41,7 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportChannel; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import 
org.opensearch.transport.TransportService; import org.hamcrest.Matcher; import org.junit.After; diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java index 7a0d0f3814100..a2d5e92fa11fe 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportWriteActionTests.java @@ -72,7 +72,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; diff --git a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java index 15aacd25b30b1..69102d2e76bef 100644 --- a/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java +++ b/server/src/test/java/org/opensearch/bootstrap/BootstrapChecksTests.java @@ -40,8 +40,8 @@ import org.opensearch.common.CheckedConsumer; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.DiscoveryModule; import org.opensearch.discovery.SettingsBasedSeedHostsProvider; import org.opensearch.env.Environment; diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 23c2506bf6143..20b68f32a69e4 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -51,7 +51,7 @@ import org.opensearch.cluster.routing.TestShardRouting; import org.opensearch.common.Strings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.json.JsonXContent; diff --git a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java index a284269dc4151..25956f1a560f2 100644 --- a/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/NodeConnectionsServiceTests.java @@ -48,8 +48,8 @@ import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.lifecycle.LifecycleListener; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; 
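`TransportResponseHandler` (touched above) is the other interface whose generic bound pins implementors to the relocated class, which explains the long tail of test-file import swaps in this section. A minimal, illustrative handler for actions that acknowledge with `TransportResponse.Empty` (the `Empty` nested class and its `INSTANCE` constant appear in the FollowersChecker/LeaderChecker hunks below; the class `EmptyAckHandler` is hypothetical, and the handler method set is assumed unchanged by this series):

import org.opensearch.core.common.io.stream.StreamInput;
import org.opensearch.core.transport.TransportResponse;
import org.opensearch.transport.TransportException;
import org.opensearch.transport.TransportResponseHandler;

public class EmptyAckHandler implements TransportResponseHandler<TransportResponse.Empty> {
    @Override
    public TransportResponse.Empty read(StreamInput in) {
        // Empty carries no payload, so nothing is deserialized.
        return TransportResponse.Empty.INSTANCE;
    }

    @Override
    public void handleResponse(TransportResponse.Empty response) {
        // Acknowledged; nothing to unpack.
    }

    @Override
    public void handleException(TransportException exp) {
        throw exp;
    }

    @Override
    public String executor() {
        // Run the callbacks on the calling thread (ThreadPool.Names.SAME).
        return "same";
    }
}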
diff --git a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java index 2688aaa145dc0..957da6ea65b89 100644 --- a/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/opensearch/cluster/action/shard/ShardStateActionTests.java @@ -64,7 +64,7 @@ import org.opensearch.transport.NodeNotConnectedException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java index b091130db0b98..efdf4fb4b92f5 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -41,7 +41,7 @@ import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.gateway.GatewayMetaState; import org.opensearch.monitor.StatusInfo; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java index 23087e6dd2ba4..bae7581f8d9e0 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinationStateTests.java @@ -43,7 +43,7 @@ import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.EqualsHashCodeTestUtils; import org.junit.Before; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java index be211a16cdd72..8e6d7346677c3 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/FollowersCheckerTests.java @@ -53,8 +53,8 @@ import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java 
b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index 27146829ad8da..f63f33f345411 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -53,7 +53,7 @@ import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java index ac6d885229f9e..2ebeb9b4edeb8 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/LeaderCheckerTests.java @@ -50,8 +50,8 @@ import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index fb2e7cd73d3bf..09a7b34958f2c 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -68,7 +68,7 @@ import org.opensearch.transport.Transport; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; diff --git a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java index 517456f54b785..366f19f74aac8 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/PublicationTests.java @@ -47,7 +47,7 @@ import org.opensearch.discovery.Discovery; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import java.util.ArrayList; import java.util.Arrays; diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java index 54f9c46e999d7..691a3cb418f94 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeFiltersTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.common.settings.Setting; import 
org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 14f6880419286..b4f03b1d7a850 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -37,7 +37,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java index 47676ecc13f5e..f5b7d98bb95ce 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodesTests.java @@ -36,7 +36,7 @@ import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.settings.Setting; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import java.util.ArrayList; diff --git a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java index 578d537653684..cc4f2e510cb31 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/RoutingNodeTests.java @@ -35,7 +35,7 @@ import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java index f795df2f48b22..5b47d9babd264 100644 --- a/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/opensearch/common/network/NetworkModuleTests.java @@ -36,7 +36,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; diff --git a/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java b/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java index a177840059f8f..dae16d805efbf 100644 --- 
a/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java +++ b/server/src/test/java/org/opensearch/common/transport/BoundTransportAddressTests.java @@ -34,6 +34,8 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; diff --git a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java index acf94483c8116..0f8c40a0bd759 100644 --- a/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/opensearch/discovery/FileBasedSeedHostsProviderTests.java @@ -36,8 +36,8 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.PageCacheRecycler; diff --git a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java index 5d252168c7b28..e4703626d08fc 100644 --- a/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java +++ b/server/src/test/java/org/opensearch/discovery/HandshakingTransportAddressConnectorTests.java @@ -43,7 +43,7 @@ import org.opensearch.common.Nullable; import org.opensearch.common.SetOnce; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.MockLogAppender; import org.opensearch.test.junit.annotations.TestLogging; diff --git a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java index 63b79c9b53081..0be812ce847f8 100644 --- a/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java +++ b/server/src/test/java/org/opensearch/discovery/InitializeExtensionRequestTests.java @@ -13,7 +13,7 @@ import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.ExtensionDependency; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 91eec3d2edfaf..c3d27119b61be 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ 
b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -42,7 +42,7 @@ import org.opensearch.cluster.node.DiscoveryNodes.Builder; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.discovery.PeerFinder.TransportAddressConnector; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.CapturingTransport; diff --git a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java index 07491cc0d6435..724d3fbee3939 100644 --- a/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java +++ b/server/src/test/java/org/opensearch/discovery/SeedHostsResolverTests.java @@ -39,8 +39,8 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.util.PageCacheRecycler; diff --git a/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java b/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java index 4648c98bec5df..5801ad0d08818 100644 --- a/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java +++ b/server/src/test/java/org/opensearch/discovery/SettingsBasedSeedHostsProviderTests.java @@ -32,7 +32,7 @@ package org.opensearch.discovery; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.set.Sets; import org.opensearch.discovery.SeedHostsProvider.HostsResolver; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java b/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java index 578e7503b76a9..8146062c15b73 100644 --- a/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java +++ b/server/src/test/java/org/opensearch/extensions/DiscoveryExtensionNodeTests.java @@ -10,7 +10,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.Version; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java index 9f1050351c7b5..840605b61c500 100644 --- a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -55,7 +55,7 @@ import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.WriteableSetting.SettingType; import org.opensearch.common.settings.SettingsModule; -import 
org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.core.common.bytes.BytesReference; import org.opensearch.core.common.io.stream.BytesStreamInput; @@ -77,7 +77,7 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.Transport; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.opensearch.usage.UsageService; diff --git a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java index bc216bd3bbbb8..fb5a6e374b9a3 100644 --- a/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java +++ b/server/src/test/java/org/opensearch/extensions/action/ExtensionTransportActionsHandlerTests.java @@ -20,7 +20,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.extensions.DiscoveryExtensionNode; import org.opensearch.extensions.AcknowledgedResponse; diff --git a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java index b59513fc0045d..f0cdbd6c96d41 100644 --- a/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java +++ b/server/src/test/java/org/opensearch/extensions/rest/RestSendToExtensionActionTests.java @@ -38,7 +38,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsModule; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.extensions.DiscoveryExtensionNode; diff --git a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java index 2d8a26f8bbe87..b8607c0aa89ab 100644 --- a/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java +++ b/server/src/test/java/org/opensearch/gateway/GatewayServiceTests.java @@ -50,7 +50,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.OpenSearchTestCase; diff --git a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java index fbf0e8cd42c72..5ea003b323fd6 100644 --- 
a/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java +++ b/server/src/test/java/org/opensearch/http/AbstractHttpServerTransportTests.java @@ -41,7 +41,7 @@ import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; diff --git a/server/src/test/java/org/opensearch/http/HttpInfoTests.java b/server/src/test/java/org/opensearch/http/HttpInfoTests.java index d03ae9a2a1ccb..030e7cc89ab76 100644 --- a/server/src/test/java/org/opensearch/http/HttpInfoTests.java +++ b/server/src/test/java/org/opensearch/http/HttpInfoTests.java @@ -36,8 +36,8 @@ import java.net.InetAddress; import java.util.Map; import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java index bee1bedc892d8..31ad67f35cdd2 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationSourceServiceTests.java @@ -34,7 +34,7 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index 94e57f4a0d3e4..efb8dda201e87 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -41,7 +41,7 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.EmptyTransportResponseHandler; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.test.transport.CapturingTransport; import java.io.IOException; diff --git a/server/src/test/java/org/opensearch/node/NodeTests.java b/server/src/test/java/org/opensearch/node/NodeTests.java index 6dfecd8a692f1..816dc47c96f8a 100644 --- a/server/src/test/java/org/opensearch/node/NodeTests.java +++ b/server/src/test/java/org/opensearch/node/NodeTests.java @@ -41,7 +41,7 @@ import 
org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; import org.opensearch.common.settings.SettingsException; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.env.Environment; diff --git a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java index 2b13df3027cfa..7ca1f1e864b99 100644 --- a/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java +++ b/server/src/test/java/org/opensearch/node/ResponseCollectorServiceTests.java @@ -40,7 +40,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java b/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java index 347fef773fc8a..31df1b3ab80b3 100644 --- a/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java +++ b/server/src/test/java/org/opensearch/nodesinfo/NodeInfoStreamingTests.java @@ -40,8 +40,8 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; diff --git a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java index 5bdc2cc0bd280..9e851cef62655 100644 --- a/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java +++ b/server/src/test/java/org/opensearch/rest/BytesRestResponseTests.java @@ -42,7 +42,7 @@ import org.opensearch.action.search.ShardSearchFailure; import org.opensearch.core.common.ParsingException; import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.rest.RestStatus; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.core.xcontent.XContentParser; diff --git a/server/src/test/java/org/opensearch/rest/RestControllerTests.java b/server/src/test/java/org/opensearch/rest/RestControllerTests.java index ea0ce54913a8d..b4fa7574f0ff0 100644 --- a/server/src/test/java/org/opensearch/rest/RestControllerTests.java +++ b/server/src/test/java/org/opensearch/rest/RestControllerTests.java @@ -39,8 +39,8 @@ import org.opensearch.common.lifecycle.AbstractLifecycleComponent; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import 
org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.core.rest.RestStatus; diff --git a/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java b/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java index d865607aa5451..662ba202c8c5c 100644 --- a/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java +++ b/server/src/test/java/org/opensearch/rest/action/RestBuilderListenerTests.java @@ -40,8 +40,8 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.rest.FakeRestChannel; import org.opensearch.test.rest.FakeRestRequest; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import java.util.concurrent.atomic.AtomicReference; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index 32e0edf7c8f11..92781cf160afc 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -158,7 +158,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.AbstractRunnable; diff --git a/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java b/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java index bf47fc2cc9b45..e3d3e17c41fcb 100644 --- a/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java +++ b/server/src/test/java/org/opensearch/transport/ClusterConnectionManagerTests.java @@ -37,7 +37,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java b/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java index 4d8955650f8be..009dc829e8168 100644 --- a/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/opensearch/transport/InboundDecoderTests.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.transport.TransportMessage; import org.opensearch.test.OpenSearchTestCase; import 
org.opensearch.test.VersionUtils; diff --git a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java index fe8be400b03c9..ad82fd6ada5f4 100644 --- a/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java +++ b/server/src/test/java/org/opensearch/transport/OutboundHandlerTests.java @@ -43,12 +43,13 @@ import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.collect.Tuple; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.common.util.io.Streams; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java index 1451e9466778b..510a2b3abd943 100644 --- a/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/ProxyConnectionStrategyTests.java @@ -41,7 +41,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java index 4106ebd9988c9..92f5f455f7caa 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteClusterConnectionTests.java @@ -56,7 +56,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; diff --git a/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java b/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java index ccba66ff4c45e..5741024850756 100644 --- a/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java +++ b/server/src/test/java/org/opensearch/transport/RemoteConnectionManagerTests.java @@ -36,7 +36,7 @@ import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.OpenSearchTestCase; import 
java.net.InetAddress; diff --git a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java index 975da2de82ae6..a91b114b5ee84 100644 --- a/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/opensearch/transport/SniffConnectionStrategyTests.java @@ -47,7 +47,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.Strings; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; diff --git a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java index 39bc2d9bd2d48..ba216b31a1f1d 100644 --- a/server/src/test/java/org/opensearch/transport/TcpTransportTests.java +++ b/server/src/test/java/org/opensearch/transport/TcpTransportTests.java @@ -43,7 +43,7 @@ import org.opensearch.common.network.NetworkService; import org.opensearch.common.network.NetworkUtils; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; diff --git a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java index d1d46d5be13c0..cf37ca8cbdc25 100644 --- a/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportActionProxyTests.java @@ -39,6 +39,7 @@ import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java b/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java index 58c8806380436..6bbe926ea0384 100644 --- a/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportHandshakerTests.java @@ -37,6 +37,7 @@ import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; diff --git a/server/src/test/java/org/opensearch/transport/TransportInfoTests.java b/server/src/test/java/org/opensearch/transport/TransportInfoTests.java index 402c4183fa2b8..8360346ce3c42 100644 --- a/server/src/test/java/org/opensearch/transport/TransportInfoTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportInfoTests.java @@ -33,8 +33,8 @@ package org.opensearch.transport; import 
org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.xcontent.ToXContent; import org.opensearch.core.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java index 01d6a4d331477..c23c213353888 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceDeserializationFailureTests.java @@ -39,6 +39,7 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.tasks.Task; import org.opensearch.tasks.TaskAwareRequest; import org.opensearch.tasks.TaskId; diff --git a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java index 72fabc001760f..0d1d30014a9ac 100644 --- a/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/opensearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -66,7 +66,7 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; diff --git a/test/framework/src/main/java/org/opensearch/node/MockNode.java b/test/framework/src/main/java/org/opensearch/node/MockNode.java index 7d3a88fb1aba2..59c78d32c4c3c 100644 --- a/test/framework/src/main/java/org/opensearch/node/MockNode.java +++ b/test/framework/src/main/java/org/opensearch/node/MockNode.java @@ -41,7 +41,7 @@ import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; diff --git a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java index 1869fc8c9b447..943a99c02a00b 100644 --- a/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java +++ b/test/framework/src/main/java/org/opensearch/test/ExternalTestCluster.java @@ -45,7 +45,7 @@ import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.network.NetworkModule; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import 
org.opensearch.core.common.transport.TransportAddress; import org.opensearch.env.Environment; import org.opensearch.http.HttpInfo; import org.opensearch.node.MockNode; diff --git a/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java b/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java index e156449adc184..841cf62620f0d 100644 --- a/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/MockHttpTransport.java @@ -33,8 +33,8 @@ package org.opensearch.test; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.http.HttpInfo; import org.opensearch.http.HttpServerTransport; import org.opensearch.http.HttpStats; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 5e79bec91bd90..45d69703456fc 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -106,7 +106,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Setting.Property; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.core.common.unit.ByteSizeUnit; import org.opensearch.core.common.unit.ByteSizeValue; import org.opensearch.common.unit.TimeValue; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java index 14275d838e6a9..7b787e12be64a 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchTestCase.java @@ -89,7 +89,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateUtils; import org.opensearch.common.time.FormatNames; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.MockBigArrays; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.ThreadContext; diff --git a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java index dd025555d1ae8..1cd60690ca9d5 100644 --- a/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/disruption/DisruptableMockTransport.java @@ -39,8 +39,8 @@ import org.opensearch.common.Nullable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.test.transport.MockTransport; import 
org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.CloseableConnection; @@ -52,7 +52,7 @@ import org.opensearch.transport.TransportInterceptor; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportService; import java.io.IOException; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java index 4d59afd5f99ed..9354d244e4c06 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/FakeTransport.java @@ -35,8 +35,8 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.lifecycle.AbstractLifecycleComponent; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.transport.CloseableConnection; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.Transport; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java index 36f3e6cb4b692..0974a5f1f5671 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransport.java @@ -43,7 +43,7 @@ import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.CloseableConnection; import org.opensearch.transport.ClusterConnectionManager; @@ -54,7 +54,7 @@ import org.opensearch.transport.TransportMessageListener; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestOptions; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java index 7a1d730ac1b27..5cf451980ec98 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/MockTransportService.java @@ -46,8 +46,8 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import 
org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.common.util.concurrent.AbstractRunnable; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java index 53cae12871d92..47de32acde351 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableConnectionManager.java @@ -33,7 +33,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.transport.ConnectTransportException; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.ConnectionManager; diff --git a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java index 8d66d481dc4aa..0aeb5f77af8c8 100644 --- a/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java +++ b/test/framework/src/main/java/org/opensearch/test/transport/StubbableTransport.java @@ -37,8 +37,8 @@ import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.lifecycle.Lifecycle; import org.opensearch.common.lifecycle.LifecycleListener; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.tasks.Task; import org.opensearch.transport.ConnectionProfile; import org.opensearch.transport.RequestHandlerRegistry; diff --git a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java index 29619a541722c..6beda60ad4145 100644 --- a/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/opensearch/transport/AbstractSimpleTransportTestCase.java @@ -58,12 +58,13 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.BoundTransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.util.io.IOUtils; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.node.Node; import org.opensearch.tasks.Task; import org.opensearch.test.OpenSearchTestCase; diff --git a/test/framework/src/main/java/org/opensearch/transport/TestResponse.java b/test/framework/src/main/java/org/opensearch/transport/TestResponse.java index 09dd50d656004..14db8b3372bf2 100644 --- a/test/framework/src/main/java/org/opensearch/transport/TestResponse.java +++ b/test/framework/src/main/java/org/opensearch/transport/TestResponse.java @@ -33,6 +33,7 @@ import 
org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; +import org.opensearch.core.transport.TransportResponse; import java.io.IOException; diff --git a/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java b/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java index 819094c9fb089..64bb01afe3430 100644 --- a/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java +++ b/test/framework/src/main/java/org/opensearch/transport/TestTransportChannel.java @@ -33,6 +33,7 @@ package org.opensearch.transport; import org.opensearch.action.ActionListener; +import org.opensearch.core.transport.TransportResponse; public class TestTransportChannel implements TransportChannel { diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java index 9bddaf013aef4..46c1cfac5c5c5 100644 --- a/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java +++ b/test/framework/src/test/java/org/opensearch/test/disruption/DisruptableMockTransportTests.java @@ -39,7 +39,7 @@ import org.opensearch.common.collect.Tuple; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.node.Node; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.disruption.DisruptableMockTransport.ConnectionStatus; @@ -49,8 +49,8 @@ import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportRequestHandler; -import org.opensearch.transport.TransportResponse; -import org.opensearch.transport.TransportResponse.Empty; +import org.opensearch.core.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse.Empty; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import org.junit.Before; diff --git a/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java b/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java index a5112bc958954..427f217d07e2a 100644 --- a/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java +++ b/test/framework/src/test/java/org/opensearch/test/disruption/NetworkDisruptionIT.java @@ -46,7 +46,7 @@ import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportException; -import org.opensearch.transport.TransportResponse; +import org.opensearch.core.transport.TransportResponse; import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; diff --git a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java index 868affc81be37..158a770987207 100644 --- a/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java +++ b/test/framework/src/test/java/org/opensearch/transport/nio/SimpleMockNioTransportTests.java @@ -39,7 +39,7 @@ import org.opensearch.common.network.NetworkService; import 
org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.TransportAddress; +import org.opensearch.core.common.transport.TransportAddress; import org.opensearch.common.util.MockPageCacheRecycler; import org.opensearch.core.indices.breaker.NoneCircuitBreakerService; import org.opensearch.transport.AbstractSimpleTransportTestCase; From 5670d2a2be92602c8e8e38cc8fec7ddf5737ed52 Mon Sep 17 00:00:00 2001 From: Harish Bhakuni Date: Thu, 3 Aug 2023 00:36:45 -0700 Subject: [PATCH 17/24] [Snapshot Interop] Add Logic in Lock Manager to cleanup stale data post index deletion. (#8472) Signed-off-by: Harish Bhakuni --- .../RemoteStoreBaseIntegTestCase.java | 2 +- .../snapshots/DeleteSnapshotIT.java | 76 ++++++++++++++++++- .../store/RemoteSegmentStoreDirectory.java | 16 ++-- .../RemoteSegmentStoreDirectoryFactory.java | 4 +- .../RemoteStoreLockManagerFactory.java | 8 +- .../blobstore/BlobStoreRepository.java | 43 +++++++++-- .../blobstore/BlobStoreRepositoryTests.java | 2 - .../AbstractSnapshotIntegTestCase.java | 5 ++ 8 files changed, 138 insertions(+), 18 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java index 4a85ff46d9025..ec58c29175e16 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreBaseIntegTestCase.java @@ -159,7 +159,7 @@ public void teardown() { assertAcked(clusterAdmin().prepareDeleteRepository(REPOSITORY_2_NAME)); } - public int getFileCount(Path path) throws Exception { + public static int getFileCount(Path path) throws Exception { final AtomicInteger filesExisting = new AtomicInteger(0); Files.walkFileTree(path, new SimpleFileVisitor<>() { @Override diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java index b12fbdd2a9bd7..d38620723a8f4 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DeleteSnapshotIT.java @@ -10,22 +10,28 @@ import org.opensearch.action.ActionFuture; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; +import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.UUIDs; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; +import org.opensearch.remotestore.RemoteStoreBaseIntegTestCase; import org.opensearch.test.FeatureFlagSetter; import org.opensearch.test.OpenSearchIntegTestCase; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Locale; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; import static org.hamcrest.Matchers.is; import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.hamcrest.Matchers.comparesEqualTo; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @OpenSearchIntegTestCase.ClusterScope(scope = 
OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) @@ -240,12 +246,13 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { final String snapshotRepoName = "snapshot-repo-name"; final Path snapshotRepoPath = randomRepoPath(); createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); - final String testIndex = "index-test"; - createIndexWithContent(testIndex); final Path remoteStoreRepoPath = randomRepoPath(); createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); + final String testIndex = "index-test"; + createIndexWithContent(testIndex); + final String remoteStoreEnabledIndexName = "remote-index-1"; final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); @@ -289,6 +296,71 @@ public void testDeleteMultipleShallowCopySnapshotsCase3() throws Exception { assert (getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME).length == 0); } + public void testRemoteStoreCleanupForDeletedIndex() throws Exception { + disableRepoConsistencyCheck("Remote store repository is being used in the test"); + FeatureFlagSetter.set(FeatureFlags.REMOTE_STORE); + + internalCluster().startClusterManagerOnlyNode(remoteStoreClusterSettings(REMOTE_REPO_NAME)); + internalCluster().startDataOnlyNode(); + final Client clusterManagerClient = internalCluster().clusterManagerClient(); + ensureStableCluster(2); + + final String snapshotRepoName = "snapshot-repo-name"; + final Path snapshotRepoPath = randomRepoPath(); + createRepository(snapshotRepoName, "mock", snapshotRepoSettingsForShallowCopy(snapshotRepoPath)); + + final Path remoteStoreRepoPath = randomRepoPath(); + createRepository(REMOTE_REPO_NAME, "fs", remoteStoreRepoPath); + + final String testIndex = "index-test"; + createIndexWithContent(testIndex); + + final String remoteStoreEnabledIndexName = "remote-index-1"; + final Settings remoteStoreEnabledIndexSettings = getRemoteStoreBackedIndexSettings(); + createIndex(remoteStoreEnabledIndexName, remoteStoreEnabledIndexSettings); + indexRandomDocs(remoteStoreEnabledIndexName, randomIntBetween(5, 10)); + + String indexUUID = client().admin() + .indices() + .prepareGetSettings(remoteStoreEnabledIndexName) + .get() + .getSetting(remoteStoreEnabledIndexName, IndexMetadata.SETTING_INDEX_UUID); + + logger.info("--> create two remote index shallow snapshots"); + List shallowCopySnapshots = createNSnapshots(snapshotRepoName, 2); + + String[] lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME); + assert (lockFiles.length == 2) : "lock files are " + Arrays.toString(lockFiles); + + // delete remote store index + assertAcked(client().admin().indices().prepareDelete(remoteStoreEnabledIndexName)); + + logger.info("--> delete snapshot 1"); + AcknowledgedResponse deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(0)) + .get(); + assertAcked(deleteSnapshotResponse); + + lockFiles = getLockFilesInRemoteStore(remoteStoreEnabledIndexName, REMOTE_REPO_NAME, indexUUID); + assert (lockFiles.length == 1) : "lock files are " + Arrays.toString(lockFiles); + + logger.info("--> delete snapshot 2"); + deleteSnapshotResponse = clusterManagerClient.admin() + .cluster() + .prepareDeleteSnapshot(snapshotRepoName, shallowCopySnapshots.get(1)) + .get(); + assertAcked(deleteSnapshotResponse); + + Path indexPath = 
Path.of(String.valueOf(remoteStoreRepoPath), indexUUID);
+ // Deletion is asynchronous; give it time to complete
+ assertBusy(() -> {
+ try {
+ assertThat(RemoteStoreBaseIntegTestCase.getFileCount(indexPath), comparesEqualTo(0));
+ } catch (Exception e) {} // ignored: the index path may already be gone entirely, which also counts as cleaned up
+ }, 30, TimeUnit.SECONDS);
+ }
+
private List<String> createNSnapshots(String repoName, int count) {
final List<String> snapshotNames = new ArrayList<>(count);
final String prefix = "snap-" + UUIDs.randomBase64UUID(random()).toLowerCase(Locale.ROOT) + "-";
diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
index 8dfdb3e2c8e06..9ac5ebb94a5ca 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectory.java
@@ -837,29 +837,36 @@ public void deleteStaleSegments(int lastNMetadataFilesToKeep) throws IOException
}
}
+ public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) {
+ deleteStaleSegmentsAsync(lastNMetadataFilesToKeep, ActionListener.wrap(r -> {}, e -> {}));
+ }
+
/**
* Delete stale segment and metadata files asynchronously.
* This method calls {@link RemoteSegmentStoreDirectory#deleteStaleSegments(int)} in an async manner.
* @param lastNMetadataFilesToKeep number of metadata files to keep
*/
- public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep) {
+ public void deleteStaleSegmentsAsync(int lastNMetadataFilesToKeep, ActionListener<Void> listener) {
if (canDeleteStaleCommits.compareAndSet(true, false)) {
try {
threadPool.executor(ThreadPool.Names.REMOTE_PURGE).execute(() -> {
try {
deleteStaleSegments(lastNMetadataFilesToKeep);
+ listener.onResponse(null);
} catch (Exception e) {
- logger.info(
+ logger.error(
"Exception while deleting stale commits from remote segment store, will retry delete post next commit",
e
);
+ listener.onFailure(e);
} finally {
canDeleteStaleCommits.set(true);
}
});
} catch (Exception e) {
- logger.info("Exception occurred while scheduling deleteStaleCommits", e);
+ logger.error("Exception occurred while scheduling deleteStaleCommits", e);
canDeleteStaleCommits.set(true);
+ listener.onFailure(e);
}
}
}
@@ -891,7 +898,6 @@ private boolean deleteIfEmpty() throws IOException {
}
public void close() throws IOException {
- deleteStaleSegmentsAsync(0);
- deleteIfEmpty();
+ deleteStaleSegmentsAsync(0, ActionListener.wrap(r -> deleteIfEmpty(), e -> logger.error("Failed to clean up remote directory", e)));
}
}
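The listener-based overload above turns the previously fire-and-forget purge into an observable operation, while the retained no-arg variant keeps the old behavior by passing a no-op listener. A minimal caller-side sketch, assuming a wired-up RemoteSegmentStoreDirectory named directory and a log4j logger (both hypothetical, not part of the patch):

    // org.opensearch.action.ActionListener.wrap(onResponse, onFailure) builds the ActionListener<Void> expected here
    directory.deleteStaleSegmentsAsync(10, ActionListener.wrap(
        ignored -> logger.info("stale segment cleanup completed"),
        e -> logger.warn("stale segment cleanup failed; it will be retried after the next commit", e)
    ));

This is also what lets close() above run deleteIfEmpty() strictly after the stale-segment purge finishes, instead of racing with it as the two sequential calls did before.

diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
index 3bec84f287ce4..3de7a706c0688 100644
--- a/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
+++ b/server/src/main/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryFactory.java
@@ -13,8 +13,8 @@ import org.opensearch.common.blobstore.BlobPath;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.shard.ShardPath;
+import org.opensearch.index.store.lockmanager.RemoteStoreLockManager;
import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory;
-import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager;
import org.opensearch.plugins.IndexStorePlugin;
import org.opensearch.repositories.RepositoriesService;
import org.opensearch.repositories.Repository;
@@ -59,7 +59,7 @@ public Directory newDirectory(String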
repositoryName, String indexUUID, String sh RemoteDirectory dataDirectory = createRemoteDirectory(repository, commonBlobPath, "data"); RemoteDirectory metadataDirectory = createRemoteDirectory(repository, commonBlobPath, "metadata"); - RemoteStoreMetadataLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( + RemoteStoreLockManager mdLockManager = RemoteStoreLockManagerFactory.newLockManager( repositoriesService.get(), repositoryName, indexUUID, diff --git a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java index e866551eae143..1a306f3261094 100644 --- a/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java +++ b/server/src/main/java/org/opensearch/index/store/lockmanager/RemoteStoreLockManagerFactory.java @@ -33,7 +33,7 @@ public RemoteStoreLockManagerFactory(Supplier repositoriesS this.repositoriesService = repositoriesService; } - public RemoteStoreMetadataLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { + public RemoteStoreLockManager newLockManager(String repositoryName, String indexUUID, String shardId) throws IOException { return newLockManager(repositoriesService.get(), repositoryName, indexUUID, shardId); } @@ -58,6 +58,12 @@ public static RemoteStoreMetadataLockManager newLockManager( } } + // TODO: remove this once we add poller in place to trigger remote store cleanup + // see: https://github.com/opensearch-project/OpenSearch/issues/8469 + public Supplier getRepositoriesService() { + return repositoriesService; + } + private static RemoteBufferedOutputDirectory createRemoteBufferedOutputDirectory( Repository repository, BlobPath commonBlobPath, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index 70db2e0c0a9bd..f24c255f0d0da 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -113,11 +113,12 @@ import org.opensearch.index.snapshots.blobstore.RateLimitingInputStream; import org.opensearch.index.snapshots.blobstore.SlicedInputStream; import org.opensearch.index.snapshots.blobstore.SnapshotFiles; +import org.opensearch.index.store.RemoteSegmentStoreDirectoryFactory; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.store.lockmanager.FileLockInfo; +import org.opensearch.index.store.lockmanager.RemoteStoreLockManager; import org.opensearch.index.store.lockmanager.RemoteStoreLockManagerFactory; -import org.opensearch.index.store.lockmanager.RemoteStoreMetadataLockManager; import org.opensearch.indices.recovery.RecoverySettings; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.IndexId; @@ -616,7 +617,7 @@ public void cloneRemoteStoreIndexShardSnapshot( RemoteStoreShardShallowCopySnapshot remStoreBasedShardMetadata = (RemoteStoreShardShallowCopySnapshot) indexShardSnapshot; String indexUUID = remStoreBasedShardMetadata.getIndexUUID(); String remoteStoreRepository = remStoreBasedShardMetadata.getRemoteStoreRepository(); - RemoteStoreMetadataLockManager remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( + RemoteStoreLockManager 
remoteStoreMetadataLockManger = remoteStoreLockManagerFactory.newLockManager( remoteStoreRepository, indexUUID, String.valueOf(shardId.shardId()) @@ -1072,11 +1073,24 @@ private void executeStaleShardDelete( // Releasing lock file before deleting the shallow-snap-UUID file because in case of any failure while // releasing the lock file, we would still have the shallow-snap-UUID file and that would be used during // next delete operation for releasing this lock file - RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory - .newLockManager(remoteStoreRepoForIndex, indexUUID, shardId); + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory.newLockManager( + remoteStoreRepoForIndex, + indexUUID, + shardId + ); remoteStoreMetadataLockManager.release( FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() ); + if (!isIndexPresent(clusterService, indexUUID)) { + // this is a temporary solution where snapshot deletion triggers remote store side + // cleanup if index is already deleted. We will add a poller in future to take + // care of remote store side cleanup. + // see https://github.com/opensearch-project/OpenSearch/issues/8469 + new RemoteSegmentStoreDirectoryFactory( + remoteStoreLockManagerFactory.getRepositoriesService(), + threadPool + ).newDirectory(remoteStoreRepoForIndex, indexUUID, shardId).close(); + } } } } @@ -1487,6 +1501,15 @@ private void cleanupStaleIndices( } } + private static boolean isIndexPresent(ClusterService clusterService, String indexUUID) { + for (final IndexMetadata indexMetadata : clusterService.state().metadata().getIndices().values()) { + if (indexUUID.equals(indexMetadata.getIndexUUID())) { + return true; + } + } + return false; + } + private void executeOneStaleIndexDelete( BlockingQueue> staleIndicesToDelete, RemoteStoreLockManagerFactory remoteStoreLockManagerFactory, @@ -1519,11 +1542,21 @@ private void executeOneStaleIndexDelete( // Releasing lock files before deleting the shallow-snap-UUID file because in case of any failure // while releasing the lock file, we would still have the corresponding shallow-snap-UUID file // and that would be used during next delete operation for releasing this stale lock file - RemoteStoreMetadataLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory + RemoteStoreLockManager remoteStoreMetadataLockManager = remoteStoreLockManagerFactory .newLockManager(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()); remoteStoreMetadataLockManager.release( FileLockInfo.getLockInfoBuilder().withAcquirerId(snapshotUUID).build() ); + if (!isIndexPresent(clusterService, indexUUID)) { + // this is a temporary solution where snapshot deletion triggers remote store side + // cleanup if index is already deleted. We will add a poller in future to take + // care of remote store side cleanup. 
+ // see https://github.com/opensearch-project/OpenSearch/issues/8469
+ new RemoteSegmentStoreDirectoryFactory(
+ remoteStoreLockManagerFactory.getRepositoriesService(),
+ threadPool
+ ).newDirectory(remoteStoreRepoForIndex, indexUUID, shardBlob.getKey()).close();
+ }
}
}
}
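Combined with the RemoteSegmentStoreDirectory changes earlier in this patch, snapshot deletion now doubles as remote store garbage collection for indices that were already deleted from the cluster. A condensed sketch of that chain, reusing the names from the hunks above (error handling elided; an illustration, not a drop-in excerpt):

    // after releasing the shallow-snapshot lock for a shard of a deleted index:
    if (!isIndexPresent(clusterService, indexUUID)) { // index UUID no longer in cluster state
        new RemoteSegmentStoreDirectoryFactory(remoteStoreLockManagerFactory.getRepositoriesService(), threadPool)
            .newDirectory(remoteStoreRepoForIndex, indexUUID, shardId)
            .close(); // purges all stale segments (keeping zero metadata files), then deletes the empty shard path
    }

As the inline comments note, this piggybacks on snapshot deletion only until a dedicated poller (tracked in issue #8469) owns remote store cleanup.

diff --git a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java index 26082f2456867..ee9181dba4563 100644 --- a/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java +++ b/server/src/test/java/org/opensearch/repositories/blobstore/BlobStoreRepositoryTests.java @@ -166,8 +166,6 @@ public void testRetrieveSnapshots() throws Exception { assertThat(snapshotIds, equalTo(originalSnapshots)); } - // Validate Scenario remoteStoreShallowCopy Snapshot -> remoteStoreShallowCopy Snapshot - // -> remoteStoreShallowCopy Snapshot -> normal snapshot public void testReadAndWriteSnapshotsThroughIndexFile() throws Exception { final BlobStoreRepository repository = setupRepo(); final long pendingGeneration = repository.metadata.pendingGeneration(); diff --git a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java index 2c6f4f7b15e5d..9c3f342e58111 100644 --- a/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -557,6 +557,11 @@ protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String rem .prepareGetSettings(remoteStoreIndex) .get() .getSetting(remoteStoreIndex, IndexMetadata.SETTING_INDEX_UUID); + return getLockFilesInRemoteStore(remoteStoreIndex, remoteStoreRepositoryName, indexUUID); + } + + protected String[] getLockFilesInRemoteStore(String remoteStoreIndex, String remoteStoreRepositoryName, String indexUUID) + throws IOException { final RepositoriesService repositoriesService = internalCluster().getCurrentClusterManagerNodeInstance(RepositoriesService.class); final BlobStoreRepository remoteStoreRepository = (BlobStoreRepository) repositoriesService.repository(remoteStoreRepositoryName); BlobPath shardLevelBlobPath = remoteStoreRepository.basePath().add(indexUUID).add("0").add("segments").add("lock_files"); From e55dadef57809a7fc04dac5ff4d43873beeefadc Mon Sep 17 00:00:00 2001 From: Gaurav Bafna <85113518+gbbafna@users.noreply.github.com> Date: Thu, 3 Aug 2023 16:47:20 +0530 Subject: [PATCH 18/24] Avoid duplicate indexing in case of SegRep enabled indices' translog replay (#8578) Signed-off-by: Gaurav Bafna --- .../recovery/IndexPrimaryRelocationIT.java | 14 +-- .../RemoteIndexPrimaryRelocationIT.java | 66 ++++++++++++++ .../opensearch/remotestore/RemoteStoreIT.java | 3 + .../ReplicaToPrimaryPromotionIT.java | 64 ++++++++++++++ .../index/engine/InternalEngine.java | 28 ++++-- .../index/shard/IndexShardTests.java | 27 +++--- ...dTests.java => RemoteIndexShardTests.java} | 39 ++++++++- ...overyWithRemoteTranslogOnPrimaryTests.java | 12 +-- .../SegmentReplicationIndexShardTests.java | 40 ++++++++- .../TranslogTransferManagerTests.java | 5 +- ...enSearchIndexLevelReplicationTestCase.java | 17 +--- .../index/shard/IndexShardTestCase.java | 86 ++++++++++++++----- 12 files changed, 325 insertions(+), 76 deletions(-) create mode 100644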
server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java rename server/src/test/java/org/opensearch/index/shard/{SegmentReplicationWithRemoteIndexShardTests.java => RemoteIndexShardTests.java} (78%) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java index 32a10451a0dd3..e9962706bcd39 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexPrimaryRelocationIT.java @@ -56,14 +56,16 @@ public class IndexPrimaryRelocationIT extends OpenSearchIntegTestCase { private static final int RELOCATION_COUNT = 15; + public void setup() {} + + public Settings indexSettings() { + return Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build(); + } + public void testPrimaryRelocationWhileIndexing() throws Exception { internalCluster().ensureAtLeastNumDataNodes(randomIntBetween(2, 3)); - client().admin() - .indices() - .prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) - .setMapping("field", "type=text") - .get(); + setup(); + client().admin().indices().prepareCreate("test").setSettings(indexSettings()).setMapping("field", "type=text").get(); ensureGreen("test"); AtomicInteger numAutoGenDocs = new AtomicInteger(); final AtomicBoolean finished = new AtomicBoolean(false); diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java new file mode 100644 index 0000000000000..a9482c8c19187 --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteIndexPrimaryRelocationIT.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.remotestore; + +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.indices.recovery.IndexPrimaryRelocationIT; +import org.opensearch.indices.replication.common.ReplicationType; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.nio.file.Path; + +import static org.opensearch.remotestore.RemoteStoreBaseIntegTestCase.remoteStoreClusterSettings; +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST) +public class RemoteIndexPrimaryRelocationIT extends IndexPrimaryRelocationIT { + + protected static final String REPOSITORY_NAME = "test-remote-store-repo"; + + protected Path absolutePath; + + public void setup() { + absolutePath = randomRepoPath().toAbsolutePath(); + assertAcked( + clusterAdmin().preparePutRepository(REPOSITORY_NAME).setType("fs").setSettings(Settings.builder().put("location", absolutePath)) + ); + } + + protected Settings nodeSettings(int nodeOrdinal) { + return Settings.builder() + .put(super.nodeSettings(nodeOrdinal)) + .put(remoteStoreClusterSettings(REPOSITORY_NAME, REPOSITORY_NAME, false)) + .build(); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public Settings indexSettings() { + return Settings.builder() + .put(super.indexSettings()) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + } + + @Override + protected Settings featureFlagSettings() { + return Settings.builder() + .put(super.featureFlagSettings()) + .put(FeatureFlags.REMOTE_STORE, "true") + .put(FeatureFlags.SEGMENT_REPLICATION_EXPERIMENTAL, "true") + .build(); + } +} diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java index 693c4113f8f3b..51ae37c3b4faa 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreIT.java @@ -40,6 +40,9 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.oneOf; import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.comparesEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.oneOf; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java index 549b4985894a7..c73e7f603b09b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/ReplicaToPrimaryPromotionIT.java @@ -11,17 +11,22 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.junit.Before; import org.opensearch.action.admin.indices.close.CloseIndexResponse; +import org.opensearch.action.index.IndexResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; 
import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.common.settings.Settings; +import org.opensearch.core.rest.RestStatus; import org.opensearch.test.BackgroundIndexer; import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.util.Locale; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; @@ -116,4 +121,63 @@ public void testPromoteReplicaToPrimary() throws Exception { refresh(indexName); assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs); } + + public void testFailoverWhileIndexing() throws Exception { + internalCluster().startNode(); + internalCluster().startNode(); + final String indexName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + shard_count = scaledRandomIntBetween(1, 5); + createIndex(indexName); + ensureGreen(indexName); + int docCount = scaledRandomIntBetween(20, 50); + final int indexDocAfterFailover = scaledRandomIntBetween(20, 50); + AtomicInteger numAutoGenDocs = new AtomicInteger(); + CountDownLatch latch = new CountDownLatch(1); + final AtomicBoolean finished = new AtomicBoolean(false); + Thread indexingThread = new Thread(() -> { + int docsAfterFailover = 0; + while (finished.get() == false && numAutoGenDocs.get() < docCount) { + IndexResponse indexResponse = internalCluster().clusterManagerClient() + .prepareIndex(indexName) + .setSource("field", numAutoGenDocs.get()) + .get(); + + if (indexResponse.status() == RestStatus.CREATED || indexResponse.status() == RestStatus.ACCEPTED) { + numAutoGenDocs.incrementAndGet(); + if (numAutoGenDocs.get() == docCount / 2) { + if (random().nextInt(3) == 0) { + refresh(indexName); + } else if (random().nextInt(2) == 0) { + flush(indexName); + } + // Node is killed on this + latch.countDown(); + } else if (numAutoGenDocs.get() > docCount / 2) { + docsAfterFailover++; + if (docsAfterFailover == indexDocAfterFailover) { + finished.set(true); + } + } + } + } + logger.debug("Done indexing"); + }); + indexingThread.start(); + latch.await(); + + ClusterState state = client(internalCluster().getClusterManagerName()).admin().cluster().prepareState().get().getState(); + final int numShards = state.metadata().index(indexName).getNumberOfShards(); + final ShardRouting primaryShard = state.routingTable().index(indexName).shard(randomIntBetween(0, numShards - 1)).primaryShard(); + final DiscoveryNode randomNode = state.nodes().resolveNode(primaryShard.currentNodeId()); + + // stop the random data node, all remaining shards are promoted to primaries + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(randomNode.getName())); + ensureYellowAndNoInitializingShards(indexName); + indexingThread.join(); + refresh(indexName); + assertHitCount( + client(internalCluster().getClusterManagerName()).prepareSearch(indexName).setSize(0).setTrackTotalHits(true).get(), + numAutoGenDocs.get() + ); + } } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index 77d63dfaade54..028298f662e7b 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -710,6 
+710,7 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) final OpVsLuceneDocStatus status; VersionValue versionValue = getVersionFromMap(op.uid().bytes()); assert incrementVersionLookup(); + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); if (versionValue != null) { status = compareOpToVersionMapOnSeqNo(op.id(), op.seqNo(), op.primaryTerm(), versionValue); } else { @@ -722,10 +723,8 @@ private OpVsLuceneDocStatus compareOpToLuceneDocBasedOnSeqNo(final Operation op) } else if (op.seqNo() > docAndSeqNo.seqNo) { status = OpVsLuceneDocStatus.OP_NEWER; } else if (op.seqNo() == docAndSeqNo.seqNo) { - assert localCheckpointTracker.hasProcessed(op.seqNo()) : "local checkpoint tracker is not updated seq_no=" - + op.seqNo() - + " id=" - + op.id(); + assert localCheckpointTracker.hasProcessed(op.seqNo()) || segRepEnabled + : "local checkpoint tracker is not updated seq_no=" + op.seqNo() + " id=" + op.id(); status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; } else { status = OpVsLuceneDocStatus.OP_STALE_OR_EQUAL; @@ -927,6 +926,7 @@ public IndexResult index(Index index) throws IOException { plan.currentNotFoundOrDeleted ); } + } if (index.origin().isFromTranslog() == false) { final Translog.Location location; @@ -1005,10 +1005,18 @@ protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IO assert maxSeqNoOfUpdatesOrDeletes < index.seqNo() : index.seqNo() + ">=" + maxSeqNoOfUpdatesOrDeletes; plan = IndexingStrategy.optimizedAppendOnly(index.version(), 0); } else { + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); versionMap.enforceSafeAccess(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(index); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = IndexingStrategy.processAsStaleOp(index.version()); + if (segRepEnabled) { + // For segrep based indices, we can't completely rely on localCheckpointTracker + // as the preserved checkpoint may not have all the operations present in lucene + // we don't need to index it again as stale op as it would create multiple documents for same seq no + plan = IndexingStrategy.processButSkipLucene(false, index.version()); + } else { + plan = IndexingStrategy.processAsStaleOp(index.version()); + } } else { plan = IndexingStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, index.version(), 0); } @@ -1442,9 +1450,17 @@ protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws // See testRecoveryWithOutOfOrderDelete for an example of peer recovery plan = DeletionStrategy.processButSkipLucene(false, delete.version()); } else { + boolean segRepEnabled = engineConfig.getIndexSettings().isSegRepEnabled(); final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete); if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) { - plan = DeletionStrategy.processAsStaleOp(delete.version()); + if (segRepEnabled) { + // For segrep based indices, we can't completely rely on localCheckpointTracker + // as the preserved checkpoint may not have all the operations present in lucene + // we don't need to index it again as stale op as it would create multiple documents for same seq no + plan = DeletionStrategy.processButSkipLucene(false, delete.version()); + } else { + plan = DeletionStrategy.processAsStaleOp(delete.version()); + } } else { plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version(), 0); } diff --git 
a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 96fa53fbf0fc2..f1ae4b16d9bad 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -36,8 +36,8 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.index.IndexableField; -import org.apache.lucene.index.Term; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.Term; import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -50,8 +50,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Constants; import org.junit.Assert; -import org.opensearch.common.io.PathUtils; -import org.opensearch.core.Assertions; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -77,11 +75,12 @@ import org.opensearch.common.Randomness; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.io.PathUtils; import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.IndexScopedSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; @@ -90,8 +89,9 @@ import org.opensearch.common.util.concurrent.ConcurrentCollections; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; +import org.opensearch.core.Assertions; +import org.opensearch.core.common.bytes.BytesArray; +import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.index.shard.ShardId; import org.opensearch.core.xcontent.NamedXContentRegistry; import org.opensearch.core.xcontent.XContentBuilder; @@ -106,8 +106,8 @@ import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; -import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NRTReplicationEngine; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.FieldDataStats; import org.opensearch.index.fielddata.IndexFieldData; @@ -168,6 +168,7 @@ import java.nio.file.attribute.BasicFileAttributes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -192,7 +193,7 @@ import java.util.stream.Collectors; import java.util.stream.IntStream; import java.util.stream.Stream; -import java.util.Collection; + import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -217,8 +218,8 @@ import static org.mockito.Mockito.mock; import static 
org.opensearch.cluster.routing.TestShardRouting.newShardRouting; import static org.opensearch.common.lucene.Lucene.cleanLuceneIndex; -import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.opensearch.core.xcontent.ToXContent.EMPTY_PARAMS; import static org.opensearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.opensearch.test.hamcrest.RegexMatcher.matches; @@ -2886,13 +2887,14 @@ public void testCommitLevelRestoreShardFromRemoteStore() throws IOException { } public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOException { + String remoteStorePath = createTempDir().toString(); IndexShard target = newStartedShard( true, Settings.builder() .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) .put(IndexMetadata.SETTING_REMOTE_STORE_ENABLED, true) - .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, "temp-fs") - .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, "temp-fs") + .put(IndexMetadata.SETTING_REMOTE_SEGMENT_STORE_REPOSITORY, remoteStorePath + "__test") + .put(IndexMetadata.SETTING_REMOTE_TRANSLOG_STORE_REPOSITORY, remoteStorePath + "__test") .build(), new InternalEngineFactory() ); @@ -2957,7 +2959,6 @@ public void testRestoreShardFromRemoteStore(boolean performFlush) throws IOExcep final PlainActionFuture future = PlainActionFuture.newFuture(); target.restoreFromRemoteStore(future); target.remoteStore().decRef(); - assertTrue(future.actionGet()); assertDocs(target, "1", "2"); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java similarity index 78% rename from server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java rename to server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java index b15d8b66fca55..a01169480de0b 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationWithRemoteIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/RemoteIndexShardTests.java @@ -19,10 +19,12 @@ import java.io.IOException; import java.util.List; +import java.util.concurrent.CountDownLatch; import static org.hamcrest.Matchers.equalTo; +import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; -public class SegmentReplicationWithRemoteIndexShardTests extends SegmentReplicationIndexShardTests { +public class RemoteIndexShardTests extends SegmentReplicationIndexShardTests { private static final String REPOSITORY_NAME = "temp-fs"; private static final Settings settings = Settings.builder() @@ -135,4 +137,39 @@ public void testNRTReplicaWithRemoteStorePromotedAsPrimary(boolean performFlushF } } } + + public void testNoDuplicateSeqNo() throws Exception { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startPrimary(); + shards.startAll(); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); + + flushShard(primaryShard); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); + + 
shards.indexDocs(10); + primaryShard.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + + CountDownLatch latch = new CountDownLatch(1); + shards.promoteReplicaToPrimary(replicaShard, (shard, listener) -> { + try { + assertAtMostOneLuceneDocumentPerSequenceNumber(replicaShard.getEngine()); + } catch (IOException e) { + throw new RuntimeException(e); + } + latch.countDown(); + }); + latch.await(); + for (IndexShard shard : shards) { + if (shard != null) { + closeShard(shard, false); + } + } + } } diff --git a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java index 20b3dfc0f93a6..d71a290c9619a 100644 --- a/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ReplicaRecoveryWithRemoteTranslogOnPrimaryTests.java @@ -23,7 +23,6 @@ import org.opensearch.index.mapper.MapperService; import org.opensearch.index.replication.OpenSearchIndexLevelReplicationTestCase; import org.opensearch.index.seqno.SequenceNumbers; -import org.opensearch.index.store.Store; import org.opensearch.index.translog.WriteOnlyTranslogManager; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.common.ReplicationType; @@ -76,15 +75,6 @@ public void testStartSequenceForReplicaRecovery() throws Exception { int moreDocs = shards.indexDocs(randomIntBetween(20, 100)); shards.flush(); - - final ShardRouting replicaRouting2 = newShardRouting( - replicaRouting.shardId(), - replicaRouting.currentNodeId(), - false, - ShardRoutingState.INITIALIZING, - RecoverySource.PeerRecoverySource.INSTANCE - ); - Store remoteStore = createRemoteStore(remoteDir, replicaRouting2, newIndexMetadata); IndexShard newReplicaShard = newShard( newShardRouting( replicaRouting.shardId(), @@ -102,7 +92,7 @@ public void testStartSequenceForReplicaRecovery() throws Exception { replica.getGlobalCheckpointSyncer(), replica.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER, - remoteStore + remoteDir ); shards.addReplica(newReplicaShard); AtomicBoolean assertDone = new AtomicBoolean(false); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 12b7341349442..9c02f430e4e6d 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -23,12 +23,12 @@ import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.common.collect.Tuple; import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lease.Releasable; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.CancellableThreads; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.Engine; import org.opensearch.index.engine.InternalEngineFactory; @@ -50,8 +50,8 @@ import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTarget; import 
org.opensearch.indices.replication.SegmentReplicationTargetService; -import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.CopyState; import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; @@ -82,6 +82,7 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.opensearch.index.engine.EngineTestCase.assertAtMostOneLuceneDocumentPerSequenceNumber; public class SegmentReplicationIndexShardTests extends OpenSearchIndexLevelReplicationTestCase { @@ -673,6 +674,41 @@ protected SegmentReplicationTargetService newTargetService(SegmentReplicationSou ); } + public void testNoDuplicateSeqNo() throws Exception { + Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + ReplicationGroup shards = createGroup(1, settings, indexMapping, new NRTReplicationEngineFactory(), createTempDir()); + final IndexShard primaryShard = shards.getPrimary(); + final IndexShard replicaShard = shards.getReplicas().get(0); + shards.startPrimary(); + shards.startAll(); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); + + flushShard(primaryShard); + shards.indexDocs(10); + replicateSegments(primaryShard, shards.getReplicas()); + + shards.indexDocs(10); + primaryShard.refresh("test"); + replicateSegments(primaryShard, shards.getReplicas()); + + CountDownLatch latch = new CountDownLatch(1); + shards.promoteReplicaToPrimary(replicaShard, (shard, listener) -> { + try { + assertAtMostOneLuceneDocumentPerSequenceNumber(replicaShard.getEngine()); + } catch (IOException e) { + throw new RuntimeException(e); + } + latch.countDown(); + }); + latch.await(); + for (IndexShard shard : shards) { + if (shard != null) { + closeShard(shard, false); + } + } + } + /** * Assert persisted and searchable doc counts. This method should not be used while docs are concurrently indexed because * it asserts point in time seqNos are relative to the doc counts. 
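The testNoDuplicateSeqNo cases above exercise the stale-op handling this patch adds to
InternalEngine.planIndexingAsNonPrimary and planDeletionAsNonPrimary. The following is a
condensed, hypothetical sketch of that decision; the enum and method names are stand-ins
for illustration only, not the engine's real API:

public class ReplayPlanSketch {

    enum OpVsLuceneDoc { OP_NEWER, OP_STALE_OR_EQUAL, LUCENE_DOC_NOT_FOUND }

    enum Plan { PROCESS_NORMALLY, PROCESS_AS_STALE_OP, SKIP_LUCENE_BUT_ADVANCE_TRACKERS }

    // Mirrors only the branching added by this patch: the segRepEnabled check and
    // the resulting plan for an operation replayed from the translog on a non-primary.
    static Plan planNonPrimaryOp(OpVsLuceneDoc status, boolean segRepEnabled) {
        if (status == OpVsLuceneDoc.OP_STALE_OR_EQUAL) {
            // With segment replication, the replica's Lucene files are copied from the
            // primary, so a stale operation may already be present in a copied segment.
            // Writing it again would create two documents with the same seq_no, so the
            // op is processed without touching Lucene (only trackers are advanced).
            return segRepEnabled ? Plan.SKIP_LUCENE_BUT_ADVANCE_TRACKERS : Plan.PROCESS_AS_STALE_OP;
        }
        return Plan.PROCESS_NORMALLY;
    }

    public static void main(String[] args) {
        // A stale op replayed on a segrep-enabled index skips Lucene entirely.
        System.out.println(planNonPrimaryOp(OpVsLuceneDoc.OP_STALE_OR_EQUAL, true));
    }
}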
diff --git a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java index 3d622d6bdf8b8..99937faa18584 100644 --- a/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java +++ b/server/src/test/java/org/opensearch/index/translog/transfer/TranslogTransferManagerTests.java @@ -16,8 +16,8 @@ import org.opensearch.common.blobstore.BlobMetadata; import org.opensearch.common.blobstore.BlobPath; import org.opensearch.common.blobstore.BlobStore; -import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.common.blobstore.stream.write.WritePriority; +import org.opensearch.common.blobstore.support.PlainBlobMetadata; import org.opensearch.core.index.Index; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.translog.Translog; @@ -44,7 +44,6 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyMap; import static org.mockito.ArgumentMatchers.anySet; -import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doNothing; @@ -432,7 +431,7 @@ public void testDeleteStaleTranslogMetadata() { .listAllInSortedOrderAsync( eq(ThreadPool.Names.REMOTE_PURGE), any(BlobPath.class), - anyString(), + eq(TranslogTransferMetadata.METADATA_PREFIX), anyInt(), any(ActionListener.class) ); diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index 278847e56e65f..e8c5203129374 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -75,12 +75,12 @@ import org.opensearch.common.collect.Iterators; import org.opensearch.common.io.stream.BytesStreamOutput; import org.opensearch.core.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.common.lease.Releasable; -import org.opensearch.common.lease.Releasables; import org.opensearch.core.index.Index; import org.opensearch.index.IndexSettings; import org.opensearch.index.engine.DocIdSeqNoAndSource; @@ -98,7 +98,6 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.core.index.shard.ShardId; import org.opensearch.index.shard.ShardPath; -import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; @@ -253,10 +252,6 @@ protected ReplicationGroup(final IndexMetadata indexMetadata) throws IOException protected ReplicationGroup(final IndexMetadata indexMetadata, Path remotePath) throws IOException { final ShardRouting primaryRouting = this.createShardRouting("s0", true); - Store remoteStore = null; - if (remotePath != null) { - remoteStore = createRemoteStore(remotePath, primaryRouting, indexMetadata); - } 
primary = newShard( primaryRouting, indexMetadata, @@ -264,7 +259,7 @@ protected ReplicationGroup(final IndexMetadata indexMetadata, Path remotePath) t getEngineFactory(primaryRouting), () -> {}, retentionLeaseSyncer, - remoteStore + remotePath ); replicas = new CopyOnWriteArrayList<>(); this.indexMetadata = indexMetadata; @@ -390,10 +385,6 @@ public IndexShard addReplica() throws IOException { public IndexShard addReplica(Path remotePath) throws IOException { final ShardRouting replicaRouting = createShardRouting("s" + replicaId.incrementAndGet(), false); - Store remoteStore = null; - if (remotePath != null) { - remoteStore = createRemoteStore(remotePath, replicaRouting, indexMetadata); - } final IndexShard replica = newShard( replicaRouting, indexMetadata, @@ -401,7 +392,7 @@ public IndexShard addReplica(Path remotePath) throws IOException { getEngineFactory(replicaRouting), () -> {}, retentionLeaseSyncer, - remoteStore + remotePath ); addReplica(replica); return replica; diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 66e5459cfea3b..29ecc6b376ad0 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -48,6 +48,7 @@ import org.opensearch.action.support.replication.TransportReplicationAction; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; +import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; import org.opensearch.cluster.routing.IndexShardRoutingTable; @@ -66,7 +67,6 @@ import org.opensearch.common.blobstore.BlobStore; import org.opensearch.common.blobstore.fs.FsBlobContainer; import org.opensearch.common.blobstore.fs.FsBlobStore; -import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.io.PathUtils; import org.opensearch.common.lease.Releasable; @@ -77,9 +77,12 @@ import org.opensearch.common.util.BigArrays; import org.opensearch.common.util.io.IOUtils; import org.opensearch.common.xcontent.XContentType; -import org.opensearch.core.index.shard.ShardId; +import org.opensearch.core.common.bytes.BytesArray; import org.opensearch.core.index.Index; +import org.opensearch.core.index.shard.ShardId; +import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.env.TestEnvironment; import org.opensearch.index.IndexSettings; import org.opensearch.index.MapperTestUtils; import org.opensearch.index.VersionType; @@ -142,7 +145,9 @@ import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.repositories.blobstore.BlobStoreTestUtil; import org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase; +import org.opensearch.repositories.fs.FsRepository; import org.opensearch.snapshots.Snapshot; import org.opensearch.test.DummyShardLock; import org.opensearch.test.OpenSearchTestCase; @@ -460,7 +465,7 @@ protected IndexShard newShard( @Nullable EngineFactory engineFactory, Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, - Store remoteStore, + Path path, 
IndexingOperationListener... listeners
     ) throws IOException {
         // add node id as name to settings for proper logging
@@ -478,7 +483,7 @@ protected IndexShard newShard(
             globalCheckpointSyncer,
             retentionLeaseSyncer,
             EMPTY_EVENT_LISTENER,
-            remoteStore,
+            path,
             listeners
         );
     }
@@ -506,7 +511,7 @@ protected IndexShard newShard(
         Runnable globalCheckpointSyncer,
         RetentionLeaseSyncer retentionLeaseSyncer,
         IndexEventListener indexEventListener,
-        Store remoteStore,
+        Path remotePath,
         IndexingOperationListener... listeners
     ) throws IOException {
         return newShard(
@@ -521,7 +526,7 @@ protected IndexShard newShard(
             retentionLeaseSyncer,
             indexEventListener,
             SegmentReplicationCheckpointPublisher.EMPTY,
-            remoteStore,
+            remotePath,
             listeners
         );
     }
@@ -550,7 +555,7 @@ protected IndexShard newShard(
         RetentionLeaseSyncer retentionLeaseSyncer,
         IndexEventListener indexEventListener,
         SegmentReplicationCheckpointPublisher checkpointPublisher,
-        @Nullable Store remoteStore,
+        @Nullable Path remotePath,
         IndexingOperationListener... listeners
     ) throws IOException {
         final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
@@ -578,26 +583,32 @@ protected IndexShard newShard(
             Collections.emptyList(),
             clusterSettings
         );
-
+        Store remoteStore = null;
         RemoteRefreshSegmentPressureService remoteRefreshSegmentPressureService = null;
+        RepositoriesService mockRepoSvc = mock(RepositoriesService.class);
+
         if (indexSettings.isRemoteStoreEnabled()) {
-            if (remoteStore == null) {
-                Path remoteStorePath;
-                String remoteStoreRepository = indexSettings.getRemoteStoreRepository();
-                if (remoteStoreRepository != null && remoteStoreRepository.endsWith("__test")) {
-                    remoteStorePath = PathUtils.get(remoteStoreRepository.replace("__test", ""));
-                } else {
-                    remoteStorePath = createTempDir();
-                }
-                remoteStore = createRemoteStore(remoteStorePath, routing, indexMetadata);
+            String remoteStoreRepository = indexSettings.getRemoteStoreRepository();
+            // The remote path is derived from the repository setting. This is a hack for shards that are created using reset,
+            // since we can't get the remote path from IndexShard directly; the repository setting is used to carry it.
+ if (remoteStoreRepository != null && remoteStoreRepository.endsWith("__test")) { + remotePath = PathUtils.get(remoteStoreRepository.replace("__test", "")); + } else if (remotePath == null) { + remotePath = createTempDir(); } + + remoteStore = createRemoteStore(remotePath, routing, indexMetadata); + remoteRefreshSegmentPressureService = new RemoteRefreshSegmentPressureService(clusterService, indexSettings.getSettings()); + BlobStoreRepository repo = createRepository(remotePath); + when(mockRepoSvc.repository(any())).thenAnswer(invocationOnMock -> repo); } final BiFunction translogFactorySupplier = (settings, shardRouting) -> { if (settings.isRemoteTranslogStoreEnabled() && shardRouting.primary()) { + return new RemoteBlobStoreInternalTranslogFactory( - this::createRepositoriesService, + () -> mockRepoSvc, threadPool, settings.getRemoteStoreTranslogRepository() ); @@ -643,6 +654,39 @@ protected IndexShard newShard( return indexShard; } + private BlobStoreRepository createRepository(Path path) { + Settings settings = Settings.builder().put("location", path).build(); + RepositoryMetadata repositoryMetadata = new RepositoryMetadata(randomAlphaOfLength(10), FsRepository.TYPE, settings); + final ClusterService clusterService = BlobStoreTestUtil.mockClusterService(repositoryMetadata); + final FsRepository repository = new FsRepository( + repositoryMetadata, + createEnvironment(path), + xContentRegistry(), + clusterService, + new RecoverySettings(Settings.EMPTY, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)) + ) { + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually + } + }; + clusterService.addStateApplier(event -> repository.updateState(event.state())); + // Apply state once to initialize repo properly like RepositoriesService would + repository.updateState(clusterService.state()); + repository.start(); + return repository; + } + + private Environment createEnvironment(Path path) { + Path home = createTempDir(); + return TestEnvironment.newEnvironment( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), home.toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), path.toAbsolutePath()) + .build() + ); + } + protected RepositoriesService createRepositoriesService() { RepositoriesService repositoriesService = Mockito.mock(RepositoriesService.class); BlobStoreRepository repository = Mockito.mock(BlobStoreRepository.class); @@ -724,7 +768,7 @@ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Index current.indexSettings.getIndexMetadata(), current.engineFactory, current.engineConfigFactory, - current.remoteStore(), + null, listeners ); } @@ -743,7 +787,7 @@ protected IndexShard reinitShard( IndexMetadata indexMetadata, EngineFactory engineFactory, EngineConfigFactory engineConfigFactory, - Store remoteStore, + Path remotePath, IndexingOperationListener... 
listeners ) throws IOException { closeShards(current); @@ -758,7 +802,7 @@ protected IndexShard reinitShard( current.getGlobalCheckpointSyncer(), current.getRetentionLeaseSyncer(), EMPTY_EVENT_LISTENER, - remoteStore, + remotePath, listeners ); } From ff6a8851e64b09f9fd21a960ed19eb6eacf06e2f Mon Sep 17 00:00:00 2001 From: Ashish Date: Thu, 3 Aug 2023 20:02:43 +0530 Subject: [PATCH 19/24] Fix flaky test testStatsOnShardUnassigned in RemoteStoreStatsIT (#9057) --------- Signed-off-by: Ashish Singh --- .../opensearch/remotestore/RemoteStoreStatsIT.java | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java index 1c7f14701b3e7..bd546a01b0b88 100644 --- a/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/remotestore/RemoteStoreStatsIT.java @@ -9,6 +9,7 @@ package org.opensearch.remotestore; import org.junit.Before; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.cluster.remotestore.restore.RestoreRemoteStoreRequest; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStats; import org.opensearch.action.admin.cluster.remotestore.stats.RemoteStoreStatsRequestBuilder; @@ -22,6 +23,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.index.remote.RemoteSegmentTransferTracker; +import org.opensearch.test.InternalTestCluster; import org.opensearch.test.OpenSearchIntegTestCase; import java.io.IOException; @@ -443,12 +445,19 @@ public void testStatsOnShardUnassigned() throws IOException { createIndex(INDEX_NAME, remoteStoreIndexSettings(2, 1)); ensureGreen(INDEX_NAME); indexDocs(); - int dataNodeCountBeforeStop = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); - internalCluster().stopRandomDataNode(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().get(); + int dataNodeCountBeforeStop = clusterHealthResponse.getNumberOfDataNodes(); + int nodeCount = clusterHealthResponse.getNumberOfNodes(); + String nodeToBeStopped = randomBoolean() ? primaryNodeName(INDEX_NAME) : replicaNodeName(INDEX_NAME); + internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeToBeStopped)); + ensureYellowAndNoInitializingShards(INDEX_NAME); + ensureStableCluster(nodeCount - 1); RemoteStoreStatsResponse response = client().admin().cluster().prepareRemoteStoreStats(INDEX_NAME, "0").get(); int dataNodeCountAfterStop = client().admin().cluster().prepareHealth().get().getNumberOfDataNodes(); assertEquals(dataNodeCountBeforeStop, response.getTotalShards()); assertEquals(dataNodeCountAfterStop, response.getSuccessfulShards()); + // Indexing docs to ensure that the primary has started + indexSingleDoc(INDEX_NAME); } public void testStatsOnRemoteStoreRestore() throws IOException { From c3acf47b4d643c3a3ab86dc3b07fe722ac6e4982 Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Thu, 3 Aug 2023 09:47:30 -0700 Subject: [PATCH 20/24] Fix test testDropPrimaryDuringReplication and clean up ReplicationCheckpoint validation (#8889) * Fix test testDropPrimaryDuringReplication and clean up ReplicationCheckpoint validation. This test is now occasionally failing with replicas having 0 documents. This occurs in a couple of ways: 1. 
After dropping the old primary, the new primary does not publish a checkpoint to replicas
unless it indexes docs from the translog after flipping to primary mode. If there is nothing
to index, it will not publish a checkpoint, but the other replica may never have synced with
the original primary and would be left out of date.
- This PR fixes this by force-publishing a checkpoint after the new primary flips to primary mode.
2. The replica receives a checkpoint post failover and cancels its sync with the former
primary that is still active, recognizing a primary term bump. However, this cancellation is
async, and immediately starting a new replication event could fail as it's still replicating.
- This PR fixes this by attempting to process the latest received checkpoint on failure, if
the shard is not failed and still behind.

This PR also introduces a few changes to ensure the accuracy of the ReplicationCheckpoint
tracked on primary & replicas.
- Ensure the checkpoint stored in SegmentReplicationTarget is the checkpoint passed from the
primary and not locally computed. This ensures checks for primary term are accurate and not
using a locally computed operationPrimaryTerm.
- Introduces a refresh listener for both primary & replica to update the ReplicationCheckpoint
and store it in the replicationTracker post refresh rather than redundantly computing it when
accessed.
- Removes the unnecessary onCheckpointPublished method used to start replication timers
manually. This will happen automatically on primaries once their local checkpoint is updated.

Signed-off-by: Marc Handalian

* Handle NoSuchFileException when attempting to delete decref'd files.

To avoid divergent logic with remote store, we always incref/decref the
segmentInfos.files(true) which includes the segments_N file. Decref to 0 will attempt to
delete the file from the store, and it's possible this _N file does not yet exist. This
change ignores a NoSuchFileException encountered while attempting the delete.

Signed-off-by: Marc Handalian

* Add more unit tests.

Signed-off-by: Marc Handalian

* Clean up IndexShardTests.testCheckpointReffreshListenerWithNull

Signed-off-by: Marc Handalian

* Remove unnecessary catch for NoSuchFileException.

Signed-off-by: Marc Handalian

* Add another test for non segrep.

Signed-off-by: Marc Handalian

* PR Feedback.

Signed-off-by: Marc Handalian

* re-compute replication checkpoint on primary promotion.
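A minimal, self-contained sketch of the refresh-listener pattern described above, assuming a
tracker reduced to a single counter; the actual change wires a ReplicationCheckpointUpdater
into IndexShard's internal refresh listeners and stores the computed checkpoint in the
ReplicationTracker:

import org.apache.lucene.search.ReferenceManager;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

public class CheckpointUpdaterSketch implements ReferenceManager.RefreshListener {

    // Stand-in for the shard's replication tracker: caches the checkpoint
    // (simplified here to a generation counter) recomputed after each refresh.
    private final AtomicLong latestCheckpointGen = new AtomicLong(-1);

    @Override
    public void beforeRefresh() throws IOException {
        // nothing to do before a refresh
    }

    @Override
    public void afterRefresh(boolean didRefresh) throws IOException {
        if (didRefresh) {
            // Recompute once per refresh and cache the result, instead of
            // redundantly recomputing the checkpoint every time it is read.
            latestCheckpointGen.incrementAndGet();
        }
    }

    public long latestCheckpointGeneration() {
        return latestCheckpointGen.get();
    }
}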
Signed-off-by: Marc Handalian --------- Signed-off-by: Marc Handalian --- .../replication/SegmentReplicationIT.java | 10 +- .../index/engine/NRTReplicationEngine.java | 15 +++ .../opensearch/index/shard/IndexShard.java | 69 +++++++---- .../shard/RemoteStoreRefreshListener.java | 5 +- .../replication/SegmentReplicationTarget.java | 20 +++- .../SegmentReplicationTargetService.java | 107 +++++++++++------- ...SegmentReplicationCheckpointPublisher.java | 1 - .../engine/NRTReplicationEngineTests.java | 72 ++++++++++++ .../index/shard/IndexShardTests.java | 51 +-------- .../SegmentReplicationIndexShardTests.java | 79 ++++++++++++- ...licationWithNodeToNodeIndexShardTests.java | 16 ++- .../SegmentReplicationTargetServiceTests.java | 90 +++++++++------ .../SegmentReplicationTargetTests.java | 14 +-- .../recovery/ReplicationCollectionTests.java | 2 + .../index/shard/IndexShardTestCase.java | 60 +++++++++- 15 files changed, 435 insertions(+), 176 deletions(-) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 08186bf3f9362..72b6a0296e3bb 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -44,6 +44,8 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.core.common.io.stream.NamedWriteableRegistry; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; import org.opensearch.common.settings.Settings; @@ -60,6 +62,7 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.core.index.shard.ShardId; import org.opensearch.indices.recovery.FileChunkRequest; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.search.SearchService; import org.opensearch.search.builder.PointInTimeBuilder; @@ -983,8 +986,11 @@ public void testScrollCreatedOnReplica() throws Exception { ) ); final IndexShard replicaShard = getIndexShard(replica, INDEX_NAME); - final SegmentInfos segmentInfos = replicaShard.getLatestSegmentInfosAndCheckpoint().v1().get(); - final Collection snapshottedSegments = segmentInfos.files(false); + final Tuple, ReplicationCheckpoint> tuple = replicaShard.getLatestSegmentInfosAndCheckpoint(); + final Collection snapshottedSegments; + try (final GatedCloseable closeable = tuple.v1()) { + snapshottedSegments = closeable.get().files(false); + } // opens a scrolled query before a flush is called. 
// this is for testing scroll segment consistency between refresh and flush SearchResponse searchResponse = client(replica).prepareSearch() diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java index e852658d7b3ba..6b09b8d86dc6c 100644 --- a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -34,6 +34,7 @@ import java.io.Closeable; import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Objects; @@ -445,6 +446,20 @@ protected SegmentInfos getLatestSegmentInfos() { return readerManager.getSegmentInfos(); } + @Override + public synchronized GatedCloseable getSegmentInfosSnapshot() { + // get reference to latest infos + final SegmentInfos latestSegmentInfos = getLatestSegmentInfos(); + // incref all files + try { + final Collection files = latestSegmentInfos.files(false); + store.incRefFileDeleter(files); + return new GatedCloseable<>(latestSegmentInfos, () -> store.decRefFileDeleter(files)); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } + } + protected LocalCheckpointTracker getLocalCheckpointTracker() { return localCheckpointTracker; } diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 2b85193275a13..ace6ed56c007c 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -677,8 +677,19 @@ public void updateShardState( // this Shard's engine was read only, we need to update its engine before restoring local history from xlog. assert newRouting.primary() && currentRouting.primary() == false; resetEngineToGlobalCheckpoint(); + // It is possible an engine can open with a SegmentInfos on a higher gen but the reader does not refresh to + // trigger our refresh listener. + // Force update the checkpoint post engine reset. + updateReplicationCheckpoint(); } + replicationTracker.activatePrimaryMode(getLocalCheckpoint()); + if (indexSettings.isSegRepEnabled()) { + // force publish a checkpoint once in primary mode so that replicas not caught up to previous primary + // are brought up to date. 
+ checkpointPublisher.publish(this, getLatestReplicationCheckpoint()); + } + ensurePeerRecoveryRetentionLeasesExist(); /* * If this shard was serving as a replica shard when another shard was promoted to primary then @@ -1551,15 +1562,7 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti * @return EMPTY checkpoint before the engine is opened and null for non-segrep enabled indices */ public ReplicationCheckpoint getLatestReplicationCheckpoint() { - final Tuple, ReplicationCheckpoint> infosAndCheckpoint = getLatestSegmentInfosAndCheckpoint(); - if (infosAndCheckpoint == null) { - return null; - } - try (final GatedCloseable ignored = infosAndCheckpoint.v1()) { - return infosAndCheckpoint.v2(); - } catch (IOException e) { - throw new OpenSearchException("Error Closing SegmentInfos Snapshot", e); - } + return replicationTracker.getLatestReplicationCheckpoint(); } /** @@ -1573,13 +1576,11 @@ public ReplicationCheckpoint getLatestReplicationCheckpoint() { * */ public Tuple, ReplicationCheckpoint> getLatestSegmentInfosAndCheckpoint() { - if (indexSettings.isSegRepEnabled() == false) { - return null; - } + assert indexSettings.isSegRepEnabled(); Tuple, ReplicationCheckpoint> nullSegmentInfosEmptyCheckpoint = new Tuple<>( new GatedCloseable<>(null, () -> {}), - ReplicationCheckpoint.empty(shardId, getDefaultCodecName()) + getLatestReplicationCheckpoint() ); if (getEngineOrNull() == null) { @@ -1598,11 +1599,7 @@ public Tuple, ReplicationCheckpoint> getLatestSegme getOperationPrimaryTerm(), segmentInfos.getGeneration(), segmentInfos.getVersion(), - // TODO: Update replicas to compute length from SegmentInfos. Replicas do not yet incref segments with - // getSegmentInfosSnapshot, so computing length from SegmentInfos can cause issues. - shardRouting.primary() - ? store.getSegmentMetadataMap(segmentInfos).values().stream().mapToLong(StoreFileMetadata::length).sum() - : store.stats(StoreStats.UNKNOWN_RESERVED_BYTES).getSizeInBytes(), + store.getSegmentMetadataMap(segmentInfos).values().stream().mapToLong(StoreFileMetadata::length).sum(), getEngine().config().getCodec().getName() ) ); @@ -1858,10 +1855,6 @@ public void resetToWriteableEngine() throws IOException, InterruptedException, T indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { resetEngineToGlobalCheckpoint(); }); } - public void onCheckpointPublished(ReplicationCheckpoint checkpoint) { - replicationTracker.setLatestReplicationCheckpoint(checkpoint); - } - /** * Wrapper for a non-closing reader * @@ -2342,6 +2335,11 @@ private void innerOpenEngineAndTranslog(LongSupplier globalCheckpointSupplier, b final Engine newEngine = engineFactory.newReadWriteEngine(config); onNewEngine(newEngine); currentEngineReference.set(newEngine); + + if (indexSettings.isSegRepEnabled()) { + // set initial replication checkpoints into tracker. + updateReplicationCheckpoint(); + } // We set active because we are now writing operations to the engine; this way, // we can flush if we go idle after some time and become inactive. 
active.set(true); @@ -3667,6 +3665,9 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) thro internalRefreshListener.clear(); internalRefreshListener.add(new RefreshMetricUpdater(refreshMetric)); + if (indexSettings.isSegRepEnabled()) { + internalRefreshListener.add(new ReplicationCheckpointUpdater()); + } if (this.checkpointPublisher != null && shardRouting.primary() && indexSettings.isSegRepLocalEnabled()) { internalRefreshListener.add(new CheckpointRefreshListener(this, this.checkpointPublisher)); } @@ -4471,6 +4472,30 @@ public void afterRefresh(boolean didRefresh) throws IOException { } } + /** + * Refresh listener to update the Shard's ReplicationCheckpoint post refresh. + */ + private class ReplicationCheckpointUpdater implements ReferenceManager.RefreshListener { + @Override + public void beforeRefresh() throws IOException {} + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if (didRefresh) { + updateReplicationCheckpoint(); + } + } + } + + private void updateReplicationCheckpoint() { + final Tuple, ReplicationCheckpoint> tuple = getLatestSegmentInfosAndCheckpoint(); + try (final GatedCloseable ignored = tuple.v1()) { + replicationTracker.setLatestReplicationCheckpoint(tuple.v2()); + } catch (IOException e) { + throw new OpenSearchException("Error Closing SegmentInfos Snapshot", e); + } + } + private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() { final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop"); final DocumentMapper noopDocumentMapper = mapperService != null diff --git a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java index 8dd0c8b9d4405..d56054dd1c42b 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoteStoreRefreshListener.java @@ -185,7 +185,6 @@ private synchronized boolean syncSegments() { return true; } ReplicationCheckpoint checkpoint = indexShard.getLatestReplicationCheckpoint(); - indexShard.onCheckpointPublished(checkpoint); beforeSegmentsSync(); long refreshTimeMs = segmentTracker.getLocalRefreshTimeMs(), refreshClockTimeMs = segmentTracker.getLocalRefreshClockTimeMs(); long refreshSeqNo = segmentTracker.getLocalRefreshSeqNo(); @@ -207,6 +206,10 @@ private synchronized boolean syncSegments() { try (GatedCloseable segmentInfosGatedCloseable = indexShard.getSegmentInfosSnapshot()) { SegmentInfos segmentInfos = segmentInfosGatedCloseable.get(); + assert segmentInfos.getGeneration() == checkpoint.getSegmentsGen() : "SegmentInfos generation: " + + segmentInfos.getGeneration() + + " does not match metadata generation: " + + checkpoint.getSegmentsGen(); // Capture replication checkpoint before uploading the segments as upload can take some time and checkpoint can // move. 
long lastRefreshedCheckpoint = ((InternalEngine) indexShard.getEngine()).lastRefreshedCheckpoint(); diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java index c22701dfc94ce..3a84163bb979d 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTarget.java @@ -51,9 +51,14 @@ public class SegmentReplicationTarget extends ReplicationTarget { public final static String REPLICATION_PREFIX = "replication."; - public SegmentReplicationTarget(IndexShard indexShard, SegmentReplicationSource source, ReplicationListener listener) { + public SegmentReplicationTarget( + IndexShard indexShard, + ReplicationCheckpoint checkpoint, + SegmentReplicationSource source, + ReplicationListener listener + ) { super("replication_target", indexShard, new ReplicationLuceneIndex(), listener); - this.checkpoint = indexShard.getLatestReplicationCheckpoint(); + this.checkpoint = checkpoint; this.source = source; this.state = new SegmentReplicationState( indexShard.routingEntry(), @@ -90,12 +95,19 @@ public SegmentReplicationState state() { } public SegmentReplicationTarget retryCopy() { - return new SegmentReplicationTarget(indexShard, source, listener); + return new SegmentReplicationTarget(indexShard, checkpoint, source, listener); } @Override public String description() { - return String.format(Locale.ROOT, "Id:[%d] Shard:[%s] Source:[%s]", getId(), shardId(), source.getDescription()); + return String.format( + Locale.ROOT, + "Id:[%d] Checkpoint [%s] Shard:[%s] Source:[%s]", + getId(), + getCheckpoint(), + shardId(), + source.getDescription() + ); } @Override diff --git a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java index b41c9e09add45..84d6a722e572e 100644 --- a/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java +++ b/server/src/main/java/org/opensearch/indices/replication/SegmentReplicationTargetService.java @@ -234,7 +234,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe logger.trace( () -> new ParameterizedMessage( "Ignoring new replication checkpoint - shard is currently replicating to checkpoint {}", - replicaShard.getLatestReplicationCheckpoint() + ongoingReplicationTarget.getCheckpoint() ) ); return; @@ -242,7 +242,7 @@ public synchronized void onNewCheckpoint(final ReplicationCheckpoint receivedChe } final Thread thread = Thread.currentThread(); if (replicaShard.shouldProcessCheckpoint(receivedCheckpoint)) { - startReplication(replicaShard, new SegmentReplicationListener() { + startReplication(replicaShard, receivedCheckpoint, new SegmentReplicationListener() { @Override public void onReplicationDone(SegmentReplicationState state) { logger.trace( @@ -280,6 +280,8 @@ public void onReplicationFailure( ); if (sendShardFailure == true) { failShard(e, replicaShard); + } else { + processLatestReceivedCheckpoint(replicaShard, thread); } } }); @@ -396,8 +398,24 @@ protected void updateLatestReceivedCheckpoint(ReplicationCheckpoint receivedChec } } - public SegmentReplicationTarget startReplication(final IndexShard indexShard, final SegmentReplicationListener listener) { - final SegmentReplicationTarget target = new 
SegmentReplicationTarget(indexShard, sourceFactory.get(indexShard), listener); + /** + * Start a round of replication and sync to at least the given checkpoint. + * @param indexShard - {@link IndexShard} replica shard + * @param checkpoint - {@link ReplicationCheckpoint} checkpoint to sync to + * @param listener - {@link ReplicationListener} + * @return {@link SegmentReplicationTarget} target event orchestrating the event. + */ + public SegmentReplicationTarget startReplication( + final IndexShard indexShard, + final ReplicationCheckpoint checkpoint, + final SegmentReplicationListener listener + ) { + final SegmentReplicationTarget target = new SegmentReplicationTarget( + indexShard, + checkpoint, + sourceFactory.get(indexShard), + listener + ); startReplication(target); return target; } @@ -529,50 +547,59 @@ private void forceReplication(ForceSyncRequest request, ActionListener new ParameterizedMessage( + "[shardId {}] [replication id {}] Force replication Sync complete to {}, timing data: {}", + shardId, + state.getReplicationId(), + indexShard.getLatestReplicationCheckpoint(), + state.getTimingData() + ) + ); + // Promote engine type for primary target + if (indexShard.recoveryState().getPrimary() == true) { + indexShard.resetToWriteableEngine(); + } else { + // Update the replica's checkpoint on primary's replication tracker. + updateVisibleCheckpoint(state.getReplicationId(), indexShard); + } + listener.onResponse(TransportResponse.Empty.INSTANCE); + } catch (Exception e) { + logger.error("Error while marking replication completed", e); + listener.onFailure(e); + } + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + logger.error( () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Force replication Sync complete to {}, timing data: {}", - shardId, + "[shardId {}] [replication id {}] Force replication Sync failed, timing data: {}", + indexShard.shardId().getId(), state.getReplicationId(), - indexShard.getLatestReplicationCheckpoint(), state.getTimingData() - ) + ), + e ); - // Promote engine type for primary target - if (indexShard.recoveryState().getPrimary() == true) { - indexShard.resetToWriteableEngine(); - } else { - // Update the replica's checkpoint on primary's replication tracker. 
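// A hedged usage sketch of the new three-argument startReplication introduced above,
// assuming targetService, replicaShard and receivedCheckpoint are in scope. Passing the
// checkpoint explicitly pins the round to the state that triggered it, instead of
// re-reading the shard's latest checkpoint, which may have moved by the time the
// target is constructed.
SegmentReplicationTarget target = targetService.startReplication(
    replicaShard,
    receivedCheckpoint, // sync to at least this checkpoint
    new SegmentReplicationTargetService.SegmentReplicationListener() {
        @Override
        public void onReplicationDone(SegmentReplicationState state) {
            // the replica is now at or ahead of receivedCheckpoint
        }

        @Override
        public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) {
            // on a non-fatal failure the service can replay the latest received checkpoint
        }
    }
);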
- updateVisibleCheckpoint(state.getReplicationId(), indexShard); + if (sendShardFailure) { + failShard(e, indexShard); } - listener.onResponse(TransportResponse.Empty.INSTANCE); - } catch (Exception e) { - logger.error("Error while marking replication completed", e); listener.onFailure(e); } } - - @Override - public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - logger.error( - () -> new ParameterizedMessage( - "[shardId {}] [replication id {}] Replication failed, timing data: {}", - indexShard.shardId().getId(), - state.getReplicationId(), - state.getTimingData() - ), - e - ); - if (sendShardFailure) { - failShard(e, indexShard); - } - listener.onFailure(e); - } - }); + ); } } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java index b4bcdc92e539a..f5cb32b741862 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java @@ -34,7 +34,6 @@ public SegmentReplicationCheckpointPublisher(PublishAction publishAction) { public void publish(IndexShard indexShard, ReplicationCheckpoint checkpoint) { publishAction.publish(indexShard, checkpoint); - indexShard.onCheckpointPublished(checkpoint); } /** diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java index 64fe42493c686..4c87df48f583f 100644 --- a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -12,7 +12,9 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.store.IOContext; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.common.UUIDs; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -28,6 +30,8 @@ import java.io.IOException; import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Map; import java.util.Set; @@ -367,4 +371,72 @@ private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, Store store) throws IOException { return buildNrtReplicaEngine(globalCheckpoint, store, defaultSettings); } + + public void testGetSegmentInfosSnapshotPreservesFilesUntilRelease() throws Exception { + final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); + + // TODO: Remove this divergent commit logic and copy Segments_N from primary with node-node. + // randomly toggle commit / no commit. 
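// A minimal sketch of the acquire/release contract the new test exercises, assuming
// GatedCloseable<SegmentInfos> as returned by getSegmentInfosSnapshot(): files referenced
// by an open snapshot must stay on disk until close() releases them.
try (GatedCloseable<SegmentInfos> snapshot = nrtEngine.getSegmentInfosSnapshot()) {
    Collection<String> pinned = snapshot.get().files(false);
    // while the snapshot is open, merges and deletes must not remove any file in `pinned`
}
// after close(), the pinned files become deletable unless a newer SegmentInfos references them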
+ IndexSettings settings = REMOTE_STORE_INDEX_SETTINGS; + final boolean shouldCommit = randomBoolean(); + if (shouldCommit) { + settings = INDEX_SETTINGS; + } + try ( + final Store nrtEngineStore = createStore(REMOTE_STORE_INDEX_SETTINGS, newDirectory()); + final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore, settings) + ) { + // only index 2 docs here, this will create segments _0 and _1 and after forcemerge into _2. + final int docCount = 2; + List operations = generateHistoryOnReplica(docCount, randomBoolean(), randomBoolean(), randomBoolean()); + for (Engine.Operation op : operations) { + applyOperation(engine, op); + applyOperation(nrtEngine, op); + // refresh to create a lot of segments. + engine.refresh("test"); + } + assertEquals(2, engine.segmentsStats(false, false).getCount()); + // wipe the nrt directory initially so we can sync with primary. + Lucene.cleanLuceneIndex(nrtEngineStore.directory()); + assertFalse( + Arrays.stream(nrtEngineStore.directory().listAll()) + .anyMatch(file -> file.equals("write.lock") == false && file.equals("extra0") == false) + ); + for (String file : engine.getLatestSegmentInfos().files(true)) { + nrtEngineStore.directory().copyFrom(store.directory(), file, file, IOContext.DEFAULT); + } + nrtEngine.updateSegments(engine.getLatestSegmentInfos()); + assertEquals(engine.getLatestSegmentInfos(), nrtEngine.getLatestSegmentInfos()); + final GatedCloseable snapshot = nrtEngine.getSegmentInfosSnapshot(); + final Collection replicaSnapshotFiles = snapshot.get().files(false); + List replicaFiles = List.of(nrtEngine.store.directory().listAll()); + + // merge primary down to 1 segment + engine.forceMerge(true, 1, false, false, false, UUIDs.randomBase64UUID()); + // we expect a 3rd segment to be created after merge. + assertEquals(3, engine.segmentsStats(false, false).getCount()); + final Collection latestPrimaryFiles = engine.getLatestSegmentInfos().files(false); + + // copy new segments in and load reader. + for (String file : latestPrimaryFiles) { + if (replicaFiles.contains(file) == false) { + nrtEngineStore.directory().copyFrom(store.directory(), file, file, IOContext.DEFAULT); + } + } + nrtEngine.updateSegments(engine.getLatestSegmentInfos()); + + replicaFiles = List.of(nrtEngine.store.directory().listAll()); + assertTrue(replicaFiles.containsAll(replicaSnapshotFiles)); + + // close snapshot, files should be cleaned up + snapshot.close(); + + replicaFiles = List.of(nrtEngine.store.directory().listAll()); + assertFalse(replicaFiles.containsAll(replicaSnapshotFiles)); + + // Ensure we still have all the active files. Note - we exclude the infos file here if we aren't committing + // the nrt reader will still reference segments_n-1 after being loaded until a local commit occurs. 
+ assertTrue(replicaFiles.containsAll(nrtEngine.getLatestSegmentInfos().files(shouldCommit))); + } + } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index f1ae4b16d9bad..8ffeb5d689f55 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -3753,7 +3753,7 @@ public void testReadSnapshotConcurrently() throws IOException, InterruptedExcept */ public void testCheckpointRefreshListener() throws IOException { final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); - IndexShard shard = newStartedShard(p -> newShard(mock), true); + IndexShard shard = newStartedShard(p -> newShard(true, mock), true); List refreshListeners = shard.getEngine().config().getInternalRefreshListener(); assertTrue(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener)); closeShards(shard); @@ -3763,58 +3763,13 @@ public void testCheckpointRefreshListener() throws IOException { * here we are passing null in place of SegmentReplicationCheckpointPublisher and testing on index shard if CheckpointRefreshListener is not added to the InternalrefreshListerners List */ public void testCheckpointRefreshListenerWithNull() throws IOException { - IndexShard shard = newStartedShard(p -> newShard(null), true); + final SegmentReplicationCheckpointPublisher publisher = null; + IndexShard shard = newStartedShard(p -> newShard(true, publisher), true); List refreshListeners = shard.getEngine().config().getInternalRefreshListener(); assertFalse(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener)); closeShards(shard); } - /** - * creates a new initializing shard. The shard will be put in its proper path under the - * current node id the shard is assigned to. 
- * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint - */ - private IndexShard newShard(SegmentReplicationCheckpointPublisher checkpointPublisher) throws IOException { - final ShardId shardId = new ShardId("index", "_na_", 0); - final ShardRouting shardRouting = TestShardRouting.newShardRouting( - shardId, - randomAlphaOfLength(10), - true, - ShardRoutingState.INITIALIZING, - RecoverySource.EmptyStoreRecoverySource.INSTANCE - ); - final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); - ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); - - Settings indexSettings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT") - .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) - .put(Settings.EMPTY) - .build(); - IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName()) - .settings(indexSettings) - .primaryTerm(0, primaryTerm) - .putMapping("{ \"properties\": {} }") - .build(); - return newShard( - shardRouting, - shardPath, - metadata, - null, - null, - new InternalEngineFactory(), - new EngineConfigFactory(new IndexSettings(metadata, metadata.getSettings())), - () -> {}, - RetentionLeaseSyncer.EMPTY, - EMPTY_EVENT_LISTENER, - checkpointPublisher, - null - ); - } - public void testIndexCheckOnStartup() throws Exception { final IndexShard indexShard = newStartedShard(true); diff --git a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java index 9c02f430e4e6d..58ae4b404e69c 100644 --- a/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/SegmentReplicationIndexShardTests.java @@ -19,6 +19,7 @@ import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingHelper; import org.opensearch.common.collect.Tuple; @@ -63,6 +64,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -423,7 +425,70 @@ public void testShardIdleWithNoReplicas() throws Exception { /** * here we are starting a new primary shard in PrimaryMode and testing if the shard publishes checkpoint after refresh. 
*/ - public void testPublishCheckpointOnPrimaryMode() throws IOException { + public void testPublishCheckpointOnPrimaryMode() throws IOException, InterruptedException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + IndexShard shard = newStartedShard(p -> newShard(false, mock, settings), false); + + final ShardRouting shardRouting = shard.routingEntry(); + promoteReplica( + shard, + Collections.singleton(shardRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(shardRouting.shardId()).addShard(shardRouting).build() + ); + + final CountDownLatch latch = new CountDownLatch(1); + shard.acquirePrimaryOperationPermit(new ActionListener() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + // verify checkpoint is published + verify(mock, times(1)).publish(any(), any()); + closeShards(shard); + } + + public void testPublishCheckpointOnPrimaryMode_segrep_off() throws IOException, InterruptedException { + final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.DOCUMENT).build(); + IndexShard shard = newStartedShard(p -> newShard(false, mock, settings), false); + + final ShardRouting shardRouting = shard.routingEntry(); + promoteReplica( + shard, + Collections.singleton(shardRouting.allocationId().getId()), + new IndexShardRoutingTable.Builder(shardRouting.shardId()).addShard(shardRouting).build() + ); + + final CountDownLatch latch = new CountDownLatch(1); + shard.acquirePrimaryOperationPermit(new ActionListener() { + @Override + public void onResponse(Releasable releasable) { + releasable.close(); + latch.countDown(); + } + + @Override + public void onFailure(Exception e) { + throw new RuntimeException(e); + } + }, ThreadPool.Names.GENERIC, ""); + + latch.await(); + // verify checkpoint is published + verify(mock, times(0)).publish(any(), any()); + closeShards(shard); + } + + public void testPublishCheckpointPostFailover() throws IOException { final SegmentReplicationCheckpointPublisher mock = mock(SegmentReplicationCheckpointPublisher.class); IndexShard shard = newStartedShard(true); CheckpointRefreshListener refreshListener = new CheckpointRefreshListener(shard, mock); @@ -482,7 +547,7 @@ public void testRejectCheckpointOnShardRoutingPrimary() throws IOException { spy.onNewCheckpoint(new ReplicationCheckpoint(primaryShard.shardId(), 0L, 0L, 0L, Codec.getDefault().getName()), spyShard); // Verify that checkpoint is not processed as shard routing is primary. 
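// For context, a hedged sketch of the ordering test behind shouldProcessCheckpoint(...):
// a received checkpoint is only actionable when strictly ahead of the local one, compared
// by primary term first and SegmentInfos version second. The accessors match
// ReplicationCheckpoint as used in this patch; the real method additionally checks shard
// state and primary mode, so treat this as illustrative rather than the exact implementation.
static boolean isAheadOfLocal(ReplicationCheckpoint local, ReplicationCheckpoint received) {
    if (received.getPrimaryTerm() != local.getPrimaryTerm()) {
        return received.getPrimaryTerm() > local.getPrimaryTerm();
    }
    return received.getSegmentInfosVersion() > local.getSegmentInfosVersion();
}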
- verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } @@ -656,7 +721,7 @@ public void cancel() { } }; when(sourceFactory.get(any())).thenReturn(source); - startReplicationAndAssertCancellation(replica, targetService); + startReplicationAndAssertCancellation(replica, primary, targetService); shards.removeReplica(replica); closeShards(replica); @@ -736,11 +801,15 @@ protected void resolveCheckpointInfoResponseListener(ActionListener() { @@ -422,7 +422,11 @@ public void testTemporaryFilesNotCleanup() throws Exception { runnablePostGetFiles ); when(sourceFactory.get(any())).thenReturn(segmentReplicationSource); - targetService.startReplication(replica, getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch)); + targetService.startReplication( + replica, + primaryShard.getLatestReplicationCheckpoint(), + getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch) + ); countDownLatch.await(30, TimeUnit.SECONDS); assertEquals("Replication failed", 0, countDownLatch.getCount()); shards.assertAllEqual(numDocs); diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java index efb8dda201e87..4a04a64196918 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetServiceTests.java @@ -84,6 +84,7 @@ public class SegmentReplicationTargetServiceTests extends IndexShardTestCase { private IndicesService indicesService; private SegmentReplicationState state; + private ReplicationCheckpoint initialCheckpoint; private static final long TRANSPORT_TIMEOUT = 30000;// 30sec @@ -130,7 +131,7 @@ public void setUp() throws Exception { when(clusterState.nodes()).thenReturn(DiscoveryNodes.builder().add(localNode).build()); sut = prepareForReplication(primaryShard, replicaShard, transportService, indicesService, clusterService); - ReplicationCheckpoint initialCheckpoint = replicaShard.getLatestReplicationCheckpoint(); + initialCheckpoint = primaryShard.getLatestReplicationCheckpoint(); aheadCheckpoint = new ReplicationCheckpoint( initialCheckpoint.getShardId(), initialCheckpoint.getPrimaryTerm(), @@ -165,19 +166,23 @@ public void tearDown() throws Exception { public void testsSuccessfulReplication_listenerCompletes() throws InterruptedException { CountDownLatch latch = new CountDownLatch(1); - sut.startReplication(replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); - latch.countDown(); - } + sut.startReplication( + replicaShard, + primaryShard.getLatestReplicationCheckpoint(), + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + assertEquals(SegmentReplicationState.Stage.DONE, state.getStage()); + latch.countDown(); + } - @Override - public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - logger.error("Unexpected error", e); - Assert.fail("Test should succeed"); + @Override + public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean 
sendShardFailure) { + logger.error("Unexpected error", e); + Assert.fail("Test should succeed"); + } } - }); + ); latch.await(2, TimeUnit.SECONDS); assertEquals(0, latch.getCount()); } @@ -209,6 +214,7 @@ public void getSegmentFiles( }; final SegmentReplicationTarget target = new SegmentReplicationTarget( replicaShard, + primaryShard.getLatestReplicationCheckpoint(), source, new SegmentReplicationTargetService.SegmentReplicationListener() { @Override @@ -233,7 +239,7 @@ public void onReplicationFailure(SegmentReplicationState state, ReplicationFaile public void testAlreadyOnNewCheckpoint() { SegmentReplicationTargetService spy = spy(sut); spy.onNewCheckpoint(replicaShard.getLatestReplicationCheckpoint(), replicaShard); - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); } public void testShardAlreadyReplicating() { @@ -271,24 +277,22 @@ public void getSegmentFiles( } }; final SegmentReplicationTarget target = spy( - new SegmentReplicationTarget(replicaShard, source, mock(SegmentReplicationTargetService.SegmentReplicationListener.class)) + new SegmentReplicationTarget( + replicaShard, + primaryShard.getLatestReplicationCheckpoint(), + source, + mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + ) ); + + final SegmentReplicationTargetService spy = spy(sut); + doReturn(false).when(spy).processLatestReceivedCheckpoint(eq(replicaShard), any()); // Start first round of segment replication. - sut.startReplication(target); + spy.startReplication(target); // Start second round of segment replication, this should fail to start as first round is still in-progress - sut.startReplication(replicaShard, new SegmentReplicationTargetService.SegmentReplicationListener() { - @Override - public void onReplicationDone(SegmentReplicationState state) { - Assert.fail("Should not succeed"); - } - - @Override - public void onReplicationFailure(SegmentReplicationState state, ReplicationFailedException e, boolean sendShardFailure) { - assertEquals("Shard " + replicaShard.shardId() + " is already replicating", e.getMessage()); - assertFalse(sendShardFailure); - } - }); + spy.onNewCheckpoint(newPrimaryCheckpoint, replicaShard); + verify(spy, times(1)).processLatestReceivedCheckpoint(eq(replicaShard), any()); blockGetCheckpointMetadata.countDown(); } @@ -337,8 +341,21 @@ public void cancel() { } }; + final ReplicationCheckpoint updatedCheckpoint = new ReplicationCheckpoint( + initialCheckpoint.getShardId(), + initialCheckpoint.getPrimaryTerm(), + initialCheckpoint.getSegmentsGen(), + initialCheckpoint.getSegmentInfosVersion() + 1, + primaryShard.getDefaultCodecName() + ); + final SegmentReplicationTarget targetSpy = spy( - new SegmentReplicationTarget(replicaShard, source, mock(SegmentReplicationTargetService.SegmentReplicationListener.class)) + new SegmentReplicationTarget( + replicaShard, + updatedCheckpoint, + source, + mock(SegmentReplicationTargetService.SegmentReplicationListener.class) + ) ); // start replication. This adds the target to on-ongoing replication collection @@ -352,20 +369,20 @@ public void cancel() { // ensure the old target is cancelled. and new iteration kicks off. 
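// A hedged sketch of the replay pattern these tests lean on: remember the newest checkpoint
// seen and, when a round ends without failing the shard, start a new round against it so the
// replica still converges. The latestReceived field and the listener are illustrative
// assumptions; shouldProcessCheckpoint and the three-argument startReplication match the
// patch above.
private final AtomicReference<ReplicationCheckpoint> latestReceived = new AtomicReference<>();

boolean processLatestReceivedCheckpointSketch(IndexShard replica) {
    final ReplicationCheckpoint cp = latestReceived.get();
    if (cp != null && replica.shouldProcessCheckpoint(cp)) {
        startReplication(replica, cp, listener); // next round targets the newest known checkpoint
        return true;
    }
    return false;
}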
verify(targetSpy, times(1)).cancel("Cancelling stuck target after new primary"); - verify(serviceSpy, times(1)).startReplication(eq(replicaShard), any()); + verify(serviceSpy, times(1)).startReplication(eq(replicaShard), any(), any()); } public void testNewCheckpointBehindCurrentCheckpoint() { SegmentReplicationTargetService spy = spy(sut); spy.onNewCheckpoint(checkpoint, replicaShard); - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); } public void testShardNotStarted() throws IOException { SegmentReplicationTargetService spy = spy(sut); IndexShard shard = newShard(false); spy.onNewCheckpoint(checkpoint, shard); - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(shard); } @@ -381,7 +398,7 @@ public void testRejectCheckpointOnShardPrimaryMode() throws IOException { spy.onNewCheckpoint(aheadCheckpoint, spyShard); // Verify that checkpoint is not processed as shard is in PrimaryMode. - verify(spy, times(0)).startReplication(any(), any()); + verify(spy, times(0)).startReplication(any(), any(), any()); closeShards(primaryShard); } @@ -406,10 +423,10 @@ public void testStartReplicationListenerSuccess() throws InterruptedException { SegmentReplicationTargetService spy = spy(sut); CountDownLatch latch = new CountDownLatch(1); doAnswer(i -> { - ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(1)).onReplicationDone(state); + ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(2)).onReplicationDone(state); latch.countDown(); return null; - }).when(spy).startReplication(any(), any()); + }).when(spy).startReplication(any(), any(), any()); doNothing().when(spy).updateVisibleCheckpoint(eq(0L), any()); spy.afterIndexShardStarted(replicaShard); @@ -422,14 +439,14 @@ public void testStartReplicationListenerFailure() throws InterruptedException { SegmentReplicationTargetService spy = spy(sut); CountDownLatch latch = new CountDownLatch(1); doAnswer(i -> { - ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(1)).onReplicationFailure( + ((SegmentReplicationTargetService.SegmentReplicationListener) i.getArgument(2)).onReplicationFailure( state, new ReplicationFailedException(replicaShard, null), false ); latch.countDown(); return null; - }).when(spy).startReplication(any(), any()); + }).when(spy).startReplication(any(), any(), any()); doNothing().when(spy).updateVisibleCheckpoint(eq(0L), any()); spy.afterIndexShardStarted(replicaShard); @@ -570,6 +587,7 @@ public void testForceSegmentSyncHandlerWithFailure_AlreadyClosedException_swallo public void testTargetCancelledBeforeStartInvoked() { final SegmentReplicationTarget target = new SegmentReplicationTarget( replicaShard, + primaryShard.getLatestReplicationCheckpoint(), mock(SegmentReplicationSource.class), new SegmentReplicationTargetService.SegmentReplicationListener() { @Override diff --git a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java index 176954b6d6b3d..5b996fd774baf 100644 --- a/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java +++ b/server/src/test/java/org/opensearch/indices/replication/SegmentReplicationTargetTests.java @@ -141,7 +141,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( 
SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); segrepTarget.startReplication(new ActionListener() { @Override @@ -189,7 +189,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); segrepTarget.startReplication(new ActionListener() { @Override @@ -232,7 +232,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); segrepTarget.startReplication(new ActionListener() { @Override @@ -275,7 +275,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); doThrow(exception).when(spyIndexShard).finalizeReplication(any()); @@ -320,7 +320,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); doThrow(exception).when(spyIndexShard).finalizeReplication(any()); @@ -364,7 +364,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener segRepListener = mock( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); when(spyIndexShard.getSegmentMetadataMap()).thenReturn(SI_SNAPSHOT_DIFFERENT); segrepTarget.startReplication(new ActionListener() { @Override @@ -417,7 +417,7 @@ public void getSegmentFiles( SegmentReplicationTargetService.SegmentReplicationListener.class ); - segrepTarget = new SegmentReplicationTarget(spyIndexShard, segrepSource, segRepListener); + segrepTarget = new SegmentReplicationTarget(spyIndexShard, repCheckpoint, segrepSource, segRepListener); when(spyIndexShard.getSegmentMetadataMap()).thenReturn(storeMetadataSnapshots.get(0).asMap()); segrepTarget.startReplication(new ActionListener() { @Override diff --git a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 776173f73ce5c..9c38c5848e297 100644 --- a/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java +++ 
b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -120,11 +120,13 @@ public void testStartMultipleReplicationsForSingleShard() throws Exception { shards.recoverReplica(shard); final SegmentReplicationTarget target1 = new SegmentReplicationTarget( shard, + shards.getPrimary().getLatestReplicationCheckpoint(), mock(SegmentReplicationSource.class), mock(ReplicationListener.class) ); final SegmentReplicationTarget target2 = new SegmentReplicationTarget( shard, + shards.getPrimary().getLatestReplicationCheckpoint(), mock(SegmentReplicationSource.class), mock(ReplicationListener.class) ); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 29ecc6b376ad0..7ef460ebbfb50 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -94,6 +94,7 @@ import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.mapper.MapperService; import org.opensearch.index.mapper.SourceToParse; import org.opensearch.index.remote.RemoteRefreshSegmentPressureService; @@ -141,6 +142,7 @@ import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -531,6 +533,58 @@ protected IndexShard newShard( ); } + protected IndexShard newShard(boolean primary, SegmentReplicationCheckpointPublisher checkpointPublisher) throws IOException { + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT).build(); + return newShard(primary, checkpointPublisher, settings); + } + + /** + * creates a new initializing shard. The shard will be put in its proper path under the + * current node id the shard is assigned to. + * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint + */ + protected IndexShard newShard(boolean primary, SegmentReplicationCheckpointPublisher checkpointPublisher, Settings settings) + throws IOException { + final ShardId shardId = new ShardId("index", "_na_", 0); + final ShardRouting shardRouting = TestShardRouting.newShardRouting( + shardId, + randomAlphaOfLength(10), + primary, + ShardRoutingState.INITIALIZING, + primary ? 
RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + ); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); + ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); + + Settings indexSettings = Settings.builder() + .put(settings) + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000)) + .put(Settings.EMPTY) + .build(); + IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName()) + .settings(indexSettings) + .primaryTerm(0, primaryTerm) + .putMapping("{ \"properties\": {} }") + .build(); + return newShard( + shardRouting, + shardPath, + metadata, + null, + null, + new NRTReplicationEngineFactory(), + new EngineConfigFactory(new IndexSettings(metadata, metadata.getSettings())), + () -> {}, + RetentionLeaseSyncer.EMPTY, + EMPTY_EVENT_LISTENER, + checkpointPublisher, + null + ); + } + /** * creates a new initializing shard. * @param routing shard routing to use @@ -1527,10 +1581,7 @@ public void getCheckpointMetadata( ActionListener<CheckpointInfoResponse> listener ) { try { - final CopyState copyState = new CopyState( - ReplicationCheckpoint.empty(primaryShard.shardId, primaryShard.getLatestReplicationCheckpoint().getCodec()), - primaryShard - ); + final CopyState copyState = new CopyState(primaryShard.getLatestReplicationCheckpoint(), primaryShard); listener.onResponse( new CheckpointInfoResponse(copyState.getCheckpoint(), copyState.getMetadataMap(), copyState.getInfosBytes()) ); @@ -1585,6 +1636,7 @@ protected final List<SegmentReplicationTarget> replicateSegments(IndexShard prim final SegmentReplicationTargetService targetService = prepareForReplication(primaryShard, replica); final SegmentReplicationTarget target = targetService.startReplication( replica, + primaryShard.getLatestReplicationCheckpoint(), getTargetListener(primaryShard, replica, primaryMetadata, countDownLatch) ); ids.add(target); From 56a19eaef346034d8b49fe4cac83111ef093b674 Mon Sep 17 00:00:00 2001 From: Sorabh Date: Thu, 3 Aug 2023 11:00:02 -0700 Subject: [PATCH 21/24] Change to determine if concurrent segment search should be used by the request during SearchContext creation (#9059) * Change to determine if concurrent segment search should be used by the request during SearchContext creation. It caches the evaluated output for all future invocations.
It also provides the executor to IndexSearcher based on this evaluation Signed-off-by: Sorabh Hamirwasia * Add test case to enable/disable concurrent search cluster setting and verify context object state Signed-off-by: Sorabh Hamirwasia --------- Signed-off-by: Sorabh Hamirwasia --- .../search/DefaultSearchContext.java | 37 ++++++---- .../internal/FilteredSearchContext.java | 5 ++ .../opensearch/search/SearchServiceTests.java | 69 +++++++++++++++++++ 3 files changed, 98 insertions(+), 13 deletions(-) diff --git a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java index f377a5e315e1b..d10173184f1c6 100644 --- a/server/src/main/java/org/opensearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/opensearch/search/DefaultSearchContext.java @@ -183,6 +183,7 @@ final class DefaultSearchContext extends SearchContext { private final QueryShardContext queryShardContext; private final FetchPhase fetchPhase; private final Function requestToAggReduceContextBuilder; + private final boolean useConcurrentSearch; DefaultSearchContext( ReaderContext readerContext, @@ -213,13 +214,14 @@ final class DefaultSearchContext extends SearchContext { this.indexShard = readerContext.indexShard(); this.clusterService = clusterService; this.engineSearcher = readerContext.acquireSearcher("search"); + this.useConcurrentSearch = useConcurrentSearch(executor); this.searcher = new ContextIndexSearcher( engineSearcher.getIndexReader(), engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(), lowLevelCancellation, - executor, + useConcurrentSearch ? executor : null, this ); this.relativeTimeSupplier = relativeTimeSupplier; @@ -878,18 +880,7 @@ public Profilers getProfilers() { */ @Override public boolean isConcurrentSegmentSearchEnabled() { - if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) - && (clusterService != null) - && (searcher().getExecutor() != null)) { - return indexService.getIndexSettings() - .getSettings() - .getAsBoolean( - IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), - clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) - ); - } else { - return false; - } + return useConcurrentSearch; } public void setProfilers(Profilers profilers) { @@ -932,4 +923,24 @@ public void setBucketCollectorProcessor(BucketCollectorProcessor bucketCollector public BucketCollectorProcessor bucketCollectorProcessor() { return bucketCollectorProcessor; } + + /** + * Evaluate based on cluster and index settings if concurrent segment search should be used for this request context + * @return true: use concurrent search + * false: otherwise + */ + private boolean useConcurrentSearch(Executor concurrentSearchExecutor) { + if (FeatureFlags.isEnabled(FeatureFlags.CONCURRENT_SEGMENT_SEARCH) + && (clusterService != null) + && (concurrentSearchExecutor != null)) { + return indexService.getIndexSettings() + .getSettings() + .getAsBoolean( + IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), + clusterService.getClusterSettings().get(CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING) + ); + } else { + return false; + } + } } diff --git a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java index bb990e69e7722..02e6568369e16 100644 --- a/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java +++ 
b/server/src/main/java/org/opensearch/search/internal/FilteredSearchContext.java @@ -559,4 +559,9 @@ public void setBucketCollectorProcessor(BucketCollectorProcessor bucketCollector public BucketCollectorProcessor bucketCollectorProcessor() { return in.bucketCollectorProcessor(); } + + @Override + public boolean isConcurrentSegmentSearchEnabled() { + return in.isConcurrentSegmentSearchEnabled(); + } } diff --git a/server/src/test/java/org/opensearch/search/SearchServiceTests.java b/server/src/test/java/org/opensearch/search/SearchServiceTests.java index 876a2d15cad7e..8643941970fe0 100644 --- a/server/src/test/java/org/opensearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/opensearch/search/SearchServiceTests.java @@ -1275,8 +1275,77 @@ public void testConcurrentSegmentSearchSearchContext() throws IOException { .getSetting(index, IndexSettings.INDEX_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey()) ); assertEquals(concurrentSearchEnabled, searchContext.isConcurrentSegmentSearchEnabled()); + // verify executor nullability with concurrent search enabled/disabled + if (concurrentSearchEnabled) { + assertNotNull(searchContext.searcher().getExecutor()); + } else { + assertNull(searchContext.searcher().getExecutor()); + } + } + } + // Cleanup + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings(Settings.builder().putNull(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey())) + .get(); + } + + /** + * Test that the Search Context for concurrent segment search enabled is set correctly at the time of construction. + * The same is used throughout the context object lifetime even if cluster setting changes before the request completion. + */ + public void testConcurrentSegmentSearchIsSetOnceDuringContextCreation() throws IOException { + String index = randomAlphaOfLengthBetween(5, 10).toLowerCase(Locale.ROOT); + IndexService indexService = createIndex(index); + final SearchService service = getInstanceFromNode(SearchService.class); + ShardId shardId = new ShardId(indexService.index(), 0); + long nowInMillis = System.currentTimeMillis(); + String clusterAlias = randomBoolean() ? 
null : randomAlphaOfLengthBetween(3, 10); + SearchRequest searchRequest = new SearchRequest(); + searchRequest.allowPartialSearchResults(randomBoolean()); + ShardSearchRequest request = new ShardSearchRequest( + OriginalIndices.NONE, + searchRequest, + shardId, + indexService.numberOfShards(), + AliasFilter.EMPTY, + 1f, + nowInMillis, + clusterAlias, + Strings.EMPTY_ARRAY + ); + + Boolean[] concurrentSearchStates = new Boolean[] { true, false }; + for (Boolean concurrentSearchSetting : concurrentSearchStates) { + // update concurrent search cluster setting and create search context + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), concurrentSearchSetting) + ) + .get(); + try (DefaultSearchContext searchContext = service.createSearchContext(request, new TimeValue(System.currentTimeMillis()))) { + // verify concurrent search state in context + assertEquals(concurrentSearchSetting, searchContext.isConcurrentSegmentSearchEnabled()); + // verify executor state in searcher + assertEquals(concurrentSearchSetting, (searchContext.searcher().getExecutor() != null)); + + // update cluster setting to flip the concurrent segment search state + client().admin() + .cluster() + .prepareUpdateSettings() + .setTransientSettings( + Settings.builder().put(SearchService.CLUSTER_CONCURRENT_SEGMENT_SEARCH_SETTING.getKey(), !concurrentSearchSetting) + ) + .get(); + + // verify that concurrent segment search is still set to same expected value for the context + assertEquals(concurrentSearchSetting, searchContext.isConcurrentSegmentSearchEnabled()); } } + // Cleanup client().admin() .cluster() From 6e51c475c30ca1a0f0875f9e8574242e4bbd6c9d Mon Sep 17 00:00:00 2001 From: Andrew Ross Date: Thu, 3 Aug 2023 13:07:09 -0700 Subject: [PATCH 22/24] Remove empty if/try-finally in base integ test class (#9098) The [commit to comment out `forceFailure()`][1] was done about 8 years ago so I don't think we'll be bringing it back. 
Also this makes the code base a slightly friendlier place :) [1]: https://github.com/opensearch-project/OpenSearch/commit/e5a699fa05a6e4ffcb919e8a2a6da8f57e1c2ce8 Signed-off-by: Andrew Ross --- .../test/OpenSearchIntegTestCase.java | 52 ++++++++----------- 1 file changed, 21 insertions(+), 31 deletions(-) diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 45d69703456fc..422f6d8dfbe7d 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -590,44 +590,34 @@ private static void clearClusters() throws Exception { } private void afterInternal(boolean afterClass) throws Exception { - boolean success = false; + final Scope currentClusterScope = getCurrentClusterScope(); + if (isInternalCluster()) { + internalCluster().clearDisruptionScheme(); + } try { - final Scope currentClusterScope = getCurrentClusterScope(); - if (isInternalCluster()) { - internalCluster().clearDisruptionScheme(); - } - try { - if (cluster() != null) { - if (currentClusterScope != Scope.TEST) { - Metadata metadata = client().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); + if (cluster() != null) { + if (currentClusterScope != Scope.TEST) { + Metadata metadata = client().admin().cluster().prepareState().execute().actionGet().getState().getMetadata(); - final Set persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); - assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); + final Set persistentKeys = new HashSet<>(metadata.persistentSettings().keySet()); + assertThat("test leaves persistent cluster metadata behind", persistentKeys, empty()); - final Set transientKeys = new HashSet<>(metadata.transientSettings().keySet()); - assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); - } - ensureClusterSizeConsistency(); - ensureClusterStateConsistency(); - ensureClusterStateCanBeReadByNodeTool(); - beforeIndexDeletion(); - cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete - if (afterClass || currentClusterScope == Scope.TEST) { - cluster().close(); - } - cluster().assertAfterTest(); + final Set transientKeys = new HashSet<>(metadata.transientSettings().keySet()); + assertThat("test leaves transient cluster metadata behind", transientKeys, empty()); } - } finally { - if (currentClusterScope == Scope.TEST) { - clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST + ensureClusterSizeConsistency(); + ensureClusterStateConsistency(); + ensureClusterStateCanBeReadByNodeTool(); + beforeIndexDeletion(); + cluster().wipe(excludeTemplates()); // wipe after to make sure we fail in the test that didn't ack the delete + if (afterClass || currentClusterScope == Scope.TEST) { + cluster().close(); } + cluster().assertAfterTest(); } - success = true; } finally { - if (!success) { - // if we failed here that means that something broke horribly so we should clear all clusters - // TODO: just let the exception happen, WTF is all this horseshit - // afterTestRule.forceFailure(); + if (currentClusterScope == Scope.TEST) { + clearClusters(); // it is ok to leave persistent / transient cluster state behind if scope is TEST } } } From 9357b6192e3c107844b4cbe9bacd3502a472e58d Mon Sep 17 00:00:00 
2001 From: Suraj Singh Date: Thu, 3 Aug 2023 14:42:15 -0700 Subject: [PATCH 23/24] [Remote Store] Add missing unit test in main (#9101) Signed-off-by: Suraj Singh --- .../RemoteSegmentStoreDirectoryTests.java | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java index 3b2e33388925a..f2d39b2ac7bee 100644 --- a/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/RemoteSegmentStoreDirectoryTests.java @@ -758,6 +758,35 @@ public void testUploadMetadataNonEmpty() throws IOException { } } + public void testUploadMetadataMissingSegment() throws IOException { + populateMetadata(); + remoteSegmentStoreDirectory.init(); + + Directory storeDirectory = mock(Directory.class); + IndexOutput indexOutput = mock(IndexOutput.class); + + String generation = RemoteStoreUtils.invertLong(segmentInfos.getGeneration()); + long primaryTermLong = indexShard.getLatestReplicationCheckpoint().getPrimaryTerm(); + String primaryTerm = RemoteStoreUtils.invertLong(primaryTermLong); + when(storeDirectory.createOutput(startsWith("metadata__" + primaryTerm + "__" + generation), eq(IOContext.DEFAULT))).thenReturn( + indexOutput + ); + + Collection segmentFiles = List.of("_123.si"); + assertThrows( + NoSuchFileException.class, + () -> remoteSegmentStoreDirectory.uploadMetadata( + segmentFiles, + segmentInfos, + storeDirectory, + 12L, + indexShard.getLatestReplicationCheckpoint() + ) + ); + verify(indexOutput).close(); + verify(storeDirectory).deleteFile(startsWith("metadata__" + primaryTerm + "__" + generation)); + } + public void testUploadMetadataNoSegmentCommitInfos() throws IOException { SegmentInfos segInfos = indexShard.store().readLastCommittedSegmentsInfo(); int numSegCommitInfos = segInfos.size(); From 24595c9fbace3516e886137058f3a8b992937652 Mon Sep 17 00:00:00 2001 From: Neetika Singhal Date: Thu, 3 Aug 2023 17:31:08 -0700 Subject: [PATCH 24/24] Make MultiBucketConsumerService thread safe to use across slices during search (#9047) Signed-off-by: Neetika Singhal --- CHANGELOG.md | 2 +- .../MultiBucketConsumerService.java | 54 ++++++++++++-- .../MultiBucketConsumerTests.java | 72 +++++++++++++++++++ 3 files changed, 121 insertions(+), 7 deletions(-) create mode 100644 server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java diff --git a/CHANGELOG.md b/CHANGELOG.md index e29bbd2da4db5..3b81cd3a60deb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,7 +110,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Create separate SourceLookup instance per segment slice in SignificantTextAggregatorFactory ([#8807](https://github.com/opensearch-project/OpenSearch/pull/8807)) - Add support for aggregation profiler with concurrent aggregation ([#8801](https://github.com/opensearch-project/OpenSearch/pull/8801)) - [Remove] Deprecated Fractional ByteSizeValue support #9005 ([#9005](https://github.com/opensearch-project/OpenSearch/pull/9005)) - +- Make MultiBucketConsumerService thread safe to use across slices during search ([#9047](https://github.com/opensearch-project/OpenSearch/pull/9047)) ### Deprecated ### Removed diff --git a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java 
b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java index f1416fddebfa2..825e37b7cd952 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java +++ b/server/src/main/java/org/opensearch/search/aggregations/MultiBucketConsumerService.java @@ -33,6 +33,7 @@ import org.opensearch.cluster.service.ClusterService; import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; import org.opensearch.core.common.io.stream.StreamInput; import org.opensearch.core.common.io.stream.StreamOutput; import org.opensearch.common.settings.Setting; @@ -42,6 +43,7 @@ import org.opensearch.search.aggregations.bucket.BucketsAggregator; import java.io.IOException; +import java.util.concurrent.atomic.LongAdder; import java.util.function.IntConsumer; /** @@ -127,13 +129,36 @@ public static class MultiBucketConsumer implements IntConsumer { private final int limit; private final CircuitBreaker breaker; - // aggregations execute in a single thread so no atomic here + // aggregations execute in a single thread for both sequential + // and concurrent search, so no atomic here private int count; - private int callCount = 0; + + // will be updated by multiple threads in concurrent search + // hence making it as LongAdder + private final LongAdder callCount; + private volatile boolean circuitBreakerTripped; + private final int availProcessors; public MultiBucketConsumer(int limit, CircuitBreaker breaker) { this.limit = limit; this.breaker = breaker; + callCount = new LongAdder(); + availProcessors = Runtime.getRuntime().availableProcessors(); + } + + // only visible for testing + protected MultiBucketConsumer( + int limit, + CircuitBreaker breaker, + LongAdder callCount, + boolean circuitBreakerTripped, + int availProcessors + ) { + this.limit = limit; + this.breaker = breaker; + this.callCount = callCount; + this.circuitBreakerTripped = circuitBreakerTripped; + this.availProcessors = availProcessors; } @Override @@ -153,10 +178,27 @@ public void accept(int value) { ); } } - // check parent circuit breaker every 1024 calls - callCount++; - if ((callCount & 0x3FF) == 0) { - breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + callCount.increment(); + // tripping the circuit breaker for other threads in case of concurrent search + // if the circuit breaker has tripped for one of the threads already, more info + // can be found on: https://github.com/opensearch-project/OpenSearch/issues/7785 + if (circuitBreakerTripped) { + throw new CircuitBreakingException( + "Circuit breaker for this consumer has already been tripped by previous invocations. 
" + + "This can happen in case of concurrent segment search when multiple threads are " + + "executing the request and one of the thread has already tripped the circuit breaker", + breaker.getDurability() + ); + } + // check parent circuit breaker every 1024 to (1024 + available processors) calls + long sum = callCount.sum(); + if ((sum >= 1024) && (sum & 0x3FF) <= availProcessors) { + try { + breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } catch (CircuitBreakingException e) { + circuitBreakerTripped = true; + throw e; + } } } diff --git a/server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java b/server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java new file mode 100644 index 0000000000000..eda705f95bf9b --- /dev/null +++ b/server/src/test/java/org/opensearch/search/aggregations/MultiBucketConsumerTests.java @@ -0,0 +1,72 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.search.aggregations; + +import org.mockito.Mockito; +import org.opensearch.core.common.breaker.CircuitBreaker; +import org.opensearch.core.common.breaker.CircuitBreakingException; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.concurrent.atomic.LongAdder; + +import static org.opensearch.search.aggregations.MultiBucketConsumerService.DEFAULT_MAX_BUCKETS; + +public class MultiBucketConsumerTests extends OpenSearchTestCase { + + public void testMultiConsumerAcceptWhenCBTripped() { + CircuitBreaker breaker = Mockito.mock(CircuitBreaker.class); + MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + breaker, + new LongAdder(), + true, + 1 + ); + // exception is thrown upfront since the circuit breaker has already tripped + expectThrows(CircuitBreakingException.class, () -> multiBucketConsumer.accept(0)); + Mockito.verify(breaker, Mockito.times(0)).addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } + + public void testMultiConsumerAcceptToTripCB() { + CircuitBreaker breaker = Mockito.mock(CircuitBreaker.class); + LongAdder callCount = new LongAdder(); + for (int i = 0; i < 1024; i++) { + callCount.increment(); + } + MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + breaker, + callCount, + false, + 2 + ); + // circuit breaker check is performed as the value of call count would be 1025 which is still in range + Mockito.when(breaker.addEstimateBytesAndMaybeBreak(0, "allocated_buckets")).thenThrow(CircuitBreakingException.class); + expectThrows(CircuitBreakingException.class, () -> multiBucketConsumer.accept(0)); + Mockito.verify(breaker, Mockito.times(1)).addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } + + public void testMultiConsumerAccept() { + CircuitBreaker breaker = Mockito.mock(CircuitBreaker.class); + LongAdder callCount = new LongAdder(); + for (int i = 0; i < 1100; i++) { + callCount.increment(); + } + MultiBucketConsumerService.MultiBucketConsumer multiBucketConsumer = new MultiBucketConsumerService.MultiBucketConsumer( + DEFAULT_MAX_BUCKETS, + breaker, + callCount, + false, + 1 + ); + // no exception is thrown as the call count value is not in the expected range and CB is not checked + multiBucketConsumer.accept(0); + 
Mockito.verify(breaker, Mockito.times(0)).addEstimateBytesAndMaybeBreak(0, "allocated_buckets"); + } +}
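A worked sketch of the sampling window used in accept(...) above. With callCount kept in a LongAdder, several threads can cross the same 1024 boundary at once, so the trigger is widened to a window of (availProcessors + 1) values, (sum & 0x3FF) <= availProcessors, instead of one exact multiple of 1024 that racing increments could all skip past. The demo below is self-contained plain Java; the availProcessors value is an assumption for illustration.

public class BreakerWindowDemo {
    public static void main(String[] args) {
        final int availProcessors = 4; // assumed here; production code reads Runtime.getRuntime().availableProcessors()
        for (long sum = 1020; sum <= 1032; sum++) {
            boolean checkBreaker = (sum >= 1024) && ((sum & 0x3FF) <= availProcessors);
            System.out.println(sum + " -> " + (checkBreaker ? "check parent breaker" : "skip"));
        }
        // prints "check parent breaker" for sums 1024..1028 and "skip" elsewhere, so at
        // least one of up to availProcessors racing threads lands inside the window
    }
}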