| 1 | +/* |
| 2 | + * SPDX-License-Identifier: Apache-2.0 |
| 3 | + * |
| 4 | + * The OpenSearch Contributors require contributions made to |
| 5 | + * this file be licensed under the Apache-2.0 license or a |
| 6 | + * compatible open source license. |
| 7 | + */ |
| 8 | + |
| 9 | +package org.opensearch.cluster.coordination; |
| 10 | + |
| 11 | +import org.apache.logging.log4j.LogManager; |
| 12 | +import org.apache.logging.log4j.Logger; |
| 13 | +import org.junit.After; |
| 14 | +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; |
| 15 | +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; |
| 16 | +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; |
| 17 | +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; |
| 18 | +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; |
| 19 | +import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; |
| 20 | +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; |
| 21 | +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; |
| 22 | +import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; |
| 23 | +import org.opensearch.cluster.ClusterState; |
| 24 | +import org.opensearch.cluster.decommission.DecommissionAttribute; |
| 25 | +import org.opensearch.cluster.decommission.DecommissionStatus; |
| 26 | +import org.opensearch.cluster.node.DiscoveryNode; |
| 27 | +import org.opensearch.cluster.node.DiscoveryNodeRole; |
| 28 | +import org.opensearch.cluster.service.ClusterService; |
| 29 | +import org.opensearch.common.Priority; |
| 30 | +import org.opensearch.common.settings.Settings; |
| 31 | +import org.opensearch.common.unit.TimeValue; |
| 32 | +import org.opensearch.plugins.Plugin; |
| 33 | +import org.opensearch.test.OpenSearchIntegTestCase; |
| 34 | +import org.opensearch.test.transport.MockTransportService; |
| 35 | + |
| 36 | +import java.util.Collection; |
| 37 | +import java.util.Collections; |
| 38 | +import java.util.Iterator; |
| 39 | +import java.util.List; |
| 40 | +import java.util.concurrent.ExecutionException; |
| 41 | + |
| 42 | +import static org.opensearch.test.NodeRoles.onlyRole; |
| 43 | +import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertNoTimeout; |
| 44 | + |
| 45 | +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) |
| 46 | +public class AwarenessAttributeDecommissionIT extends OpenSearchIntegTestCase { |
| 47 | + private final Logger logger = LogManager.getLogger(AwarenessAttributeDecommissionIT.class); |
| 48 | + |
| 49 | + @Override |
| 50 | + protected Collection<Class<? extends Plugin>> nodePlugins() { |
| 51 | + return Collections.singletonList(MockTransportService.TestPlugin.class); |
| 52 | + } |
| 53 | + |
| 54 | + @After |
| 55 | + public void cleanup() throws Exception { |
| 56 | + assertNoTimeout(client().admin().cluster().prepareHealth().get()); |
| 57 | + } |
| 58 | + |
| 59 | + public void testDecommissionStatusUpdatePublishedToAllNodes() throws ExecutionException, InterruptedException { |
| 60 | + Settings commonSettings = Settings.builder() |
| 61 | + .put("cluster.routing.allocation.awareness.attributes", "zone") |
| 62 | + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") |
| 63 | + .build(); |
| 64 | + |
| 65 | + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); |
| 66 | + List<String> clusterManagerNodes = internalCluster().startNodes( |
| 67 | + Settings.builder() |
| 68 | + .put(commonSettings) |
| 69 | + .put("node.attr.zone", "a") |
| 70 | + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) |
| 71 | + .build(), |
| 72 | + Settings.builder() |
| 73 | + .put(commonSettings) |
| 74 | + .put("node.attr.zone", "b") |
| 75 | + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) |
| 76 | + .build(), |
| 77 | + Settings.builder() |
| 78 | + .put(commonSettings) |
| 79 | + .put("node.attr.zone", "c") |
| 80 | + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) |
| 81 | + .build() |
| 82 | + ); |
| 83 | + |
| 84 | + logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); |
| 85 | + List<String> dataNodes = internalCluster().startNodes( |
| 86 | + Settings.builder() |
| 87 | + .put(commonSettings) |
| 88 | + .put("node.attr.zone", "a") |
| 89 | + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) |
| 90 | + .build(), |
| 91 | + Settings.builder() |
| 92 | + .put(commonSettings) |
| 93 | + .put("node.attr.zone", "b") |
| 94 | + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) |
| 95 | + .build(), |
| 96 | + Settings.builder() |
| 97 | + .put(commonSettings) |
| 98 | + .put("node.attr.zone", "c") |
| 99 | + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) |
| 100 | + .build() |
| 101 | + ); |
| 102 | + |
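|  | + // all 6 nodes (3 cluster-manager and 3 data) are expected to have joined the cluster by now |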
| 103 | + ensureStableCluster(6); |
| 104 | + |
| 105 | + logger.info("--> starting decommissioning nodes in zone {}", 'c'); |
| 106 | + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "c"); |
| 107 | + DecommissionRequest decommissionRequest = new DecommissionRequest(decommissionAttribute); |
| 108 | + DecommissionResponse decommissionResponse = client().execute(DecommissionAction.INSTANCE, decommissionRequest).get(); |
| 109 | + assertTrue(decommissionResponse.isAcknowledged()); |
| 110 | + |
| 111 | + // Will wait for all events to complete |
| 112 | + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); |
| 113 | + |
| 114 | + // assert that decommission status is successful |
| 115 | + GetDecommissionStateResponse response = client().execute( |
| 116 | + GetDecommissionStateAction.INSTANCE, |
| 117 | + new GetDecommissionStateRequest(decommissionAttribute.attributeName()) |
| 118 | + ).get(); |
| 119 | + assertEquals(response.getAttributeValue(), decommissionAttribute.attributeValue()); |
| 120 | + assertEquals(response.getDecommissionStatus(), DecommissionStatus.SUCCESSFUL); |
| 121 | + |
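|  | + // the two zone-c nodes (one cluster-manager and one data node) have been removed, leaving 4 nodes in the cluster |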
| 122 | + ClusterState clusterState = client(clusterManagerNodes.get(0)).admin().cluster().prepareState().execute().actionGet().getState(); |
| 123 | + assertEquals(4, clusterState.nodes().getSize()); |
| 124 | + |
| 125 | + // assert the status on nodes that are currently part of the cluster |
| 126 | + Iterator<DiscoveryNode> discoveryNodeIterator = clusterState.nodes().getNodes().valuesIt(); |
| 127 | + while (discoveryNodeIterator.hasNext()) { |
| 128 | + // assert that no node has the decommissioned attribute zone=c |
| 129 | + DiscoveryNode node = discoveryNodeIterator.next(); |
| 130 | + assertNotEquals(node.getAttributes().get("zone"), "c"); |
| 131 | + |
| 132 | + // assert that all the remaining nodes have the decommission status SUCCESSFUL |
| 133 | + ClusterService localNodeClusterService = internalCluster().getInstance(ClusterService.class, node.getName()); |
| 134 | + assertEquals( |
| 135 | + localNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), |
| 136 | + DecommissionStatus.SUCCESSFUL |
| 137 | + ); |
| 138 | + } |
| 139 | + |
| 140 | + // assert the status on a decommissioned node |
| 141 | + // Verify that, until it was removed from the cluster, the node received the appropriate status updates. |
| 142 | + // A decommissioned node is removed while the status is still IN_PROGRESS, so it never receives the |
| 143 | + // status update to SUCCESSFUL |
| 144 | + String randomDecommissionedNode = randomFrom(clusterManagerNodes.get(2), dataNodes.get(2)); |
| 145 | + ClusterService decommissionedNodeClusterService = internalCluster().getInstance(ClusterService.class, randomDecommissionedNode); |
| 146 | + assertEquals( |
| 147 | + decommissionedNodeClusterService.state().metadata().decommissionAttributeMetadata().status(), |
| 148 | + DecommissionStatus.IN_PROGRESS |
| 149 | + ); |
| 150 | + |
| 151 | + // Will wait for all events to complete |
| 152 | + client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get(); |
| 153 | + |
| 154 | + // Recommission the zone so that the test can wind down gracefully once the assertions above have passed |
| 155 | + DeleteDecommissionStateResponse deleteDecommissionStateResponse = client(clusterManagerNodes.get(0)).execute( |
| 156 | + DeleteDecommissionStateAction.INSTANCE, |
| 157 | + new DeleteDecommissionStateRequest() |
| 158 | + ).get(); |
| 159 | + assertTrue(deleteDecommissionStateResponse.isAcknowledged()); |
| 160 | + |
| 161 | + // wait for the cluster to stabilise with a timeout of 2 min (the findPeerInterval for decommissioned nodes), |
| 162 | + // by which time all nodes should have rejoined the cluster |
| 163 | + ensureStableCluster(6, TimeValue.timeValueMinutes(2)); |
| 164 | + } |
| 165 | +} |