├── .gitignore ├── README.md ├── bin ├── cleanup └── elasticsearch.in.sh ├── config └── elasticsearch.yml ├── pom.xml └── src ├── main ├── assemblies │ └── exec.xml ├── java │ └── org │ │ └── motovs │ │ └── elasticsearch │ │ └── snapshots │ │ └── AbortedSnapshotCleaner.java └── resources │ └── log4j.properties └── test └── java └── org └── motovs └── elasticsearch └── snapshots └── StuckCompletedSnapshotTest.java /.gitignore: -------------------------------------------------------------------------------- 1 | /data 2 | /work 3 | /logs 4 | /.idea 5 | /target 6 | .DS_Store 7 | .local-execution-hints.log 8 | *.iml 9 | /.settings 10 | /.project 11 | /.classpath 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Elasticsearch Snapshot Cleanup Utility 2 | ====================================== 3 | 4 | This utility cleans up snapshots that are stuck due to [Issue 5958](https://github.com/elasticsearch/elasticsearch/issues/5958). 5 | 6 | Usage: 7 | 8 | - For version 1.0.1, untar the [tar.gz file](https://www.dropbox.com/s/lcmj244ztzv67ds/elasticsearch-snapshot-cleanup-1.0-SNAPSHOT.tar.gz) into a temporary directory on a machine that has access to the cluster. 9 | - For version 1.4.4, untar the [tar.gz file](https://www.dropbox.com/s/xlohzi267egzjqk/elasticsearch-snapshot-cleanup-1.4.4.2.tar.gz) into a temporary directory on a machine that has access to the cluster. 10 | - For all other versions, update the pom.xml file with the appropriate elasticsearch and lucene versions, run `mvn clean package`, and untar the file found in the `target/releases` directory. 11 | - Modify the `config/elasticsearch.yml` file with your cluster connection settings. 12 | - Execute the `bin/cleanup` script. 13 | 14 | 15 | License 16 | ------- 17 | 18 | This software is licensed under the Apache 2 license, quoted below. 19 | 20 | Copyright 2009-2014 Elasticsearch 21 | 22 | Licensed under the Apache License, Version 2.0 (the "License"); you may not 23 | use this file except in compliance with the License. You may obtain a copy of 24 | the License at 25 | 26 | http://www.apache.org/licenses/LICENSE-2.0 27 | 28 | Unless required by applicable law or agreed to in writing, software 29 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 30 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 31 | License for the specific language governing permissions and limitations under 32 | the License. 33 | -------------------------------------------------------------------------------- /bin/cleanup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | SCRIPT="$0" 4 | 5 | while [ -h "$SCRIPT" ] ; do 6 | ls=`ls -ld "$SCRIPT"` 7 | # Drop everything prior to -> 8 | link=`expr "$ls" : '.*-> \(.*\)$'` 9 | if expr "$link" : '/.*' > /dev/null; then 10 | SCRIPT="$link" 11 | else 12 | SCRIPT=`dirname "$SCRIPT"`/"$link" 13 | fi 14 | done 15 | 16 | # determine elasticsearch home 17 | ES_HOME=`dirname "$SCRIPT"`/.. 18 | 19 | # make ELASTICSEARCH_HOME absolute 20 | ES_HOME=`cd "$ES_HOME"; pwd` 21 | 22 | echo "Setting ES_HOME as $ES_HOME" 23 | # If an include wasn't specified in the environment, then search for one... 24 | if [ "x$ES_INCLUDE" = "x" ]; then 25 | # Locations (in order) to use when searching for an include file.
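# The first readable file in this list is sourced and the search stops; setting ES_INCLUDE instead sources that specific file and skips this search entirely.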
26 | for include in /usr/share/elasticsearch/elasticsearch.in.sh \ 27 | /usr/local/share/elasticsearch/elasticsearch.in.sh \ 28 | /opt/elasticsearch/elasticsearch.in.sh \ 29 | ~/.elasticsearch.in.sh \ 30 | "`dirname "$0"`"/elasticsearch.in.sh; do 31 | if [ -r "$include" ]; then 32 | . "$include" 33 | break 34 | fi 35 | done 36 | # ...otherwise, source the specified include. 37 | elif [ -r "$ES_INCLUDE" ]; then 38 | . "$ES_INCLUDE" 39 | fi 40 | 41 | if [ -x "$JAVA_HOME/bin/java" ]; then 42 | JAVA="$JAVA_HOME/bin/java" 43 | else 44 | JAVA=`which java` 45 | fi 46 | 47 | if [ ! -x "$JAVA" ]; then 48 | echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME" 49 | exit 1 50 | fi 51 | 52 | if [ -z "$ES_CLASSPATH" ]; then 53 | echo "You must set the ES_CLASSPATH var" >&2 54 | exit 1 55 | fi 56 | 57 | # Special-case path variables. 58 | case `uname` in 59 | CYGWIN*) 60 | ES_CLASSPATH=`cygpath -p -w "$ES_CLASSPATH"` 61 | ES_HOME=`cygpath -p -w "$ES_HOME"` 62 | ;; 63 | esac 64 | 65 | # Parse any long getopt options and put them into properties before calling getopt below 66 | # Be dash compatible to make sure running under ubuntu works 67 | ARGV="" 68 | while [ $# -gt 0 ] 69 | do 70 | case $1 in 71 | --*=*) properties="$properties -Des.${1#--}" 72 | shift 1 73 | ;; 74 | --*) properties="$properties -Des.${1#--}=$2" 75 | shift 2 76 | ;; 77 | *) ARGV="$ARGV $1" ; shift 78 | esac 79 | done 80 | 81 | # Parse any command line options. 82 | args=`getopt vdhp:D:X: $ARGV` 83 | eval set -- "$args" 84 | 85 | while true; do 86 | case $1 in 87 | -v) 88 | "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $properties \ 89 | org.elasticsearch.Version 90 | exit 0 91 | ;; 92 | -h) 93 | echo "Usage: $0 [-h]" 94 | exit 0 95 | ;; 96 | -D) 97 | properties="$properties -D$2" 98 | shift 2 99 | ;; 100 | -X) 101 | properties="$properties -X$2" 102 | shift 2 103 | ;; 104 | --) 105 | shift 106 | break 107 | ;; 108 | *) 109 | echo "Error parsing argument $1!" >&2 110 | exit 1 111 | ;; 112 | esac 113 | done 114 | 115 | # Start up the service 116 | exec "$JAVA" $JAVA_OPTS $ES_JAVA_OPTS $es_parms -Des.path.home="$ES_HOME" -cp "$ES_CLASSPATH" $properties \ 117 | org.motovs.elasticsearch.snapshots.AbortedSnapshotCleaner 118 | 119 | exit $? -------------------------------------------------------------------------------- /bin/elasticsearch.in.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ES_CLASSPATH=$ES_CLASSPATH:$ES_HOME/lib/${project.build.finalName}.jar:$ES_HOME/lib/*:$ES_HOME/lib/sigar/* 4 | 5 | if [ "x$ES_MIN_MEM" = "x" ]; then 6 | ES_MIN_MEM=256m 7 | fi 8 | if [ "x$ES_MAX_MEM" = "x" ]; then 9 | ES_MAX_MEM=1g 10 | fi 11 | if [ "x$ES_HEAP_SIZE" != "x" ]; then 12 | ES_MIN_MEM=$ES_HEAP_SIZE 13 | ES_MAX_MEM=$ES_HEAP_SIZE 14 | fi 15 | 16 | # min and max heap sizes should be set to the same value to avoid 17 | # stop-the-world GC pauses during resize, and so that we can lock the 18 | # heap in memory on startup to prevent any of it from being swapped 19 | # out. 
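# Note: when ES_HEAP_SIZE is set above, it overrides both ES_MIN_MEM and ES_MAX_MEM, so the -Xms and -Xmx values below end up identical.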
20 | JAVA_OPTS="$JAVA_OPTS -Xms${ES_MIN_MEM}" 21 | JAVA_OPTS="$JAVA_OPTS -Xmx${ES_MAX_MEM}" 22 | 23 | # new generation 24 | if [ "x$ES_HEAP_NEWSIZE" != "x" ]; then 25 | JAVA_OPTS="$JAVA_OPTS -Xmn${ES_HEAP_NEWSIZE}" 26 | fi 27 | 28 | # max direct memory 29 | if [ "x$ES_DIRECT_SIZE" != "x" ]; then 30 | JAVA_OPTS="$JAVA_OPTS -XX:MaxDirectMemorySize=${ES_DIRECT_SIZE}" 31 | fi 32 | 33 | # reduce the per-thread stack size 34 | JAVA_OPTS="$JAVA_OPTS -Xss256k" 35 | 36 | # set to headless, just in case 37 | JAVA_OPTS="$JAVA_OPTS -Djava.awt.headless=true" 38 | 39 | # Force the JVM to use IPv4 stack 40 | if [ "x$ES_USE_IPV4" != "x" ]; then 41 | JAVA_OPTS="$JAVA_OPTS -Djava.net.preferIPv4Stack=true" 42 | fi 43 | 44 | JAVA_OPTS="$JAVA_OPTS -XX:+UseParNewGC" 45 | JAVA_OPTS="$JAVA_OPTS -XX:+UseConcMarkSweepGC" 46 | 47 | JAVA_OPTS="$JAVA_OPTS -XX:CMSInitiatingOccupancyFraction=75" 48 | JAVA_OPTS="$JAVA_OPTS -XX:+UseCMSInitiatingOccupancyOnly" 49 | 50 | # GC logging options 51 | if [ "x$ES_USE_GC_LOGGING" != "x" ]; then 52 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCDetails" 53 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCTimeStamps" 54 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintClassHistogram" 55 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintTenuringDistribution" 56 | JAVA_OPTS="$JAVA_OPTS -XX:+PrintGCApplicationStoppedTime" 57 | JAVA_OPTS="$JAVA_OPTS -Xloggc:/var/log/elasticsearch/gc.log" 58 | fi 59 | 60 | # Causes the JVM to dump its heap on OutOfMemory. 61 | JAVA_OPTS="$JAVA_OPTS -XX:+HeapDumpOnOutOfMemoryError" 62 | # The path to the heap dump location, note directory must exist and have enough 63 | # space for a full heap dump. 64 | #JAVA_OPTS="$JAVA_OPTS -XX:HeapDumpPath=$ES_HOME/logs/heapdump.hprof" 65 | -------------------------------------------------------------------------------- /config/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | # Update this file with your settings 2 | cluster.name: "elasticsearch-imotov" 3 | network.host: "127.0.0.1" 4 | discovery.zen.ping.multicast.enabled: false 5 | discovery.zen.ping.unicast.hosts: ["localhost:9300", "localhost:9301", "localhost:9302"] -------------------------------------------------------------------------------- /pom.xml: -------------------------------------------------------------------------------- <?xml version="1.0" encoding="UTF-8"?> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <groupId>org.motovs.elasticsearch.snapshots</groupId> <artifactId>elasticsearch-snapshot-cleanup</artifactId> <version>1.4.4.2</version> <properties> <elasticsearch.version>1.4.4</elasticsearch.version> <lucene.version>4.10.3</lucene.version> </properties> <dependencies> <dependency> <groupId>org.hamcrest</groupId> <artifactId>hamcrest-all</artifactId> <version>1.3</version> <scope>test</scope> </dependency> <dependency> <groupId>com.carrotsearch.randomizedtesting</groupId> <artifactId>randomizedtesting-runner</artifactId> <version>2.1.11</version> <scope>test</scope> </dependency> <dependency> <groupId>org.apache.lucene</groupId> <artifactId>lucene-test-framework</artifactId> <version>${lucene.version}</version> <scope>test</scope> </dependency> <dependency> <groupId>org.elasticsearch</groupId> <artifactId>elasticsearch</artifactId> <version>${elasticsearch.version}</version> </dependency> <dependency> <groupId>org.elasticsearch</groupId> <artifactId>elasticsearch</artifactId> <version>${elasticsearch.version}</version> <scope>test</scope> <type>test-jar</type> </dependency> <dependency> <groupId>org.apache.lucene</groupId> <artifactId>lucene-core</artifactId> <version>${lucene.version}</version> </dependency> <dependency> <groupId>log4j</groupId> <artifactId>log4j</artifactId> <version>1.2.17</version> </dependency> </dependencies> <build> <resources> <resource> <directory>src/main/resources</directory> <filtering>true</filtering> </resource> </resources> <plugins> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-compiler-plugin</artifactId> <version>2.3.2</version> <configuration> <source>1.6</source> <target>1.6</target> </configuration> </plugin> <plugin> <groupId>org.apache.maven.plugins</groupId> <artifactId>maven-source-plugin</artifactId> <version>2.1.2</version> <executions> <execution> <id>attach-sources</id> <goals> <goal>jar</goal> </goals> </execution> </executions> </plugin> <plugin> <artifactId>maven-assembly-plugin</artifactId> <version>2.3</version> <configuration> <appendAssemblyId>false</appendAssemblyId> <outputDirectory>${project.build.directory}/releases/</outputDirectory> <descriptors> <descriptor>${basedir}/src/main/assemblies/exec.xml</descriptor> </descriptors> </configuration>
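<!-- The assembly descriptor referenced above (src/main/assemblies/exec.xml) bundles the project jar, its dependencies, bin/ and config/ into the tar.gz that `mvn clean package` leaves under target/releases/. -->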
<executions> <execution> <phase>package</phase> <goals> <goal>single</goal> </goals> </execution> </executions> </plugin> </plugins> </build> </project> -------------------------------------------------------------------------------- /src/main/assemblies/exec.xml: -------------------------------------------------------------------------------- <?xml version="1.0"?> <assembly> <id>targz</id> <formats> <format>tar.gz</format> </formats> <includeBaseDirectory>true</includeBaseDirectory> <dependencySets> <dependencySet> <outputDirectory>/lib</outputDirectory> <useProjectArtifact>true</useProjectArtifact> <useTransitiveFiltering>true</useTransitiveFiltering> </dependencySet> </dependencySets> <fileSets> <fileSet> <directory>config</directory> <outputDirectory>config</outputDirectory> <includes> <include>*</include> </includes> </fileSet> <fileSet> <directory>bin</directory> <outputDirectory>bin</outputDirectory> <fileMode>0755</fileMode> <directoryMode>0755</directoryMode> <lineEnding>unix</lineEnding> <filtered>true</filtered> <includes> <include>elasticsearch.in.sh</include> <include>cleanup</include> </includes> </fileSet> </fileSets> <files> <file> <source>README.md</source> <outputDirectory>/</outputDirectory> </file> </files> </assembly> -------------------------------------------------------------------------------- /src/main/java/org/motovs/elasticsearch/snapshots/AbortedSnapshotCleaner.java: -------------------------------------------------------------------------------- 1 | package org.motovs.elasticsearch.snapshots; 2 | 3 | import org.elasticsearch.cluster.ClusterService; 4 | import org.elasticsearch.cluster.ClusterState; 5 | import org.elasticsearch.cluster.metadata.SnapshotId; 6 | import org.elasticsearch.cluster.metadata.SnapshotMetaData; 7 | import org.elasticsearch.common.collect.ImmutableMap; 8 | import org.elasticsearch.common.io.stream.StreamInput; 9 | import org.elasticsearch.common.io.stream.StreamOutput; 10 | import org.elasticsearch.common.logging.ESLogger; 11 | import org.elasticsearch.common.logging.Loggers; 12 | import org.elasticsearch.common.logging.log4j.LogConfigurator; 13 | import org.elasticsearch.common.settings.ImmutableSettings; 14 | import org.elasticsearch.common.settings.Settings; 15 | import org.elasticsearch.index.shard.ShardId; 16 | import org.elasticsearch.node.Node; 17 | import org.elasticsearch.node.internal.InternalNode; 18 | import org.elasticsearch.snapshots.SnapshotsService; 19 | import org.elasticsearch.transport.*; 20 | 21 | import java.io.IOException; 22 | 23 | import static org.elasticsearch.node.NodeBuilder.nodeBuilder; 24 | 25 | /** 26 | * Created by igor on 5/27/14. 27 | */ 28 | public class AbortedSnapshotCleaner { 29 | 30 | public static void main(String[] args) { 31 | AbortedSnapshotCleaner cleaner = new AbortedSnapshotCleaner(ImmutableSettings.EMPTY); 32 | try { 33 | cleaner.cleanSnapshots(); 34 | } finally { 35 | cleaner.close(); 36 | } 37 | 38 | } 39 | 40 | 41 | private Node node; 42 | 43 | private ESLogger logger; 44 | 45 | public AbortedSnapshotCleaner(Settings settings) { 46 | this(nodeBuilder().client(true).node(), settings); 47 | } 48 | 49 | // For testing 50 | public AbortedSnapshotCleaner(Node node, Settings settings) { 51 | LogConfigurator.configure(settings); 52 | logger = Loggers.getLogger(getClass(), settings); 53 | this.node = node; 54 | } 55 | 56 | public void cleanSnapshots() { 57 | InternalNode internalNode = (InternalNode)node; 58 | TransportService transportService = internalNode.injector().getInstance(TransportService.class); 59 | ClusterService clusterService = internalNode.injector().getInstance(ClusterService.class); 60 | 61 | 62 | ClusterState clusterState = clusterService.state(); 63 | SnapshotMetaData snapshots = clusterState.getMetaData().custom(SnapshotMetaData.TYPE); 64 | if (snapshots == null || snapshots.entries().isEmpty()) { 65 | logger.info("No snapshots found, snapshots metadata is {}", snapshots == null ? "null" : "empty"); 66 | return; 67 | }
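// Walk every in-progress snapshot: a shard that is still marked ABORTED on a node that has already left the cluster can never report completion on its own, so its status is pushed to the master as FAILED through the internal update-snapshot-status action, allowing the stuck snapshot to finish.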
"null" : "empty"); 66 | return; 67 | } 68 | 69 | for (SnapshotMetaData.Entry entry : snapshots.entries()) { 70 | SnapshotId snapshotId = entry.snapshotId(); 71 | logger.info("Processing snapshot [{}]", snapshotId); 72 | boolean updated = false; 73 | for (ImmutableMap.Entry shard : entry.shards().entrySet()) { 74 | String nodeId = shard.getValue().nodeId(); 75 | if (shard.getValue().state() == SnapshotMetaData.State.ABORTED && clusterState.nodes().get(nodeId) == null) { 76 | logger.info("Found aborted snapshot [{}] on node [{}] - cleaning", shard.getKey(), nodeId); 77 | SnapshotMetaData.ShardSnapshotStatus status = new SnapshotMetaData.ShardSnapshotStatus(nodeId, SnapshotMetaData.State.FAILED, "Aborted"); 78 | UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshotId, shard.getKey(), status); 79 | transportService.sendRequest(clusterService.state().nodes().masterNode(), 80 | SnapshotsService.UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); 81 | updated = true; 82 | } else { 83 | logger.info("Ignoring shard [{}] with state [{}] on node [{}] - node exists : [{}]", shard.getKey(), shard.getValue().state(), nodeId, clusterState.nodes().get(nodeId) != null); 84 | } 85 | } 86 | if (updated == false) { 87 | // Hmm, nothing was found - try to push it by adding fake aborted shard 88 | SnapshotMetaData.ShardSnapshotStatus status = new SnapshotMetaData.ShardSnapshotStatus("fake-node", SnapshotMetaData.State.FAILED, "Aborted"); 89 | UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshotId, new ShardId("fake-index", 0), status); 90 | transportService.sendRequest(clusterService.state().nodes().masterNode(), 91 | SnapshotsService.UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); 92 | } 93 | } 94 | } 95 | 96 | public void close() { 97 | node.close(); 98 | } 99 | 100 | 101 | /** 102 | * Internal request that is used to send changes in snapshot status to master 103 | */ 104 | private static class UpdateIndexShardSnapshotStatusRequest extends TransportRequest { 105 | private SnapshotId snapshotId; 106 | private ShardId shardId; 107 | private SnapshotMetaData.ShardSnapshotStatus status; 108 | 109 | private UpdateIndexShardSnapshotStatusRequest() { 110 | 111 | } 112 | 113 | private UpdateIndexShardSnapshotStatusRequest(SnapshotId snapshotId, ShardId shardId, SnapshotMetaData.ShardSnapshotStatus status) { 114 | this.snapshotId = snapshotId; 115 | this.shardId = shardId; 116 | this.status = status; 117 | } 118 | 119 | @Override 120 | public void readFrom(StreamInput in) throws IOException { 121 | super.readFrom(in); 122 | snapshotId = SnapshotId.readSnapshotId(in); 123 | shardId = ShardId.readShardId(in); 124 | status = SnapshotMetaData.ShardSnapshotStatus.readShardSnapshotStatus(in); 125 | } 126 | 127 | @Override 128 | public void writeTo(StreamOutput out) throws IOException { 129 | super.writeTo(out); 130 | snapshotId.writeTo(out); 131 | shardId.writeTo(out); 132 | status.writeTo(out); 133 | } 134 | 135 | public SnapshotId snapshotId() { 136 | return snapshotId; 137 | } 138 | 139 | public ShardId shardId() { 140 | return shardId; 141 | } 142 | 143 | public SnapshotMetaData.ShardSnapshotStatus status() { 144 | return status; 145 | } 146 | } 147 | 148 | } 149 | -------------------------------------------------------------------------------- /src/main/resources/log4j.properties: -------------------------------------------------------------------------------- 1 
| es.logger.level=INFO 2 | log4j.rootLogger=${es.logger.level}, out 3 | 4 | log4j.appender.out=org.apache.log4j.ConsoleAppender 5 | log4j.appender.out.layout=org.apache.log4j.PatternLayout 6 | log4j.appender.out.layout.conversionPattern=[%d{ISO8601}][%-5p][%-25c] %m%n 7 | log4j.logger.org.elasticsearch.snapshots=TRACE 8 | log4j.logger.org.elasticsearch.index.snapshots=TRACE 9 | -------------------------------------------------------------------------------- /src/test/java/org/motovs/elasticsearch/snapshots/StuckCompletedSnapshotTest.java: -------------------------------------------------------------------------------- 1 | package org.motovs.elasticsearch.snapshots; 2 | 3 | /** 4 | * Created by igor on 10/21/15. 5 | */ 6 | 7 | import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; 8 | import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; 9 | import org.elasticsearch.client.Client; 10 | import org.elasticsearch.cluster.ClusterService; 11 | import org.elasticsearch.cluster.ClusterState; 12 | import org.elasticsearch.cluster.ProcessedClusterStateNonMasterUpdateTask; 13 | import org.elasticsearch.cluster.metadata.MetaData; 14 | import org.elasticsearch.cluster.metadata.SnapshotId; 15 | import org.elasticsearch.cluster.metadata.SnapshotMetaData; 16 | import org.elasticsearch.common.collect.ImmutableList; 17 | import org.elasticsearch.common.collect.ImmutableMap; 18 | import org.elasticsearch.common.settings.ImmutableSettings; 19 | import org.elasticsearch.common.settings.Settings; 20 | import org.elasticsearch.index.shard.ShardId; 21 | import org.elasticsearch.node.Node; 22 | import org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException; 23 | import org.elasticsearch.test.ElasticsearchIntegrationTest; 24 | import org.junit.Test; 25 | 26 | import java.util.concurrent.CountDownLatch; 27 | import java.util.concurrent.TimeUnit; 28 | 29 | import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; 30 | import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; 31 | import static org.hamcrest.Matchers.equalTo; 32 | 33 | /** 34 | */ 35 | @ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0) 36 | public class StuckCompletedSnapshotTest extends ElasticsearchIntegrationTest { 37 | 38 | @Test 39 | public void restorePersistentSettingsTest() throws Exception { 40 | logger.info("--> start 2 nodes"); 41 | Settings nodeSettings = settingsBuilder() 42 | .build(); 43 | internalCluster().startNode(nodeSettings); 44 | Client client = client(); 45 | final String secondNode = internalCluster().startNode(nodeSettings); 46 | logger.info("--> wait for the second node to join the cluster"); 47 | assertThat(client.admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut(), equalTo(false)); 48 | 49 | logger.info("--> create repository"); 50 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo") 51 | .setType("fs").setSettings(ImmutableSettings.settingsBuilder().put("location", newTempDir())).execute().actionGet(); 52 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); 53 | 54 | logger.info("--> create stuck snapshot"); 55 | final CountDownLatch latch = new CountDownLatch(1); 56 | final SnapshotId snapshotId = new SnapshotId("test-repo", "test-snap-1"); 57 | ClusterService masterClusterService = internalCluster().getInstance(ClusterService.class, 
internalCluster().getMasterName()); 58 | masterClusterService.submitStateUpdateTask("create stuck snapshot", new ProcessedClusterStateNonMasterUpdateTask() { 59 | @Override 60 | public void clusterStateProcessed(String s, ClusterState clusterState, ClusterState clusterState1) { 61 | latch.countDown(); 62 | } 63 | 64 | @Override 65 | public ClusterState execute(ClusterState clusterState) throws Exception { 66 | MetaData metaData = clusterState.metaData(); 67 | MetaData.Builder mdBuilder = MetaData.builder(clusterState.metaData()); 68 | SnapshotMetaData snapshots = metaData.custom(SnapshotMetaData.TYPE); 69 | SnapshotMetaData.Entry newSnapshot; 70 | assert (snapshots == null || snapshots.entries().isEmpty()); 71 | ImmutableList indices = ImmutableList.of("test-idx"); 72 | SnapshotMetaData.ShardSnapshotStatus shardSnapshotStatus = new SnapshotMetaData.ShardSnapshotStatus(clusterState.nodes().resolveNode(secondNode).id(), SnapshotMetaData.State.SUCCESS); 73 | ImmutableMap shards = ImmutableMap.of(new ShardId("test-idx", 0), shardSnapshotStatus); 74 | newSnapshot = new SnapshotMetaData.Entry(snapshotId, false, SnapshotMetaData.State.SUCCESS, indices, shards); 75 | snapshots = new SnapshotMetaData(newSnapshot); 76 | mdBuilder.putCustom(SnapshotMetaData.TYPE, snapshots); 77 | return ClusterState.builder(clusterState).metaData(mdBuilder).build(); 78 | } 79 | 80 | @Override 81 | public void onFailure(String s, Throwable throwable) { 82 | latch.countDown(); 83 | } 84 | }); 85 | assertTrue(latch.await(1, TimeUnit.SECONDS)); 86 | logger.info(client.admin().cluster().prepareState().get().getState().toString()); 87 | 88 | logger.info("--> create an index that will have some unallocated shards"); 89 | assertAcked(prepareCreate("test-idx-2", 2, settingsBuilder().put("number_of_shards", 6).put("number_of_replicas", 0))); 90 | ensureGreen(); 91 | 92 | logger.info("--> indexing some data into test-idx-some"); 93 | for (int i = 0; i < 100; i++) { 94 | index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i); 95 | } 96 | refresh(); 97 | assertThat(client().prepareCount("test-idx-2").get().getCount(), equalTo(100L)); 98 | 99 | 100 | String clientNodeId = internalCluster().startNode(settingsBuilder().put(nodeSettings).put("node.client", true)); 101 | Node clientNode = internalCluster().getInstance(Node.class, clientNodeId); 102 | AbortedSnapshotCleaner abortedSnapshotCleaner = new AbortedSnapshotCleaner(clientNode, nodeSettings); 103 | abortedSnapshotCleaner.cleanSnapshots(); 104 | 105 | assertBusy(new Runnable() { 106 | @Override 107 | public void run() { 108 | // Try creating another snapshot 109 | try { 110 | CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setIndices("test-idx-2").execute().actionGet(); 111 | assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(6)); 112 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(6)); 113 | } catch (ConcurrentSnapshotExecutionException ex) { 114 | logger.info("Snapshot is still running"); 115 | fail(); 116 | } 117 | } 118 | }); 119 | } 120 | } 121 | --------------------------------------------------------------------------------