├── src
│   ├── main
│   │   ├── resources
│   │   │   └── es-plugin.properties
│   │   ├── assemblies
│   │   │   └── plugin.xml
│   │   └── java
│   │       └── org
│   │           └── elasticsearch
│   │               ├── cloud
│   │               │   └── aws
│   │               │       ├── AwsS3Service.java
│   │               │       ├── AwsSettingsFilter.java
│   │               │       ├── AwsModule.java
│   │               │       ├── node
│   │               │       │   └── Ec2CustomNodeAttributes.java
│   │               │       ├── blobstore
│   │               │       │   ├── S3ImmutableBlobContainer.java
│   │               │       │   ├── S3BlobStore.java
│   │               │       │   └── AbstractS3BlobContainer.java
│   │               │       ├── network
│   │               │       │   └── Ec2NameResolver.java
│   │               │       ├── AwsEc2Service.java
│   │               │       └── InternalAwsS3Service.java
│   │               ├── discovery
│   │               │   └── ec2
│   │               │       ├── Ec2DiscoveryModule.java
│   │               │       ├── Ec2Discovery.java
│   │               │       └── AwsEc2UnicastHostsProvider.java
│   │               ├── repositories
│   │               │   └── s3
│   │               │       ├── S3RepositoryModule.java
│   │               │       └── S3Repository.java
│   │               └── plugin
│   │                   └── cloud
│   │                       └── aws
│   │                           └── CloudAwsPlugin.java
│   └── test
│       ├── java
│       │   └── org
│       │       └── elasticsearch
│       │           ├── repositories
│       │           │   └── s3
│       │           │       ├── S3SnapshotRestoreOverHttpTest.java
│       │           │       ├── S3SnapshotRestoreOverHttpsTest.java
│       │           │       └── AbstractS3SnapshotRestoreTest.java
│       │           ├── discovery
│       │           │   └── ec2
│       │           │       └── Ec2DiscoveryITest.java
│       │           └── cloud
│       │               └── aws
│       │                   ├── TestAwsS3Service.java
│       │                   ├── AbstractAwsTest.java
│       │                   ├── TestAmazonS3.java
│       │                   └── AmazonS3Wrapper.java
│       └── resources
│           └── log4j.xml
├── .gitignore
├── dev-tools
│   └── release.py
├── CONTRIBUTING.md
├── LICENSE.txt
├── pom.xml
└── README.md
/src/main/resources/es-plugin.properties:
--------------------------------------------------------------------------------
1 | plugin=org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin
2 | version=${project.version}
3 |
4 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /data
2 | /work
3 | /logs
4 | /.idea
5 | /target
6 | .DS_Store
7 | .local-execution-hints.log
8 | *.iml
9 | /.settings
10 | /.project
11 | /.classpath
12 | plugin_tools
13 |
--------------------------------------------------------------------------------
/src/main/assemblies/plugin.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <assembly>
3 |     <id>plugin</id>
4 |     <formats>
5 |         <format>zip</format>
6 |     </formats>
7 |     <includeBaseDirectory>false</includeBaseDirectory>
8 |     <dependencySets>
9 |         <dependencySet>
10 |             <outputDirectory>/</outputDirectory>
11 |             <useProjectArtifact>true</useProjectArtifact>
12 |             <useTransitiveFiltering>true</useTransitiveFiltering>
13 |             <excludes>
14 |                 <exclude>org.elasticsearch:elasticsearch</exclude>
15 |             </excludes>
16 |         </dependencySet>
17 |         <dependencySet>
18 |             <outputDirectory>/</outputDirectory>
19 |             <useProjectArtifact>true</useProjectArtifact>
20 |             <useTransitiveFiltering>true</useTransitiveFiltering>
21 |             <includes>
22 |                 <include>com.amazonaws:aws-java-sdk</include>
23 |                 <include>commons-codec:commons-codec</include>
24 |             </includes>
25 |         </dependencySet>
26 |     </dependencySets>
27 | </assembly>
28 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/AwsS3Service.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import com.amazonaws.services.s3.AmazonS3;
23 | import org.elasticsearch.common.component.LifecycleComponent;
24 |
25 | /**
26 | *
27 | */
28 | public interface AwsS3Service extends LifecycleComponent<AwsS3Service> {
29 | AmazonS3 client();
30 |
31 | AmazonS3 client(String region, String account, String key);
32 | }
33 |
--------------------------------------------------------------------------------
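As a quick illustration of how a component consumes this interface — a minimal sketch; the class and method names below are hypothetical, and only `AwsS3Service` and its two `client()` variants come from the file above:

```java
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.cloud.aws.AwsS3Service;
import org.elasticsearch.common.inject.Inject;

// Hypothetical consumer: constructor injection mirrors the pattern the
// plugin uses elsewhere (e.g. S3Repository).
public class S3BucketChecker {

    private final AwsS3Service s3Service;

    @Inject
    public S3BucketChecker(AwsS3Service s3Service) {
        this.s3Service = s3Service;
    }

    public boolean bucketExists(String bucket) {
        // client() returns the node-wide client built from cloud.aws.* settings;
        // client(region, account, key) builds one with explicit credentials.
        AmazonS3 client = s3Service.client();
        return client.doesBucketExist(bucket);
    }
}
```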
/src/main/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryModule.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.discovery.ec2;
21 |
22 | import org.elasticsearch.discovery.Discovery;
23 | import org.elasticsearch.discovery.zen.ZenDiscoveryModule;
24 |
25 | /**
26 | *
27 | */
28 | public class Ec2DiscoveryModule extends ZenDiscoveryModule {
29 |
30 | @Override
31 | protected void bindDiscovery() {
32 | bind(Discovery.class).to(Ec2Discovery.class).asEagerSingleton();
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/AwsSettingsFilter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import org.elasticsearch.common.settings.ImmutableSettings;
23 | import org.elasticsearch.common.settings.SettingsFilter;
24 |
25 | /**
26 | *
27 | */
28 | public class AwsSettingsFilter implements SettingsFilter.Filter {
29 |
30 | @Override
31 | public void filter(ImmutableSettings.Builder settings) {
32 | settings.remove("cloud.key");
33 | settings.remove("cloud.account");
34 | settings.remove("cloud.aws.access_key");
35 | settings.remove("cloud.aws.secret_key");
36 | }
37 | }
38 |
--------------------------------------------------------------------------------
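The filter removes AWS credentials from settings before they are exposed (for example via the nodes info API). A minimal sketch of its effect — a hypothetical check, not part of the plugin's test suite:

```java
import org.elasticsearch.cloud.aws.AwsSettingsFilter;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class AwsSettingsFilterExample {
    public static void main(String[] args) {
        ImmutableSettings.Builder builder = ImmutableSettings.builder()
                .put("cloud.aws.access_key", "AKIAEXAMPLE")      // fake credentials
                .put("cloud.aws.secret_key", "not-a-real-secret")
                .put("cloud.aws.region", "us-east-1");

        new AwsSettingsFilter().filter(builder);

        Settings filtered = builder.build();
        // credentials are stripped, non-sensitive settings pass through
        assert filtered.get("cloud.aws.access_key") == null;
        assert "us-east-1".equals(filtered.get("cloud.aws.region"));
    }
}
```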
/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch (the "Author") under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. Author licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.repositories.s3;
21 |
22 | import org.elasticsearch.common.settings.ImmutableSettings;
23 | import org.elasticsearch.common.settings.Settings;
24 |
25 | /**
26 | */
27 | public class S3SnapshotRestoreOverHttpTest extends AbstractS3SnapshotRestoreTest {
28 | @Override
29 | public Settings nodeSettings(int nodeOrdinal) {
30 | ImmutableSettings.Builder settings = ImmutableSettings.builder()
31 | .put(super.nodeSettings(nodeOrdinal))
32 | .put("cloud.aws.s3.protocol", "http");
33 | return settings.build();
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/src/test/java/org/elasticsearch/repositories/s3/S3SnapshotRestoreOverHttpsTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch (the "Author") under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. Author licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.repositories.s3;
21 |
22 | import org.elasticsearch.common.settings.ImmutableSettings;
23 | import org.elasticsearch.common.settings.Settings;
24 |
25 | /**
26 | */
27 | public class S3SnapshotRestoreOverHttpsTest extends AbstractS3SnapshotRestoreTest {
28 | @Override
29 | public Settings nodeSettings(int nodeOrdinal) {
30 | ImmutableSettings.Builder settings = ImmutableSettings.builder()
31 | .put(super.nodeSettings(nodeOrdinal))
32 | .put("cloud.aws.s3.protocol", "https");
33 | return settings.build();
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryModule.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to ElasticSearch and Shay Banon under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. ElasticSearch licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.repositories.s3;
21 |
22 | import org.elasticsearch.common.inject.AbstractModule;
23 | import org.elasticsearch.index.snapshots.IndexShardRepository;
24 | import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardRepository;
25 | import org.elasticsearch.repositories.Repository;
26 |
27 | /**
28 | * S3 repository module
29 | */
30 | public class S3RepositoryModule extends AbstractModule {
31 |
32 | public S3RepositoryModule() {
33 | super();
34 | }
35 |
36 | /**
37 | * {@inheritDoc}
38 | */
39 | @Override
40 | protected void configure() {
41 | bind(Repository.class).to(S3Repository.class).asEagerSingleton();
42 | bind(IndexShardRepository.class).to(BlobStoreIndexShardRepository.class).asEagerSingleton();
43 | }
44 | }
45 |
46 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/AwsModule.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import org.elasticsearch.common.inject.AbstractModule;
23 | import org.elasticsearch.common.settings.Settings;
24 |
25 | /**
26 | *
27 | */
28 | public class AwsModule extends AbstractModule {
29 |
30 | private final Settings settings;
31 |
32 | public static final String S3_SERVICE_TYPE_KEY = "cloud.aws.s3service.type";
33 |
34 | public AwsModule(Settings settings) {
35 | this.settings = settings;
36 | }
37 |
38 | @Override
39 | protected void configure() {
40 | bind(AwsS3Service.class).to(getS3ServiceClass(settings)).asEagerSingleton();
41 | bind(AwsEc2Service.class).asEagerSingleton();
42 | }
43 |
44 | public static Class<? extends AwsS3Service> getS3ServiceClass(Settings settings) {
45 | return settings.getAsClass(S3_SERVICE_TYPE_KEY, InternalAwsS3Service.class);
46 | }
47 |
48 | }
--------------------------------------------------------------------------------
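The `cloud.aws.s3service.type` key is the hook that lets the test suite substitute a wrapped client; `AbstractAwsTest` (further down) points it at `TestAwsS3Service`. A minimal sketch of the lookup:

```java
import org.elasticsearch.cloud.aws.AwsModule;
import org.elasticsearch.cloud.aws.AwsS3Service;
import org.elasticsearch.cloud.aws.InternalAwsS3Service;
import org.elasticsearch.cloud.aws.TestAwsS3Service;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class S3ServiceTypeExample {
    public static void main(String[] args) {
        // With no override configured, the internal implementation is returned.
        Class<? extends AwsS3Service> def =
                AwsModule.getS3ServiceClass(ImmutableSettings.builder().build());
        assert def == InternalAwsS3Service.class;

        // The integration tests swap in a wrapped client this way.
        Settings testSettings = ImmutableSettings.builder()
                .put(AwsModule.S3_SERVICE_TYPE_KEY, TestAwsS3Service.class)
                .build();
        assert AwsModule.getS3ServiceClass(testSettings) == TestAwsS3Service.class;
    }
}
```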
/src/test/resources/log4j.xml:
--------------------------------------------------------------------------------
1 | <!-- log4j logging configuration for the test suite -->
--------------------------------------------------------------------------------
/src/test/java/org/elasticsearch/discovery/ec2/Ec2DiscoveryITest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.discovery.ec2;
21 |
22 |
23 | import org.elasticsearch.cloud.aws.AbstractAwsTest;
24 | import org.elasticsearch.cloud.aws.AbstractAwsTest.AwsTest;
25 | import org.elasticsearch.common.settings.Settings;
26 | import org.elasticsearch.plugins.PluginsService;
27 | import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
28 | import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
29 | import org.junit.Test;
30 |
31 | import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
32 |
33 | /**
34 | * Just an empty node start test to check everything is fine when
35 | * starting.
36 | * This test requires AWS to run.
37 | */
38 | @AwsTest
39 | @ClusterScope(scope = Scope.TEST, numDataNodes = 0, numClientNodes = 0, transportClientRatio = 0.0)
40 | public class Ec2DiscoveryITest extends AbstractAwsTest {
41 |
42 | @Test
43 | public void testStart() {
44 | Settings nodeSettings = settingsBuilder()
45 | .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
46 | .put("cloud.enabled", true)
47 | .put("discovery.type", "ec2")
48 | .build();
49 | internalCluster().startNode(nodeSettings);
50 | }
51 |
52 | }
53 |
--------------------------------------------------------------------------------
/src/test/java/org/elasticsearch/cloud/aws/TestAwsS3Service.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 | package org.elasticsearch.cloud.aws;
20 |
21 | import com.amazonaws.services.s3.AmazonS3;
22 | import org.elasticsearch.ElasticsearchException;
23 | import org.elasticsearch.common.inject.Inject;
24 | import org.elasticsearch.common.settings.Settings;
25 | import org.elasticsearch.common.settings.SettingsFilter;
26 |
27 | import java.util.IdentityHashMap;
28 |
29 | /**
30 | *
31 | */
32 | public class TestAwsS3Service extends InternalAwsS3Service {
33 |
34 | IdentityHashMap<AmazonS3, TestAmazonS3> clients = new IdentityHashMap<AmazonS3, TestAmazonS3>();
35 |
36 | @Inject
37 | public TestAwsS3Service(Settings settings, SettingsFilter settingsFilter) {
38 | super(settings, settingsFilter);
39 | }
40 |
41 |
42 | @Override
43 | public synchronized AmazonS3 client() {
44 | return cachedWrapper(super.client());
45 | }
46 |
47 | @Override
48 | public synchronized AmazonS3 client(String region, String account, String key) {
49 | return cachedWrapper(super.client(region, account, key));
50 | }
51 |
52 | private AmazonS3 cachedWrapper(AmazonS3 client) {
53 | TestAmazonS3 wrapper = clients.get(client);
54 | if (wrapper == null) {
55 | wrapper = new TestAmazonS3(client, componentSettings);
56 | clients.put(client, wrapper);
57 | }
58 | return wrapper;
59 | }
60 |
61 | @Override
62 | protected synchronized void doClose() throws ElasticsearchException {
63 | super.doClose();
64 | clients.clear();
65 | }
66 |
67 |
68 | }
69 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/plugin/cloud/aws/CloudAwsPlugin.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.plugin.cloud.aws;
21 |
22 | import org.elasticsearch.cloud.aws.AwsEc2Service;
23 | import org.elasticsearch.cloud.aws.AwsModule;
24 | import org.elasticsearch.cloud.aws.AwsS3Service;
25 | import org.elasticsearch.common.collect.Lists;
26 | import org.elasticsearch.common.component.LifecycleComponent;
27 | import org.elasticsearch.common.inject.Module;
28 | import org.elasticsearch.common.settings.Settings;
29 | import org.elasticsearch.plugins.AbstractPlugin;
30 | import org.elasticsearch.repositories.RepositoriesModule;
31 | import org.elasticsearch.repositories.s3.S3Repository;
32 | import org.elasticsearch.repositories.s3.S3RepositoryModule;
33 |
34 | import java.util.Collection;
35 |
36 | /**
37 | *
38 | */
39 | public class CloudAwsPlugin extends AbstractPlugin {
40 |
41 | private final Settings settings;
42 |
43 | public CloudAwsPlugin(Settings settings) {
44 | this.settings = settings;
45 | }
46 |
47 | @Override
48 | public String name() {
49 | return "cloud-aws";
50 | }
51 |
52 | @Override
53 | public String description() {
54 | return "Cloud AWS Plugin";
55 | }
56 |
57 | @Override
58 | public Collection<Module> modules(Settings settings) {
59 | Collection<Module> modules = Lists.newArrayList();
60 | if (settings.getAsBoolean("cloud.enabled", true)) {
61 | modules.add(new AwsModule(settings));
62 | }
63 | return modules;
64 | }
65 |
66 | @Override
67 | public Collection<Class<? extends LifecycleComponent>> services() {
68 | Collection<Class<? extends LifecycleComponent>> services = Lists.newArrayList();
69 | if (settings.getAsBoolean("cloud.enabled", true)) {
70 | services.add(AwsModule.getS3ServiceClass(settings));
71 | services.add(AwsEc2Service.class);
72 | }
73 | return services;
74 | }
75 |
76 | public void onModule(RepositoriesModule repositoriesModule) {
77 | if (settings.getAsBoolean("cloud.enabled", true)) {
78 | repositoriesModule.registerRepository(S3Repository.TYPE, S3RepositoryModule.class);
79 | }
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
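Every registration above is gated on `cloud.enabled`, so setting it to `false` turns the plugin into a no-op. A hypothetical check, not taken from the test suite:

```java
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugin.cloud.aws.CloudAwsPlugin;

public class CloudEnabledGateExample {
    public static void main(String[] args) {
        Settings disabled = ImmutableSettings.builder()
                .put("cloud.enabled", false)
                .build();
        CloudAwsPlugin plugin = new CloudAwsPlugin(disabled);

        // With cloud.enabled=false no modules are bound, no lifecycle services
        // are started, and the s3 repository type is not registered either.
        assert plugin.modules(disabled).isEmpty();
        assert plugin.services().isEmpty();
    }
}
```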
/src/main/java/org/elasticsearch/cloud/aws/node/Ec2CustomNodeAttributes.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws.node;
21 |
22 | import org.apache.lucene.util.IOUtils;
23 | import org.elasticsearch.ExceptionsHelper;
24 | import org.elasticsearch.cloud.aws.AwsEc2Service;
25 | import org.elasticsearch.cluster.node.DiscoveryNodeService;
26 | import org.elasticsearch.common.collect.Maps;
27 | import org.elasticsearch.common.component.AbstractComponent;
28 | import org.elasticsearch.common.settings.Settings;
29 |
30 | import java.io.BufferedReader;
31 | import java.io.IOException;
32 | import java.io.InputStream;
33 | import java.io.InputStreamReader;
34 | import java.net.URL;
35 | import java.net.URLConnection;
36 | import java.util.Map;
37 |
38 | /**
39 | */
40 | public class Ec2CustomNodeAttributes extends AbstractComponent implements DiscoveryNodeService.CustomAttributesProvider {
41 |
42 | public Ec2CustomNodeAttributes(Settings settings) {
43 | super(settings);
44 | }
45 |
46 | @Override
47 | public Map<String, String> buildAttributes() {
48 | if (!settings.getAsBoolean("cloud.node.auto_attributes", false)) {
49 | return null;
50 | }
51 | Map<String, String> ec2Attributes = Maps.newHashMap();
52 |
53 | URLConnection urlConnection;
54 | InputStream in = null;
55 | try {
56 | URL url = new URL(AwsEc2Service.EC2_METADATA_URL + "placement/availability-zone");
57 | logger.debug("obtaining ec2 [placement/availability-zone] from ec2 meta-data url {}", url);
58 | urlConnection = url.openConnection();
59 | urlConnection.setConnectTimeout(2000);
60 | in = urlConnection.getInputStream();
61 | BufferedReader urlReader = new BufferedReader(new InputStreamReader(in));
62 |
63 | String metadataResult = urlReader.readLine();
64 | if (metadataResult == null || metadataResult.length() == 0) {
65 | logger.error("no ec2 metadata returned from {}", url);
66 | return null;
67 | }
68 | ec2Attributes.put("aws_availability_zone", metadataResult);
69 | } catch (IOException e) {
70 | logger.debug("failed to get metadata for [placement/availability-zone]: " + ExceptionsHelper.detailedMessage(e));
71 | } finally {
72 | IOUtils.closeWhileHandlingException(in);
73 | }
74 |
75 | return ec2Attributes;
76 | }
77 | }
78 |
--------------------------------------------------------------------------------
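With `cloud.node.auto_attributes` enabled, the provider above tags the node with its availability zone, which can then drive shard allocation awareness. A sketch of the behavior; the printed value is illustrative:

```java
import java.util.Map;
import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class Ec2AttributesExample {
    public static void main(String[] args) {
        Settings settings = ImmutableSettings.builder()
                .put("cloud.node.auto_attributes", true)
                .build();

        Map<String, String> attrs = new Ec2CustomNodeAttributes(settings).buildAttributes();
        // On an EC2 instance: e.g. {aws_availability_zone=us-east-1a}.
        // Off EC2 the metadata call times out and the map comes back empty;
        // with the flag at its default (false) the method returns null.
        System.out.println(attrs);
    }
}
```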
/src/test/java/org/elasticsearch/cloud/aws/AbstractAwsTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch (the "Author") under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. Author licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import com.carrotsearch.randomizedtesting.annotations.TestGroup;
23 | import org.elasticsearch.common.Strings;
24 | import org.elasticsearch.common.settings.ImmutableSettings;
25 | import org.elasticsearch.common.settings.Settings;
26 | import org.elasticsearch.env.Environment;
27 | import org.elasticsearch.env.FailedToResolveConfigException;
28 | import org.elasticsearch.plugins.PluginsService;
29 | import org.elasticsearch.test.ElasticsearchIntegrationTest;
30 |
31 | import java.lang.annotation.Documented;
32 | import java.lang.annotation.Inherited;
33 | import java.lang.annotation.Retention;
34 | import java.lang.annotation.RetentionPolicy;
35 |
36 | /**
37 | *
38 | */
39 | public abstract class AbstractAwsTest extends ElasticsearchIntegrationTest {
40 |
41 | /**
42 | * Annotation for tests that require AWS to run. AWS tests are disabled by default.
43 | * Look at README file for details on how to run tests
44 | */
45 | @Documented
46 | @Inherited
47 | @Retention(RetentionPolicy.RUNTIME)
48 | @TestGroup(enabled = false, sysProperty = SYSPROP_AWS)
49 | public @interface AwsTest {
50 | }
51 |
52 | /**
53 | */
54 | public static final String SYSPROP_AWS = "tests.aws";
55 |
56 | @Override
57 | protected Settings nodeSettings(int nodeOrdinal) {
58 | ImmutableSettings.Builder settings = ImmutableSettings.builder()
59 | .put(super.nodeSettings(nodeOrdinal))
60 | .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
61 | .put(AwsModule.S3_SERVICE_TYPE_KEY, TestAwsS3Service.class)
62 | .put("cloud.aws.test.random", randomInt())
63 | .put("cloud.aws.test.write_failures", 0.1);
64 |
65 | Environment environment = new Environment();
66 |
67 | // if explicit, just load it and don't load from env
68 | try {
69 | if (Strings.hasText(System.getProperty("tests.config"))) {
70 | settings.loadFromUrl(environment.resolveConfig(System.getProperty("tests.config")));
71 | } else {
72 | fail("to run integration tests, you need to set -Dtest.aws=true and -Dtests.config=/path/to/elasticsearch.yml");
73 | }
74 | } catch (FailedToResolveConfigException exception) {
75 | fail("your test configuration file is incorrect: " + System.getProperty("tests.config"));
76 | }
77 | return settings.build();
78 | }
79 | }
80 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/discovery/ec2/Ec2Discovery.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.discovery.ec2;
21 |
22 | import org.elasticsearch.Version;
23 | import org.elasticsearch.cloud.aws.AwsEc2Service;
24 | import org.elasticsearch.cluster.ClusterName;
25 | import org.elasticsearch.cluster.ClusterService;
26 | import org.elasticsearch.cluster.node.DiscoveryNodeService;
27 | import org.elasticsearch.common.collect.ImmutableList;
28 | import org.elasticsearch.common.inject.Inject;
29 | import org.elasticsearch.common.settings.Settings;
30 | import org.elasticsearch.discovery.DiscoverySettings;
31 | import org.elasticsearch.discovery.zen.ZenDiscovery;
32 | import org.elasticsearch.discovery.zen.elect.ElectMasterService;
33 | import org.elasticsearch.discovery.zen.ping.ZenPing;
34 | import org.elasticsearch.discovery.zen.ping.ZenPingService;
35 | import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
36 | import org.elasticsearch.node.settings.NodeSettingsService;
37 | import org.elasticsearch.threadpool.ThreadPool;
38 | import org.elasticsearch.transport.TransportService;
39 |
40 | /**
41 | *
42 | */
43 | public class Ec2Discovery extends ZenDiscovery {
44 |
45 | @Inject
46 | public Ec2Discovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService,
47 | ClusterService clusterService, NodeSettingsService nodeSettingsService, ZenPingService pingService,
48 | DiscoveryNodeService discoveryNodeService, AwsEc2Service ec2Service, DiscoverySettings discoverySettings,
49 | ElectMasterService electMasterService) {
50 | super(settings, clusterName, threadPool, transportService, clusterService, nodeSettingsService,
51 | discoveryNodeService, pingService, electMasterService, Version.CURRENT, discoverySettings);
52 | if (settings.getAsBoolean("cloud.enabled", true)) {
53 | ImmutableList<? extends ZenPing> zenPings = pingService.zenPings();
54 | UnicastZenPing unicastZenPing = null;
55 | for (ZenPing zenPing : zenPings) {
56 | if (zenPing instanceof UnicastZenPing) {
57 | unicastZenPing = (UnicastZenPing) zenPing;
58 | break;
59 | }
60 | }
61 |
62 | if (unicastZenPing != null) {
63 | // update the unicast zen ping to add cloud hosts provider
64 | // and, while we are at it, use only it and not the multicast for example
65 | unicastZenPing.addHostsProvider(new AwsEc2UnicastHostsProvider(settings, transportService, ec2Service.client()));
66 | pingService.zenPings(ImmutableList.of(unicastZenPing));
67 | } else {
68 | logger.warn("failed to apply ec2 unicast discovery, no unicast ping found");
69 | }
70 | }
71 | }
72 | }
73 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3ImmutableBlobContainer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws.blobstore;
21 |
22 | import com.amazonaws.services.s3.model.AmazonS3Exception;
23 | import com.amazonaws.services.s3.model.ObjectMetadata;
24 | import org.elasticsearch.common.blobstore.BlobPath;
25 | import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
26 | import org.elasticsearch.common.blobstore.support.BlobStores;
27 |
28 | import java.io.IOException;
29 | import java.io.InputStream;
30 |
31 | /**
32 | *
33 | */
34 | public class S3ImmutableBlobContainer extends AbstractS3BlobContainer implements ImmutableBlobContainer {
35 |
36 | public S3ImmutableBlobContainer(BlobPath path, S3BlobStore blobStore) {
37 | super(path, blobStore);
38 | }
39 |
40 | @Override
41 | public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) {
42 | blobStore.executor().execute(new Runnable() {
43 | @Override
44 | public void run() {
45 | int retry = 0;
46 | // The read limit is ignored by InputStreamIndexInput, but we set it anyway
47 | // in case the implementation changes
48 | is.mark(Integer.MAX_VALUE);
49 | while (true) {
50 | try {
51 | ObjectMetadata md = new ObjectMetadata();
52 | if (blobStore.serverSideEncryption()) {
53 | md.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
54 | }
55 | md.setContentLength(sizeInBytes);
56 | blobStore.client().putObject(blobStore.bucket(), buildKey(blobName), is, md);
57 | listener.onCompleted();
58 | return;
59 | } catch (AmazonS3Exception e) {
60 | if (shouldRetry(e) && retry < blobStore.numberOfRetries()) {
61 | try {
62 | is.reset();
63 | } catch (IOException ex) {
64 | listener.onFailure(e);
65 | return;
66 | }
67 | retry++;
68 | } else {
69 | listener.onFailure(e);
70 | return;
71 | }
72 | } catch (Throwable e) {
73 | listener.onFailure(e);
74 | return;
75 | }
76 | }
77 | }
78 | });
79 | }
80 |
81 | @Override
82 | public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
83 | BlobStores.syncWriteBlob(this, blobName, is, sizeInBytes);
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
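Note that the retry loop above calls `is.reset()` after a failed upload, so callers must supply a mark-supporting stream. A hedged usage sketch; the helper and the assumption of an already-constructed container are hypothetical:

```java
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import org.elasticsearch.cloud.aws.blobstore.S3ImmutableBlobContainer;

public class BlobWriteExample {
    // 'container' stands for an already-constructed S3ImmutableBlobContainer.
    static void upload(S3ImmutableBlobContainer container, File file) throws IOException {
        // BufferedInputStream supports mark()/reset(), which the retry loop in
        // writeBlob relies on to rewind the stream after a failed attempt.
        InputStream is = new BufferedInputStream(new FileInputStream(file));
        try {
            container.writeBlob(file.getName(), is, file.length());
        } finally {
            is.close();
        }
    }
}
```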
/src/test/java/org/elasticsearch/cloud/aws/TestAmazonS3.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch (the "Author") under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. Author licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import com.amazonaws.AmazonClientException;
23 | import com.amazonaws.AmazonServiceException;
24 | import com.amazonaws.services.s3.AmazonS3;
25 | import com.amazonaws.services.s3.model.AmazonS3Exception;
26 | import com.amazonaws.services.s3.model.ObjectMetadata;
27 | import com.amazonaws.services.s3.model.PutObjectResult;
28 | import org.elasticsearch.ElasticsearchException;
29 | import org.elasticsearch.common.settings.Settings;
30 |
31 | import java.io.IOException;
32 | import java.io.InputStream;
33 | import java.io.UnsupportedEncodingException;
34 | import java.security.MessageDigest;
35 | import java.security.NoSuchAlgorithmException;
36 | import java.util.concurrent.ConcurrentHashMap;
37 | import java.util.concurrent.ConcurrentMap;
38 | import java.util.concurrent.atomic.AtomicLong;
39 |
40 | import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble;
41 |
42 | /**
43 | *
44 | */
45 | public class TestAmazonS3 extends AmazonS3Wrapper {
46 |
47 | private double writeFailureRate = 0.0;
48 |
49 | private String randomPrefix;
50 |
51 | ConcurrentMap<String, AtomicLong> accessCounts = new ConcurrentHashMap<String, AtomicLong>();
52 |
53 | private long incrementAndGet(String path) {
54 | AtomicLong value = accessCounts.get(path);
55 | if (value == null) {
56 | value = accessCounts.putIfAbsent(path, new AtomicLong(1));
57 | }
58 | if (value != null) {
59 | return value.incrementAndGet();
60 | }
61 | return 1;
62 | }
63 |
64 | public TestAmazonS3(AmazonS3 delegate, Settings componentSettings) {
65 | super(delegate);
66 | randomPrefix = componentSettings.get("test.random");
67 | writeFailureRate = componentSettings.getAsDouble("test.write_failures", 0.0);
68 | }
69 |
70 | @Override
71 | public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException {
72 | if (shouldFail(bucketName, key, writeFailureRate)) {
73 | long length = metadata.getContentLength();
74 | long partToRead = (long) (length * randomDouble());
75 | byte[] buffer = new byte[1024];
76 | for (long cur = 0; cur < partToRead; cur += buffer.length) {
77 | try {
78 | input.read(buffer, 0, (int) (partToRead - cur > buffer.length ? buffer.length : partToRead - cur));
79 | } catch (IOException ex) {
80 | throw new ElasticsearchException("cannot read input stream", ex);
81 | }
82 | }
83 | AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
84 | ex.setStatusCode(400);
85 | ex.setErrorCode("RequestTimeout");
86 | throw ex;
87 | } else {
88 | return super.putObject(bucketName, key, input, metadata);
89 | }
90 | }
91 |
92 | private boolean shouldFail(String bucketName, String key, double probability) {
93 | if (probability > 0.0) {
94 | String path = randomPrefix + "-" + bucketName + "+" + key;
95 | path += "/" + incrementAndGet(path);
96 | return Math.abs(hashCode(path)) < Integer.MAX_VALUE * probability;
97 | } else {
98 | return false;
99 | }
100 | }
101 |
102 | private int hashCode(String path) {
103 | try {
104 | MessageDigest digest = MessageDigest.getInstance("MD5");
105 | byte[] bytes = digest.digest(path.getBytes("UTF-8"));
106 | int i = 0;
107 | return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
108 | | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
109 | } catch (UnsupportedEncodingException ex) {
110 | throw new ElasticsearchException("cannot calculate hashcode", ex);
111 | } catch (NoSuchAlgorithmException ex) {
112 | throw new ElasticsearchException("cannot calculate hashcode", ex);
113 | }
114 | }
115 | }
116 |
--------------------------------------------------------------------------------
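The wrapper makes failure injection reproducible: the failure decision hashes a per-run random prefix, the object path, and an access counter, so a given write fails deterministically within a run while retries eventually pass. A sketch of standing one up; the delegate client is assumed to exist:

```java
import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.cloud.aws.TestAmazonS3;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

public class TestAmazonS3Example {
    // 'realClient' stands for a fully configured AmazonS3 instance.
    static AmazonS3 flakyClient(AmazonS3 realClient) {
        Settings componentSettings = ImmutableSettings.builder()
                .put("test.random", "42")          // per-run prefix keeps failures stable
                .put("test.write_failures", 0.1)   // roughly 10% of putObject calls fail
                .build();
        // Failed calls throw a 400/RequestTimeout AmazonS3Exception, which the
        // snapshot code under test is expected to retry.
        return new TestAmazonS3(realClient, componentSettings);
    }
}
```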
/dev-tools/release.py:
--------------------------------------------------------------------------------
1 | # Licensed to Elasticsearch under one or more contributor
2 | # license agreements. See the NOTICE file distributed with
3 | # this work for additional information regarding copyright
4 | # ownership. Elasticsearch licenses this file to you under
5 | # the Apache License, Version 2.0 (the "License"); you may
6 | # not use this file except in compliance with the License.
7 | # You may obtain a copy of the License at
8 | #
9 | # http://www.apache.org/licenses/LICENSE-2.0
10 | #
11 | # Unless required by applicable law or agreed to in writing,
12 | # software distributed under the License is distributed on
13 | # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
14 | # either express or implied. See the License for the specific
15 | # language governing permissions and limitations under the License.
16 |
17 | import datetime
18 | import os
19 | import shutil
20 | import sys
21 | import time
22 | import urllib
23 | import urllib.request
24 | import zipfile
25 |
26 | from os.path import dirname, abspath
27 |
28 | """
29 | This tool builds a release from a given elasticsearch plugin branch.
30 |
31 | It is basically a wrapper on top of launch_release.py which:
32 |
33 | - tries to get a more recent version of launch_release.py in ...
34 | - downloads it if needed
35 | - launches it, passing all arguments to it, like:
36 |
37 | $ python3 dev_tools/release.py --branch master --publish --remote origin
38 |
39 | Important options:
40 |
41 | # Dry run
42 | $ python3 dev_tools/release.py
43 |
44 | # Dry run without tests
45 | python3 dev_tools/release.py --skiptests
46 |
47 | # Release, publish artifacts and announce
48 | $ python3 dev_tools/release.py --publish
49 |
50 | See full documentation in launch_release.py
51 | """
52 | env = os.environ
53 |
54 | # Change this if the source repository for your scripts is at a different location
55 | SOURCE_REPO = 'elasticsearch/elasticsearch-plugins-script'
56 | # Re-download the script if our copy is more than 1 day old
57 | SCRIPT_OBSOLETE_DAYS = 1
58 | # Files in master.zip that we ignore
59 | IGNORED_FILES = ['.gitignore', 'README.md']
60 |
61 |
62 | ROOT_DIR = abspath(os.path.join(abspath(dirname(__file__)), '../'))
63 | TARGET_TOOLS_DIR = ROOT_DIR + '/plugin_tools'
64 | DEV_TOOLS_DIR = ROOT_DIR + '/dev-tools'
65 | BUILD_RELEASE_FILENAME = 'release.zip'
66 | BUILD_RELEASE_FILE = TARGET_TOOLS_DIR + '/' + BUILD_RELEASE_FILENAME
67 | SOURCE_URL = 'https://github.com/%s/archive/master.zip' % SOURCE_REPO
68 |
69 | # Download a recent version of the release plugin tool
70 | try:
71 | os.mkdir(TARGET_TOOLS_DIR)
72 | print('directory %s created' % TARGET_TOOLS_DIR)
73 | except FileExistsError:
74 | pass
75 |
76 |
77 | try:
78 | # we check latest update. If we ran an update recently, we
79 | # are not going to check it again
80 | download = True
81 |
82 | try:
83 | last_download_time = datetime.datetime.fromtimestamp(os.path.getmtime(BUILD_RELEASE_FILE))
84 | if (datetime.datetime.now()-last_download_time).days < SCRIPT_OBSOLETE_DAYS:
85 | download = False
86 | except FileNotFoundError:
87 | pass
88 |
89 | if download:
90 | urllib.request.urlretrieve(SOURCE_URL, BUILD_RELEASE_FILE)
91 | with zipfile.ZipFile(BUILD_RELEASE_FILE) as myzip:
92 | for member in myzip.infolist():
93 | filename = os.path.basename(member.filename)
94 | # skip directories
95 | if not filename:
96 | continue
97 | if filename in IGNORED_FILES:
98 | continue
99 |
100 | # copy file (taken from zipfile's extract)
101 | source = myzip.open(member.filename)
102 | target = open(os.path.join(TARGET_TOOLS_DIR, filename), "wb")
103 | with source, target:
104 | shutil.copyfileobj(source, target)
105 | # We keep the original date
106 | date_time = time.mktime(member.date_time + (0, 0, -1))
107 | os.utime(os.path.join(TARGET_TOOLS_DIR, filename), (date_time, date_time))
108 | print('plugin-tools updated from %s' % SOURCE_URL)
109 | except urllib.error.HTTPError:
110 | pass
111 |
112 |
113 | # Let's see if we need to update the release.py script itself
114 | source_time = os.path.getmtime(TARGET_TOOLS_DIR + '/release.py')
115 | repo_time = os.path.getmtime(DEV_TOOLS_DIR + '/release.py')
116 | if source_time > repo_time:
117 | input('release.py needs an update. Press a key to update it...')
118 | shutil.copyfile(TARGET_TOOLS_DIR + '/release.py', DEV_TOOLS_DIR + '/release.py')
119 |
120 | # We can launch the build process
121 | PYTHON = 'python'
122 | # make sure python3 is used if python3 is available
123 | # some systems use python 2 as default
124 | if os.system('python3 --version > /dev/null 2>&1') == 0:
125 |     PYTHON = 'python3'
129 |
130 | release_args = ''
131 | for x in range(1, len(sys.argv)):
132 | release_args += ' ' + sys.argv[x]
133 |
134 | os.system('%s %s/build_release.py %s' % (PYTHON, TARGET_TOOLS_DIR, release_args))
135 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/network/Ec2NameResolver.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws.network;
21 |
22 | import org.apache.lucene.util.IOUtils;
23 | import org.elasticsearch.ExceptionsHelper;
24 | import org.elasticsearch.cloud.aws.AwsEc2Service;
25 | import org.elasticsearch.common.component.AbstractComponent;
26 | import org.elasticsearch.common.network.NetworkService.CustomNameResolver;
27 | import org.elasticsearch.common.settings.Settings;
28 |
29 | import java.io.BufferedReader;
30 | import java.io.IOException;
31 | import java.io.InputStream;
32 | import java.io.InputStreamReader;
33 | import java.net.InetAddress;
34 | import java.net.URL;
35 | import java.net.URLConnection;
36 |
37 | /**
38 | * Resolves certain ec2 related 'meta' hostnames into an actual hostname
39 | * obtained from ec2 meta-data.
40 | *
41 | * Valid config values for {@link Ec2HostnameType}s are -
42 | * <ul>
43 | * <li>_ec2_ - maps to privateIpv4</li>
44 | * <li>_ec2:privateIp_ - maps to privateIpv4</li>
45 | * <li>_ec2:privateIpv4_</li>
46 | * <li>_ec2:privateDns_</li>
47 | * <li>_ec2:publicIp_ - maps to publicIpv4</li>
48 | * <li>_ec2:publicIpv4_</li>
49 | * <li>_ec2:publicDns_</li>
50 | * </ul>
51 | *
52 | * @author Paul_Loy (keteracel)
53 | */
54 | public class Ec2NameResolver extends AbstractComponent implements CustomNameResolver {
55 |
56 | /**
57 | * enum that can be added to over time with more meta-data types (such as ipv6 when this is available)
58 | *
59 | * @author Paul_Loy
60 | */
61 | private static enum Ec2HostnameType {
62 |
63 | PRIVATE_IPv4("ec2:privateIpv4", "local-ipv4"),
64 | PRIVATE_DNS("ec2:privateDns", "local-hostname"),
65 | PUBLIC_IPv4("ec2:publicIpv4", "public-ipv4"),
66 | PUBLIC_DNS("ec2:publicDns", "public-hostname"),
67 |
68 | // some less verbose defaults
69 | PUBLIC_IP("ec2:publicIp", PUBLIC_IPv4.ec2Name),
70 | PRIVATE_IP("ec2:privateIp", PRIVATE_IPv4.ec2Name),
71 | EC2("ec2", PRIVATE_IPv4.ec2Name);
72 |
73 | final String configName;
74 | final String ec2Name;
75 |
76 | private Ec2HostnameType(String configName, String ec2Name) {
77 | this.configName = configName;
78 | this.ec2Name = ec2Name;
79 | }
80 | }
81 |
82 | /**
83 | * Construct a {@link CustomNameResolver}.
84 | */
85 | public Ec2NameResolver(Settings settings) {
86 | super(settings);
87 | }
88 |
89 | /**
90 | * @param type the ec2 hostname type to discover.
91 | * @return the appropriate host resolved from ec2 meta-data.
92 | * @throws IOException if ec2 meta-data cannot be obtained.
93 | * @see CustomNameResolver#resolveIfPossible(String)
94 | */
95 | public InetAddress resolve(Ec2HostnameType type, boolean warnOnFailure) {
96 | URLConnection urlConnection = null;
97 | InputStream in = null;
98 | try {
99 | URL url = new URL(AwsEc2Service.EC2_METADATA_URL + type.ec2Name);
100 | logger.debug("obtaining ec2 hostname from ec2 meta-data url {}", url);
101 | urlConnection = url.openConnection();
102 | urlConnection.setConnectTimeout(2000);
103 | in = urlConnection.getInputStream();
104 | BufferedReader urlReader = new BufferedReader(new InputStreamReader(in));
105 |
106 | String metadataResult = urlReader.readLine();
107 | if (metadataResult == null || metadataResult.length() == 0) {
108 | logger.error("no ec2 metadata returned from {}", url);
109 | return null;
110 | }
111 | return InetAddress.getByName(metadataResult);
112 | } catch (IOException e) {
113 | if (warnOnFailure) {
114 | logger.warn("failed to get metadata for [" + type.configName + "]: " + ExceptionsHelper.detailedMessage(e));
115 | } else {
116 | logger.debug("failed to get metadata for [" + type.configName + "]: " + ExceptionsHelper.detailedMessage(e));
117 | }
118 | return null;
119 | } finally {
120 | IOUtils.closeWhileHandlingException(in);
121 | }
122 | }
123 |
124 | @Override
125 | public InetAddress resolveDefault() {
126 | return null; // using this, one has to explicitly specify _ec2_ in network setting
127 | // return resolve(Ec2HostnameType.DEFAULT, false);
128 | }
129 |
130 | @Override
131 | public InetAddress resolveIfPossible(String value) {
132 | for (Ec2HostnameType type : Ec2HostnameType.values()) {
133 | if (type.configName.equals(value)) {
134 | return resolve(type, true);
135 | }
136 | }
137 | return null;
138 | }
139 |
140 | }
141 |
--------------------------------------------------------------------------------
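In practice the resolver is driven by the `network.host` setting (e.g. `_ec2:privateIpv4_`; the surrounding underscores are stripped before custom resolvers see the value). A hedged direct-call sketch:

```java
import java.net.InetAddress;
import org.elasticsearch.cloud.aws.network.Ec2NameResolver;
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;

public class Ec2ResolveExample {
    public static void main(String[] args) {
        Ec2NameResolver resolver = new Ec2NameResolver(settingsBuilder().build());
        // The value matches Ec2HostnameType.configName; null comes back (with a
        // warning logged) if the metadata endpoint is unreachable within 2s.
        InetAddress addr = resolver.resolveIfPossible("ec2:privateIpv4");
        System.out.println(addr);
    }
}
```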
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | Contributing to elasticsearch
2 | =============================
3 |
4 | Elasticsearch is an open source project and we love to receive contributions from our community — you! There are many ways to contribute, from writing tutorials or blog posts, improving the documentation, submitting bug reports and feature requests or writing code which can be incorporated into Elasticsearch itself.
5 |
6 | Bug reports
7 | -----------
8 |
9 | If you think you have found a bug in Elasticsearch, first make sure that you are testing against the [latest version of Elasticsearch](http://www.elasticsearch.org/download/) - your issue may already have been fixed. If not, search our [issues list](https://github.com/elasticsearch/elasticsearch/issues) on GitHub in case a similar issue has already been opened.
10 |
11 | It is very helpful if you can prepare a reproduction of the bug. In other words, provide a small test case which we can run to confirm your bug. It makes it easier to find the problem and to fix it. Test cases should be provided as `curl` commands which we can copy and paste into a terminal to run locally, for example:
12 |
13 | ```sh
14 | # delete the index
15 | curl -XDELETE localhost:9200/test
16 |
17 | # insert a document
18 | curl -XPUT localhost:9200/test/test/1 -d '{
19 | "title": "test document"
20 | }'
21 |
22 | # this should return XXXX but instead returns YYY
23 | curl ....
24 | ```
25 |
26 | Provide as much information as you can. You may think that the problem lies with your query, when actually it depends on how your data is indexed. The easier it is for us to recreate your problem, the faster it is likely to be fixed.
27 |
28 | Feature requests
29 | ----------------
30 |
31 | If you find yourself wishing for a feature that doesn't exist in Elasticsearch, you are probably not alone. There are bound to be others out there with similar needs. Many of the features that Elasticsearch has today have been added because our users saw the need.
32 | Open an issue on our [issues list](https://github.com/elasticsearch/elasticsearch/issues) on GitHub which describes the feature you would like to see, why you need it, and how it should work.
33 |
34 | Contributing code and documentation changes
35 | -------------------------------------------
36 |
37 | If you have a bugfix or new feature that you would like to contribute to Elasticsearch, please find or open an issue about it first. Talk about what you would like to do. It may be that somebody is already working on it, or that there are particular issues that you should know about before implementing the change.
38 |
39 | We enjoy working with contributors to get their code accepted. There are many approaches to fixing a problem and it is important to find the best approach before writing too much code.
40 |
41 | The process for contributing to any of the [Elasticsearch repositories](https://github.com/elasticsearch/) is similar. Details for individual projects can be found below.
42 |
43 | ### Fork and clone the repository
44 |
45 | You will need to fork the main Elasticsearch code or documentation repository and clone it to your local machine. See
46 | [github help page](https://help.github.com/articles/fork-a-repo) for help.
47 |
48 | Further instructions for specific projects are given below.
49 |
50 | ### Submitting your changes
51 |
52 | Once your changes and tests are ready to submit for review:
53 |
54 | 1. Test your changes
55 | Run the test suite to make sure that nothing is broken.
56 |
57 | 2. Sign the Contributor License Agreement
58 | Please make sure you have signed our [Contributor License Agreement](http://www.elasticsearch.org/contributor-agreement/). We are not asking you to assign copyright to us, but to give us the right to distribute your code without restriction. We ask this of all contributors in order to assure our users of the origin and continuing existence of the code. You only need to sign the CLA once.
59 |
60 | 3. Rebase your changes
61 | Update your local repository with the most recent code from the main Elasticsearch repository, and rebase your branch on top of the latest master branch. We prefer your changes to be squashed into a single commit.
62 |
63 | 4. Submit a pull request
64 | Push your local changes to your forked copy of the repository and [submit a pull request](https://help.github.com/articles/using-pull-requests). In the pull request, describe what your changes do and mention the number of the issue where discussion has taken place, e.g. "Closes #123".
65 |
66 | Then sit back and wait. There will probably be discussion about the pull request and, if any changes are needed, we would love to work with you to get your pull request merged into Elasticsearch.
67 |
68 |
69 | Contributing to the Elasticsearch plugin
70 | ----------------------------------------
71 |
72 | **Repository:** [https://github.com/elasticsearch/elasticsearch-cloud-aws](https://github.com/elasticsearch/elasticsearch-cloud-aws)
73 |
74 | Make sure you have [Maven](http://maven.apache.org) installed, as Elasticsearch uses it as its build system. Integration with IntelliJ and Eclipse should work out of the box. Eclipse users can automatically configure their IDE by running `mvn eclipse:eclipse` and then importing the project into their workspace: `File > Import > Existing project into workspace`.
75 |
76 | Please follow these formatting guidelines:
77 |
78 | * Java indent is 4 spaces
79 | * Line width is 140 characters
80 | * The rest is left to Java coding standards
81 | * Disable “auto-format on save” to prevent unnecessary format changes. This makes reviews much harder as it generates unnecessary formatting changes. If your IDE supports formatting only modified chunks that is fine to do.
82 |
83 | To create a distribution from the source, simply run:
84 |
85 | ```sh
86 | cd elasticsearch-cloud-aws/
87 | mvn clean package -DskipTests
88 | ```
89 |
90 | You will find the newly built packages under: `./target/releases/`.
91 |
92 | Before submitting your changes, run the test suite to make sure that nothing is broken, with:
93 |
94 | ```sh
95 | mvn clean test
96 | ```
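
The `junit4-maven-plugin` configuration in `pom.xml` forwards `tests.class` (and `tests.method`) to the test runner as system properties, so a single suite can be selected from the command line; for example, to run only the HTTP snapshot/restore suite:

```sh
mvn clean test -Dtests.class=org.elasticsearch.repositories.s3.S3SnapshotRestoreOverHttpTest
```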
97 |
98 | Source: [Contributing to elasticsearch](http://www.elasticsearch.org/contributing-to-elasticsearch/)
99 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/blobstore/S3BlobStore.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws.blobstore;
21 |
22 | import com.amazonaws.services.s3.AmazonS3;
23 | import com.amazonaws.services.s3.model.DeleteObjectsRequest;
24 | import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
25 | import com.amazonaws.services.s3.model.ObjectListing;
26 | import com.amazonaws.services.s3.model.S3ObjectSummary;
27 | import org.elasticsearch.common.Nullable;
28 | import org.elasticsearch.common.blobstore.BlobPath;
29 | import org.elasticsearch.common.blobstore.BlobStore;
30 | import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
31 | import org.elasticsearch.common.component.AbstractComponent;
32 | import org.elasticsearch.common.settings.Settings;
33 | import org.elasticsearch.common.unit.ByteSizeUnit;
34 | import org.elasticsearch.common.unit.ByteSizeValue;
35 | import org.elasticsearch.threadpool.ThreadPool;
36 |
37 | import java.util.ArrayList;
38 | import java.util.concurrent.Executor;
39 |
40 | /**
41 | *
42 | */
43 | public class S3BlobStore extends AbstractComponent implements BlobStore {
44 |
45 | private final AmazonS3 client;
46 |
47 | private final String bucket;
48 |
49 | private final String region;
50 |
51 | private final ThreadPool threadPool;
52 |
53 | private final int bufferSizeInBytes;
54 |
55 | private final boolean serverSideEncryption;
56 |
57 | private final int numberOfRetries;
58 |
59 | public S3BlobStore(Settings settings, AmazonS3 client, String bucket, @Nullable String region, ThreadPool threadPool, boolean serverSideEncryption) {
60 | super(settings);
61 | this.client = client;
62 | this.bucket = bucket;
63 | this.region = region;
64 | this.threadPool = threadPool;
65 | this.serverSideEncryption = serverSideEncryption;
66 |
67 | this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
68 | this.numberOfRetries = settings.getAsInt("max_retries", 3);
69 | if (!client.doesBucketExist(bucket)) {
70 | if (region != null) {
71 | client.createBucket(bucket, region);
72 | } else {
73 | client.createBucket(bucket);
74 | }
75 | }
76 | }
77 |
78 | @Override
79 | public String toString() {
80 | return (region == null ? "" : region + "/") + bucket;
81 | }
82 |
83 | public AmazonS3 client() {
84 | return client;
85 | }
86 |
87 | public String bucket() {
88 | return bucket;
89 | }
90 |
91 | public Executor executor() {
92 | return threadPool.executor(ThreadPool.Names.SNAPSHOT_DATA);
93 | }
94 |
95 | public boolean serverSideEncryption() { return serverSideEncryption; }
96 |
97 | public int bufferSizeInBytes() {
98 | return bufferSizeInBytes;
99 | }
100 |
101 | public int numberOfRetries() {
102 | return numberOfRetries;
103 | }
104 |
105 | @Override
106 | public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
107 | return new S3ImmutableBlobContainer(path, this);
108 | }
109 |
110 | @Override
111 | public void delete(BlobPath path) {
112 | ObjectListing prevListing = null;
113 | //From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
114 | //we can do at most 1K objects per delete
115 | //We don't know the bucket name until first object listing
116 | DeleteObjectsRequest multiObjectDeleteRequest = null;
117 | ArrayList<KeyVersion> keys = new ArrayList<KeyVersion>();
118 | while (true) {
119 | ObjectListing list;
120 | if (prevListing != null) {
121 | list = client.listNextBatchOfObjects(prevListing);
122 | } else {
123 | String keyPath = path.buildAsString("/");
124 | if (!keyPath.isEmpty()) {
125 | keyPath = keyPath + "/";
126 | }
127 | list = client.listObjects(bucket, keyPath);
128 | multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
129 | }
130 | for (S3ObjectSummary summary : list.getObjectSummaries()) {
131 | keys.add(new KeyVersion(summary.getKey()));
132 | //Flush the delete request once the batch grows past 500 keys (well under the 1K-per-request limit)
133 | if (keys.size() > 500) {
134 | multiObjectDeleteRequest.setKeys(keys);
135 | client.deleteObjects(multiObjectDeleteRequest);
136 | multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
137 | keys.clear();
138 | }
139 | }
140 | if (list.isTruncated()) {
141 | prevListing = list;
142 | } else {
143 | break;
144 | }
145 | }
146 | if (!keys.isEmpty()) {
147 | multiObjectDeleteRequest.setKeys(keys);
148 | client.deleteObjects(multiObjectDeleteRequest);
149 | }
150 | }
151 |
152 | @Override
153 | public void close() {
154 | }
155 | }
156 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/blobstore/AbstractS3BlobContainer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws.blobstore;
21 |
22 | import com.amazonaws.services.s3.model.AmazonS3Exception;
23 | import com.amazonaws.services.s3.model.ObjectListing;
24 | import com.amazonaws.services.s3.model.S3Object;
25 | import com.amazonaws.services.s3.model.S3ObjectSummary;
26 | import org.apache.lucene.util.IOUtils;
27 | import org.elasticsearch.common.Nullable;
28 | import org.elasticsearch.common.blobstore.BlobMetaData;
29 | import org.elasticsearch.common.blobstore.BlobPath;
30 | import org.elasticsearch.common.blobstore.BlobStoreException;
31 | import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
32 | import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
33 | import org.elasticsearch.common.collect.ImmutableMap;
34 |
35 | import java.io.FileNotFoundException;
36 | import java.io.IOException;
37 | import java.io.InputStream;
38 |
39 | /**
40 | *
41 | */
42 | public class AbstractS3BlobContainer extends AbstractBlobContainer {
43 |
44 | protected final S3BlobStore blobStore;
45 |
46 | protected final String keyPath;
47 |
48 | public AbstractS3BlobContainer(BlobPath path, S3BlobStore blobStore) {
49 | super(path);
50 | this.blobStore = blobStore;
51 | String keyPath = path.buildAsString("/");
52 | if (!keyPath.isEmpty()) {
53 | keyPath = keyPath + "/";
54 | }
55 | this.keyPath = keyPath;
56 | }
57 |
58 | @Override
59 | public boolean blobExists(String blobName) {
60 | try {
61 | blobStore.client().getObjectMetadata(blobStore.bucket(), buildKey(blobName));
62 | return true;
63 | } catch (AmazonS3Exception e) {
64 | return false;
65 | } catch (Throwable e) {
66 | throw new BlobStoreException("failed to check if blob exists", e);
67 | }
68 | }
69 |
70 | @Override
71 | public boolean deleteBlob(String blobName) throws IOException {
72 | blobStore.client().deleteObject(blobStore.bucket(), buildKey(blobName));
73 | return true;
74 | }
75 |
76 | @Override
77 | public void readBlob(final String blobName, final ReadBlobListener listener) {
78 | blobStore.executor().execute(new Runnable() {
79 | @Override
80 | public void run() {
81 | InputStream is;
82 | try {
83 | S3Object object = blobStore.client().getObject(blobStore.bucket(), buildKey(blobName));
84 | is = object.getObjectContent();
85 | } catch (AmazonS3Exception e) {
86 | if (e.getStatusCode() == 404) {
87 | listener.onFailure(new FileNotFoundException(e.getMessage()));
88 | } else {
89 | listener.onFailure(e);
90 | }
91 | return;
92 | } catch (Throwable e) {
93 | listener.onFailure(e);
94 | return;
95 | }
96 | byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
97 | try {
98 | int bytesRead;
99 | while ((bytesRead = is.read(buffer)) != -1) {
100 | listener.onPartial(buffer, 0, bytesRead);
101 | }
102 | listener.onCompleted();
103 | } catch (Throwable e) {
104 | IOUtils.closeWhileHandlingException(is);
105 | listener.onFailure(e);
106 | }
107 | }
108 | });
109 | }
110 |
111 | @Override
112 | public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(@Nullable String blobNamePrefix) throws IOException {
113 | ImmutableMap.Builder<String, BlobMetaData> blobsBuilder = ImmutableMap.builder();
114 | ObjectListing prevListing = null;
115 | while (true) {
116 | ObjectListing list;
117 | if (prevListing != null) {
118 | list = blobStore.client().listNextBatchOfObjects(prevListing);
119 | } else {
120 | if (blobNamePrefix != null) {
121 | list = blobStore.client().listObjects(blobStore.bucket(), buildKey(blobNamePrefix));
122 | } else {
123 | list = blobStore.client().listObjects(blobStore.bucket(), keyPath);
124 | }
125 | }
126 | for (S3ObjectSummary summary : list.getObjectSummaries()) {
127 | String name = summary.getKey().substring(keyPath.length());
128 | blobsBuilder.put(name, new PlainBlobMetaData(name, summary.getSize()));
129 | }
130 | if (list.isTruncated()) {
131 | prevListing = list;
132 | } else {
133 | break;
134 | }
135 | }
136 | return blobsBuilder.build();
137 | }
138 |
139 | @Override
140 | public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
141 | return listBlobsByPrefix(null);
142 | }
143 |
144 | protected String buildKey(String blobName) {
145 | return keyPath + blobName;
146 | }
147 |
148 | protected boolean shouldRetry(AmazonS3Exception e) {
149 | return e.getStatusCode() == 400 && "RequestTimeout".equals(e.getErrorCode());
150 | }
151 |
152 | }
153 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/AwsEc2Service.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import com.amazonaws.ClientConfiguration;
23 | import com.amazonaws.Protocol;
24 | import com.amazonaws.auth.*;
25 | import com.amazonaws.internal.StaticCredentialsProvider;
26 | import com.amazonaws.services.ec2.AmazonEC2;
27 | import com.amazonaws.services.ec2.AmazonEC2Client;
28 | import org.elasticsearch.ElasticsearchException;
29 | import org.elasticsearch.ElasticsearchIllegalArgumentException;
30 | import org.elasticsearch.cloud.aws.network.Ec2NameResolver;
31 | import org.elasticsearch.cloud.aws.node.Ec2CustomNodeAttributes;
32 | import org.elasticsearch.cluster.node.DiscoveryNodeService;
33 | import org.elasticsearch.common.component.AbstractLifecycleComponent;
34 | import org.elasticsearch.common.inject.Inject;
35 | import org.elasticsearch.common.network.NetworkService;
36 | import org.elasticsearch.common.settings.Settings;
37 | import org.elasticsearch.common.settings.SettingsFilter;
38 |
39 | /**
40 | *
41 | */
42 | public class AwsEc2Service extends AbstractLifecycleComponent<AwsEc2Service> {
43 |
44 | public static final String EC2_METADATA_URL = "http://169.254.169.254/latest/meta-data/";
45 |
46 | private AmazonEC2Client client;
47 |
48 | @Inject
49 | public AwsEc2Service(Settings settings, SettingsFilter settingsFilter, NetworkService networkService, DiscoveryNodeService discoveryNodeService) {
50 | super(settings);
51 | settingsFilter.addFilter(new AwsSettingsFilter());
52 | // add specific ec2 name resolver
53 | networkService.addCustomNameResolver(new Ec2NameResolver(settings));
54 | discoveryNodeService.addCustomAttributeProvider(new Ec2CustomNodeAttributes(settings));
55 | }
56 |
57 | public synchronized AmazonEC2 client() {
58 | if (client != null) {
59 | return client;
60 | }
61 |
62 | ClientConfiguration clientConfiguration = new ClientConfiguration();
63 | String protocol = componentSettings.get("protocol", "https").toLowerCase();
64 | protocol = componentSettings.get("ec2.protocol", protocol).toLowerCase();
65 | if ("http".equals(protocol)) {
66 | clientConfiguration.setProtocol(Protocol.HTTP);
67 | } else if ("https".equals(protocol)) {
68 | clientConfiguration.setProtocol(Protocol.HTTPS);
69 | } else {
70 | throw new ElasticsearchIllegalArgumentException("No protocol supported [" + protocol + "], can either be [http] or [https]");
71 | }
72 | String account = componentSettings.get("access_key", settings.get("cloud.account"));
73 | String key = componentSettings.get("secret_key", settings.get("cloud.key"));
74 |
75 | String proxyHost = componentSettings.get("proxy_host");
76 | if (proxyHost != null) {
77 | String portString = componentSettings.get("proxy_port", "80");
78 | Integer proxyPort;
79 | try {
80 | proxyPort = Integer.parseInt(portString, 10);
81 | } catch (NumberFormatException ex) {
82 | throw new ElasticsearchIllegalArgumentException("The configured proxy port value [" + portString + "] is invalid", ex);
83 | }
84 | clientConfiguration.withProxyHost(proxyHost).setProxyPort(proxyPort);
85 | }
86 |
87 | AWSCredentialsProvider credentials;
88 |
89 | if (account == null && key == null) {
90 | credentials = new AWSCredentialsProviderChain(
91 | new EnvironmentVariableCredentialsProvider(),
92 | new SystemPropertiesCredentialsProvider(),
93 | new InstanceProfileCredentialsProvider()
94 | );
95 | } else {
96 | credentials = new AWSCredentialsProviderChain(
97 | new StaticCredentialsProvider(new BasicAWSCredentials(account, key))
98 | );
99 | }
100 |
101 | this.client = new AmazonEC2Client(credentials, clientConfiguration);
102 |
103 | if (componentSettings.get("ec2.endpoint") != null) {
104 | String endpoint = componentSettings.get("ec2.endpoint");
105 | logger.debug("using explicit ec2 endpoint [{}]", endpoint);
106 | client.setEndpoint(endpoint);
107 | } else if (componentSettings.get("region") != null) {
108 | String region = componentSettings.get("region").toLowerCase();
109 | String endpoint;
110 | if (region.equals("us-east-1") || region.equals("us-east")) {
111 | endpoint = "ec2.us-east-1.amazonaws.com";
112 | } else if (region.equals("us-west") || region.equals("us-west-1")) {
113 | endpoint = "ec2.us-west-1.amazonaws.com";
114 | } else if (region.equals("us-west-2")) {
115 | endpoint = "ec2.us-west-2.amazonaws.com";
116 | } else if (region.equals("ap-southeast") || region.equals("ap-southeast-1")) {
117 | endpoint = "ec2.ap-southeast-1.amazonaws.com";
118 | } else if (region.equals("ap-southeast-2")) {
119 | endpoint = "ec2.ap-southeast-2.amazonaws.com";
120 | } else if (region.equals("ap-northeast") || region.equals("ap-northeast-1")) {
121 | endpoint = "ec2.ap-northeast-1.amazonaws.com";
122 | } else if (region.equals("eu-west") || region.equals("eu-west-1")) {
123 | endpoint = "ec2.eu-west-1.amazonaws.com";
124 | } else if (region.equals("sa-east") || region.equals("sa-east-1")) {
125 | endpoint = "ec2.sa-east-1.amazonaws.com";
126 | } else {
127 | throw new ElasticsearchIllegalArgumentException("No automatic endpoint could be derived from region [" + region + "]");
128 | }
129 | if (endpoint != null) {
130 | logger.debug("using ec2 region [{}], with endpoint [{}]", region, endpoint);
131 | client.setEndpoint(endpoint);
132 | }
133 | }
134 |
135 | return this.client;
136 |
137 | }
138 |
139 | @Override
140 | protected void doStart() throws ElasticsearchException {
141 | }
142 |
143 | @Override
144 | protected void doStop() throws ElasticsearchException {
145 | }
146 |
147 | @Override
148 | protected void doClose() throws ElasticsearchException {
149 | if (client != null) {
150 | client.shutdown();
151 | }
152 | }
153 | }
154 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to ElasticSearch and Shay Banon under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. ElasticSearch licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.repositories.s3;
21 |
22 | import org.elasticsearch.cloud.aws.AwsS3Service;
23 | import org.elasticsearch.cloud.aws.blobstore.S3BlobStore;
24 | import org.elasticsearch.common.Strings;
25 | import org.elasticsearch.common.blobstore.BlobPath;
26 | import org.elasticsearch.common.blobstore.BlobStore;
27 | import org.elasticsearch.common.inject.Inject;
28 | import org.elasticsearch.common.unit.ByteSizeUnit;
29 | import org.elasticsearch.common.unit.ByteSizeValue;
30 | import org.elasticsearch.index.snapshots.IndexShardRepository;
31 | import org.elasticsearch.repositories.RepositoryException;
32 | import org.elasticsearch.repositories.RepositoryName;
33 | import org.elasticsearch.repositories.RepositorySettings;
34 | import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
35 | import org.elasticsearch.threadpool.ThreadPool;
36 |
37 | import java.io.IOException;
38 | import java.util.Locale;
39 |
40 | /**
41 |  * S3 implementation of the BlobStoreRepository
42 |  * <p/>
43 |  * The S3 repository supports the following settings
44 |  * <dl>
45 |  * <dt>{@code bucket}</dt><dd>S3 bucket</dd>
46 |  * <dt>{@code region}</dt><dd>S3 region. Defaults to us-east</dd>
47 |  * <dt>{@code base_path}</dt><dd>Specifies the path within the bucket to the repository data. Defaults to the root directory.</dd>
48 |  * <dt>{@code concurrent_streams}</dt><dd>Number of concurrent read/write streams (per repository on each node). Defaults to 5.</dd>
49 |  * <dt>{@code chunk_size}</dt><dd>Large files can be divided into chunks. This parameter specifies the chunk size. Defaults to not chunked.</dd>
50 |  * <dt>{@code compress}</dt><dd>If set to true, metadata files will be stored compressed. Defaults to false.</dd>
51 |  * </dl>
52 |  */
53 | public class S3Repository extends BlobStoreRepository {
54 |
55 | public final static String TYPE = "s3";
56 |
57 | private final S3BlobStore blobStore;
58 |
59 | private final BlobPath basePath;
60 |
61 | private ByteSizeValue chunkSize;
62 |
63 | private boolean compress;
64 |
65 | /**
66 |  * Constructs a new S3 repository
67 | *
68 | * @param name repository name
69 | * @param repositorySettings repository settings
70 | * @param indexShardRepository index shard repository
71 | * @param s3Service S3 service
72 | * @throws IOException
73 | */
74 | @Inject
75 | public S3Repository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository, AwsS3Service s3Service, ThreadPool threadPool) throws IOException {
76 | super(name.getName(), repositorySettings, indexShardRepository);
77 |
78 | String bucket = repositorySettings.settings().get("bucket", componentSettings.get("bucket"));
79 | if (bucket == null) {
80 | throw new RepositoryException(name.name(), "No bucket defined for s3 gateway");
81 | }
82 |
83 | String region = repositorySettings.settings().get("region", componentSettings.get("region"));
84 | if (region == null) {
85 | // Region setting is not set - use global region setting
86 | String regionSetting = repositorySettings.settings().get("cloud.aws.region", settings.get("cloud.aws.region"));
87 | if (regionSetting != null) {
88 | regionSetting = regionSetting.toLowerCase(Locale.ENGLISH);
89 | if ("us-east".equals(regionSetting)) {
90 | // Default (US Standard) region - leave region as null
91 | region = null;
92 | } else if ("us-east-1".equals(regionSetting)) {
93 | region = null;
94 | } else if ("us-west".equals(regionSetting)) {
95 | region = "us-west-1";
96 | } else if ("us-west-1".equals(regionSetting)) {
97 | region = "us-west-1";
98 | } else if ("us-west-2".equals(regionSetting)) {
99 | region = "us-west-2";
100 | } else if ("ap-southeast".equals(regionSetting)) {
101 | region = "ap-southeast-1";
102 | } else if ("ap-southeast-1".equals(regionSetting)) {
103 | region = "ap-southeast-1";
104 | } else if ("ap-southeast-2".equals(regionSetting)) {
105 | region = "ap-southeast-2";
106 | } else if ("ap-northeast".equals(regionSetting)) {
107 | region = "ap-northeast-1";
108 | } else if ("ap-northeast-1".equals(regionSetting)) {
109 | region = "ap-northeast-1";
110 | } else if ("eu-west".equals(regionSetting)) {
111 | region = "EU";
112 | } else if ("eu-west-1".equals(regionSetting)) {
113 | region = "EU";
114 | } else if ("sa-east".equals(regionSetting)) {
115 | region = "sa-east-1";
116 | } else if ("sa-east-1".equals(regionSetting)) {
117 | region = "sa-east-1";
118 | }
119 | }
120 | }
121 |
122 | boolean serverSideEncryption = repositorySettings.settings().getAsBoolean("server_side_encryption", componentSettings.getAsBoolean("server_side_encryption", false));
123 | this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", componentSettings.getAsBytesSize("chunk_size", new ByteSizeValue(100, ByteSizeUnit.MB)));
124 | this.compress = repositorySettings.settings().getAsBoolean("compress", componentSettings.getAsBoolean("compress", false));
125 | logger.debug("using bucket [{}], region [{}], chunk_size [{}], server_side_encryption [{}]", bucket, region, chunkSize, serverSideEncryption); // chunkSize is assigned above so the logged value is no longer null
126 | blobStore = new S3BlobStore(settings, s3Service.client(region, repositorySettings.settings().get("access_key"), repositorySettings.settings().get("secret_key")), bucket, region, threadPool, serverSideEncryption);
127 | String basePath = repositorySettings.settings().get("base_path", null);
128 | if (Strings.hasLength(basePath)) {
129 | BlobPath path = new BlobPath();
130 | for(String elem : Strings.splitStringToArray(basePath, '/')) {
131 | path = path.add(elem);
132 | }
133 | this.basePath = path;
134 | } else {
135 | this.basePath = BlobPath.cleanPath();
136 | }
137 | }
138 |
139 | /**
140 | * {@inheritDoc}
141 | */
142 | @Override
143 | protected BlobStore blobStore() {
144 | return blobStore;
145 | }
146 |
147 | @Override
148 | protected BlobPath basePath() {
149 | return basePath;
150 | }
151 |
152 | /**
153 | * {@inheritDoc}
154 | */
155 | @Override
156 | protected boolean isCompress() {
157 | return compress;
158 | }
159 |
160 | /**
161 | * {@inheritDoc}
162 | */
163 | @Override
164 | protected ByteSizeValue chunkSize() {
165 | return chunkSize;
166 | }
167 |
168 |
169 | }
170 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/cloud/aws/InternalAwsS3Service.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import com.amazonaws.ClientConfiguration;
23 | import com.amazonaws.Protocol;
24 | import com.amazonaws.auth.*;
25 | import com.amazonaws.internal.StaticCredentialsProvider;
26 | import com.amazonaws.services.s3.AmazonS3;
27 | import com.amazonaws.services.s3.AmazonS3Client;
28 | import org.elasticsearch.ElasticsearchException;
29 | import org.elasticsearch.ElasticsearchIllegalArgumentException;
30 | import org.elasticsearch.common.collect.Tuple;
31 | import org.elasticsearch.common.component.AbstractLifecycleComponent;
32 | import org.elasticsearch.common.inject.Inject;
33 | import org.elasticsearch.common.settings.Settings;
34 | import org.elasticsearch.common.settings.SettingsFilter;
35 |
36 | import java.util.HashMap;
37 | import java.util.Map;
38 |
39 | /**
40 | *
41 | */
42 | public class InternalAwsS3Service extends AbstractLifecycleComponent<AwsS3Service> implements AwsS3Service {
43 |
44 | /**
45 |  * (access key, endpoint) -> client
46 |  */
47 | private Map<Tuple<String, String>, AmazonS3Client> clients = new HashMap<Tuple<String, String>, AmazonS3Client>();
48 |
49 | @Inject
50 | public InternalAwsS3Service(Settings settings, SettingsFilter settingsFilter) {
51 | super(settings);
52 |
53 | settingsFilter.addFilter(new AwsSettingsFilter());
54 | }
55 |
56 | @Override
57 | public synchronized AmazonS3 client() {
58 | String endpoint = getDefaultEndpoint();
59 | String account = componentSettings.get("access_key", settings.get("cloud.account"));
60 | String key = componentSettings.get("secret_key", settings.get("cloud.key"));
61 |
62 | return getClient(endpoint, account, key);
63 | }
64 |
65 | @Override
66 | public synchronized AmazonS3 client(String region, String account, String key) {
67 | String endpoint;
68 | if (region == null) {
69 | endpoint = getDefaultEndpoint();
70 | } else {
71 | endpoint = getEndpoint(region);
72 | logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint);
73 | }
74 | if (account == null || key == null) {
75 | account = componentSettings.get("access_key", settings.get("cloud.account"));
76 | key = componentSettings.get("secret_key", settings.get("cloud.key"));
77 | }
78 |
79 | return getClient(endpoint, account, key);
80 | }
81 |
82 |
83 | private synchronized AmazonS3 getClient(String endpoint, String account, String key) {
84 | Tuple<String, String> clientDescriptor = new Tuple<String, String>(endpoint, account);
85 | AmazonS3Client client = clients.get(clientDescriptor);
86 | if (client != null) {
87 | return client;
88 | }
89 |
90 | ClientConfiguration clientConfiguration = new ClientConfiguration();
91 | String protocol = componentSettings.get("protocol", "https").toLowerCase();
92 | protocol = componentSettings.get("s3.protocol", protocol).toLowerCase();
93 | if ("http".equals(protocol)) {
94 | clientConfiguration.setProtocol(Protocol.HTTP);
95 | } else if ("https".equals(protocol)) {
96 | clientConfiguration.setProtocol(Protocol.HTTPS);
97 | } else {
98 | throw new ElasticsearchIllegalArgumentException("No protocol supported [" + protocol + "], can either be [http] or [https]");
99 | }
100 |
101 | String proxyHost = componentSettings.get("proxy_host");
102 | if (proxyHost != null) {
103 | String portString = componentSettings.get("proxy_port", "80");
104 | Integer proxyPort;
105 | try {
106 | proxyPort = Integer.parseInt(portString, 10);
107 | } catch (NumberFormatException ex) {
108 | throw new ElasticsearchIllegalArgumentException("The configured proxy port value [" + portString + "] is invalid", ex);
109 | }
110 | clientConfiguration.withProxyHost(proxyHost).setProxyPort(proxyPort);
111 | }
112 |
113 | AWSCredentialsProvider credentials;
114 |
115 | if (account == null && key == null) {
116 | credentials = new AWSCredentialsProviderChain(
117 | new EnvironmentVariableCredentialsProvider(),
118 | new SystemPropertiesCredentialsProvider(),
119 | new InstanceProfileCredentialsProvider()
120 | );
121 | } else {
122 | credentials = new AWSCredentialsProviderChain(
123 | new StaticCredentialsProvider(new BasicAWSCredentials(account, key))
124 | );
125 | }
126 | client = new AmazonS3Client(credentials, clientConfiguration);
127 |
128 | if (endpoint != null) {
129 | client.setEndpoint(endpoint);
130 | }
131 | clients.put(clientDescriptor, client);
132 | return client;
133 | }
134 |
135 | private String getDefaultEndpoint() {
136 | String endpoint = null;
137 | if (componentSettings.get("s3.endpoint") != null) {
138 | endpoint = componentSettings.get("s3.endpoint");
139 | logger.debug("using explicit s3 endpoint [{}]", endpoint);
140 | } else if (componentSettings.get("region") != null) {
141 | String region = componentSettings.get("region").toLowerCase();
142 | endpoint = getEndpoint(region);
143 | logger.debug("using s3 region [{}], with endpoint [{}]", region, endpoint);
144 | }
145 | return endpoint;
146 | }
147 |
148 | private static String getEndpoint(String region) {
149 | if ("us-east".equals(region)) {
150 | return "s3.amazonaws.com";
151 | } else if ("us-east-1".equals(region)) {
152 | return "s3.amazonaws.com";
153 | } else if ("us-west".equals(region)) {
154 | return "s3-us-west-1.amazonaws.com";
155 | } else if ("us-west-1".equals(region)) {
156 | return "s3-us-west-1.amazonaws.com";
157 | } else if ("us-west-2".equals(region)) {
158 | return "s3-us-west-2.amazonaws.com";
159 | } else if ("ap-southeast".equals(region)) {
160 | return "s3-ap-southeast-1.amazonaws.com";
161 | } else if ("ap-southeast-1".equals(region)) {
162 | return "s3-ap-southeast-1.amazonaws.com";
163 | } else if ("ap-southeast-2".equals(region)) {
164 | return "s3-ap-southeast-2.amazonaws.com";
165 | } else if ("ap-northeast".equals(region)) {
166 | return "s3-ap-northeast-1.amazonaws.com";
167 | } else if ("ap-northeast-1".equals(region)) {
168 | return "s3-ap-northeast-1.amazonaws.com";
169 | } else if ("eu-west".equals(region)) {
170 | return "s3-eu-west-1.amazonaws.com";
171 | } else if ("eu-west-1".equals(region)) {
172 | return "s3-eu-west-1.amazonaws.com";
173 | } else if ("sa-east".equals(region)) {
174 | return "s3-sa-east-1.amazonaws.com";
175 | } else if ("sa-east-1".equals(region)) {
176 | return "s3-sa-east-1.amazonaws.com";
177 | } else {
178 | throw new ElasticsearchIllegalArgumentException("No automatic endpoint could be derived from region [" + region + "]");
179 | }
180 | }
181 |
182 | @Override
183 | protected void doStart() throws ElasticsearchException {
184 | }
185 |
186 | @Override
187 | protected void doStop() throws ElasticsearchException {
188 | }
189 |
190 | @Override
191 | protected void doClose() throws ElasticsearchException {
192 | for (AmazonS3Client client : clients.values()) {
193 | client.shutdown();
194 | }
195 | }
196 | }
197 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/discovery/ec2/AwsEc2UnicastHostsProvider.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.discovery.ec2;
21 |
22 | import com.amazonaws.AmazonClientException;
23 | import com.amazonaws.services.ec2.AmazonEC2;
24 | import com.amazonaws.services.ec2.model.*;
25 | import org.elasticsearch.Version;
26 | import org.elasticsearch.cluster.node.DiscoveryNode;
27 | import org.elasticsearch.common.Strings;
28 | import org.elasticsearch.common.collect.ImmutableMap;
29 | import org.elasticsearch.common.collect.ImmutableSet;
30 | import org.elasticsearch.common.collect.Lists;
31 | import org.elasticsearch.common.collect.Sets;
32 | import org.elasticsearch.common.component.AbstractComponent;
33 | import org.elasticsearch.common.inject.Inject;
34 | import org.elasticsearch.common.settings.Settings;
35 | import org.elasticsearch.common.transport.TransportAddress;
36 | import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
37 | import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
38 | import org.elasticsearch.transport.TransportService;
39 |
40 | import java.util.ArrayList;
41 | import java.util.Collections;
42 | import java.util.List;
43 | import java.util.Map;
44 | import java.util.Set;
45 |
46 | /**
47 | *
48 | */
49 | public class AwsEc2UnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
50 |
51 | private static enum HostType {
52 | PRIVATE_IP,
53 | PUBLIC_IP,
54 | PRIVATE_DNS,
55 | PUBLIC_DNS
56 | }
57 |
58 | private final TransportService transportService;
59 |
60 | private final AmazonEC2 client;
61 |
62 | private final boolean bindAnyGroup;
63 |
64 | private final ImmutableSet<String> groups;
65 |
66 | private final ImmutableMap<String, String> tags;
67 |
68 | private final ImmutableSet<String> availabilityZones;
69 |
70 | private final HostType hostType;
71 |
72 | @Inject
73 | public AwsEc2UnicastHostsProvider(Settings settings, TransportService transportService, AmazonEC2 client) {
74 | super(settings);
75 | this.transportService = transportService;
76 | this.client = client;
77 |
78 | this.hostType = HostType.valueOf(componentSettings.get("host_type", "private_ip").toUpperCase());
79 |
80 | this.bindAnyGroup = componentSettings.getAsBoolean("any_group", true);
81 | this.groups = ImmutableSet.copyOf(componentSettings.getAsArray("groups"));
82 |
83 | this.tags = componentSettings.getByPrefix("tag.").getAsMap();
84 |
85 | Set<String> availabilityZones = Sets.newHashSet(componentSettings.getAsArray("availability_zones"));
86 | if (componentSettings.get("availability_zones") != null) {
87 | availabilityZones.addAll(Strings.commaDelimitedListToSet(componentSettings.get("availability_zones")));
88 | }
89 | this.availabilityZones = ImmutableSet.copyOf(availabilityZones);
90 |
91 | if (logger.isDebugEnabled()) {
92 | logger.debug("using host_type [{}], tags [{}], groups [{}] with any_group [{}], availability_zones [{}]", hostType, tags, groups, bindAnyGroup, availabilityZones);
93 | }
94 | }
95 |
96 | @Override
97 | public List<DiscoveryNode> buildDynamicNodes() {
98 | List<DiscoveryNode> discoNodes = Lists.newArrayList();
99 |
100 | DescribeInstancesResult descInstances;
101 | try {
102 | // Query EC2 API based on AZ, instance state, and tag.
103 |
104 | // NOTE: we don't filter by security group during the describe instances request for two reasons:
105 | // 1. differences in VPCs require different parameters during query (ID vs Name)
106 | // 2. We want to use two different strategies: (all security groups vs. any security groups)
107 | descInstances = client.describeInstances(buildDescribeInstancesRequest());
108 | } catch (AmazonClientException e) {
109 | logger.info("Exception while retrieving instance list from AWS API: {}", e.getMessage());
110 | logger.debug("Full exception:", e);
111 | return discoNodes;
112 | }
113 |
114 | logger.trace("building dynamic unicast discovery nodes...");
115 | for (Reservation reservation : descInstances.getReservations()) {
116 | for (Instance instance : reservation.getInstances()) {
117 | // lets see if we can filter based on groups
118 | if (!groups.isEmpty()) {
119 | List<GroupIdentifier> instanceSecurityGroups = instance.getSecurityGroups();
120 | ArrayList<String> securityGroupNames = new ArrayList<String>();
121 | ArrayList<String> securityGroupIds = new ArrayList<String>();
122 | for (GroupIdentifier sg : instanceSecurityGroups) {
123 | securityGroupNames.add(sg.getGroupName());
124 | securityGroupIds.add(sg.getGroupId());
125 | }
126 | if (bindAnyGroup) {
127 | // We check if we can find at least one group name or one group id in groups.
128 | if (Collections.disjoint(securityGroupNames, groups)
129 | && Collections.disjoint(securityGroupIds, groups)) {
130 | logger.trace("filtering out instance {} based on groups {}, not part of {}", instance.getInstanceId(), instanceSecurityGroups, groups);
131 | // continue to the next instance
132 | continue;
133 | }
134 | } else {
135 | // We need to match all group names or group ids, otherwise we ignore this instance
136 | if (!(securityGroupNames.containsAll(groups) || securityGroupIds.containsAll(groups))) {
137 | logger.trace("filtering out instance {} based on groups {}, does not include all of {}", instance.getInstanceId(), instanceSecurityGroups, groups);
138 | // continue to the next instance
139 | continue;
140 | }
141 | }
142 | }
143 |
144 | String address = null;
145 | switch (hostType) {
146 | case PRIVATE_DNS:
147 | address = instance.getPrivateDnsName();
148 | break;
149 | case PRIVATE_IP:
150 | address = instance.getPrivateIpAddress();
151 | break;
152 | case PUBLIC_DNS:
153 | address = instance.getPublicDnsName();
154 | break;
155 | case PUBLIC_IP:
156 | address = instance.getPublicIpAddress();
157 | break;
158 | }
159 | if (address != null) {
160 | try {
161 | TransportAddress[] addresses = transportService.addressesFromString(address);
162 | // we limit to 1 address; it makes no sense to ping 100 ports
163 | for (int i = 0; (i < addresses.length && i < UnicastZenPing.LIMIT_PORTS_COUNT); i++) {
164 | logger.trace("adding {}, address {}, transport_address {}", instance.getInstanceId(), address, addresses[i]);
165 | discoNodes.add(new DiscoveryNode("#cloud-" + instance.getInstanceId() + "-" + i, addresses[i], Version.CURRENT));
166 | }
167 | } catch (Exception e) {
168 | logger.warn("failed to add {}, address {}", e, instance.getInstanceId(), address);
169 | }
170 | } else {
171 | logger.trace("not adding {}, address is null, host_type {}", instance.getInstanceId(), hostType);
172 | }
173 | }
174 | }
175 |
176 | logger.debug("using dynamic discovery nodes {}", discoNodes);
177 |
178 | return discoNodes;
179 | }
180 |
181 | private DescribeInstancesRequest buildDescribeInstancesRequest() {
182 | DescribeInstancesRequest describeInstancesRequest = new DescribeInstancesRequest()
183 | .withFilters(
184 | new Filter("instance-state-name").withValues("running", "pending")
185 | );
186 |
187 | for (Map.Entry<String, String> tagFilter : tags.entrySet()) {
188 | // for a given tag key, OR relationship for multiple different values
189 | describeInstancesRequest.withFilters(
190 | new Filter("tag:" + tagFilter.getKey()).withValues(tagFilter.getValue())
191 | );
192 | }
193 |
194 | if (!availabilityZones.isEmpty()) {
195 | // OR relationship amongst multiple values of the availability-zone filter
196 | describeInstancesRequest.withFilters(
197 | new Filter("availability-zone").withValues(availabilityZones)
198 | );
199 | }
200 |
201 | return describeInstancesRequest;
202 | }
203 | }
204 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 |
5 | 4.0.0
6 | org.elasticsearch
7 | elasticsearch-cloud-aws
8 | 3.0.0-SNAPSHOT
9 | jar
10 | Elasticsearch AWS cloud plugin
11 | The Amazon Web Service (AWS) Cloud plugin allows to use AWS API for the unicast discovery mechanism and add S3 repositories.
12 | https://github.com/elasticsearch/elasticsearch-cloud-aws/
13 | 2009
14 |
15 |
16 | The Apache Software License, Version 2.0
17 | http://www.apache.org/licenses/LICENSE-2.0.txt
18 | repo
19 |
20 |
21 |
22 | scm:git:git@github.com:elasticsearch/elasticsearch-cloud-aws.git
23 | scm:git:git@github.com:elasticsearch/elasticsearch-cloud-aws.git
24 |
25 | http://github.com/elasticsearch/elasticsearch-cloud-aws
26 |
27 |
28 |
29 | org.sonatype.oss
30 | oss-parent
31 | 7
32 |
33 |
34 |
35 | 2.0.0-SNAPSHOT
36 | 4.9.0
37 | onerror
38 | true
39 | onerror
40 |
41 |
42 | INFO
43 |
44 |
45 |
46 |
47 | sonatype
48 | http://oss.sonatype.org/content/repositories/releases/
49 |
50 |
51 |
52 |
53 |
54 | org.hamcrest
55 | hamcrest-core
56 | 1.3.RC2
57 | test
58 |
59 |
60 | org.hamcrest
61 | hamcrest-library
62 | 1.3.RC2
63 | test
64 |
65 |
66 | org.apache.lucene
67 | lucene-test-framework
68 | ${lucene.version}
69 | test
70 |
71 |
72 |
73 | org.elasticsearch
74 | elasticsearch
75 | ${elasticsearch.version}
76 | provided
77 |
78 |
79 | org.apache.lucene
80 | lucene-core
81 | ${lucene.version}
82 | provided
83 |
84 |
85 |
86 | com.amazonaws
87 | aws-java-sdk
88 | 1.7.13
89 | compile
90 |
91 |
92 |
93 | org.codehaus.jackson
94 | jackson-core-asl
95 |
96 |
97 | org.codehaus.jackson
98 | jackson-mapper-asl
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 | commons-codec
107 | commons-codec
108 | 1.4
109 |
110 |
111 |
112 | log4j
113 | log4j
114 | 1.2.17
115 | test
116 | true
117 |
118 |
119 |
120 | org.elasticsearch
121 | elasticsearch
122 | ${elasticsearch.version}
123 | test-jar
124 | test
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 | src/main/resources
133 | true
134 |
135 |
136 |
137 |
138 | org.apache.maven.plugins
139 | maven-compiler-plugin
140 | 3.0
141 |
142 | 1.7
143 | 1.7
144 |
145 |
146 |
147 | com.carrotsearch.randomizedtesting
148 | junit4-maven-plugin
149 | 2.0.14
150 |
151 |
152 | tests
153 | test
154 |
155 | junit4
156 |
157 |
158 | 20
159 | pipe,warn
160 | true
161 |
162 |
164 |
174 |
175 |
176 |
177 |
178 |
179 | 1
180 |
181 |
182 |
183 |
184 |
185 |
186 | **/*Tests.class
187 | **/*Test.class
188 |
189 |
190 | **/Abstract*.class
191 | **/*StressTest.class
192 |
193 |
194 | ${tests.jvm.argline}
195 |
196 |
197 | -Xmx512m
198 | -Xss256k
199 | -XX:MaxDirectMemorySize=512m
200 | -Des.logger.prefix=
201 |
202 | ${tests.shuffle}
203 | ${tests.verbose}
204 | ${tests.seed}
205 | ${tests.failfast}
206 |
207 |
208 | ${tests.jvm.argline}
209 | ${tests.iters}
210 | ${tests.maxfailures}
211 | ${tests.failfast}
212 | ${tests.class}
213 | ${tests.method}
214 | ${tests.nightly}
215 | ${tests.badapples}
216 | ${tests.weekly}
217 | ${tests.slow}
218 | ${tests.aws}
219 | ${tests.config}
220 | ${tests.awaitsfix}
221 | ${tests.slow}
222 | ${tests.timeoutSuite}
223 | ${tests.showSuccess}
224 | ${tests.integration}
225 | ${tests.cluster_seed}
226 | ${tests.client.ratio}
227 | ${es.logger.level}
228 | true
229 |
230 |
231 |
232 |
233 |
234 |
235 | org.apache.maven.plugins
236 | maven-surefire-plugin
237 | 2.13
238 |
239 | true
240 |
241 |
242 |
243 | org.apache.maven.plugins
244 | maven-source-plugin
245 | 2.2.1
246 |
247 |
248 | attach-sources
249 |
250 | jar
251 |
252 |
253 |
254 |
255 |
256 | maven-assembly-plugin
257 | 2.4
258 |
259 | false
260 | ${project.build.directory}/releases/
261 |
262 | ${basedir}/src/main/assemblies/plugin.xml
263 |
264 |
265 |
266 |
267 | package
268 |
269 | single
270 |
271 |
272 |
273 |
274 |
275 |
276 |
277 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | AWS Cloud Plugin for Elasticsearch
2 | ==================================
3 |
4 | The Amazon Web Services (AWS) Cloud plugin allows you to use the [AWS API](https://github.com/aws/aws-sdk-java)
5 | for the unicast discovery mechanism and to add S3 snapshot repositories.
6 |
7 | In order to install the plugin, run:
8 |
9 | ```sh
10 | bin/plugin -install elasticsearch/elasticsearch-cloud-aws/2.3.0
11 | ```
12 |
13 | You need to install a version matching your Elasticsearch version:
14 |
15 | | Elasticsearch | AWS Cloud Plugin | Docs |
16 | |------------------------|-------------------|------------------------------------------------------------------------------------------------------------------------------------|
17 | | master | Build from source | See below |
18 | | es-1.x | Build from source | [2.4.0-SNAPSHOT](https://github.com/elasticsearch/elasticsearch-cloud-aws/tree/es-1.x/#version-240-snapshot-for-elasticsearch-14) |
19 | | es-1.3 | 2.3.0 | [2.3.0](https://github.com/elasticsearch/elasticsearch-cloud-aws/tree/v2.3.0/#version-230-for-elasticsearch-13) |
20 | | es-1.2 | 2.2.0 | [2.2.0](https://github.com/elasticsearch/elasticsearch-cloud-aws/tree/v2.2.0/#aws-cloud-plugin-for-elasticsearch) |
21 | | es-1.1 | 2.1.1 | [2.1.1](https://github.com/elasticsearch/elasticsearch-cloud-aws/tree/v2.1.1/#aws-cloud-plugin-for-elasticsearch) |
22 | | es-1.0 | 2.0.0 | [2.0.0](https://github.com/elasticsearch/elasticsearch-cloud-aws/tree/v2.0.0/#aws-cloud-plugin-for-elasticsearch) |
23 | | es-0.90 | 1.16.0 | [1.16.0](https://github.com/elasticsearch/elasticsearch-cloud-aws/tree/v1.16.0/#aws-cloud-plugin-for-elasticsearch) |
24 |
25 | To use a `SNAPSHOT` version, you need to build it with Maven:
26 |
27 | ```bash
28 | mvn clean install
29 | plugin --install cloud-aws \
30 | --url file:target/releases/elasticsearch-cloud-aws-X.X.X-SNAPSHOT.zip
31 | ```
32 |
33 | ## Generic Configuration
34 |
35 | The plugin will default to using [IAM Role](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) credentials
36 | for authentication. These can be overridden by, in increasing order of precedence, system properties `aws.accessKeyId` and `aws.secretKey`,
37 | environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY`, or the elasticsearch config using `cloud.aws.access_key` and `cloud.aws.secret_key`:
38 |
39 | ```
40 | cloud:
41 |     aws:
42 |         access_key: AKVAIQBF2RECL7FJWGJQ
43 |         secret_key: vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br
44 | ```
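 |
 | The same credentials can alternatively be supplied through the environment variables named above, for example:
 |
 | ```sh
 | export AWS_ACCESS_KEY_ID="AKVAIQBF2RECL7FJWGJQ"
 | export AWS_SECRET_KEY="vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br"
 | ```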
45 |
46 | ### Transport security
47 |
48 | By default this plugin uses HTTPS for all API calls to AWS endpoints. If you wish to use HTTP instead, set
49 | `cloud.aws.protocol` in the elasticsearch config. You can optionally override this setting per service
50 | via `cloud.aws.ec2.protocol` or `cloud.aws.s3.protocol`.
51 |
52 | ```
53 | cloud:
54 |     aws:
55 |         protocol: https
56 |         s3:
57 |             protocol: http
58 |         ec2:
59 |             protocol: https
60 | ```
61 |
62 | ### Region
63 |
64 | `cloud.aws.region` can be set to a region, and the relevant settings for both `ec2` and `s3` will be used automatically. The available values are listed below; an example follows the list:
65 |
66 | * `us-east` (`us-east-1`)
67 | * `us-west` (`us-west-1`)
68 | * `us-west-1`
69 | * `us-west-2`
70 | * `ap-southeast` (`ap-southeast-1`)
71 | * `ap-southeast-1`
72 | * `ap-southeast-2`
73 | * `ap-northeast` (`ap-northeast-1`)
74 | * `eu-west` (`eu-west-1`)
75 | * `sa-east` (`sa-east-1`)
76 |
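 | For example, to pin both `ec2` and `s3` to a single region (using `us-west-2` from the list above):
 |
 | ```
 | cloud:
 |     aws:
 |         region: us-west-2
 | ```
 |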
77 |
78 | ## EC2 Discovery
79 |
80 | ec2 discovery allows you to use the ec2 APIs to perform automatic discovery (similar to multicast in environments where multicast is supported). Here is a simple sample configuration:
81 |
82 | ```
83 | discovery:
84 |     type: ec2
85 | ```
86 |
87 | The ec2 discovery uses the same credentials as the rest of the AWS services provided by this plugin (`repositories`).
88 | See [Generic Configuration](#generic-configuration) for details.
89 |
90 | The following is a list of settings (prefixed with `discovery.ec2`) that can further control the discovery; a sample configuration follows the list:
91 |
92 | * `groups`: Either a comma separated list or array based list of (security) groups. Only instances with the provided security groups will be used in the cluster discovery. (Note: you can provide either the group name or the group ID.)
93 | * `host_type`: The type of host to use to communicate with other instances. Can be one of `private_ip`, `public_ip`, `private_dns`, `public_dns`. Defaults to `private_ip`.
94 | * `availability_zones`: Either a comma separated list or array based list of availability zones. Only instances within the provided availability zones will be used in the cluster discovery.
95 | * `any_group`: If set to `false`, all of the listed security groups must be present on an instance for it to be used in the discovery. Defaults to `true`.
96 | * `ping_timeout`: How long to wait for existing EC2 nodes to reply during discovery. Defaults to `3s`. If no unit like `ms`, `s` or `m` is specified, milliseconds are used.
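 |
 | For example, a discovery configuration combining several of these settings might look like this (the group name and availability zones below are placeholders):
 |
 | ```
 | discovery:
 |     type: ec2
 |     ec2:
 |         groups: my-security-group
 |         availability_zones: us-west-2a,us-west-2b
 |         any_group: false
 |         ping_timeout: 5s
 | ```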
97 |
98 | ### Recommended EC2 Permissions
99 |
100 | EC2 discovery requires making a call to the EC2 service. You'll want to set up an IAM policy to allow this. You can create a custom policy via the IAM Management Console. It should look similar to this:
101 |
102 | ```js
103 | {
104 | "Statement": [
105 | {
106 | "Action": [
107 | "ec2:DescribeInstances"
108 | ],
109 | "Effect": "Allow",
110 | "Resource": [
111 | "*"
112 | ]
113 | }
114 | ],
115 | "Version": "2014-09-03"
116 | }
117 | ```
118 |
119 |
120 | ### Filtering by Tags
121 |
122 | The ec2 discovery can also filter machines to include in the cluster based on tags (and not just groups). The settings to use include the `discovery.ec2.tag.` prefix. For example, setting `discovery.ec2.tag.stage` to `dev` will include only instances that have a tag with key `stage` and value `dev`. If several tags are set, an instance must carry all of them to be included.
123 |
124 | One practical use for tag filtering is when an ec2 cluster contains many nodes that are not running elasticsearch. In this case (particularly with high `ping_timeout` values) there is a risk that a new node's discovery phase will end before it has found the cluster (which will result in it declaring itself master of a new cluster with the same name - highly undesirable). Tagging elasticsearch ec2 nodes and then filtering by that tag will resolve this issue.
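 |
 | For example, to restrict discovery to instances tagged with both `stage: dev` and `es_cluster: logging` (placeholder tag names and values), the settings would be:
 |
 | ```
 | discovery:
 |     type: ec2
 |     ec2:
 |         tag:
 |             stage: dev
 |             es_cluster: logging
 | ```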
125 |
126 | ### Automatic Node Attributes
127 |
128 | Though not dependent on actually using `ec2` as discovery (it still requires the cloud aws plugin to be installed), the plugin can automatically add node attributes relating to ec2 (for example, the availability zone) that can be used with the awareness allocation feature. In order to enable it, set `cloud.node.auto_attributes` to `true` in the settings.
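 |
 | A minimal sketch of enabling the automatic attributes and then using them for shard allocation awareness might look like this (assuming the plugin exposes the zone as an `aws_availability_zone` node attribute):
 |
 | ```
 | cloud:
 |     node:
 |         auto_attributes: true
 |
 | cluster:
 |     routing:
 |         allocation:
 |             awareness:
 |                 attributes: aws_availability_zone
 | ```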
129 |
130 |
131 | ### Using a different EC2 endpoint
132 |
133 | If you are using an EC2 API-compatible service, you can point the plugin at it by setting `cloud.aws.ec2.endpoint`
134 | to your provider's URL.
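 |
 | For example (the endpoint below is a placeholder for your provider's URL):
 |
 | ```
 | cloud:
 |     aws:
 |         ec2:
 |             endpoint: ec2.example.com
 | ```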
135 |
136 | ## S3 Repository
137 |
138 | The S3 repository uses S3 to store snapshots. It can be created using the following command:
139 |
140 | ```sh
141 | $ curl -XPUT 'http://localhost:9200/_snapshot/my_s3_repository' -d '{
142 | "type": "s3",
143 | "settings": {
144 | "bucket": "my_bucket_name",
145 | "region": "us-west"
146 | }
147 | }'
148 | ```
149 |
150 | The following settings are supported:
151 |
152 | * `bucket`: The name of the bucket to be used for snapshots. (Mandatory)
153 | * `region`: The region where the bucket is located. Defaults to US Standard.
154 | * `base_path`: Specifies the path within the bucket to the repository data. Defaults to the root directory.
155 | * `access_key`: The access key to use for authentication. Defaults to the value of `cloud.aws.access_key`.
156 | * `secret_key`: The secret key to use for authentication. Defaults to the value of `cloud.aws.secret_key`.
157 | * `chunk_size`: Big files can be broken down into chunks during snapshotting if needed. The chunk size can be specified in bytes or by using size value notation, e.g. `1g`, `10m`, `5k`. Defaults to `100m`.
158 | * `compress`: When set to `true`, metadata files are stored in compressed format. This setting doesn't affect index files, which are already compressed by default. Defaults to `false`.
159 | * `server_side_encryption`: When set to `true`, files are encrypted on the server side using the AES256 algorithm. Defaults to `false`.
160 | * `max_retries`: Number of retries in case of S3 errors. Defaults to `3`.
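 |
 | For example, a repository combining several of these settings could be registered like this (the bucket name and base path are placeholders):
 |
 | ```sh
 | $ curl -XPUT 'http://localhost:9200/_snapshot/my_s3_repository' -d '{
 |     "type": "s3",
 |     "settings": {
 |         "bucket": "my_bucket_name",
 |         "region": "us-west",
 |         "base_path": "snapshots/cluster1",
 |         "chunk_size": "100m",
 |         "server_side_encryption": true
 |     }
 | }'
 | ```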
161 |
162 | S3 repositories use the same credentials as the rest of the AWS services provided by this plugin (`discovery`).
163 | See [Generic Configuration](#generic-configuration) for details.
164 |
165 | Multiple S3 repositories can be created. If the buckets require different credentials, define them as part of the repository settings.
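 |
 | For example, a second repository pointing at a bucket with its own credentials might be registered like this (all values are placeholders):
 |
 | ```sh
 | $ curl -XPUT 'http://localhost:9200/_snapshot/my_other_s3_repository' -d '{
 |     "type": "s3",
 |     "settings": {
 |         "bucket": "my_other_bucket_name",
 |         "access_key": "<access key for this bucket>",
 |         "secret_key": "<secret key for this bucket>"
 |     }
 | }'
 | ```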
166 |
167 | ### Recommended S3 Permissions
168 |
169 | In order to restrict the Elasticsearch snapshot process to the minimum required resources, we recommend using Amazon IAM in conjunction with pre-existing S3 buckets. Here is an example policy which grants the snapshot process access to an S3 bucket named "snaps.example.com". This may be configured through the AWS IAM console by creating a Custom Policy with a Policy Document similar to this (change snaps.example.com to your bucket name).
170 |
171 | ```js
172 | {
173 | "Statement": [
174 | {
175 | "Action": [
176 | "s3:ListBucket"
177 | ],
178 | "Effect": "Allow",
179 | "Resource": [
180 | "arn:aws:s3:::snaps.example.com"
181 | ]
182 | },
183 | {
184 | "Action": [
185 | "s3:GetObject",
186 | "s3:PutObject",
187 | "s3:DeleteObject"
188 | ],
189 | "Effect": "Allow",
190 | "Resource": [
191 | "arn:aws:s3:::snaps.example.com/*"
192 | ]
193 | }
194 | ],
195 | "Version": "2012-10-17"
196 | }
197 |
198 | ```
199 |
200 | You may further restrict the permissions by specifying a prefix within the bucket, in this example named "foo".
201 |
202 | ```js
203 | {
204 | "Statement": [
205 | {
206 | "Action": [
207 | "s3:ListBucket"
208 | ],
209 | "Condition": {
210 | "StringLike": {
211 | "s3:prefix": [
212 | "foo/*"
213 | ]
214 | }
215 | },
216 | "Effect": "Allow",
217 | "Resource": [
218 | "arn:aws:s3:::snaps.example.com"
219 | ]
220 | },
221 | {
222 | "Action": [
223 | "s3:GetObject",
224 | "s3:PutObject",
225 | "s3:DeleteObject"
226 | ],
227 | "Effect": "Allow",
228 | "Resource": [
229 | "arn:aws:s3:::snaps.example.com/foo/*"
230 | ]
231 | }
232 | ],
233 | "Version": "2012-10-17"
234 | }
235 |
236 | ```
237 |
238 | The bucket needs to exist before you register a repository for snapshots; if it does not, the repository registration will fail. If you want elasticsearch to create the bucket instead, you can add the permission to create a specific bucket like this:
239 |
240 | ```js
241 | {
242 | "Action": [
243 | "s3:CreateBucket"
244 | ],
245 | "Effect": "Allow",
246 | "Resource": [
247 | "arn:aws:s3:::snaps.example.com"
248 | ]
249 | }
250 | ```
251 |
252 | ### Using a different S3 endpoint
253 |
254 | If you are using an S3 API-compatible service, you can point the plugin at it by setting `cloud.aws.s3.endpoint`
255 | to your provider's URL.
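 |
 | For example (the endpoint below is a placeholder for your provider's URL):
 |
 | ```
 | cloud:
 |     aws:
 |         s3:
 |             endpoint: s3.example.com
 | ```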
256 |
257 |
258 | ## Testing
259 |
260 | Integration tests in this plugin require a working AWS configuration and are therefore disabled by default. Three buckets and two IAM users have to be created. The first IAM user needs access to two buckets in different regions, and the third bucket is exclusive to the second IAM user. To enable the tests, prepare a config file `elasticsearch.yml` with the following content:
261 |
262 | ```
263 | cloud:
264 |     aws:
265 |         access_key: AKVAIQBF2RECL7FJWGJQ
266 |         secret_key: vExyMThREXeRMm/b/LRzEB8jWwvzQeXgjqMX+6br
267 |
268 | repositories:
269 |     s3:
270 |         bucket: "bucket_name"
271 |         region: "us-west-2"
272 |         private-bucket:
273 |             bucket: <bucket not accessible by default credentials>
274 |             access_key: <access key>
275 |             secret_key: <secret key>
276 |         remote-bucket:
277 |             bucket: <bucket in a different region>
278 |             region: <region>
279 |
280 | ```
281 |
282 | Replace all occurrences of `access_key`, `secret_key`, `bucket` and `region` with your settings. Please note that the tests will delete all snapshot/restore related files in the specified buckets.
283 |
284 | To run the tests:
285 |
286 | ```sh
287 | mvn -Dtests.aws=true -Dtests.config=/path/to/config/file/elasticsearch.yml clean test
288 | ```
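 |
 | The build also forwards a number of `tests.*` system properties to the test runner. A sketch of selecting a single suite, assuming the randomized-testing runner's `tests.class` filter accepts glob patterns:
 |
 | ```sh
 | mvn -Dtests.aws=true -Dtests.config=/path/to/config/file/elasticsearch.yml \
 |     -Dtests.class='org.elasticsearch.repositories.s3.*' clean test
 | ```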
289 |
290 |
291 | License
292 | -------
293 |
294 | This software is licensed under the Apache 2 license, quoted below.
295 |
296 | Copyright 2009-2014 Elasticsearch
297 |
298 | Licensed under the Apache License, Version 2.0 (the "License"); you may not
299 | use this file except in compliance with the License. You may obtain a copy of
300 | the License at
301 |
302 | http://www.apache.org/licenses/LICENSE-2.0
303 |
304 | Unless required by applicable law or agreed to in writing, software
305 | distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
306 | WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
307 | License for the specific language governing permissions and limitations under
308 | the License.
309 |
--------------------------------------------------------------------------------
/src/test/java/org/elasticsearch/cloud/aws/AmazonS3Wrapper.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch (the "Author") under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. Author licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.cloud.aws;
21 |
22 | import com.amazonaws.AmazonClientException;
23 | import com.amazonaws.AmazonServiceException;
24 | import com.amazonaws.AmazonWebServiceRequest;
25 | import com.amazonaws.HttpMethod;
26 | import com.amazonaws.regions.Region;
27 | import com.amazonaws.services.s3.AmazonS3;
28 | import com.amazonaws.services.s3.S3ClientOptions;
29 | import com.amazonaws.services.s3.S3ResponseMetadata;
30 | import com.amazonaws.services.s3.model.*;
31 |
32 | import java.io.File;
33 | import java.io.InputStream;
34 | import java.net.URL;
35 | import java.util.Date;
36 | import java.util.List;
37 |
38 | /**
39 |  * Wraps an {@link AmazonS3} client, delegating every call so tests can override individual methods.
40 | */
41 | public class AmazonS3Wrapper implements AmazonS3 {
42 |
43 | protected AmazonS3 delegate;
44 |
45 | public AmazonS3Wrapper(AmazonS3 delegate) {
46 | this.delegate = delegate;
47 | }
48 |
49 |
50 | @Override
51 | public void setEndpoint(String endpoint) {
52 | delegate.setEndpoint(endpoint);
53 | }
54 |
55 | @Override
56 | public void setRegion(Region region) throws IllegalArgumentException {
57 | delegate.setRegion(region);
58 | }
59 |
60 | @Override
61 | public void setS3ClientOptions(S3ClientOptions clientOptions) {
62 | delegate.setS3ClientOptions(clientOptions);
63 | }
64 |
65 | @Override
66 | public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) throws AmazonClientException, AmazonServiceException {
67 | delegate.changeObjectStorageClass(bucketName, key, newStorageClass);
68 | }
69 |
70 | @Override
71 | public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) throws AmazonClientException, AmazonServiceException {
72 | delegate.setObjectRedirectLocation(bucketName, key, newRedirectLocation);
73 | }
74 |
75 | @Override
76 | public ObjectListing listObjects(String bucketName) throws AmazonClientException, AmazonServiceException {
77 | return delegate.listObjects(bucketName);
78 | }
79 |
80 | @Override
81 | public ObjectListing listObjects(String bucketName, String prefix) throws AmazonClientException, AmazonServiceException {
82 | return delegate.listObjects(bucketName, prefix);
83 | }
84 |
85 | @Override
86 | public ObjectListing listObjects(ListObjectsRequest listObjectsRequest) throws AmazonClientException, AmazonServiceException {
87 | return delegate.listObjects(listObjectsRequest);
88 | }
89 |
90 | @Override
91 | public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing) throws AmazonClientException, AmazonServiceException {
92 | return delegate.listNextBatchOfObjects(previousObjectListing);
93 | }
94 |
95 | @Override
96 | public VersionListing listVersions(String bucketName, String prefix) throws AmazonClientException, AmazonServiceException {
97 | return delegate.listVersions(bucketName, prefix);
98 | }
99 |
100 | @Override
101 | public VersionListing listNextBatchOfVersions(VersionListing previousVersionListing) throws AmazonClientException, AmazonServiceException {
102 | return delegate.listNextBatchOfVersions(previousVersionListing);
103 | }
104 |
105 | @Override
106 | public VersionListing listVersions(String bucketName, String prefix, String keyMarker, String versionIdMarker, String delimiter, Integer maxResults) throws AmazonClientException, AmazonServiceException {
107 | return delegate.listVersions(bucketName, prefix, keyMarker, versionIdMarker, delimiter, maxResults);
108 | }
109 |
110 | @Override
111 | public VersionListing listVersions(ListVersionsRequest listVersionsRequest) throws AmazonClientException, AmazonServiceException {
112 | return delegate.listVersions(listVersionsRequest);
113 | }
114 |
115 | @Override
116 | public Owner getS3AccountOwner() throws AmazonClientException, AmazonServiceException {
117 | return delegate.getS3AccountOwner();
118 | }
119 |
120 | @Override
121 | public boolean doesBucketExist(String bucketName) throws AmazonClientException, AmazonServiceException {
122 | return delegate.doesBucketExist(bucketName);
123 | }
124 |
125 | @Override
126 | public List<Bucket> listBuckets() throws AmazonClientException, AmazonServiceException {
127 | return delegate.listBuckets();
128 | }
129 |
130 | @Override
131 | public List<Bucket> listBuckets(ListBucketsRequest listBucketsRequest) throws AmazonClientException, AmazonServiceException {
132 | return delegate.listBuckets(listBucketsRequest);
133 | }
134 |
135 | @Override
136 | public String getBucketLocation(String bucketName) throws AmazonClientException, AmazonServiceException {
137 | return delegate.getBucketLocation(bucketName);
138 | }
139 |
140 | @Override
141 | public String getBucketLocation(GetBucketLocationRequest getBucketLocationRequest) throws AmazonClientException, AmazonServiceException {
142 | return delegate.getBucketLocation(getBucketLocationRequest);
143 | }
144 |
145 | @Override
146 | public Bucket createBucket(CreateBucketRequest createBucketRequest) throws AmazonClientException, AmazonServiceException {
147 | return delegate.createBucket(createBucketRequest);
148 | }
149 |
150 | @Override
151 | public Bucket createBucket(String bucketName) throws AmazonClientException, AmazonServiceException {
152 | return delegate.createBucket(bucketName);
153 | }
154 |
155 | @Override
156 | public Bucket createBucket(String bucketName, com.amazonaws.services.s3.model.Region region) throws AmazonClientException, AmazonServiceException {
157 | return delegate.createBucket(bucketName, region);
158 | }
159 |
160 | @Override
161 | public Bucket createBucket(String bucketName, String region) throws AmazonClientException, AmazonServiceException {
162 | return delegate.createBucket(bucketName, region);
163 | }
164 |
165 | @Override
166 | public AccessControlList getObjectAcl(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
167 | return delegate.getObjectAcl(bucketName, key);
168 | }
169 |
170 | @Override
171 | public AccessControlList getObjectAcl(String bucketName, String key, String versionId) throws AmazonClientException, AmazonServiceException {
172 | return delegate.getObjectAcl(bucketName, key, versionId);
173 | }
174 |
175 | @Override
176 | public void setObjectAcl(String bucketName, String key, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
177 | delegate.setObjectAcl(bucketName, key, acl);
178 | }
179 |
180 | @Override
181 | public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl) throws AmazonClientException, AmazonServiceException {
182 | delegate.setObjectAcl(bucketName, key, acl);
183 | }
184 |
185 | @Override
186 | public void setObjectAcl(String bucketName, String key, String versionId, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
187 | delegate.setObjectAcl(bucketName, key, versionId, acl);
188 | }
189 |
190 | @Override
191 | public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl) throws AmazonClientException, AmazonServiceException {
192 | delegate.setObjectAcl(bucketName, key, versionId, acl);
193 | }
194 |
195 | @Override
196 | public AccessControlList getBucketAcl(String bucketName) throws AmazonClientException, AmazonServiceException {
197 | return delegate.getBucketAcl(bucketName);
198 | }
199 |
200 | @Override
201 | public void setBucketAcl(SetBucketAclRequest setBucketAclRequest) throws AmazonClientException, AmazonServiceException {
202 | delegate.setBucketAcl(setBucketAclRequest);
203 | }
204 |
205 | @Override
206 | public AccessControlList getBucketAcl(GetBucketAclRequest getBucketAclRequest) throws AmazonClientException, AmazonServiceException {
207 | return delegate.getBucketAcl(getBucketAclRequest);
208 | }
209 |
210 | @Override
211 | public void setBucketAcl(String bucketName, AccessControlList acl) throws AmazonClientException, AmazonServiceException {
212 | delegate.setBucketAcl(bucketName, acl);
213 | }
214 |
215 | @Override
216 | public void setBucketAcl(String bucketName, CannedAccessControlList acl) throws AmazonClientException, AmazonServiceException {
217 | delegate.setBucketAcl(bucketName, acl);
218 | }
219 |
220 | @Override
221 | public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
222 | return delegate.getObjectMetadata(bucketName, key);
223 | }
224 |
225 | @Override
226 | public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest) throws AmazonClientException, AmazonServiceException {
227 | return delegate.getObjectMetadata(getObjectMetadataRequest);
228 | }
229 |
230 | @Override
231 | public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
232 | return delegate.getObject(bucketName, key);
233 | }
234 |
235 | @Override
236 | public S3Object getObject(GetObjectRequest getObjectRequest) throws AmazonClientException, AmazonServiceException {
237 | return delegate.getObject(getObjectRequest);
238 | }
239 |
240 | @Override
241 | public ObjectMetadata getObject(GetObjectRequest getObjectRequest, File destinationFile) throws AmazonClientException, AmazonServiceException {
242 | return delegate.getObject(getObjectRequest, destinationFile);
243 | }
244 |
245 | @Override
246 | public void deleteBucket(DeleteBucketRequest deleteBucketRequest) throws AmazonClientException, AmazonServiceException {
247 | delegate.deleteBucket(deleteBucketRequest);
248 | }
249 |
250 | @Override
251 | public void deleteBucket(String bucketName) throws AmazonClientException, AmazonServiceException {
252 | delegate.deleteBucket(bucketName);
253 | }
254 |
255 | @Override
256 | public PutObjectResult putObject(PutObjectRequest putObjectRequest) throws AmazonClientException, AmazonServiceException {
257 | return delegate.putObject(putObjectRequest);
258 | }
259 |
260 | @Override
261 | public PutObjectResult putObject(String bucketName, String key, File file) throws AmazonClientException, AmazonServiceException {
262 | return delegate.putObject(bucketName, key, file);
263 | }
264 |
265 | @Override
266 | public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException {
267 | return delegate.putObject(bucketName, key, input, metadata);
268 | }
269 |
270 | @Override
271 | public CopyObjectResult copyObject(String sourceBucketName, String sourceKey, String destinationBucketName, String destinationKey) throws AmazonClientException, AmazonServiceException {
272 | return delegate.copyObject(sourceBucketName, sourceKey, destinationBucketName, destinationKey);
273 | }
274 |
275 | @Override
276 | public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest) throws AmazonClientException, AmazonServiceException {
277 | return delegate.copyObject(copyObjectRequest);
278 | }
279 |
280 | @Override
281 | public CopyPartResult copyPart(CopyPartRequest copyPartRequest) throws AmazonClientException, AmazonServiceException {
282 | return delegate.copyPart(copyPartRequest);
283 | }
284 |
285 | @Override
286 | public void deleteObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
287 | delegate.deleteObject(bucketName, key);
288 | }
289 |
290 | @Override
291 | public void deleteObject(DeleteObjectRequest deleteObjectRequest) throws AmazonClientException, AmazonServiceException {
292 | delegate.deleteObject(deleteObjectRequest);
293 | }
294 |
295 | @Override
296 | public DeleteObjectsResult deleteObjects(DeleteObjectsRequest deleteObjectsRequest) throws AmazonClientException, AmazonServiceException {
297 | return delegate.deleteObjects(deleteObjectsRequest);
298 | }
299 |
300 | @Override
301 | public void deleteVersion(String bucketName, String key, String versionId) throws AmazonClientException, AmazonServiceException {
302 | delegate.deleteVersion(bucketName, key, versionId);
303 | }
304 |
305 | @Override
306 | public void deleteVersion(DeleteVersionRequest deleteVersionRequest) throws AmazonClientException, AmazonServiceException {
307 | delegate.deleteVersion(deleteVersionRequest);
308 | }
309 |
310 | @Override
311 | public BucketLoggingConfiguration getBucketLoggingConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
312 | return delegate.getBucketLoggingConfiguration(bucketName);
313 | }
314 |
315 | @Override
316 | public void setBucketLoggingConfiguration(SetBucketLoggingConfigurationRequest setBucketLoggingConfigurationRequest) throws AmazonClientException, AmazonServiceException {
317 | delegate.setBucketLoggingConfiguration(setBucketLoggingConfigurationRequest);
318 | }
319 |
320 | @Override
321 | public BucketVersioningConfiguration getBucketVersioningConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
322 | return delegate.getBucketVersioningConfiguration(bucketName);
323 | }
324 |
325 | @Override
326 | public void setBucketVersioningConfiguration(SetBucketVersioningConfigurationRequest setBucketVersioningConfigurationRequest) throws AmazonClientException, AmazonServiceException {
327 | delegate.setBucketVersioningConfiguration(setBucketVersioningConfigurationRequest);
328 | }
329 |
330 | @Override
331 | public BucketLifecycleConfiguration getBucketLifecycleConfiguration(String bucketName) {
332 | return delegate.getBucketLifecycleConfiguration(bucketName);
333 | }
334 |
335 | @Override
336 | public void setBucketLifecycleConfiguration(String bucketName, BucketLifecycleConfiguration bucketLifecycleConfiguration) {
337 | delegate.setBucketLifecycleConfiguration(bucketName, bucketLifecycleConfiguration);
338 | }
339 |
340 | @Override
341 | public void setBucketLifecycleConfiguration(SetBucketLifecycleConfigurationRequest setBucketLifecycleConfigurationRequest) {
342 | delegate.setBucketLifecycleConfiguration(setBucketLifecycleConfigurationRequest);
343 | }
344 |
345 | @Override
346 | public void deleteBucketLifecycleConfiguration(String bucketName) {
347 | delegate.deleteBucketLifecycleConfiguration(bucketName);
348 | }
349 |
350 | @Override
351 | public void deleteBucketLifecycleConfiguration(DeleteBucketLifecycleConfigurationRequest deleteBucketLifecycleConfigurationRequest) {
352 | delegate.deleteBucketLifecycleConfiguration(deleteBucketLifecycleConfigurationRequest);
353 | }
354 |
355 | @Override
356 | public BucketCrossOriginConfiguration getBucketCrossOriginConfiguration(String bucketName) {
357 | return delegate.getBucketCrossOriginConfiguration(bucketName);
358 | }
359 |
360 | @Override
361 | public void setBucketCrossOriginConfiguration(String bucketName, BucketCrossOriginConfiguration bucketCrossOriginConfiguration) {
362 | delegate.setBucketCrossOriginConfiguration(bucketName, bucketCrossOriginConfiguration);
363 | }
364 |
365 | @Override
366 | public void setBucketCrossOriginConfiguration(SetBucketCrossOriginConfigurationRequest setBucketCrossOriginConfigurationRequest) {
367 | delegate.setBucketCrossOriginConfiguration(setBucketCrossOriginConfigurationRequest);
368 | }
369 |
370 | @Override
371 | public void deleteBucketCrossOriginConfiguration(String bucketName) {
372 | delegate.deleteBucketCrossOriginConfiguration(bucketName);
373 | }
374 |
375 | @Override
376 | public void deleteBucketCrossOriginConfiguration(DeleteBucketCrossOriginConfigurationRequest deleteBucketCrossOriginConfigurationRequest) {
377 | delegate.deleteBucketCrossOriginConfiguration(deleteBucketCrossOriginConfigurationRequest);
378 | }
379 |
380 | @Override
381 | public BucketTaggingConfiguration getBucketTaggingConfiguration(String bucketName) {
382 | return delegate.getBucketTaggingConfiguration(bucketName);
383 | }
384 |
385 | @Override
386 | public void setBucketTaggingConfiguration(String bucketName, BucketTaggingConfiguration bucketTaggingConfiguration) {
387 | delegate.setBucketTaggingConfiguration(bucketName, bucketTaggingConfiguration);
388 | }
389 |
390 | @Override
391 | public void setBucketTaggingConfiguration(SetBucketTaggingConfigurationRequest setBucketTaggingConfigurationRequest) {
392 | delegate.setBucketTaggingConfiguration(setBucketTaggingConfigurationRequest);
393 | }
394 |
395 | @Override
396 | public void deleteBucketTaggingConfiguration(String bucketName) {
397 | delegate.deleteBucketTaggingConfiguration(bucketName);
398 | }
399 |
400 | @Override
401 | public void deleteBucketTaggingConfiguration(DeleteBucketTaggingConfigurationRequest deleteBucketTaggingConfigurationRequest) {
402 | delegate.deleteBucketTaggingConfiguration(deleteBucketTaggingConfigurationRequest);
403 | }
404 |
405 | @Override
406 | public BucketNotificationConfiguration getBucketNotificationConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
407 | return delegate.getBucketNotificationConfiguration(bucketName);
408 | }
409 |
410 | @Override
411 | public void setBucketNotificationConfiguration(SetBucketNotificationConfigurationRequest setBucketNotificationConfigurationRequest) throws AmazonClientException, AmazonServiceException {
412 | delegate.setBucketNotificationConfiguration(setBucketNotificationConfigurationRequest);
413 | }
414 |
415 | @Override
416 | public void setBucketNotificationConfiguration(String bucketName, BucketNotificationConfiguration bucketNotificationConfiguration) throws AmazonClientException, AmazonServiceException {
417 | delegate.setBucketNotificationConfiguration(bucketName, bucketNotificationConfiguration);
418 | }
419 |
420 | @Override
421 | public BucketWebsiteConfiguration getBucketWebsiteConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
422 | return delegate.getBucketWebsiteConfiguration(bucketName);
423 | }
424 |
425 | @Override
426 | public BucketWebsiteConfiguration getBucketWebsiteConfiguration(GetBucketWebsiteConfigurationRequest getBucketWebsiteConfigurationRequest) throws AmazonClientException, AmazonServiceException {
427 | return delegate.getBucketWebsiteConfiguration(getBucketWebsiteConfigurationRequest);
428 | }
429 |
430 | @Override
431 | public void setBucketWebsiteConfiguration(String bucketName, BucketWebsiteConfiguration configuration) throws AmazonClientException, AmazonServiceException {
432 | delegate.setBucketWebsiteConfiguration(bucketName, configuration);
433 | }
434 |
435 | @Override
436 | public void setBucketWebsiteConfiguration(SetBucketWebsiteConfigurationRequest setBucketWebsiteConfigurationRequest) throws AmazonClientException, AmazonServiceException {
437 | delegate.setBucketWebsiteConfiguration(setBucketWebsiteConfigurationRequest);
438 | }
439 |
440 | @Override
441 | public void deleteBucketWebsiteConfiguration(String bucketName) throws AmazonClientException, AmazonServiceException {
442 | delegate.deleteBucketWebsiteConfiguration(bucketName);
443 | }
444 |
445 | @Override
446 | public void deleteBucketWebsiteConfiguration(DeleteBucketWebsiteConfigurationRequest deleteBucketWebsiteConfigurationRequest) throws AmazonClientException, AmazonServiceException {
447 | delegate.deleteBucketWebsiteConfiguration(deleteBucketWebsiteConfigurationRequest);
448 | }
449 |
450 | @Override
451 | public BucketPolicy getBucketPolicy(String bucketName) throws AmazonClientException, AmazonServiceException {
452 | return delegate.getBucketPolicy(bucketName);
453 | }
454 |
455 | @Override
456 | public BucketPolicy getBucketPolicy(GetBucketPolicyRequest getBucketPolicyRequest) throws AmazonClientException, AmazonServiceException {
457 | return delegate.getBucketPolicy(getBucketPolicyRequest);
458 | }
459 |
460 | @Override
461 | public void setBucketPolicy(String bucketName, String policyText) throws AmazonClientException, AmazonServiceException {
462 | delegate.setBucketPolicy(bucketName, policyText);
463 | }
464 |
465 | @Override
466 | public void setBucketPolicy(SetBucketPolicyRequest setBucketPolicyRequest) throws AmazonClientException, AmazonServiceException {
467 | delegate.setBucketPolicy(setBucketPolicyRequest);
468 | }
469 |
470 | @Override
471 | public void deleteBucketPolicy(String bucketName) throws AmazonClientException, AmazonServiceException {
472 | delegate.deleteBucketPolicy(bucketName);
473 | }
474 |
475 | @Override
476 | public void deleteBucketPolicy(DeleteBucketPolicyRequest deleteBucketPolicyRequest) throws AmazonClientException, AmazonServiceException {
477 | delegate.deleteBucketPolicy(deleteBucketPolicyRequest);
478 | }
479 |
480 | @Override
481 | public URL generatePresignedUrl(String bucketName, String key, Date expiration) throws AmazonClientException {
482 | return delegate.generatePresignedUrl(bucketName, key, expiration);
483 | }
484 |
485 | @Override
486 | public URL generatePresignedUrl(String bucketName, String key, Date expiration, HttpMethod method) throws AmazonClientException {
487 | return delegate.generatePresignedUrl(bucketName, key, expiration, method);
488 | }
489 |
490 | @Override
491 | public URL generatePresignedUrl(GeneratePresignedUrlRequest generatePresignedUrlRequest) throws AmazonClientException {
492 | return delegate.generatePresignedUrl(generatePresignedUrlRequest);
493 | }
494 |
495 | @Override
496 | public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
497 | return delegate.initiateMultipartUpload(request);
498 | }
499 |
500 | @Override
501 | public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
502 | return delegate.uploadPart(request);
503 | }
504 |
505 | @Override
506 | public PartListing listParts(ListPartsRequest request) throws AmazonClientException, AmazonServiceException {
507 | return delegate.listParts(request);
508 | }
509 |
510 | @Override
511 | public void abortMultipartUpload(AbortMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
512 | delegate.abortMultipartUpload(request);
513 | }
514 |
515 | @Override
516 | public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) throws AmazonClientException, AmazonServiceException {
517 | return delegate.completeMultipartUpload(request);
518 | }
519 |
520 | @Override
521 | public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest request) throws AmazonClientException, AmazonServiceException {
522 | return delegate.listMultipartUploads(request);
523 | }
524 |
525 | @Override
526 | public S3ResponseMetadata getCachedResponseMetadata(AmazonWebServiceRequest request) {
527 | return delegate.getCachedResponseMetadata(request);
528 | }
529 |
530 | @Override
531 | public void restoreObject(RestoreObjectRequest copyGlacierObjectRequest) throws AmazonServiceException {
532 | delegate.restoreObject(copyGlacierObjectRequest);
533 | }
534 |
535 | @Override
536 | public void restoreObject(String bucketName, String key, int expirationInDays) throws AmazonServiceException {
537 | delegate.restoreObject(bucketName, key, expirationInDays);
538 | }
539 |
540 | @Override
541 | public void enableRequesterPays(String bucketName) throws AmazonServiceException, AmazonClientException {
542 | delegate.enableRequesterPays(bucketName);
543 | }
544 |
545 | @Override
546 | public void disableRequesterPays(String bucketName) throws AmazonServiceException, AmazonClientException {
547 | delegate.disableRequesterPays(bucketName);
548 | }
549 |
550 | @Override
551 | public boolean isRequesterPaysEnabled(String bucketName) throws AmazonServiceException, AmazonClientException {
552 | return delegate.isRequesterPaysEnabled(bucketName);
553 | }
554 | }
555 |
--------------------------------------------------------------------------------
/src/test/java/org/elasticsearch/repositories/s3/AbstractS3SnapshotRestoreTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch (the "Author") under one
3 | * or more contributor license agreements. See the NOTICE file
4 | * distributed with this work for additional information
5 | * regarding copyright ownership. Author licenses this
6 | * file to you under the Apache License, Version 2.0 (the
7 | * "License"); you may not use this file except in compliance
8 | * with the License. You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package org.elasticsearch.repositories.s3;
21 |
22 | import com.amazonaws.services.s3.AmazonS3;
23 | import com.amazonaws.services.s3.model.DeleteObjectsRequest;
24 | import com.amazonaws.services.s3.model.ObjectListing;
25 | import com.amazonaws.services.s3.model.S3ObjectSummary;
26 | import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
27 | import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
28 | import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
29 | import org.elasticsearch.client.Client;
30 | import org.elasticsearch.client.ClusterAdminClient;
31 | import org.elasticsearch.cloud.aws.AbstractAwsTest;
32 | import org.elasticsearch.cloud.aws.AbstractAwsTest.AwsTest;
33 | import org.elasticsearch.cloud.aws.AwsS3Service;
34 | import org.elasticsearch.cluster.ClusterState;
35 | import org.elasticsearch.common.settings.ImmutableSettings;
36 | import org.elasticsearch.common.settings.Settings;
37 | import org.elasticsearch.common.util.concurrent.UncategorizedExecutionException;
38 | import org.elasticsearch.plugins.PluginsService;
39 | import org.elasticsearch.repositories.RepositoryMissingException;
40 | import org.elasticsearch.snapshots.SnapshotMissingException;
41 | import org.elasticsearch.snapshots.SnapshotState;
42 | import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
43 | import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
44 | import org.elasticsearch.test.store.MockDirectoryHelper;
45 | import org.junit.After;
46 | import org.junit.Before;
47 | import org.junit.Test;
48 |
49 | import java.util.ArrayList;
50 | import java.util.List;
51 |
52 | import static org.hamcrest.Matchers.*;
53 |
54 | /**
55 |  * Base class for the S3 snapshot/restore integration tests. */
56 | @AwsTest
57 | @ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, transportClientRatio = 0.0)
58 | abstract public class AbstractS3SnapshotRestoreTest extends AbstractAwsTest {
59 |
60 | @Override
61 | public Settings indexSettings() {
62 | // During restore we frequently restore index to exactly the same state it was before, that might cause the same
63 | // checksum file to be written twice during restore operation
64 | return ImmutableSettings.builder().put(super.indexSettings())
65 | .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false)
66 | .put(MockDirectoryHelper.RANDOM_NO_DELETE_OPEN_FILE, false)
67 | .put("cloud.enabled", true)
68 | .put("plugins." + PluginsService.LOAD_PLUGIN_FROM_CLASSPATH, true)
69 | .build();
70 | }
71 |
72 | private String basePath;
73 |
74 | @Before
75 | public final void wipeBefore() {
76 | wipeRepositories();
77 | basePath = "repo-" + randomInt();
78 | cleanRepositoryFiles(basePath);
79 | }
80 |
81 | @After
82 | public final void wipeAfter() {
83 | wipeRepositories();
84 | cleanRepositoryFiles(basePath);
85 | }
86 |
87 | @Test
88 | public void testSimpleWorkflow() {
89 | Client client = client();
90 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
91 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
92 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
93 | .put("base_path", basePath)
94 | .put("chunk_size", randomIntBetween(1000, 10000))
95 | ).get();
96 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
97 |
98 | createIndex("test-idx-1", "test-idx-2", "test-idx-3");
99 | ensureGreen();
100 |
101 | logger.info("--> indexing some data");
102 | for (int i = 0; i < 100; i++) {
103 | index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
104 | index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
105 | index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
106 | }
107 | refresh();
108 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
109 | assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
110 | assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));
111 |
112 | logger.info("--> snapshot");
113 | CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
114 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
115 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
116 |
117 | assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
118 |
119 | logger.info("--> delete some data");
120 | for (int i = 0; i < 50; i++) {
121 | client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
122 | }
123 | for (int i = 50; i < 100; i++) {
124 | client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
125 | }
126 | for (int i = 0; i < 100; i += 2) {
127 | client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
128 | }
129 | refresh();
130 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
131 | assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
132 | assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
133 |
134 | logger.info("--> close indices");
135 | client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
136 |
137 | logger.info("--> restore all indices from the snapshot");
138 | RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
139 | assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
140 |
141 | ensureGreen();
142 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
143 | assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
144 | assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
145 |
146 | // Test restore after index deletion
147 | logger.info("--> delete indices");
148 | cluster().wipeIndices("test-idx-1", "test-idx-2");
149 | logger.info("--> restore one index after deletion");
150 | restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
151 | assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
152 | ensureGreen();
153 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
154 | ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
155 | assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
156 | assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
157 | }
158 |
159 | @Test
160 | public void testEncryption() {
161 | Client client = client();
162 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
163 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
164 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
165 | .put("base_path", basePath)
166 | .put("chunk_size", randomIntBetween(1000, 10000))
167 | .put("server_side_encryption", true)
168 | ).get();
169 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
170 |
171 | createIndex("test-idx-1", "test-idx-2", "test-idx-3");
172 | ensureGreen();
173 |
174 | logger.info("--> indexing some data");
175 | for (int i = 0; i < 100; i++) {
176 | index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
177 | index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
178 | index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
179 | }
180 | refresh();
181 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
182 | assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
183 | assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(100L));
184 |
185 | logger.info("--> snapshot");
186 | CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
187 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
188 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
189 |
190 | assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
191 |
192 | Settings settings = internalCluster().getInstance(Settings.class);
193 | Settings bucket = settings.getByPrefix("repositories.s3.");
194 | AmazonS3 s3Client = internalCluster().getInstance(AwsS3Service.class).client(
195 | bucket.get("region", settings.get("repositories.s3.region")),
196 | bucket.get("access_key", settings.get("cloud.aws.access_key")),
197 | bucket.get("secret_key", settings.get("cloud.aws.secret_key")));
198 |
199 | String bucketName = bucket.get("bucket");
200 | logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
201 | List<S3ObjectSummary> summaries = s3Client.listObjects(bucketName, basePath).getObjectSummaries();
202 | for (S3ObjectSummary summary : summaries) {
203 | assertThat(s3Client.getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256"));
204 | }
205 |
206 | logger.info("--> delete some data");
207 | for (int i = 0; i < 50; i++) {
208 | client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
209 | }
210 | for (int i = 50; i < 100; i++) {
211 | client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
212 | }
213 | for (int i = 0; i < 100; i += 2) {
214 | client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
215 | }
216 | refresh();
217 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
218 | assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(50L));
219 | assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
220 |
221 | logger.info("--> close indices");
222 | client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();
223 |
224 | logger.info("--> restore all indices from the snapshot");
225 | RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
226 | assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
227 |
228 | ensureGreen();
229 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
230 | assertThat(client.prepareCount("test-idx-2").get().getCount(), equalTo(100L));
231 | assertThat(client.prepareCount("test-idx-3").get().getCount(), equalTo(50L));
232 |
233 | // Test restore after index deletion
234 | logger.info("--> delete indices");
235 | cluster().wipeIndices("test-idx-1", "test-idx-2");
236 | logger.info("--> restore one index after deletion");
237 | restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
238 | assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
239 | ensureGreen();
240 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
241 | ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
242 | assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
243 | assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
244 | }
245 |
246 | /**
247 | * This test verifies that the test configuration is set up in a manner that
248 | * does not make the test {@link #testRepositoryWithCustomCredentials()} pointless.
249 | */
250 | @Test(expected = UncategorizedExecutionException.class)
251 | public void assertRepositoryWithCustomCredentialsIsNotAccessibleByDefaultCredentials() {
252 | Client client = client();
253 | Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket.");
254 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
255 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
256 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
257 | .put("base_path", basePath)
258 | .put("bucket", bucketSettings.get("bucket"))
259 | ).get();
260 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
261 |
262 | assertRepositoryIsOperational(client, "test-repo");
263 | }
264 |
265 | @Test
266 | public void testRepositoryWithCustomCredentials() {
267 | Client client = client();
268 | Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket.");
269 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
270 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
271 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
272 | .put("base_path", basePath)
273 | .put("region", bucketSettings.get("region"))
274 | .put("access_key", bucketSettings.get("access_key"))
275 | .put("secret_key", bucketSettings.get("secret_key"))
276 | .put("bucket", bucketSettings.get("bucket"))
277 | ).get();
278 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
279 |
280 | assertRepositoryIsOperational(client, "test-repo");
281 | }
282 |
283 | /**
284 | * This test verifies that the test configuration is set up in a manner that
285 | * does not make the test {@link #testRepositoryInRemoteRegion()} pointless.
286 | */
287 | @Test(expected = UncategorizedExecutionException.class)
288 | public void assertRepositoryInRemoteRegionIsRemote() {
289 | Client client = client();
290 | Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.remote-bucket.");
291 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
292 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
293 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
294 | .put("base_path", basePath)
295 | .put("bucket", bucketSettings.get("bucket"))
296 | // Below setting intentionally omitted to assert bucket is not available in default region.
297 | // .put("region", bucketSettings.get("region"))
298 | ).get();
299 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
300 |
301 | assertRepositoryIsOperational(client, "test-repo");
302 | }
303 |
304 | @Test
305 | public void testRepositoryInRemoteRegion() {
306 | Client client = client();
307 | Settings settings = internalCluster().getInstance(Settings.class);
308 | Settings bucketSettings = settings.getByPrefix("repositories.s3.remote-bucket.");
309 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
310 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
311 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
312 | .put("base_path", basePath)
313 | .put("bucket", bucketSettings.get("bucket"))
314 | .put("region", bucketSettings.get("region"))
315 | ).get();
316 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
317 |
318 | assertRepositoryIsOperational(client, "test-repo");
319 | }
320 |
321 | /**
322 | * Test case for issue #86: https://github.com/elasticsearch/elasticsearch-cloud-aws/issues/86
323 | */
324 | @Test
325 | public void testNonExistingRepo_86() {
326 | Client client = client();
327 | logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
328 | PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
329 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
330 | .put("base_path", basePath)
331 | ).get();
332 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
333 |
334 | logger.info("--> restore non existing snapshot");
335 | try {
336 | client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).execute().actionGet();
337 | fail("Shouldn't be here");
338 | } catch (SnapshotMissingException ex) {
339 | // Expected
340 | }
341 | }
342 |
343 | /**
344 | * For issue #86: https://github.com/elasticsearch/elasticsearch-cloud-aws/issues/86
345 | */
346 | @Test
347 | public void testGetDeleteNonExistingSnapshot_86() {
348 | ClusterAdminClient client = client().admin().cluster();
349 | logger.info("--> creating azure repository without any path");
350 | PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo").setType("azure")
351 | .setType("s3").setSettings(ImmutableSettings.settingsBuilder()
352 | .put("base_path", basePath)
353 | ).get();
354 | assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
355 |
356 | try {
357 | client.prepareGetSnapshots("test-repo").addSnapshots("no-existing-snapshot").get();
358 | fail("Shouldn't be here");
359 | } catch (SnapshotMissingException ex) {
360 | // Expected
361 | }
362 |
363 | try {
364 | client.prepareDeleteSnapshot("test-repo", "non-existing-snapshot").get();
365 | fail("Shouldn't be here");
366 | } catch (SnapshotMissingException ex) {
367 | // Expected
368 | }
369 | }
370 |
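    | /**
    | * Round-trips data through the given repository: indexes 100 docs, snapshots them,
    | * deletes half, closes the index, then restores and verifies all 100 docs are back.
    | */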
371 | private void assertRepositoryIsOperational(Client client, String repository) {
372 | createIndex("test-idx-1");
373 | ensureGreen();
374 |
375 | logger.info("--> indexing some data");
376 | for (int i = 0; i < 100; i++) {
377 | index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
378 | }
379 | refresh();
380 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
381 |
382 | logger.info("--> snapshot");
383 | CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repository, "test-snap").setWaitForCompletion(true).setIndices("test-idx-*").get();
384 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
385 | assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
386 |
387 | assertThat(client.admin().cluster().prepareGetSnapshots(repository).setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));
388 |
389 | logger.info("--> delete some data");
390 | for (int i = 0; i < 50; i++) {
391 | client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
392 | }
393 | refresh();
394 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(50L));
395 |
396 | logger.info("--> close indices");
397 | client.admin().indices().prepareClose("test-idx-1").get();
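    | // an open index cannot be restored into, so it is closed before restoring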
398 |
399 | logger.info("--> restore all indices from the snapshot");
400 | RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repository, "test-snap").setWaitForCompletion(true).execute().actionGet();
401 | assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
402 |
403 | ensureGreen();
404 | assertThat(client.prepareCount("test-idx-1").get().getCount(), equalTo(100L));
405 | }
406 |
407 |
408 | /**
409 | * Deletes repositories, supports wildcard notation.
410 | */
411 | public static void wipeRepositories(String... repositories) {
412 | // if nothing is provided, delete all
413 | if (repositories.length == 0) {
414 | repositories = new String[]{"*"};
415 | }
416 | for (String repository : repositories) {
417 | try {
418 | client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
419 | } catch (RepositoryMissingException ex) {
420 | // ignore
421 | }
422 | }
423 | }
424 |
425 | /**
426 | * Deletes the repository content under the given base path in each configured bucket
427 | */
428 | public void cleanRepositoryFiles(String basePath) {
429 | Settings settings = internalCluster().getInstance(Settings.class);
430 | Settings[] buckets = {
431 | settings.getByPrefix("repositories.s3."),
432 | settings.getByPrefix("repositories.s3.private-bucket."),
433 | settings.getByPrefix("repositories.s3.remote-bucket.")
434 | };
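    | // Per-bucket settings fall back to the top-level repositories.s3.region and
    | // cloud.aws credentials when no bucket-specific value is provided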
435 | for (Settings bucket : buckets) {
436 | String region = bucket.get("region", settings.get("repositories.s3.region"));
437 | String accessKey = bucket.get("access_key", settings.get("cloud.aws.access_key"));
438 | String secretKey = bucket.get("secret_key", settings.get("cloud.aws.secret_key"));
439 | String bucketName = bucket.get("bucket");
440 |
441 | // We check that the settings have been set in the elasticsearch.yml file
442 | // used by the integration tests, as described in the README
443 | assertThat("Your settings in elasticsearch.yml are incorrect. Check the README file.", bucketName, notNullValue());
444 | AmazonS3 client = internalCluster().getInstance(AwsS3Service.class).client(region, accessKey, secretKey);
445 | try {
446 | ObjectListing prevListing = null;
447 | // From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
448 | // we can delete at most 1,000 objects per multi-object delete request.
449 | // The delete request is created lazily, using the bucket name reported by the first listing
450 | DeleteObjectsRequest multiObjectDeleteRequest = null;
451 | ArrayList<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<DeleteObjectsRequest.KeyVersion>();
452 | while (true) {
453 | ObjectListing list;
454 | if (prevListing != null) {
455 | list = client.listNextBatchOfObjects(prevListing);
456 | } else {
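    | // basePath is used as the key prefix, so only objects created
    | // under this test run's path are listed (and subsequently deleted)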
457 | list = client.listObjects(bucketName, basePath);
458 | multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
459 | }
460 | for (S3ObjectSummary summary : list.getObjectSummaries()) {
461 | keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
462 | // Send the accumulated keys as one multi-object delete roughly every 500 objects
463 | if (keys.size() > 500) {
464 | multiObjectDeleteRequest.setKeys(keys);
465 | client.deleteObjects(multiObjectDeleteRequest);
466 | multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
467 | keys.clear();
468 | }
469 | }
470 | if (list.isTruncated()) {
471 | prevListing = list;
472 | } else {
473 | break;
474 | }
475 | }
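    | // Flush any keys still pending once the listing loop has finished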
476 | if (!keys.isEmpty()) {
477 | multiObjectDeleteRequest.setKeys(keys);
478 | client.deleteObjects(multiObjectDeleteRequest);
479 | }
480 | } catch (Throwable ex) {
481 | logger.warn("Failed to delete content of bucket [{}] in region [{}]", ex, bucketName, region);
482 | }
483 | }
484 | }
485 | }
486 |
--------------------------------------------------------------------------------