├── LICENSE
├── README.md
├── diagram1.png
├── diagram2.png
├── pom.xml
└── src
└── main
├── java
└── org
│ └── bitsofinfo
│ ├── ec2
│ └── Ec2Util.java
│ └── s3
│ ├── S3BucketLoader.java
│ ├── S3Util.java
│ ├── cmd
│ ├── CmdResult.java
│ ├── CommandExecutor.java
│ └── TocPathOpResult.java
│ ├── control
│ ├── CCMode.java
│ ├── CCPayload.java
│ ├── CCPayloadHandler.java
│ ├── CCPayloadType.java
│ └── ControlChannel.java
│ ├── master
│ ├── Master.java
│ ├── ShutdownInfo.java
│ ├── TOCGenerationEventHandler.java
│ ├── TOCGeneratorAndSender.java
│ ├── TOCQueueEmptier.java
│ ├── TocInfoQueueSender.java
│ ├── TocInfoSizeAwareQueue.java
│ ├── WorkerInfo.java
│ └── WorkerRegistry.java
│ ├── toc
│ ├── DirectoryCrawler.java
│ ├── FileCopyTOCPayloadHandler.java
│ ├── S3BucketObjectLister.java
│ ├── S3KeyCopyingTOCPayloadHandler.java
│ ├── SourceTOCGenerator.java
│ ├── TOCManifestBasedGenerator.java
│ ├── TOCPayload.java
│ ├── TOCPayloadHandler.java
│ ├── TOCPayloadValidator.java
│ ├── TOCQueue.java
│ ├── TocInfo.java
│ └── ValidatingTOCPayloadHandler.java
│ ├── util
│ └── CompressUtil.java
│ ├── worker
│ ├── ErrorReport.java
│ ├── ResultSummary.java
│ ├── Worker.java
│ ├── WorkerState.java
│ ├── WriteBackoffMonitor.java
│ ├── WriteErrorMonitor.java
│ ├── WriteMonitor.java
│ └── WriteMonitorError.java
│ └── yas3fs
│ └── Yas3fsS3UploadMonitor.java
└── resources
├── ec2-init-s3BucketLoader.sample.py
├── log4j.properties
└── s3BucketLoader.sample.properties
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright {yyyy} {name of copyright owner}
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
203 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | s3-bucket-loader
2 | ================
3 |
4 | This project originated out of a need to quickly import (and backup) a massive amount of files (hundreds of gigabytes) into an AWS S3 bucket,
5 | with the ultimate intent that this bucket be managed going forward via the S3 distributed file-system;
6 | [yas3fs](https://github.com/danilop/yas3fs). Initial attempts at doing this a traditional way,
7 | (i.e. rsyncing or copying from source to destination) quickly became impractical due to the sheer
8 | amount of time that single-threaded, and even limited multi-threaded copiers would take.
9 |
10 | s3-bucket-loader leverages a simple master/worker paradigm to get economies of scale for copying many files from sourceA to targetB.
11 | "sourceA" and "targetB" could be two S3 buckets, or a file-system to S3 bucket (via an S3 file-system abstraction like yas3fs or s3fs etc).
12 | Even though this is coded with S3 being the ultimate destination it could be used for other targets as well including other shared file-systems.
13 | The speed at which you can import a given file-set into S3 (through yas3fs in this case) is only limited on how much money you
14 | want to spend in worker hardware. For example this has been used to import and validate in S3 over 35k files (11gb total)
15 | in roughly 16 minutes; using 40 ec2 t2.medium instances as workers. In another scenario it was used to import and validate
16 | over 800k files totaling roughly 600gb in under 8 hours. This program has also been used to copy the previously imported
17 | buckets to secondary 'backup' buckets in under an hour.
18 |
19 |
20 | 
21 |
22 | 
23 |
24 | ## How it works
25 |
26 | This is a multi-threaded Java program that can be launched in two modes `master` or `worker`. The `master` is
27 | responsible for determining a table of contents (TOC) (i.e. file paths) which are candidates for WRITE to the
28 | destination and subsequently VALIDATED. The `master` node streams these TOC events over an SQS queue which is
29 | consumed by one or more `workers`. Each `worker` must also have access to the `source` from which the TOC
30 | was generated from. The `source` data could be the same physical set of files, an S3 bucket, a copy of them or whatever... it really
31 | does not matter, but they just need to be accessible from each `worker` (i.e. via a SAN/NAS/NFS share, source S3 bucket etc).
32 | The `worker` then copies each item (in the case of files via rsync (or cp) to S3 via an S3 FS abstraction) or via an S3 key-copy.
33 | It uses rsync to preserve uid/gid information which is important for the ultimate consumer; and ensures preservation
34 | if written to S3 via S3 file-system abstractions like [yas3fs](https://github.com/danilop/yas3fs).
35 | It is also important to note that each `worker` leverages N threads to increase parallelism and maximize the
36 | throughput to S3. The more `workers` you have the faster it goes.
37 |
38 | Please see [s3BucketLoader.sample.properties](https://github.com/bitsofinfo/s3-bucket-loader/blob/master/src/main/resources/s3BucketLoader.sample.properties) for
39 | more details on configuration options and how-to-use etc
40 |
41 | ## Flow overview
42 |
43 | 1. End user starts the Master which creates the SNS control-channel and SQS TOC queue
44 |
45 | 2. The Master (optionally) launches N worker nodes on EC2
46 |
47 | 3. As each worker node initializes it subscribes to the control-channel and publishes that it is INITIALIZED
48 |
49 | 4. Once the master sees all of its workers in INITIALIZED state, the master changes the state to WRITE
50 |
51 | 5. The master begins creating the TOC (consisting of path, isDirectory and size), and sends a SQS message for each file to the TOC queue. Again the 'source' for these
52 | TOC entries could be a path realized via the file-system, or a file-like key name in a source S3 bucket.
53 |
54 | 6. Workers begin consuming TOC messages off the queue and execute their TOCPayloadHandler, which might do a S3 key-copy or
55 | rsyncs (or cp) from the source -> destination through an S3 file-system abstraction. As workers are consuming they periodically
56 | send CURRENT SUMMARY updates to the master. If `failfast` is configured and any failures are detected the master can
57 | switch the cluster to ERROR_REPORT mode immediately (see below). Depending on the handler, they can also do chowns, chmods etc.
58 |
59 | 7. When workers are complete, they publish their WRITE SUMMARY and go into an IDLE state
60 |
61 | 8. Master receives all WRITE SUMMARYs from the workers
62 | * If no errors, the master transitions to the VALIDATE state, and sends the TOC to the queue again
63 | * If errors the master transitions to the ERROR_REPORT state, and requests error details from the workers
64 |
65 | 9. In VALIDATE state, all workers consume TOC file paths from the SQS queue and attempt to verify the file exists
66 | and its size matches the expected TOC size (locally and/or via S3 object meta-data calls). When complete they go into IDLE state and publish their VALIDATE SUMMARY
67 |
68 | 10. After receiving all VALIDATE SUMMARYs from the workers
69 | * If no errors, the master issues a shutdown command to all workers, then optionally terminates all instances
70 | * If errors the master transitions to the ERROR_REPORT state, and requests error details from the workers
71 |
72 | 11. In ERROR REPORT state, workers summarize and publish their errors from either state WRITE/VALIDATE,
73 | the master aggregates them and reports them to the master log file for analysis. All workers are then shutdown.
74 |
75 | 12. At any stage, issuing a control-C on the master triggers a shutdown of the entire cluster,
76 | including ec2 worker termination if configured in the properties file
77 |
78 |
79 | ## How to run
80 |
81 | * Clone this repository
82 |
83 | * You need a Java JDK installed, preferably 1.6+
84 |
85 | * You need [Maven](http://maven.apache.org/) installed
86 |
87 | * Change dir to the root of the project and run 'mvn package' (this will build a runnable Jar under target/)
88 |
89 | * Copy the [s3BucketLoader.sample.properties](https://github.com/bitsofinfo/s3-bucket-loader/blob/master/src/main/resources/s3BucketLoader.sample.properties)
90 | file under src/main/resources, make your own and customize it.
91 |
92 | * run the below to launch, 1st on the MASTER, and then on the WORKERS (which the Master can do itself...)
93 | ```
94 | java -jar -DisMaster=true|false -Ds3BucketLoaderHome=/some/dir -DconfigFilePath=s3BucketLoader.properties s3-bucket-loader-0.0.1-SNAPSHOT.jar
95 | ```
96 |
97 | * The sample properties should be fairly self-explanatory. It's important to understand that it is up
98 | to YOU to properly configure your environment for both the master and worker(s). The `master` needs access to the
99 | gold-copy "source" files that you want to get into S3. The `workers` need access to both the "source" files and
100 | some sort of S3 target (via an S3 file-system abstraction like yas3fs). Note that s3-bucket-loader can automatically
101 | configure your workers for you... you just need to configure a 'user-data' startup script for the EC2 instances
102 | that your `master` will launch. An example/sample one that I have used previously is provided under
103 | [ec2-init-s3BucketLoader.sample.py](src/main/resources/ec2-init-s3BucketLoader.sample.py). For example, when ec2 launches your
104 | workers, a startup script can pull all packages needed to prepare the environment from another S3 bucket, install things,
105 | configure and even pull down the latest s3-bucket-loader jar file, the worker properties file and finally launch the worker.
106 |
107 | Enjoy.
108 |
109 |
110 |
111 |
112 |
--------------------------------------------------------------------------------
/diagram1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitsofinfo/s3-bucket-loader/68f876198084665b74a744a37875a4be00c397e1/diagram1.png
--------------------------------------------------------------------------------
/diagram2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bitsofinfo/s3-bucket-loader/68f876198084665b74a744a37875a4be00c397e1/diagram2.png
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 | 4.0.0
3 | org.bitsofinfo
4 | s3-bucket-loader
5 | 0.0.1-SNAPSHOT
6 |
7 |
8 |
9 |
10 | local_repo
11 |
12 | true
13 | ignore
14 |
15 |
16 | false
17 |
18 | file://${project.basedir}/local_repo
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 | org.apache.maven.plugins
28 | maven-compiler-plugin
29 |
30 | UTF-8
31 | 1.6
32 | 1.6
33 |
34 | **/*.java
35 | **/*.xml
36 | **/*.txt
37 | **/*.yml
38 |
39 |
40 |
41 |
42 |
43 | org.apache.maven.plugins
44 | maven-jar-plugin
45 | 2.3.2
46 |
47 |
48 |
49 | true
50 |
51 |
52 |
53 |
54 |
55 |
56 | org.apache.maven.plugins
57 | maven-shade-plugin
58 | 1.6
59 |
60 | true
61 |
62 |
63 | *:*
64 |
65 | META-INF/*.SF
66 | META-INF/*.DSA
67 | META-INF/*.RSA
68 |
69 |
70 |
71 |
72 |
73 |
74 | package
75 |
76 | shade
77 |
78 |
79 |
80 |
82 | META-INF/spring.handlers
83 |
84 |
86 | META-INF/spring.schemas
87 |
88 |
90 |
92 | org.bitsofinfo.s3.S3BucketLoader
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 | com.google.guava
106 | guava
107 | 18.0
108 |
109 |
110 |
111 | commons-dbcp
112 | commons-dbcp
113 | 1.4
114 |
115 |
116 |
117 |
118 | com.amazonaws
119 | aws-java-sdk
120 | 1.8.10.2
121 |
122 |
123 |
124 | log4j
125 | log4j
126 | 1.2.16
127 |
128 |
129 |
130 | com.fasterxml.jackson.core
131 | jackson-databind
132 | 2.2.0
133 |
134 |
135 |
136 |
137 | stax
138 | stax-api
139 | 1.0.1
140 |
141 |
142 |
143 | stax
144 | stax
145 | 1.2.0
146 |
147 |
148 |
149 | org.springframework
150 | spring-context
151 | 3.1.4.RELEASE
152 |
153 |
154 |
155 | org.springframework
156 | spring-orm
157 | 3.1.4.RELEASE
158 |
159 |
160 |
161 | joda-time
162 | joda-time
163 | 2.3
164 |
165 |
166 |
167 | javax.mail
168 | mail
169 | 1.4.7
170 |
171 |
172 |
173 | org.freemarker
174 | freemarker
175 | 2.3.20
176 |
177 |
178 |
179 | org.aspectj
180 | aspectjrt
181 | 1.6.12
182 |
183 |
184 |
185 | commons-io
186 | commons-io
187 | 2.4
188 |
189 |
190 |
191 | org.springframework
192 | spring-core
193 | 3.1.4.RELEASE
194 |
195 |
196 |
197 | org.apache.httpcomponents
198 | httpclient
199 | 4.3.3
200 |
201 |
202 |
203 |
204 | commons-codec
205 | commons-codec
206 | 1.9
207 |
208 |
209 |
210 | org.apache.commons
211 | commons-exec
212 | 1.2
213 |
214 |
215 |
216 | com.google.code.gson
217 | gson
218 | 2.2.4
219 |
220 |
221 |
222 |
223 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/ec2/Ec2Util.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.ec2;
2 |
3 | import java.io.File;
4 | import java.io.FileInputStream;
5 | import java.io.IOException;
6 | import java.util.ArrayList;
7 | import java.util.Arrays;
8 | import java.util.Collection;
9 | import java.util.List;
10 | import java.util.Map;
11 | import java.util.Properties;
12 | import java.util.TreeMap;
13 |
14 | import org.apache.log4j.Logger;
15 |
16 | import com.amazonaws.services.ec2.AmazonEC2Client;
17 | import com.amazonaws.services.ec2.model.BlockDeviceMapping;
18 | import com.amazonaws.services.ec2.model.DescribeInstanceStatusRequest;
19 | import com.amazonaws.services.ec2.model.DescribeInstanceStatusResult;
20 | import com.amazonaws.services.ec2.model.EbsBlockDevice;
21 | import com.amazonaws.services.ec2.model.Instance;
22 | import com.amazonaws.services.ec2.model.InstanceStatus;
23 | import com.amazonaws.services.ec2.model.Reservation;
24 | import com.amazonaws.services.ec2.model.RunInstancesRequest;
25 | import com.amazonaws.services.ec2.model.RunInstancesResult;
26 | import com.amazonaws.services.ec2.model.ShutdownBehavior;
27 | import com.amazonaws.services.ec2.model.StartInstancesRequest;
28 | import com.amazonaws.services.ec2.model.StopInstancesRequest;
29 | import com.amazonaws.services.ec2.model.TerminateInstancesRequest;
30 | import com.amazonaws.services.ec2.model.VolumeType;
31 | import com.amazonaws.util.Base64;
32 |
33 | public class Ec2Util {
34 |
35 | private static final Logger logger = Logger.getLogger(Ec2Util.class);
36 |
37 | /**
38 | * Returns map of instanceId:privateDnsName
39 | *
40 | * @param ec2Instances
41 | * @return
42 | */
43 | public Map getPrivateDNSNames(List ec2Instances) {
44 | TreeMap names = new TreeMap(String.CASE_INSENSITIVE_ORDER);
45 | for (Instance i : ec2Instances) {
46 | names.put(i.getInstanceId(),i.getPrivateDnsName().toLowerCase());
47 | }
48 | return names;
49 | }
50 |
51 | /**
52 | * Returns map of instanceId:privateIp
53 | *
54 | * @param ec2Instances
55 | * @return
56 | */
57 | public Map getPrivateIPs(List ec2Instances) {
58 | TreeMap names = new TreeMap(String.CASE_INSENSITIVE_ORDER);
59 | for (Instance i : ec2Instances) {
60 | names.put(i.getInstanceId(),i.getPrivateIpAddress());
61 | }
62 | return names;
63 | }
64 |
65 | public void startInstance(AmazonEC2Client ec2Client, String instanceId) throws Exception {
66 | StartInstancesRequest startReq = new StartInstancesRequest();
67 | List instanceIds = new ArrayList();
68 | instanceIds.add(instanceId);
69 | startReq.setInstanceIds(instanceIds);
70 | logger.debug("Starting EC2 instance...." + Arrays.toString(instanceIds.toArray(new String[]{})));
71 | ec2Client.startInstances(startReq);
72 | }
73 |
74 | public void stopInstance(AmazonEC2Client ec2Client, String instanceId) throws Exception {
75 | StopInstancesRequest stopReq = new StopInstancesRequest();
76 | List instanceIds = new ArrayList();
77 | instanceIds.add(instanceId);
78 | stopReq.setInstanceIds(instanceIds);
79 | logger.debug("Stopping EC2 instance...." + Arrays.toString(instanceIds.toArray(new String[]{})));
80 | ec2Client.stopInstances(stopReq);
81 | }
82 |
83 | public void terminateEc2Instance(AmazonEC2Client ec2Client, String instanceId) throws Exception {
84 | try {
85 | TerminateInstancesRequest termReq = new TerminateInstancesRequest();
86 | List instanceIds = new ArrayList();
87 | instanceIds.add(instanceId);
88 | termReq.setInstanceIds(instanceIds);
89 | logger.debug("Terminating EC2 instances...." + Arrays.toString(instanceIds.toArray(new String[]{})));
90 | ec2Client.terminateInstances(termReq);
91 |
92 | } catch(Exception e) {
93 | logger.error("Unexpected error terminating: " + instanceId + " "+ e.getMessage(),e);
94 | }
95 | }
96 |
97 |
98 | public List launchEc2Instances(AmazonEC2Client ec2Client, Properties props) throws Exception {
99 |
100 | Integer totalExpectedWorkers = Integer.valueOf(props.getProperty("master.workers.total"));
101 |
102 | // disk size
103 | Collection blockDevices = new ArrayList();
104 | blockDevices.add(
105 | new BlockDeviceMapping()
106 | .withDeviceName(props.getProperty("master.workers.ec2.disk.deviceName"))
107 | .withEbs(new EbsBlockDevice()
108 | .withVolumeType(VolumeType.valueOf(props.getProperty("master.workers.ec2.disk.volumeType")))
109 | .withDeleteOnTermination(true)
110 | .withVolumeSize(Integer.valueOf(props.getProperty("master.workers.ec2.disk.size.gigabytes")))));
111 |
112 | // create our run request for the total workers we expect
113 | RunInstancesRequest runInstancesRequest = new RunInstancesRequest();
114 | runInstancesRequest.withImageId(props.getProperty("master.workers.ec2.ami.id"))
115 | .withInstanceType(props.getProperty("master.workers.ec2.instanceType"))
116 | .withMinCount(totalExpectedWorkers)
117 | .withMaxCount(totalExpectedWorkers)
118 | .withBlockDeviceMappings(blockDevices)
119 | .withKeyName(props.getProperty("master.workers.ec2.keyName"))
120 | .withSecurityGroupIds(props.getProperty("master.workers.ec2.securityGroupId"))
121 | .withInstanceInitiatedShutdownBehavior(ShutdownBehavior.valueOf(props.getProperty("master.workers.ec2.shutdownBehavior")))
122 | .withSubnetId(props.getProperty("master.workers.ec2.subnetId"))
123 | .withUserData(Base64.encodeAsString(readFile(props.getProperty("master.workers.ec2.userDataFile")).getBytes()));
124 |
125 | // launch
126 | logger.debug("Launching " + totalExpectedWorkers + " EC2 instances, " +
127 | "it may take few minutes for workers to come up...: \n" +
128 | "\tamiId:" + runInstancesRequest.getImageId() +"\n"+
129 | "\tsecGrpId:" + runInstancesRequest.getSecurityGroupIds().get(0) +"\n"+
130 | "\tsubnetId:" + runInstancesRequest.getSubnetId() +"\n"+
131 | "\tinstanceType:" + runInstancesRequest.getInstanceType() +"\n"+
132 | "\tshutdownBehavior:" + runInstancesRequest.getInstanceInitiatedShutdownBehavior() +"\n"+
133 | "\tkeyName:" + runInstancesRequest.getKeyName()
134 | );
135 |
136 |
137 | // as the instances come up, assuming the "userData" above launches the worker we will be good
138 | // they will auto register w/ us the master
139 | RunInstancesResult result = ec2Client.runInstances(runInstancesRequest);
140 | Reservation reservation = result.getReservation();
141 | return reservation.getInstances();
142 | }
143 |
144 | public InstanceStatus getInstanceStatus(AmazonEC2Client ec2Client, String instanceId) {
145 | List instanceIds = new ArrayList();
146 | instanceIds.add(instanceId);
147 | DescribeInstanceStatusRequest statusReq = new DescribeInstanceStatusRequest();
148 | statusReq.setInstanceIds(instanceIds);
149 | DescribeInstanceStatusResult result = ec2Client.describeInstanceStatus(statusReq);
150 | List statuses = result.getInstanceStatuses();
151 | if (statuses == null || statuses.size() == 0) {
152 | return null;
153 | }
154 | return statuses.iterator().next();
155 | }
156 |
157 | public List dumpEc2InstanceStatus(AmazonEC2Client ec2Client, List ec2Instances) {
158 | try {
159 | List instanceIds = new ArrayList();
160 |
161 | for (Instance ec2node : ec2Instances) {
162 | instanceIds.add(ec2node.getInstanceId());
163 | }
164 |
165 | DescribeInstanceStatusRequest statusReq = new DescribeInstanceStatusRequest();
166 | statusReq.setInstanceIds(instanceIds);
167 | DescribeInstanceStatusResult result = ec2Client.describeInstanceStatus(statusReq);
168 |
169 | List statuses = result.getInstanceStatuses();
170 |
171 | List impairedInstances = new ArrayList();
172 |
173 | StringBuffer sb = new StringBuffer("EC2 worker instance STATUS:\n");
174 | for (InstanceStatus status : statuses) {
175 | sb.append("\tid:"+status.getInstanceId() +
176 | "\taz:" + status.getAvailabilityZone() +
177 | "\tstate:" + status.getInstanceState().getName() +
178 | "\tstatus:" + status.getInstanceStatus().getStatus() +
179 | "\tsystem_status: " + status.getSystemStatus().getStatus() + "\n");
180 |
181 | if (status.getInstanceStatus().getStatus().equalsIgnoreCase("impaired")) {
182 | impairedInstances.add(status.getInstanceId());
183 | }
184 | }
185 |
186 | logger.info(sb.toString()+"\n");
187 |
188 | return impairedInstances;
189 |
190 | } catch(Exception e) {
191 | logger.error("Error getting instance state: " + e.getMessage(),e);
192 | return null;
193 | }
194 |
195 | }
196 |
197 | public static String readFile(String path) throws IOException {
198 | File file = new File(path);
199 | FileInputStream fis = new FileInputStream(file);
200 | byte[] data = new byte[(int)file.length()];
201 | fis.read(data);
202 | fis.close();
203 | return new String(data, "UTF-8");
204 | }
205 |
206 | }
207 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/S3BucketLoader.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3;
2 |
3 | import java.io.FileInputStream;
4 | import java.io.InputStream;
5 | import java.nio.charset.Charset;
6 | import java.util.Properties;
7 |
8 | import org.apache.log4j.Logger;
9 | import org.bitsofinfo.s3.master.Master;
10 | import org.bitsofinfo.s3.worker.Worker;
11 |
12 | public class S3BucketLoader {
13 |
14 | private static final Logger logger = Logger.getLogger(S3BucketLoader.class);
15 |
16 | private static Master master = null;
17 | private static Worker worker = null;
18 |
19 | public static void main(String[] args) throws Exception {
20 |
21 | try {
22 |
23 | Properties props = new Properties();
24 | String confPath = System.getProperty("configFilePath");
25 |
26 | logger.info("System file.encoding: " + System.getProperty("file.encoding"));
27 | logger.info("System charset: " + Charset.defaultCharset().name());
28 |
29 | InputStream input = null;
30 | try {
31 | logger.info("Attempting to load props from: " + confPath);
32 | input = new FileInputStream(confPath);
33 | props.load(input);
34 | } catch(Exception e) {
35 | e.printStackTrace();
36 | throw e;
37 | }
38 |
39 | boolean isMaster = Boolean.valueOf(System.getProperty("isMaster"));
40 |
41 | Runtime.getRuntime().addShutdownHook(new Thread(new S3BucketLoader().new ShutdownHook()));
42 |
43 | if (isMaster) {
44 | execAsMaster(props);
45 | } else {
46 | execAsWorker(props);
47 | }
48 |
49 | // run until we are shutdown....
50 | while(true) {
51 | Thread.currentThread().sleep(60000);
52 | }
53 |
54 | } catch(Exception e) {
55 | logger.error("main() unexpected error: " + e.getMessage(),e);
56 | }
57 |
58 |
59 | }
60 |
61 | public class ShutdownHook implements Runnable {
62 | public void run() {
63 | try {
64 | logger.debug("ShutdownHook invoked...");
65 | if (worker != null) {worker.destroy();}
66 | if (master != null) {master.destroy();}
67 | } catch(Exception ignore){}
68 | }
69 | }
70 |
71 |
72 | private static void execAsMaster(Properties props) throws Exception {
73 | master = new Master(props);
74 | master.start();
75 | }
76 |
77 | public static void execAsWorker(Properties props) {
78 | worker = new Worker(props);
79 | worker.startConsuming();
80 | }
81 |
82 |
83 | }
84 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/S3Util.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3;
2 |
3 | import java.io.File;
4 | import java.util.List;
5 |
6 | import org.apache.log4j.Logger;
7 |
8 | import com.amazonaws.services.s3.AmazonS3Client;
9 | import com.amazonaws.services.s3.model.ObjectMetadata;
10 | import com.amazonaws.services.s3.model.PutObjectRequest;
11 | import com.amazonaws.services.s3.model.StorageClass;
12 |
13 | public class S3Util {
14 |
15 | private static final Logger logger = Logger.getLogger(S3Util.class);
16 |
17 | public void uploadToS3(AmazonS3Client s3Client,
18 | String bucketName,
19 | String s3LogBucketFolderRoot,
20 | String host,
21 | List filePathsToUpload) {
22 |
23 | try {
24 |
25 | for (String file : filePathsToUpload) {
26 |
27 | String key = null;
28 | try {
29 | File item = new File(file.trim());
30 |
31 | if (!item.exists()) {
32 | logger.error("uploadToS3() cannot upload item, does not exist! " + item.getAbsolutePath());
33 | continue;
34 | }
35 |
36 | // default to the one file
37 | File[] allFiles = new File[]{item};
38 |
39 | if (item.isDirectory()) {
40 | allFiles = item.listFiles();
41 | }
42 |
43 | for (File toUpload : allFiles) {
44 |
45 | if (!toUpload.exists() || toUpload.getName().startsWith(".") || toUpload.isDirectory()) {
46 | logger.error("uploadToS3() cannot upload, does not exist, starts w/ . or is a directory: " + toUpload.getAbsolutePath());
47 | continue;
48 | }
49 |
50 | key = s3LogBucketFolderRoot + "/" + host + "/" + toUpload.getName();
51 |
52 | PutObjectRequest req = new PutObjectRequest(bucketName, key, toUpload);
53 | req.setStorageClass(StorageClass.ReducedRedundancy);
54 | ObjectMetadata objectMetadata = new ObjectMetadata();
55 | objectMetadata.setContentType("text/plain");
56 | objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
57 | req.setMetadata(objectMetadata);
58 |
59 | s3Client.putObject(req);
60 | }
61 |
62 | } catch(Exception e) {
63 | logger.error("uploadToS3() unexpected error uploading logs to: " +bucketName + " key:"+ key + " for " +file);
64 | }
65 |
66 | }
67 |
68 |
69 | } catch(Exception e) {
70 | logger.error("uploadToS3() error uploading logs to S3: " + e.getMessage(),e);
71 | }
72 |
73 | }
74 |
75 | }
76 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/cmd/CmdResult.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.cmd;
2 |
/**
 * Value holder for the outcome of one external command execution:
 * the process exit code plus the captured stdout and stderr text.
 */
public class CmdResult {

	private int exitCode;
	private String stdOut;
	private String stdErr;

	/**
	 * @param exitCode process exit value (0 = success)
	 * @param stdOut   captured standard output
	 * @param stdErr   captured standard error
	 */
	public CmdResult(int exitCode, String stdOut, String stdErr) {
		this.exitCode = exitCode;
		this.stdOut = stdOut;
		this.stdErr = stdErr;
	}

	public int getExitCode() { return exitCode; }
	public String getStdOut() { return stdOut; }
	public String getStdErr() { return stdErr; }

	public void setExitCode(int exitCode) { this.exitCode = exitCode; }
	public void setStdOut(String stdOut) { this.stdOut = stdOut; }
	public void setStdErr(String stdErr) { this.stdErr = stdErr; }

}
36 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/cmd/CommandExecutor.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.cmd;
2 |
3 | import java.io.IOException;
4 | import java.io.InputStream;
5 | import java.io.OutputStream;
6 | import java.io.StringWriter;
7 |
8 | import org.apache.commons.exec.CommandLine;
9 | import org.apache.commons.exec.DefaultExecutor;
10 | import org.apache.commons.exec.ExecuteStreamHandler;
11 | import org.apache.commons.io.IOUtils;
12 | import org.apache.log4j.Logger;
13 |
14 |
15 | public class CommandExecutor {
16 |
17 | private static final Logger logger = Logger.getLogger(CommandExecutor.class);
18 |
19 | public CmdResult execute(CommandLine cmdLine, int maxAttempts) {
20 |
21 | CmdResult lastCmdResult = null;
22 |
23 | int attempts = 0;
24 | while(attempts < maxAttempts) {
25 |
26 | attempts++;
27 |
28 | final StringWriter stdOut = new StringWriter();
29 | final StringWriter stdErr = new StringWriter();
30 |
31 | try {
32 |
33 | DefaultExecutor executor = new DefaultExecutor();
34 | executor.setStreamHandler(new ExecuteStreamHandler() {
35 | public void setProcessOutputStream(InputStream is) throws IOException {IOUtils.copy(is, stdOut, "UTF-8");}
36 | public void setProcessErrorStream(InputStream is) throws IOException {IOUtils.copy(is, stdErr, "UTF-8");}
37 | public void stop() throws IOException {}
38 | public void start() throws IOException {}
39 | public void setProcessInputStream(OutputStream os) throws IOException {}
40 | });
41 |
42 | logger.trace("Executing: attempt:" + attempts + " " + cmdLine.toString());
43 |
44 | int exitValue = executor.execute(cmdLine);
45 | if (exitValue > 0) {
46 | logger.error("ERROR: attempt #: " + attempts+ " exitCode: "+exitValue+" cmd=" + cmdLine.toString());
47 | }
48 |
49 | //System.out.println("STDOUT:"+stdOut);
50 | //System.out.println("STDERR:"+stdErr);
51 |
52 | lastCmdResult = new CmdResult(exitValue,stdOut.toString(),stdErr.toString());
53 |
54 | // if successful exit loop immediately...
55 | if (exitValue == 0) {
56 | logger.trace("SUCCESS! exitCode = 0: " + cmdLine.toString());
57 | break;
58 | }
59 |
60 | } catch(Exception e) {
61 | logger.error("execute() attempt #: " + attempts+ " cmd:"+cmdLine.toString() + " exception:"+e.getMessage(),e);
62 | lastCmdResult = new CmdResult(9999, stdOut.toString(), "attempt #: " + attempts+ " exception: " + e.getMessage() + " stdErr: " + stdErr.toString());
63 | }
64 | }
65 |
66 | return lastCmdResult;
67 | }
68 |
69 |
70 | }
71 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/cmd/TocPathOpResult.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.cmd;
2 |
3 | import org.bitsofinfo.s3.toc.TOCPayload;
4 |
5 | public class TocPathOpResult {
6 |
7 | public boolean success;
8 | public String filePath;
9 | public String operation;
10 | public String message;
11 | public TOCPayload.MODE mode;
12 |
13 |
14 | public TocPathOpResult(TOCPayload.MODE mode, boolean success, String filePath, String operation, String message) {
15 | super();
16 | this.success = success;
17 | this.filePath = filePath;
18 | this.operation = operation;
19 | this.message = message;
20 | this.mode = mode;
21 | }
22 |
23 | public boolean isSuccess() {
24 | return success;
25 | }
26 | public void setSuccess(boolean success) {
27 | this.success = success;
28 | }
29 | public String getFilePath() {
30 | return filePath;
31 | }
32 | public void setFilePath(String filePath) {
33 | this.filePath = filePath;
34 | }
35 | public String getOperation() {
36 | return operation;
37 | }
38 | public void setOperation(String operation) {
39 | this.operation = operation;
40 | }
41 | public String getMessage() {
42 | return message;
43 | }
44 | public void setMessage(String message) {
45 | this.message = message;
46 | }
47 |
48 | public TOCPayload.MODE getMode() {
49 | return mode;
50 | }
51 |
52 | public void setMode(TOCPayload.MODE mode) {
53 | this.mode = mode;
54 | }
55 |
56 |
57 | }
58 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/control/CCMode.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.control;
2 |
/**
 * Lifecycle modes broadcast over the control channel; the master drives
 * workers through these states (see CCPayloadType *_CURRENT_MODE messages).
 */
public enum CCMode {

	/** Node has started but has not yet been directed to do anything. */
	INITIALIZED,

	/** Prepare for the upcoming WRITE phase. */
	PREPARE,

	/** Write phase is active. */
	WRITE,

	/** No active work; awaiting the next mode change. */
	IDLE,

	/** Validation phase is active. */
	VALIDATE,

	/** Workers should report accumulated error details to the master. */
	REPORT_ERRORS

}
13 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/control/CCPayload.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.control;
2 |
3 | public class CCPayload {
4 |
5 | public boolean fromMaster = false;
6 | public String sourceHostId = null;
7 | public String sourceHostIP = null;
8 | public String onlyForHostIdOrIP = null;
9 | public CCPayloadType type = null;
10 | public Object value = null;
11 |
12 | }
13 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/control/CCPayloadHandler.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.control;
2 |
3 | public interface CCPayloadHandler {
4 |
5 | public void handlePayload(CCPayload payload) throws Exception;
6 |
7 | }
8 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/control/CCPayloadType.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.control;
2 |
/**
 * Types of messages exchanged on the master/worker control channel.
 */
public enum CCPayloadType {

	/** Current mode of the master. */
	MASTER_CURRENT_MODE,

	/** Current mode of a worker. */
	WORKER_CURRENT_MODE,

	/** Sent periodically during WRITE mode: successful/failed counts so far. */
	WORKER_WRITES_CURRENT_SUMMARY,

	/** Sent periodically during VALIDATE mode: successful/failed counts so far. */
	WORKER_VALIDATIONS_CURRENT_SUMMARY,

	/** Sent by a worker once idle: total WRITE mode messages processed. */
	WORKER_WRITES_FINISHED_SUMMARY,

	/** Sent by a worker once idle: total VALIDATE mode messages processed. */
	WORKER_VALIDATIONS_FINISHED_SUMMARY,

	/** Sent by workers when REPORT_ERRORS mode is switched on. */
	WORKER_ERROR_REPORT_DETAILS,

	/** Sent by the master to tell a worker to shut down. */
	CMD_WORKER_SHUTDOWN

}
19 |
--------------------------------------------------------------------------------
/src/main/java/org/bitsofinfo/s3/control/ControlChannel.java:
--------------------------------------------------------------------------------
1 | package org.bitsofinfo.s3.control;
2 |
3 | import java.net.InetAddress;
4 | import java.util.Arrays;
5 | import java.util.HashMap;
6 | import java.util.List;
7 | import java.util.Map;
8 | import java.util.UUID;
9 |
10 | import org.apache.log4j.Logger;
11 |
12 | import com.amazonaws.auth.BasicAWSCredentials;
13 | import com.amazonaws.auth.policy.Policy;
14 | import com.amazonaws.auth.policy.Principal;
15 | import com.amazonaws.auth.policy.Resource;
16 | import com.amazonaws.auth.policy.Statement;
17 | import com.amazonaws.auth.policy.Statement.Effect;
18 | import com.amazonaws.auth.policy.actions.SQSActions;
19 | import com.amazonaws.auth.policy.conditions.ConditionFactory;
20 | import com.amazonaws.services.sns.AmazonSNSClient;
21 | import com.amazonaws.services.sns.model.CreateTopicResult;
22 | import com.amazonaws.services.sns.model.ListTopicsResult;
23 | import com.amazonaws.services.sns.model.SubscribeResult;
24 | import com.amazonaws.services.sns.model.Topic;
25 | import com.amazonaws.services.sqs.AmazonSQSClient;
26 | import com.amazonaws.services.sqs.model.CreateQueueResult;
27 | import com.amazonaws.services.sqs.model.Message;
28 | import com.amazonaws.services.sqs.model.ReceiveMessageRequest;
29 | import com.amazonaws.services.sqs.model.ReceiveMessageResult;
30 | import com.amazonaws.services.sqs.model.SetQueueAttributesRequest;
31 | import com.google.gson.Gson;
32 | import com.google.gson.reflect.TypeToken;
33 |
34 | public class ControlChannel implements Runnable {
35 |
	private static final Logger logger = Logger.getLogger(ControlChannel.class);

	// AWS clients: SNS for the broadcast topic, SQS for this node's private queue
	private AmazonSNSClient snsClient = null;
	private AmazonSQSClient sqsClient = null;

	// ARNs/URL of the SNS topic, our subscription to it, and our SQS queue
	private String snsTopicARN = null;
	private String snsSubscriptionARN = null;
	private String sqsQueueUrl = null;
	private String sqsQueueARN = null;

	// JSON (de)serializer for CCPayload messages
	private Gson gson = new Gson();

	// callback invoked for each payload consumed from the queue
	private CCPayloadHandler ccPayloadHandler = null;
	// this node's identity: "<hostname>-<uuid>" (built in the constructor) and local IP
	private String mySourceIdentifier = null;
	private String mySourceIp = null;
	// background thread executing run() to poll SQS
	private Thread consumerThread = null;

	// only the master (the topic creator) may destroy the topic
	private boolean canDestroyTopic = false;

	// consumer-loop flag; set false to stop run()
	private boolean running = true;
	private String snsControlTopicName = null;

	// 4-char random suffix making this node's topic/queue names unique
	private String uuid = UUID.randomUUID().toString().replace("-", "").substring(0,4);; // NOTE(review): stray second ';' — harmless empty declaration
59 |
	/**
	 * Builds the control channel: resolves this node's identity and IP,
	 * derives the topic name (the master appends its unique identifier),
	 * creates the SNS/SQS clients and connects/subscribes to the control topic.
	 *
	 * @param callerIsMaster true when the caller is the master; only the master
	 *        may create (and later destroy) the topic
	 * @param awsAccessKey AWS access key for the SNS/SQS clients
	 * @param awsSecretKey AWS secret key for the SNS/SQS clients
	 * @param snsControlTopicName base name of the SNS control topic
	 * @param userAccountPrincipalId AWS account principal id (passed through to connectToTopic)
	 * @param userARN ARN granted publish/subscribe/receive on a newly created topic
	 * @param ccPayloadHandler callback invoked for each received payload
	 * @throws Exception if the topic cannot be found/created or AWS setup fails
	 */
	public ControlChannel(boolean callerIsMaster,
			String awsAccessKey, String awsSecretKey, String snsControlTopicName,
			String userAccountPrincipalId,
			String userARN,
			CCPayloadHandler ccPayloadHandler) throws Exception {
		super();

		// best-effort: a failure to resolve our IP is logged, not fatal
		try {
			this.mySourceIp = InetAddress.getLocalHost().getHostAddress();
		} catch(Exception e) {
			logger.error("Error getting local inet address: " + e.getMessage());
		}

		mySourceIdentifier = determineHostName() + "-" +uuid;
		this.snsControlTopicName = snsControlTopicName;

		// the master owns the topic: suffix its name so each run is unique
		if (callerIsMaster) {
			canDestroyTopic = true;
			this.snsControlTopicName += "-" + mySourceIdentifier;
		}

		this.ccPayloadHandler = ccPayloadHandler;

		sqsClient = new AmazonSQSClient(new BasicAWSCredentials(awsAccessKey, awsSecretKey));
		snsClient = new AmazonSNSClient(new BasicAWSCredentials(awsAccessKey, awsSecretKey));

		// workers poll up to 1000 times for the master-created topic to appear
		this.connectToTopic(callerIsMaster, 1000, userAccountPrincipalId, userARN);

	}
92 |
93 |
94 | public void connectToTopic(boolean callerIsMaster, int maxAttempts, String userAccountPrincipalId, String userARN) throws Exception {
95 |
96 |
97 | // try up to max attempts to connect to pre-existing topic
98 | for (int i=0; i topics = listResult.getTopics();
104 |
105 | while(topics != null) {
106 |
107 | for (Topic topic : topics) {
108 |
109 | // note we do index of match....
110 | if (topic.getTopicArn().indexOf(snsControlTopicName) != -1) {
111 | snsTopicARN = topic.getTopicArn();
112 | logger.info("Found existing SNS topic by name: "+snsControlTopicName + " @ " + snsTopicARN);
113 | break;
114 | }
115 | }
116 |
117 | String nextToken = listResult.getNextToken();
118 |
119 | if (nextToken != null && snsTopicARN == null) {
120 | listResult = snsClient.listTopics(nextToken);
121 | topics = listResult.getTopics();
122 |
123 | } else {
124 | break;
125 | }
126 | }
127 |
128 | // if consumer, retry, otherwise is master, so just exit quick to create...
129 | if (snsTopicARN == null && !callerIsMaster) {
130 | Thread.currentThread().sleep(1000);
131 | continue;
132 | } else {
133 | break; // exit;
134 | }
135 | }
136 |
137 |
138 |
139 | // if master only he can create...
140 | if (snsTopicARN == null && callerIsMaster) {
141 | this.snsControlTopicName = this.snsControlTopicName.substring(0,(snsControlTopicName.length() > 80 ? 80 : this.snsControlTopicName.length()));
142 |
143 | logger.info("Attempting to create new SNS control channel topic by name: "+this.snsControlTopicName);
144 |
145 | CreateTopicResult createTopicResult = snsClient.createTopic(this.snsControlTopicName);
146 | snsTopicARN = createTopicResult.getTopicArn();
147 | snsClient.addPermission(snsTopicARN, "Permit_SNSAdd",
148 | Arrays.asList(new String[]{userARN}),
149 | Arrays.asList(new String[]{"Publish","Subscribe","Receive"}));
150 | logger.info("Created new SNS control channel topic by name: "+this.snsControlTopicName + " @ " + snsTopicARN);
151 |
152 | } else if (snsTopicARN == null) {
153 | throw new Exception("Worker() cannot start, snsControlTopicName has yet to be created by master?: " + this.snsControlTopicName);
154 | }
155 |
156 | // http://www.jorgjanke.com/2013/01/aws-sns-topic-subscriptions-with-sqs.html
157 |
158 | // create SQS queue to get SNS notifications (max 80 len)
159 | String prefix = ("s3bktLoaderCC_" + mySourceIdentifier);
160 | String sqsQueueName = prefix.substring(0,(prefix.length() > 80 ? 80 : prefix.length()));
161 |
162 | CreateQueueResult createQueueResult = sqsClient.createQueue(sqsQueueName);
163 | this.sqsQueueUrl = createQueueResult.getQueueUrl();
164 | this.sqsQueueARN = sqsClient.getQueueAttributes(sqsQueueUrl, Arrays.asList(new String[]{"QueueArn"})).getAttributes().get("QueueArn");
165 |
166 | Statement statement = new Statement(Effect.Allow)
167 | .withActions(SQSActions.SendMessage)
168 | .withPrincipals(new Principal("*"))
169 | .withConditions(ConditionFactory.newSourceArnCondition(snsTopicARN))
170 | .withResources(new Resource(sqsQueueARN));
171 | Policy policy = new Policy("SubscriptionPermission").withStatements(statement);
172 |
173 | HashMap attributes = new HashMap();
174 | attributes.put("Policy", policy.toJson());
175 | SetQueueAttributesRequest request = new SetQueueAttributesRequest(sqsQueueUrl, attributes);
176 | sqsClient.setQueueAttributes(request);
177 |
178 | logger.info("Created SQS queue: " + sqsQueueARN + " @ " + sqsQueueUrl);
179 |
180 | // subscribe our SQS queue to the SNS:s3MountTest topic
181 | SubscribeResult subscribeResult = snsClient.subscribe(snsTopicARN,"sqs",sqsQueueARN);
182 | snsSubscriptionARN = subscribeResult.getSubscriptionArn();
183 | logger.info("Subscribed for messages from SNS control channel:" + snsTopicARN + " ----> SQS: "+sqsQueueARN);
184 | logger.info("Subscription ARN: " + snsSubscriptionARN);
185 |
186 | this.consumerThread = new Thread(this,"ControlChannel msg consumer thread");
187 | this.consumerThread.start();
188 |
189 | logger.info("\n-------------------------------------------\n" +
190 | "CONTROL CHANNEL: ALL SNS/SQS resources hooked up OK\n" +
191 | "-------------------------------------------\n");
192 | }
193 |
194 |
	/**
	 * Broadcasts a payload to all nodes (no host targeting).
	 * Convenience overload of {@link #send(boolean, CCPayloadType, String, Object)}.
	 */
	public void send(boolean fromMaster, CCPayloadType type, Object value) throws Exception {
		this.send(fromMaster,type,null,value);
	}
198 |
199 | public void send(boolean fromMaster, CCPayloadType type, String onlyForHostOrIp, Object value) throws Exception {
200 | CCPayload payload = new CCPayload();
201 | payload.fromMaster = fromMaster;
202 | payload.type = type;
203 | payload.value = value;
204 | payload.onlyForHostIdOrIP = onlyForHostOrIp;
205 | payload.sourceHostId = this.mySourceIdentifier.trim();
206 | payload.sourceHostIP = this.mySourceIp;
207 |
208 | logger.debug("Sending: " + type + "="+value);
209 |
210 | // send!
211 | this.snsClient.publish(this.snsTopicARN, gson.toJson(payload));
212 | }
213 |
214 | public void run() {
215 |
216 | while(running) {
217 |
218 | try {
219 |
220 | ReceiveMessageRequest req = new ReceiveMessageRequest();
221 | req.setWaitTimeSeconds(10);
222 | req.setMaxNumberOfMessages(10);
223 | req.setVisibilityTimeout(300);
224 | req.setQueueUrl(sqsQueueUrl);
225 |
226 | ReceiveMessageResult msgResult = sqsClient.receiveMessage(req);
227 | List messages = msgResult.getMessages();
228 |
229 | for (Message msg : messages) {
230 |
231 | CCPayload payload = null;
232 |
233 | try {
234 | Map body = gson.fromJson(msg.getBody(), new TypeToken