├── .gitignore
├── LICENSE
├── README.md
├── bin
└── p42
├── doc
└── cf-template.json
├── package.json
├── share
├── aws
│ ├── cf
│ │ └── vpc.yaml
│ ├── dns
│ │ ├── a.yaml
│ │ ├── alias.yaml
│ │ └── srv.yaml
│ └── ecr
│ │ └── policy.yaml
├── commands.yaml
├── interviews
│ └── init.yaml
├── messages.yaml
├── mixins
│ └── coffee
│ │ ├── interview.yaml
│ │ └── template
│ │ ├── Dockerfile.tmpl
│ │ └── config.yaml
├── options.yaml
└── words.yaml
├── src
├── application.coffee
├── cli.coffee
├── cluster.coffee
├── commands
│ ├── build.coffee
│ ├── cluster.coffee
│ ├── help.coffee
│ ├── index.coffee
│ ├── init.coffee
│ ├── mixin.coffee
│ ├── run.coffee
│ ├── start.coffee
│ ├── stop.coffee
│ └── target.coffee
├── decorators.coffee
├── helpers
│ ├── aws.coffee
│ ├── dns.coffee
│ └── docker.coffee
├── interview.coffee
├── logger.coffee
├── name.coffee
├── options.coffee
├── raise.coffee
├── run.coffee
├── serialize.coffee
├── sh.coffee
├── shared.coffee
├── template.coffee
└── tmp.coffee
└── test
├── aws-helpers.coffee
├── cli-helpers.coffee
├── data
├── app
│ ├── p42.yaml
│ └── run
│ │ └── api
│ │ ├── Dockerfile
│ │ └── config.yaml
├── clusters
│ └── violent-aftermath.yaml
└── expectations.yaml
├── dns-helpers.coffee
├── docker-helpers.coffee
├── foundation.coffee
├── helpers.coffee
├── index.coffee
└── test-sh.coffee
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | log
3 | *.log
4 | .DS_Store
5 | lib
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (c) 2016, Panda Strike
2 |
3 | Permission to use, copy, modify, and/or distribute this software for any
4 | purpose with or without fee is hereby granted, provided that the above
5 | copyright notice and this permission notice appear in all copies.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
13 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # p42
2 |
3 | > **IMPORTANT** This project is no longer under active development.
4 | > Based on what we've learned building this,
5 | > we recommend looking at [Convox][] instead.
6 |
7 | [Convox]:https://github.com/convox/rack
8 |
9 | A CLI for simplifying the use of AWS with Docker Swarm.
10 |
11 | ## Getting Started
12 |
13 | ### Prerequisites
14 |
15 | - Bash version 3 or later
16 | - Docker version 1.10
17 | - Docker Machine version 0.6 release candidate (see below)
18 | - AWS CLI version 1.10.8
19 | - Node version 4 or later
20 | - NPM version 2 or later
21 | - `yaml` (via `npm install yaml -g`) version 1 or later
22 |
23 | To install the Docker Machine release candidate, run the following from the shell:
24 |
25 | ```sh
26 | curl -L https://github.com/docker/machine/releases/download/v0.6.0-rc4/docker-machine-`uname -s`-`uname -m` > /usr/local/bin/docker-machine && \
27 | chmod +x /usr/local/bin/docker-machine
28 | ```
29 |
30 | ### Installation
31 |
32 | ```
33 | $ npm install -g p42
34 | ```
35 |
36 | ### Creating A Cluster
37 |
38 | ```
39 | $ p42 cluster create
40 | Creating VPC [red-ghost]...
41 | ```
42 |
43 | #### Add Nodes To The Cluster
44 |
45 | To add 3 nodes to a cluster:
46 |
47 | ```
48 | $ p42 cluster add red-ghost -n 3
49 | ```
50 |
51 | To add just one:
52 |
53 | ```
54 | $ p42 cluster add red-ghost
55 | ```
56 |
57 | #### Using Docker Commands
58 |
59 | If you want to use Docker commands directly:
60 |
61 | ```
62 | $ eval $(p42 cluster env red-ghost)
63 | ```
64 |
65 | which will select the Swarm master, if possible, or the default machine otherwise.
66 |
67 | #### Examining Your Cluster
68 |
69 | ```
70 | p42 cluster ls red-ghost
71 | ```
72 |
73 | ### Running An App
74 |
75 | #### Initialize Your App
76 |
77 | ```
78 | $ p42 init
79 | Application name [blurb9]:
80 | Organization repository [pandastrike]:
81 | ```
82 |
83 | #### Add Mixins
84 |
85 | Provide a git-cloneable URL for the mixin repo:
86 |
87 | ```
88 | $ p42 mixin add git@github.com:pandastrike/p42-mixin-nginx.git
89 | Document root [www]:
90 | ```
91 |
92 | #### Add Target
93 |
94 | Add the cluster as a target for your app.
95 |
96 | ```tty
97 | $ p42 target add master red-ghost
98 | ```
99 |
100 | #### Run Your App
101 |
102 | The `run` command will build and run all the images described in your `launch` directory.
103 |
104 | ```
105 | $ p42 run
106 | ```
107 |
108 | ## Example
109 |
110 | Let's build a simple Web page and deploy it using `p42`.
111 |
112 | We'll assume we've already created a cluster (see [Creating A Cluster](#creating-a-cluster)).
113 |
114 | Let's create an application directory and initialize it.
115 |
116 | ```
117 | $ mkdir hello-world
118 | $ cd hello-world
119 | $ p42 init
120 | Application name [hello-world]:
121 | Organization repository []: pandastrike
122 | ```
123 |
124 | Add the Nginx mixin.
125 |
126 | ```
127 | $ p42 mixin add git@github.com:pandastrike/p42-mixin-nginx.git
128 | Document root [www]:
129 | ```
130 |
131 | This will create a `launch/www` directory that includes a `Dockerfile` for running Nginx.
132 |
133 | Create an index HTML file.
134 |
135 | ```
136 | $ mkdir www
137 | $ cat >> www/index.html
138 | Hello, World!
139 | ```
140 |
141 | Run your application.
142 |
143 | ```
144 | $ p42 run
145 | ```
146 |
147 | This will take a minute to build and run the image described by `launch/www/Dockerfile`.
148 |
149 | Get the IP and port of your Nginx container.
150 |
151 | ```
152 | $ p42 ps
153 | swarm-01/hello-world-www 159.203.247.225:32769->80/tcp, 159.203.247.225:32768->443/tcp
154 | $ curl 159.203.247.225:32769
155 | Hello, World!
156 | ```
157 |
158 | ## Autocomplete
159 |
160 | You can add autocomplete to your shell by running:
161 |
162 | ```
163 | $ eval $(p42 env -)
164 | ```
165 |
166 | ## Status
167 |
168 | `p42` is no longer under active development; see the note at the top of this README.
169 |
170 | ## Reference
171 |
172 | Run `p42 help` to get a list of commands and what they do.
173 |
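174 | The `launch` directory that `p42 mixin add` generates looks roughly like
175 | this (a sketch based on the coffee mixin in `share/mixins`; exact contents
176 | vary by mixin):
177 | 
178 | ```
179 | launch/
180 | └── www/
181 |     ├── Dockerfile    # image built and started by `p42 run`
182 |     └── config.yaml   # values captured by the mixin interview
183 | ```
184 | 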
--------------------------------------------------------------------------------
/bin/p42:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env coffee
2 |
3 | CLI = require "../src/cli"
4 |
5 | CLI process.argv[2..]
6 |
--------------------------------------------------------------------------------
/doc/cf-template.json:
--------------------------------------------------------------------------------
1 | {
2 | "AWSTemplateFormatVersion": "2010-09-09",
3 | "Description": "AWS CloudFormation Template for use with p42.",
4 | "Parameters": {
5 | "KeyName": {
6 | "Description": "Name of an existing EC2 KeyPair to enable SSH access to the bastion host",
7 | "Type": "AWS::EC2::KeyPair::KeyName",
8 | "ConstraintDescription": "must be the name of an existing EC2 KeyPair."
9 | },
10 | "SSHLocation": {
11 | "Description": "Lockdown SSH access to the bastion host (default can be accessed from anywhere)",
12 | "Type": "String",
13 | "MinLength": "9",
14 | "MaxLength": "18",
15 | "Default": "0.0.0.0/0",
16 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})",
17 | "ConstraintDescription": "must be a valid CIDR range of the form x.x.x.x/x."
18 | },
19 | "EC2InstanceType": {
20 | "Description": "EC2 instance type",
21 | "Type": "String",
22 | "Default": "t2.small",
23 | "AllowedValues": [
24 | "t1.micro",
25 | "t2.nano",
26 | "t2.micro",
27 | "t2.small",
28 | "t2.medium",
29 | "t2.large",
30 | "m1.small",
31 | "m1.medium",
32 | "m1.large",
33 | "m1.xlarge",
34 | "m2.xlarge",
35 | "m2.2xlarge",
36 | "m2.4xlarge",
37 | "m3.medium",
38 | "m3.large",
39 | "m3.xlarge",
40 | "m3.2xlarge",
41 | "m4.large",
42 | "m4.xlarge",
43 | "m4.2xlarge",
44 | "m4.4xlarge",
45 | "m4.10xlarge",
46 | "c1.medium",
47 | "c1.xlarge",
48 | "c3.large",
49 | "c3.xlarge",
50 | "c3.2xlarge",
51 | "c3.4xlarge",
52 | "c3.8xlarge",
53 | "c4.large",
54 | "c4.xlarge",
55 | "c4.2xlarge",
56 | "c4.4xlarge",
57 | "c4.8xlarge",
58 | "g2.2xlarge",
59 | "g2.8xlarge",
60 | "r3.large",
61 | "r3.xlarge",
62 | "r3.2xlarge",
63 | "r3.4xlarge",
64 | "r3.8xlarge",
65 | "i2.xlarge",
66 | "i2.2xlarge",
67 | "i2.4xlarge",
68 | "i2.8xlarge",
69 | "d2.xlarge",
70 | "d2.2xlarge",
71 | "d2.4xlarge",
72 | "d2.8xlarge",
73 | "hi1.4xlarge",
74 | "hs1.8xlarge",
75 | "cr1.8xlarge",
76 | "cc2.8xlarge",
77 | "cg1.4xlarge"
78 | ],
79 | "ConstraintDescription": "must be a valid EC2 instance type."
80 | }
81 | },
82 | "Mappings": {
83 | "SubnetConfig": {
84 | "VPC": {
85 | "CIDR": "10.0.0.0/16"
86 | },
87 | "Public": {
88 | "CIDR": "10.0.0.0/24"
89 | }
90 | },
91 | "AWSInstanceType2Arch": {
92 | "t1.micro": {
93 | "Arch": "PV64"
94 | },
95 | "t2.nano": {
96 | "Arch": "HVM64"
97 | },
98 | "t2.micro": {
99 | "Arch": "HVM64"
100 | },
101 | "t2.small": {
102 | "Arch": "HVM64"
103 | },
104 | "t2.medium": {
105 | "Arch": "HVM64"
106 | },
107 | "t2.large": {
108 | "Arch": "HVM64"
109 | },
110 | "m1.small": {
111 | "Arch": "PV64"
112 | },
113 | "m1.medium": {
114 | "Arch": "PV64"
115 | },
116 | "m1.large": {
117 | "Arch": "PV64"
118 | },
119 | "m1.xlarge": {
120 | "Arch": "PV64"
121 | },
122 | "m2.xlarge": {
123 | "Arch": "PV64"
124 | },
125 | "m2.2xlarge": {
126 | "Arch": "PV64"
127 | },
128 | "m2.4xlarge": {
129 | "Arch": "PV64"
130 | },
131 | "m3.medium": {
132 | "Arch": "HVM64"
133 | },
134 | "m3.large": {
135 | "Arch": "HVM64"
136 | },
137 | "m3.xlarge": {
138 | "Arch": "HVM64"
139 | },
140 | "m3.2xlarge": {
141 | "Arch": "HVM64"
142 | },
143 | "m4.large": {
144 | "Arch": "HVM64"
145 | },
146 | "m4.xlarge": {
147 | "Arch": "HVM64"
148 | },
149 | "m4.2xlarge": {
150 | "Arch": "HVM64"
151 | },
152 | "m4.4xlarge": {
153 | "Arch": "HVM64"
154 | },
155 | "m4.10xlarge": {
156 | "Arch": "HVM64"
157 | },
158 | "c1.medium": {
159 | "Arch": "PV64"
160 | },
161 | "c1.xlarge": {
162 | "Arch": "PV64"
163 | },
164 | "c3.large": {
165 | "Arch": "HVM64"
166 | },
167 | "c3.xlarge": {
168 | "Arch": "HVM64"
169 | },
170 | "c3.2xlarge": {
171 | "Arch": "HVM64"
172 | },
173 | "c3.4xlarge": {
174 | "Arch": "HVM64"
175 | },
176 | "c3.8xlarge": {
177 | "Arch": "HVM64"
178 | },
179 | "c4.large": {
180 | "Arch": "HVM64"
181 | },
182 | "c4.xlarge": {
183 | "Arch": "HVM64"
184 | },
185 | "c4.2xlarge": {
186 | "Arch": "HVM64"
187 | },
188 | "c4.4xlarge": {
189 | "Arch": "HVM64"
190 | },
191 | "c4.8xlarge": {
192 | "Arch": "HVM64"
193 | },
194 | "g2.2xlarge": {
195 | "Arch": "HVMG2"
196 | },
197 | "g2.8xlarge": {
198 | "Arch": "HVMG2"
199 | },
200 | "r3.large": {
201 | "Arch": "HVM64"
202 | },
203 | "r3.xlarge": {
204 | "Arch": "HVM64"
205 | },
206 | "r3.2xlarge": {
207 | "Arch": "HVM64"
208 | },
209 | "r3.4xlarge": {
210 | "Arch": "HVM64"
211 | },
212 | "r3.8xlarge": {
213 | "Arch": "HVM64"
214 | },
215 | "i2.xlarge": {
216 | "Arch": "HVM64"
217 | },
218 | "i2.2xlarge": {
219 | "Arch": "HVM64"
220 | },
221 | "i2.4xlarge": {
222 | "Arch": "HVM64"
223 | },
224 | "i2.8xlarge": {
225 | "Arch": "HVM64"
226 | },
227 | "d2.xlarge": {
228 | "Arch": "HVM64"
229 | },
230 | "d2.2xlarge": {
231 | "Arch": "HVM64"
232 | },
233 | "d2.4xlarge": {
234 | "Arch": "HVM64"
235 | },
236 | "d2.8xlarge": {
237 | "Arch": "HVM64"
238 | },
239 | "hi1.4xlarge": {
240 | "Arch": "HVM64"
241 | },
242 | "hs1.8xlarge": {
243 | "Arch": "HVM64"
244 | },
245 | "cr1.8xlarge": {
246 | "Arch": "HVM64"
247 | },
248 | "cc2.8xlarge": {
249 | "Arch": "HVM64"
250 | }
251 | },
252 | "AWSInstanceType2NATArch": {
253 | "t1.micro": {
254 | "Arch": "NATPV64"
255 | },
256 | "t2.nano": {
257 | "Arch": "NATHVM64"
258 | },
259 | "t2.micro": {
260 | "Arch": "NATHVM64"
261 | },
262 | "t2.small": {
263 | "Arch": "NATHVM64"
264 | },
265 | "t2.medium": {
266 | "Arch": "NATHVM64"
267 | },
268 | "t2.large": {
269 | "Arch": "NATHVM64"
270 | },
271 | "m1.small": {
272 | "Arch": "NATPV64"
273 | },
274 | "m1.medium": {
275 | "Arch": "NATPV64"
276 | },
277 | "m1.large": {
278 | "Arch": "NATPV64"
279 | },
280 | "m1.xlarge": {
281 | "Arch": "NATPV64"
282 | },
283 | "m2.xlarge": {
284 | "Arch": "NATPV64"
285 | },
286 | "m2.2xlarge": {
287 | "Arch": "NATPV64"
288 | },
289 | "m2.4xlarge": {
290 | "Arch": "NATPV64"
291 | },
292 | "m3.medium": {
293 | "Arch": "NATHVM64"
294 | },
295 | "m3.large": {
296 | "Arch": "NATHVM64"
297 | },
298 | "m3.xlarge": {
299 | "Arch": "NATHVM64"
300 | },
301 | "m3.2xlarge": {
302 | "Arch": "NATHVM64"
303 | },
304 | "m4.large": {
305 | "Arch": "NATHVM64"
306 | },
307 | "m4.xlarge": {
308 | "Arch": "NATHVM64"
309 | },
310 | "m4.2xlarge": {
311 | "Arch": "NATHVM64"
312 | },
313 | "m4.4xlarge": {
314 | "Arch": "NATHVM64"
315 | },
316 | "m4.10xlarge": {
317 | "Arch": "NATHVM64"
318 | },
319 | "c1.medium": {
320 | "Arch": "NATPV64"
321 | },
322 | "c1.xlarge": {
323 | "Arch": "NATPV64"
324 | },
325 | "c3.large": {
326 | "Arch": "NATHVM64"
327 | },
328 | "c3.xlarge": {
329 | "Arch": "NATHVM64"
330 | },
331 | "c3.2xlarge": {
332 | "Arch": "NATHVM64"
333 | },
334 | "c3.4xlarge": {
335 | "Arch": "NATHVM64"
336 | },
337 | "c3.8xlarge": {
338 | "Arch": "NATHVM64"
339 | },
340 | "c4.large": {
341 | "Arch": "NATHVM64"
342 | },
343 | "c4.xlarge": {
344 | "Arch": "NATHVM64"
345 | },
346 | "c4.2xlarge": {
347 | "Arch": "NATHVM64"
348 | },
349 | "c4.4xlarge": {
350 | "Arch": "NATHVM64"
351 | },
352 | "c4.8xlarge": {
353 | "Arch": "NATHVM64"
354 | },
355 | "g2.2xlarge": {
356 | "Arch": "NATHVMG2"
357 | },
358 | "g2.8xlarge": {
359 | "Arch": "NATHVMG2"
360 | },
361 | "r3.large": {
362 | "Arch": "NATHVM64"
363 | },
364 | "r3.xlarge": {
365 | "Arch": "NATHVM64"
366 | },
367 | "r3.2xlarge": {
368 | "Arch": "NATHVM64"
369 | },
370 | "r3.4xlarge": {
371 | "Arch": "NATHVM64"
372 | },
373 | "r3.8xlarge": {
374 | "Arch": "NATHVM64"
375 | },
376 | "i2.xlarge": {
377 | "Arch": "NATHVM64"
378 | },
379 | "i2.2xlarge": {
380 | "Arch": "NATHVM64"
381 | },
382 | "i2.4xlarge": {
383 | "Arch": "NATHVM64"
384 | },
385 | "i2.8xlarge": {
386 | "Arch": "NATHVM64"
387 | },
388 | "d2.xlarge": {
389 | "Arch": "NATHVM64"
390 | },
391 | "d2.2xlarge": {
392 | "Arch": "NATHVM64"
393 | },
394 | "d2.4xlarge": {
395 | "Arch": "NATHVM64"
396 | },
397 | "d2.8xlarge": {
398 | "Arch": "NATHVM64"
399 | },
400 | "hi1.4xlarge": {
401 | "Arch": "NATHVM64"
402 | },
403 | "hs1.8xlarge": {
404 | "Arch": "NATHVM64"
405 | },
406 | "cr1.8xlarge": {
407 | "Arch": "NATHVM64"
408 | },
409 | "cc2.8xlarge": {
410 | "Arch": "NATHVM64"
411 | }
412 | },
413 | "AWSRegionArch2AMI": {
414 | "us-east-1": {
415 | "PV64": "ami-5fb8c835",
416 | "HVM64": "ami-60b6c60a",
417 | "HVMG2": "ami-e998ea83"
418 | },
419 | "us-west-2": {
420 | "PV64": "ami-d93622b8",
421 | "HVM64": "ami-f0091d91",
422 | "HVMG2": "ami-315f4850"
423 | },
424 | "us-west-1": {
425 | "PV64": "ami-56ea8636",
426 | "HVM64": "ami-d5ea86b5",
427 | "HVMG2": "ami-943956f4"
428 | },
429 | "eu-west-1": {
430 | "PV64": "ami-95e33ce6",
431 | "HVM64": "ami-bff32ccc",
432 | "HVMG2": "ami-83fd23f0"
433 | },
434 | "eu-central-1": {
435 | "PV64": "ami-794a5915",
436 | "HVM64": "ami-bc5b48d0",
437 | "HVMG2": "ami-ba1a09d6"
438 | },
439 | "ap-northeast-1": {
440 | "PV64": "ami-393c1957",
441 | "HVM64": "ami-383c1956",
442 | "HVMG2": "ami-08e5c166"
443 | },
444 | "ap-northeast-2": {
445 | "PV64": "NOT_SUPPORTED",
446 | "HVM64": "ami-249b554a",
447 | "HVMG2": "NOT_SUPPORTED"
448 | },
449 | "ap-southeast-1": {
450 | "PV64": "ami-34bd7a57",
451 | "HVM64": "ami-c9b572aa",
452 | "HVMG2": "ami-5a15d239"
453 | },
454 | "ap-southeast-2": {
455 | "PV64": "ami-ced887ad",
456 | "HVM64": "ami-48d38c2b",
457 | "HVMG2": "ami-0c1a446f"
458 | },
459 | "sa-east-1": {
460 | "PV64": "ami-7d15ad11",
461 | "HVM64": "ami-6817af04",
462 | "HVMG2": "NOT_SUPPORTED"
463 | },
464 | "cn-north-1": {
465 | "PV64": "ami-18ac6575",
466 | "HVM64": "ami-43a36a2e",
467 | "HVMG2": "NOT_SUPPORTED"
468 | }
469 | }
470 | },
471 | "Resources": {
472 | "VPC": {
473 | "Type": "AWS::EC2::VPC",
474 | "Properties": {
475 | "EnableDnsSupport": "true",
476 | "EnableDnsHostnames": "true",
477 | "CidrBlock": {
478 | "Fn::FindInMap": [
479 | "SubnetConfig",
480 | "VPC",
481 | "CIDR"
482 | ]
483 | },
484 | "Tags": [
485 | {
486 | "Key": "Application",
487 | "Value": {
488 | "Ref": "AWS::StackName"
489 | }
490 | },
491 | {
492 | "Key": "Network",
493 | "Value": "Public"
494 | }
495 | ]
496 | },
497 | "Metadata": {
498 | "AWS::CloudFormation::Designer": {
499 | "id": "a01f146b-dd51-4935-94cb-7d46278bccba"
500 | }
501 | }
502 | },
503 | "PublicSubnet": {
504 | "Type": "AWS::EC2::Subnet",
505 | "Properties": {
506 | "VpcId": {
507 | "Ref": "VPC"
508 | },
509 | "CidrBlock": {
510 | "Fn::FindInMap": [
511 | "SubnetConfig",
512 | "Public",
513 | "CIDR"
514 | ]
515 | },
516 | "Tags": [
517 | {
518 | "Key": "Application",
519 | "Value": {
520 | "Ref": "AWS::StackName"
521 | }
522 | },
523 | {
524 | "Key": "Network",
525 | "Value": "Public"
526 | }
527 | ]
528 | },
529 | "Metadata": {
530 | "AWS::CloudFormation::Designer": {
531 | "id": "237a98ca-bae9-44d0-8c0e-078801a04cef"
532 | }
533 | }
534 | },
535 | "InternetGateway": {
536 | "Type": "AWS::EC2::InternetGateway",
537 | "Properties": {
538 | "Tags": [
539 | {
540 | "Key": "Application",
541 | "Value": {
542 | "Ref": "AWS::StackName"
543 | }
544 | },
545 | {
546 | "Key": "Network",
547 | "Value": "Public"
548 | }
549 | ]
550 | },
551 | "Metadata": {
552 | "AWS::CloudFormation::Designer": {
553 | "id": "1e4580ef-6e51-4daf-9f9b-778096368de7"
554 | }
555 | }
556 | },
557 | "GatewayToInternet": {
558 | "Type": "AWS::EC2::VPCGatewayAttachment",
559 | "Properties": {
560 | "VpcId": {
561 | "Ref": "VPC"
562 | },
563 | "InternetGatewayId": {
564 | "Ref": "InternetGateway"
565 | }
566 | },
567 | "Metadata": {
568 | "AWS::CloudFormation::Designer": {
569 | "id": "650759d1-5122-4fd9-8e1f-93bda58244bc"
570 | }
571 | }
572 | },
573 | "PublicRouteTable": {
574 | "Type": "AWS::EC2::RouteTable",
575 | "Properties": {
576 | "VpcId": {
577 | "Ref": "VPC"
578 | },
579 | "Tags": [
580 | {
581 | "Key": "Application",
582 | "Value": {
583 | "Ref": "AWS::StackName"
584 | }
585 | },
586 | {
587 | "Key": "Network",
588 | "Value": "Public"
589 | }
590 | ]
591 | },
592 | "Metadata": {
593 | "AWS::CloudFormation::Designer": {
594 | "id": "e9acd514-bdcc-4d89-944c-6c9585c0b215"
595 | }
596 | }
597 | },
598 | "PublicRoute": {
599 | "Type": "AWS::EC2::Route",
600 | "DependsOn": "GatewayToInternet",
601 | "Properties": {
602 | "RouteTableId": {
603 | "Ref": "PublicRouteTable"
604 | },
605 | "DestinationCidrBlock": "0.0.0.0/0",
606 | "GatewayId": {
607 | "Ref": "InternetGateway"
608 | }
609 | },
610 | "Metadata": {
611 | "AWS::CloudFormation::Designer": {
612 | "id": "e940bae2-493f-4016-9fb4-c1b62e1a348d"
613 | }
614 | }
615 | },
616 | "PublicSubnetRouteTableAssociation": {
617 | "Type": "AWS::EC2::SubnetRouteTableAssociation",
618 | "Properties": {
619 | "SubnetId": {
620 | "Ref": "PublicSubnet"
621 | },
622 | "RouteTableId": {
623 | "Ref": "PublicRouteTable"
624 | }
625 | },
626 | "Metadata": {
627 | "AWS::CloudFormation::Designer": {
628 | "id": "5135621c-2561-4efd-ba18-e9419832ef2c"
629 | }
630 | }
631 | },
632 | "PublicNetworkAcl": {
633 | "Type": "AWS::EC2::NetworkAcl",
634 | "Properties": {
635 | "VpcId": {
636 | "Ref": "VPC"
637 | },
638 | "Tags": [
639 | {
640 | "Key": "Application",
641 | "Value": {
642 | "Ref": "AWS::StackName"
643 | }
644 | },
645 | {
646 | "Key": "Network",
647 | "Value": "Public"
648 | }
649 | ]
650 | },
651 | "Metadata": {
652 | "AWS::CloudFormation::Designer": {
653 | "id": "d41ab43e-f7ca-46e9-86e2-2520558295c8"
654 | }
655 | }
656 | },
657 | "InboundHTTPPublicNetworkAclEntry": {
658 | "Type": "AWS::EC2::NetworkAclEntry",
659 | "Properties": {
660 | "NetworkAclId": {
661 | "Ref": "PublicNetworkAcl"
662 | },
663 | "RuleNumber": "100",
664 | "Protocol": "6",
665 | "RuleAction": "allow",
666 | "Egress": "false",
667 | "CidrBlock": "0.0.0.0/0",
668 | "PortRange": {
669 | "From": "80",
670 | "To": "80"
671 | }
672 | },
673 | "Metadata": {
674 | "AWS::CloudFormation::Designer": {
675 | "id": "17c905aa-4073-44c3-97d5-b1ccd010fce2"
676 | }
677 | }
678 | },
679 | "InboundHTTPSPublicNetworkAclEntry": {
680 | "Type": "AWS::EC2::NetworkAclEntry",
681 | "Properties": {
682 | "NetworkAclId": {
683 | "Ref": "PublicNetworkAcl"
684 | },
685 | "RuleNumber": "101",
686 | "Protocol": "6",
687 | "RuleAction": "allow",
688 | "Egress": "false",
689 | "CidrBlock": "0.0.0.0/0",
690 | "PortRange": {
691 | "From": "443",
692 | "To": "443"
693 | }
694 | },
695 | "Metadata": {
696 | "AWS::CloudFormation::Designer": {
697 | "id": "35f2c538-7640-48f7-84aa-6927f323f9cf"
698 | }
699 | }
700 | },
701 | "InboundSSHPublicNetworkAclEntry": {
702 | "Type": "AWS::EC2::NetworkAclEntry",
703 | "Properties": {
704 | "NetworkAclId": {
705 | "Ref": "PublicNetworkAcl"
706 | },
707 | "RuleNumber": "102",
708 | "Protocol": "6",
709 | "RuleAction": "allow",
710 | "Egress": "false",
711 | "CidrBlock": {
712 | "Ref": "SSHLocation"
713 | },
714 | "PortRange": {
715 | "From": "22",
716 | "To": "22"
717 | }
718 | },
719 | "Metadata": {
720 | "AWS::CloudFormation::Designer": {
721 | "id": "6e66b590-be39-480f-988e-84ce2bcfa993"
722 | }
723 | }
724 | },
725 | "InboundEphemeralPublicNetworkAclEntry": {
726 | "Type": "AWS::EC2::NetworkAclEntry",
727 | "Properties": {
728 | "NetworkAclId": {
729 | "Ref": "PublicNetworkAcl"
730 | },
731 | "RuleNumber": "103",
732 | "Protocol": "6",
733 | "RuleAction": "allow",
734 | "Egress": "false",
735 | "CidrBlock": "0.0.0.0/0",
736 | "PortRange": {
737 | "From": "1024",
738 | "To": "65535"
739 | }
740 | },
741 | "Metadata": {
742 | "AWS::CloudFormation::Designer": {
743 | "id": "bf45e14e-4cc1-46d0-bab9-8dc2fa5c678e"
744 | }
745 | }
746 | },
747 | "OutboundPublicNetworkAclEntry": {
748 | "Type": "AWS::EC2::NetworkAclEntry",
749 | "Properties": {
750 | "NetworkAclId": {
751 | "Ref": "PublicNetworkAcl"
752 | },
753 | "RuleNumber": "100",
754 | "Protocol": "6",
755 | "RuleAction": "allow",
756 | "Egress": "true",
757 | "CidrBlock": "0.0.0.0/0",
758 | "PortRange": {
759 | "From": "0",
760 | "To": "65535"
761 | }
762 | },
763 | "Metadata": {
764 | "AWS::CloudFormation::Designer": {
765 | "id": "19573bf0-8f10-4fe6-a443-6ee35b60a98c"
766 | }
767 | }
768 | },
769 | "PublicSubnetNetworkAclAssociation": {
770 | "Type": "AWS::EC2::SubnetNetworkAclAssociation",
771 | "Properties": {
772 | "SubnetId": {
773 | "Ref": "PublicSubnet"
774 | },
775 | "NetworkAclId": {
776 | "Ref": "PublicNetworkAcl"
777 | }
778 | },
779 | "Metadata": {
780 | "AWS::CloudFormation::Designer": {
781 | "id": "e9e0d41b-07e7-465c-95dc-de12ffb1f1aa"
782 | }
783 | }
784 | },
785 | "EC2Host": {
786 | "Type": "AWS::EC2::Instance",
787 | "DependsOn": "GatewayToInternet",
788 | "Properties": {
789 | "InstanceType": {
790 | "Ref": "EC2InstanceType"
791 | },
792 | "KeyName": {
793 | "Ref": "KeyName"
794 | },
795 | "ImageId": {
796 | "Fn::FindInMap": [
797 | "AWSRegionArch2AMI",
798 | {
799 | "Ref": "AWS::Region"
800 | },
801 | {
802 | "Fn::FindInMap": [
803 | "AWSInstanceType2Arch",
804 | {
805 | "Ref": "EC2InstanceType"
806 | },
807 | "Arch"
808 | ]
809 | }
810 | ]
811 | },
812 | "NetworkInterfaces": [
813 | {
814 | "GroupSet": [
815 | {
816 | "Ref": "EC2SecurityGroup"
817 | }
818 | ],
819 | "AssociatePublicIpAddress": "true",
820 | "DeviceIndex": "0",
821 | "DeleteOnTermination": "true",
822 | "SubnetId": {
823 | "Ref": "PublicSubnet"
824 | }
825 | }
826 | ]
827 | },
828 | "Metadata": {
829 | "AWS::CloudFormation::Designer": {
830 | "id": "e4d17af6-fe25-4470-a26d-bf34f3dfd621"
831 | }
832 | }
833 | },
834 | "EC2SecurityGroup": {
835 | "Type": "AWS::EC2::SecurityGroup",
836 | "Properties": {
837 | "GroupDescription": "Enable access to the EC2 host",
838 | "VpcId": {
839 | "Ref": "VPC"
840 | },
841 | "SecurityGroupIngress": [
842 | {
843 | "IpProtocol": "tcp",
844 | "FromPort": "22",
845 | "ToPort": "22",
846 | "CidrIp": {
847 | "Ref": "SSHLocation"
848 | }
849 | }
850 | ]
851 | },
852 | "Metadata": {
853 | "AWS::CloudFormation::Designer": {
854 | "id": "d4611cc9-f137-4942-9e2a-d49a52f0195a"
855 | }
856 | }
857 | }
858 | },
859 | "Outputs": {
860 | "VPCId": {
861 | "Description": "VPCId of the newly created VPC",
862 | "Value": {
863 | "Ref": "VPC"
864 | }
865 | },
866 | "PublicSubnet": {
867 | "Description": "SubnetId of the public subnet",
868 | "Value": {
869 | "Ref": "PublicSubnet"
870 | }
871 | },
872 | "DNSName": {
873 | "Description": "DNS Name of the EC2 host",
874 | "Value": {
875 | "Fn::GetAtt": [
876 | "EC2Host",
877 | "PublicDnsName"
878 | ]
879 | }
880 | }
881 | },
882 | "Metadata": {
883 | "AWS::CloudFormation::Designer": {
884 | "1e4580ef-6e51-4daf-9f9b-778096368de7": {
885 | "size": {
886 | "width": 60,
887 | "height": 60
888 | },
889 | "position": {
890 | "x": 900,
891 | "y": 90
892 | },
893 | "z": 1,
894 | "embeds": []
895 | },
896 | "a01f146b-dd51-4935-94cb-7d46278bccba": {
897 | "size": {
898 | "width": 780,
899 | "height": 780
900 | },
901 | "position": {
902 | "x": 60,
903 | "y": 90
904 | },
905 | "z": 1,
906 | "embeds": [
907 | "d4611cc9-f137-4942-9e2a-d49a52f0195a",
908 | "d41ab43e-f7ca-46e9-86e2-2520558295c8",
909 | "e9acd514-bdcc-4d89-944c-6c9585c0b215",
910 | "237a98ca-bae9-44d0-8c0e-078801a04cef"
911 | ]
912 | },
913 | "d4611cc9-f137-4942-9e2a-d49a52f0195a": {
914 | "size": {
915 | "width": 60,
916 | "height": 60
917 | },
918 | "position": {
919 | "x": 570,
920 | "y": 150
921 | },
922 | "z": 2,
923 | "parent": "a01f146b-dd51-4935-94cb-7d46278bccba",
924 | "embeds": []
925 | },
926 | "d41ab43e-f7ca-46e9-86e2-2520558295c8": {
927 | "size": {
928 | "width": 420,
929 | "height": 330
930 | },
931 | "position": {
932 | "x": 90,
933 | "y": 150
934 | },
935 | "z": 2,
936 | "parent": "a01f146b-dd51-4935-94cb-7d46278bccba",
937 | "embeds": [
938 | "19573bf0-8f10-4fe6-a443-6ee35b60a98c",
939 | "bf45e14e-4cc1-46d0-bab9-8dc2fa5c678e",
940 | "6e66b590-be39-480f-988e-84ce2bcfa993",
941 | "35f2c538-7640-48f7-84aa-6927f323f9cf",
942 | "17c905aa-4073-44c3-97d5-b1ccd010fce2"
943 | ]
944 | },
945 | "19573bf0-8f10-4fe6-a443-6ee35b60a98c": {
946 | "size": {
947 | "width": 60,
948 | "height": 60
949 | },
950 | "position": {
951 | "x": 120,
952 | "y": 210
953 | },
954 | "z": 3,
955 | "parent": "d41ab43e-f7ca-46e9-86e2-2520558295c8",
956 | "embeds": []
957 | },
958 | "bf45e14e-4cc1-46d0-bab9-8dc2fa5c678e": {
959 | "size": {
960 | "width": 60,
961 | "height": 60
962 | },
963 | "position": {
964 | "x": 240,
965 | "y": 210
966 | },
967 | "z": 3,
968 | "parent": "d41ab43e-f7ca-46e9-86e2-2520558295c8",
969 | "embeds": []
970 | },
971 | "6e66b590-be39-480f-988e-84ce2bcfa993": {
972 | "size": {
973 | "width": 60,
974 | "height": 60
975 | },
976 | "position": {
977 | "x": 120,
978 | "y": 330
979 | },
980 | "z": 3,
981 | "parent": "d41ab43e-f7ca-46e9-86e2-2520558295c8",
982 | "embeds": []
983 | },
984 | "35f2c538-7640-48f7-84aa-6927f323f9cf": {
985 | "size": {
986 | "width": 60,
987 | "height": 60
988 | },
989 | "position": {
990 | "x": 240,
991 | "y": 330
992 | },
993 | "z": 3,
994 | "parent": "d41ab43e-f7ca-46e9-86e2-2520558295c8",
995 | "embeds": []
996 | },
997 | "17c905aa-4073-44c3-97d5-b1ccd010fce2": {
998 | "size": {
999 | "width": 60,
1000 | "height": 60
1001 | },
1002 | "position": {
1003 | "x": 360,
1004 | "y": 210
1005 | },
1006 | "z": 3,
1007 | "parent": "d41ab43e-f7ca-46e9-86e2-2520558295c8",
1008 | "embeds": []
1009 | },
1010 | "e9acd514-bdcc-4d89-944c-6c9585c0b215": {
1011 | "size": {
1012 | "width": 240,
1013 | "height": 240
1014 | },
1015 | "position": {
1016 | "x": 390,
1017 | "y": 540
1018 | },
1019 | "z": 2,
1020 | "parent": "a01f146b-dd51-4935-94cb-7d46278bccba",
1021 | "embeds": [
1022 | "e940bae2-493f-4016-9fb4-c1b62e1a348d"
1023 | ]
1024 | },
1025 | "650759d1-5122-4fd9-8e1f-93bda58244bc": {
1026 | "source": {
1027 | "id": "1e4580ef-6e51-4daf-9f9b-778096368de7"
1028 | },
1029 | "target": {
1030 | "id": "a01f146b-dd51-4935-94cb-7d46278bccba"
1031 | }
1032 | },
1033 | "e940bae2-493f-4016-9fb4-c1b62e1a348d": {
1034 | "size": {
1035 | "width": 60,
1036 | "height": 60
1037 | },
1038 | "position": {
1039 | "x": 420,
1040 | "y": 600
1041 | },
1042 | "z": 3,
1043 | "parent": "e9acd514-bdcc-4d89-944c-6c9585c0b215",
1044 | "embeds": [],
1045 | "references": [
1046 | "1e4580ef-6e51-4daf-9f9b-778096368de7"
1047 | ],
1048 | "dependson": [
1049 | "650759d1-5122-4fd9-8e1f-93bda58244bc"
1050 | ]
1051 | },
1052 | "237a98ca-bae9-44d0-8c0e-078801a04cef": {
1053 | "size": {
1054 | "width": 240,
1055 | "height": 240
1056 | },
1057 | "position": {
1058 | "x": 90,
1059 | "y": 540
1060 | },
1061 | "z": 2,
1062 | "parent": "a01f146b-dd51-4935-94cb-7d46278bccba",
1063 | "embeds": [
1064 | "e4d17af6-fe25-4470-a26d-bf34f3dfd621"
1065 | ]
1066 | },
1067 | "e4d17af6-fe25-4470-a26d-bf34f3dfd621": {
1068 | "size": {
1069 | "width": 60,
1070 | "height": 60
1071 | },
1072 | "position": {
1073 | "x": 120,
1074 | "y": 600
1075 | },
1076 | "z": 3,
1077 | "parent": "237a98ca-bae9-44d0-8c0e-078801a04cef",
1078 | "embeds": [],
1079 | "dependson": [
1080 | "650759d1-5122-4fd9-8e1f-93bda58244bc"
1081 | ],
1082 | "isrelatedto": [
1083 | "d4611cc9-f137-4942-9e2a-d49a52f0195a"
1084 | ]
1085 | },
1086 | "e9e0d41b-07e7-465c-95dc-de12ffb1f1aa": {
1087 | "source": {
1088 | "id": "d41ab43e-f7ca-46e9-86e2-2520558295c8"
1089 | },
1090 | "target": {
1091 | "id": "237a98ca-bae9-44d0-8c0e-078801a04cef"
1092 | }
1093 | },
1094 | "5135621c-2561-4efd-ba18-e9419832ef2c": {
1095 | "source": {
1096 | "id": "e9acd514-bdcc-4d89-944c-6c9585c0b215"
1097 | },
1098 | "target": {
1099 | "id": "237a98ca-bae9-44d0-8c0e-078801a04cef"
1100 | }
1101 | }
1102 | }
1103 | }
1104 | }
1105 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "p42",
3 | "version": "1.2.0",
4 | "description": "CLI wrapper for using Docker with Swarm.",
5 | "files": [
6 | "lib",
7 | "share"
8 | ],
9 | "bin": {
10 | "p42": "bin/p42"
11 | },
12 | "scripts": {
13 | "test": "coffee test/index.coffee",
14 | "prepublish": "coffee -o lib/ -c src/*.*coffee",
15 | "postpublish": "(node_modules/.bin/json -f package.json version | xargs -I version git tag -am version version) && git push --tags"
16 | },
17 | "repository": {
18 | "type": "git",
19 | "url": "git+https://github.com/pandastrike/p42.git"
20 | },
21 | "keywords": [
22 | "Docker",
23 | "Swarm",
24 | "CLI"
25 | ],
26 | "author": "Dan Yoder",
27 | "license": "ISC",
28 | "bugs": {
29 | "url": "https://github.com/pandastrike/p42/issues"
30 | },
31 | "homepage": "https://github.com/pandastrike/p42#readme",
32 | "devDependencies": {
33 | "amen": "^1.0.0-alpha-06",
34 | "json": "^9.0.3"
35 | },
36 | "dependencies": {
37 | "bartlett": "^1.0.0-alpha-02",
38 | "fairmont": "^1.0.x",
39 | "handlebars": "^4.0.5",
40 | "js-yaml": "^3.5.3",
41 | "panda-messages": "0.0.1",
42 | "panda-rw": "^1.0.0-beta-02",
43 | "prompt": "^1.0.0",
44 | "rimraf": "^2.5.2",
45 | "sprintf": "^0.1.5",
46 | "swag": "^0.7.0",
47 | "when": "^3.7.7"
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/share/aws/cf/vpc.yaml:
--------------------------------------------------------------------------------
1 | AWSTemplateFormatVersion: '2010-09-09'
2 | Description: AWS CloudFormation Template for use with p42.
3 | Parameters:
4 | SSHLocation:
5 | Description: Lockdown SSH access to the bastion host (default can be accessed from anywhere)
6 | Type: String
7 | MinLength: '9'
8 | MaxLength: '18'
9 | Default: 0.0.0.0/0
10 | AllowedPattern: "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})"
11 | ConstraintDescription: must be a valid CIDR range of the form x.x.x.x/x.
12 | Mappings:
13 | SubnetConfig:
14 | VPC:
15 | CIDR: 10.0.0.0/16
16 | Public:
17 | CIDR: 10.0.0.0/24
18 | Resources:
19 | VPC:
20 | Type: 'AWS::EC2::VPC'
21 | Properties:
22 | EnableDnsSupport: 'true'
23 | EnableDnsHostnames: 'true'
24 | CidrBlock:
25 | 'Fn::FindInMap':
26 | - SubnetConfig
27 | - VPC
28 | - CIDR
29 | Tags:
30 | - Key: Application
31 | Value:
32 | Ref: 'AWS::StackName'
33 | - Key: Network
34 | Value: Public
35 | PublicSubnet:
36 | Type: 'AWS::EC2::Subnet'
37 | Properties:
38 | VpcId:
39 | Ref: VPC
40 | CidrBlock:
41 | 'Fn::FindInMap':
42 | - SubnetConfig
43 | - Public
44 | - CIDR
45 | Tags:
46 | - Key: Application
47 | Value:
48 | Ref: 'AWS::StackName'
49 | - Key: Network
50 | Value: Public
51 | InternetGateway:
52 | Type: 'AWS::EC2::InternetGateway'
53 | Properties:
54 | Tags:
55 | - Key: Application
56 | Value:
57 | Ref: 'AWS::StackName'
58 | - Key: Network
59 | Value: Public
60 | GatewayToInternet:
61 | Type: 'AWS::EC2::VPCGatewayAttachment'
62 | Properties:
63 | VpcId:
64 | Ref: VPC
65 | InternetGatewayId:
66 | Ref: InternetGateway
67 | PublicRouteTable:
68 | Type: 'AWS::EC2::RouteTable'
69 | Properties:
70 | VpcId:
71 | Ref: VPC
72 | Tags:
73 | - Key: Application
74 | Value:
75 | Ref: 'AWS::StackName'
76 | - Key: Network
77 | Value: Public
78 | PublicRoute:
79 | Type: 'AWS::EC2::Route'
80 | DependsOn: GatewayToInternet
81 | Properties:
82 | RouteTableId:
83 | Ref: PublicRouteTable
84 | DestinationCidrBlock: 0.0.0.0/0
85 | GatewayId:
86 | Ref: InternetGateway
87 | PublicSubnetRouteTableAssociation:
88 | Type: 'AWS::EC2::SubnetRouteTableAssociation'
89 | Properties:
90 | SubnetId:
91 | Ref: PublicSubnet
92 | RouteTableId:
93 | Ref: PublicRouteTable
94 | PublicNetworkAcl:
95 | Type: 'AWS::EC2::NetworkAcl'
96 | Properties:
97 | VpcId:
98 | Ref: VPC
99 | Tags:
100 | - Key: Application
101 | Value:
102 | Ref: 'AWS::StackName'
103 | - Key: Network
104 | Value: Public
105 | InboundHTTPPublicNetworkAclEntry:
106 | Type: 'AWS::EC2::NetworkAclEntry'
107 | Properties:
108 | NetworkAclId:
109 | Ref: PublicNetworkAcl
110 | RuleNumber: '100'
111 | Protocol: '6'
112 | RuleAction: allow
113 | Egress: 'false'
114 | CidrBlock: 0.0.0.0/0
115 | PortRange:
116 | From: '80'
117 | To: '80'
118 | InboundHTTPSPublicNetworkAclEntry:
119 | Type: 'AWS::EC2::NetworkAclEntry'
120 | Properties:
121 | NetworkAclId:
122 | Ref: PublicNetworkAcl
123 | RuleNumber: '101'
124 | Protocol: '6'
125 | RuleAction: allow
126 | Egress: 'false'
127 | CidrBlock: 0.0.0.0/0
128 | PortRange:
129 | From: '443'
130 | To: '443'
131 | InboundSSHPublicNetworkAclEntry:
132 | Type: 'AWS::EC2::NetworkAclEntry'
133 | Properties:
134 | NetworkAclId:
135 | Ref: PublicNetworkAcl
136 | RuleNumber: '102'
137 | Protocol: '6'
138 | RuleAction: allow
139 | Egress: 'false'
140 | CidrBlock:
141 | Ref: SSHLocation
142 | PortRange:
143 | From: '22'
144 | To: '22'
145 | InboundEphemeralPublicNetworkAclEntry:
146 | Type: 'AWS::EC2::NetworkAclEntry'
147 | Properties:
148 | NetworkAclId:
149 | Ref: PublicNetworkAcl
150 | RuleNumber: '103'
151 | Protocol: '6'
152 | RuleAction: allow
153 | Egress: 'false'
154 | CidrBlock: 0.0.0.0/0
155 | PortRange:
156 | From: '1024'
157 | To: '65535'
158 | OutboundPublicNetworkAclEntry:
159 | Type: 'AWS::EC2::NetworkAclEntry'
160 | Properties:
161 | NetworkAclId:
162 | Ref: PublicNetworkAcl
163 | RuleNumber: '100'
164 | Protocol: '6'
165 | RuleAction: allow
166 | Egress: 'true'
167 | CidrBlock: 0.0.0.0/0
168 | PortRange:
169 | From: '0'
170 | To: '65535'
171 | PublicSubnetNetworkAclAssociation:
172 | Type: 'AWS::EC2::SubnetNetworkAclAssociation'
173 | Properties:
174 | SubnetId:
175 | Ref: PublicSubnet
176 | NetworkAclId:
177 | Ref: PublicNetworkAcl
178 | PrivateDNS:
179 | Type: 'AWS::Route53::HostedZone'
180 | DependsOn: VPC
181 | Properties:
182 | Name: 'name.internal.'
183 | VPCs:
184 | - VPCId:
185 | Ref: VPC
186 | VPCRegion:
187 | Ref: 'AWS::Region'
188 |   DHCPOptions:
189 | Type: 'AWS::EC2::DHCPOptions'
190 | Properties:
191 | DomainName: 'name.internal'
192 | DomainNameServers:
193 | - AmazonProvidedDNS
194 | ELBSecurityGroup:
195 | Type: 'AWS::EC2::SecurityGroup'
196 | Properties:
197 | GroupDescription: 'Allow ELB to accept traffic from the public Web'
198 | VpcId:
199 | Ref: 'VPC'
200 | SecurityGroupIngress:
201 | - IpProtocol: 'tcp'
202 | FromPort: '80'
203 | ToPort: '80'
204 | CidrIp: '0.0.0.0/0'
205 | ELB:
206 | Type: 'AWS::ElasticLoadBalancing::LoadBalancer'
207 | DependsOn: GatewayToInternet
208 | Properties:
209 | LoadBalancerName:
210 | Ref: 'AWS::StackName'
211 | SecurityGroups:
212 | - Ref: 'ELBSecurityGroup'
213 | - 'Fn::GetAtt':
214 | - VPC
215 | - DefaultSecurityGroup
216 | Listeners:
217 | - LoadBalancerPort: 80
218 | Protocol: HTTP
219 | InstancePort: 80
220 | # Ignore HTTPS for now
221 | # - LoadBalancerPort: 443
222 | # Protocol: HTTPS
223 | # InstancePort: 80
224 | # InstanceProtocol: HTTP
225 | Subnets:
226 | - Ref: 'PublicSubnet'
227 | HealthCheck:
228 | HealthyThreshold: 2 # checks
229 | Interval: 30 # seconds
230 | Target: "TCP:80"
231 | Timeout: 5
232 | UnhealthyThreshold: 2
233 | Outputs:
234 | VPCId:
235 | Description: VPCId of the newly created VPC
236 | Value:
237 | Ref: VPC
238 | PublicSubnet:
239 | Description: SubnetId of the public subnet
240 | Value:
241 | Ref: PublicSubnet
242 | AvailabilityZone:
243 | Description: Availability Zone of the public subnet
244 | Value:
245 | 'Fn::GetAtt':
246 | - PublicSubnet
247 | - AvailabilityZone
248 | HostedZoneId:
249 | Description: HostedZoneId of the private HostedZone
250 | Value:
251 | Ref: PrivateDNS
252 | DefaultSecurityGroupId:
253 | Description: "Default security group ID for the VPC"
254 | Value:
255 | 'Fn::GetAtt':
256 | - VPC
257 | - DefaultSecurityGroup
258 |
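259 | # Note: the describe-stacks entry in share/commands.yaml reads stack outputs
260 | # positionally (Stacks.0.Outputs.0..3 -> vpcId, subnetId, az, zoneId), which
261 | # lines up with the order of the Outputs block above.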
--------------------------------------------------------------------------------
/share/aws/dns/a.yaml:
--------------------------------------------------------------------------------
1 | Comment: "{{comment}}"
2 | Changes:
3 | - Action: UPSERT
4 | ResourceRecordSet:
5 | Name: {{name}}.name.internal.
6 | Type: A
7 | TTL: 60
8 | ResourceRecords:
9 | - Value: '{{ip}}'
10 |
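11 | # Upserts an internal A record: {{name}}.name.internal -> {{ip}}. Change
12 | # batches like this are presumably fed to the route53
13 | # change-resource-record-sets template in share/commands.yaml.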
--------------------------------------------------------------------------------
/share/aws/dns/alias.yaml:
--------------------------------------------------------------------------------
1 | Comment: "{{comment}}"
2 | Changes:
3 | - Action: UPSERT
4 | ResourceRecordSet:
5 | Name: '{{domain}}.'
6 | Type: A
7 | AliasTarget:
8 | HostedZoneId: '{{zoneId}}'
9 | DNSName: '{{name}}'
10 | EvaluateTargetHealth: false
11 |
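12 | # Upserts a Route 53 alias pointing {{domain}} at a load balancer. The
13 | # zoneId and DNS name presumably come from the elb describe-load-balancers
14 | # template in share/commands.yaml.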
--------------------------------------------------------------------------------
/share/aws/dns/srv.yaml:
--------------------------------------------------------------------------------
1 | Comment: "{{comment}}"
2 | Changes:
3 | - Action: UPSERT
4 | ResourceRecordSet:
5 | Name: _{{subdomain}}._{{protocol}}.name.internal.
6 | Type: SRV
7 | TTL: 60
8 | ResourceRecords:
9 | {{#targets}}
10 | - Value: '0 0 {{port}} {{host}}.name.internal'
11 | {{/targets}}
12 |
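13 | # Illustration (not in the source): with targets = [{port: 8080,
14 | # host: some-node}], the loop above renders
15 | #   - Value: '0 0 8080 some-node.name.internal'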
--------------------------------------------------------------------------------
/share/aws/ecr/policy.yaml:
--------------------------------------------------------------------------------
1 | Version: '2008-10-17'
2 | Statement:
3 | - Sid: 'Allow Any/All'
4 | Effect: Allow
5 | Principal: '*'
6 | Action:
7 | - 'ecr:*'
8 |
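9 | # Grants every principal every ECR action on the repository. Presumably
10 | # applied via the ecr set-repository-policy template in share/commands.yaml
11 | # (--policy-text).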
--------------------------------------------------------------------------------
/share/commands.yaml:
--------------------------------------------------------------------------------
1 | aws:
2 |
3 | cloudformation:
4 |
5 | create-stack:
6 | template: |
7 | aws cloudformation create-stack
8 | --stack-name {{stack}}
9 | --template-body file://{{file}}
10 |
11 | describe-stacks:
12 |
13 | template: |
14 | aws cloudformation describe-stacks --stack-name {{stack}}
15 |
16 | processor: json
17 |
18 | attributes:
19 | - name: status
20 | accessor: Stacks.0.StackStatus
21 | - name: vpcId
22 | accessor: Stacks.0.Outputs.0.OutputValue
23 | - name: subnetId
24 | accessor: Stacks.0.Outputs.1.OutputValue
25 | - name: az
26 | accessor: Stacks.0.Outputs.2.OutputValue
27 | - name: zoneId
28 | accessor: Stacks.0.Outputs.3.OutputValue
29 |
30 | test:
31 | status: CREATE_COMPLETE
32 | vpcId: test-vpc-00
33 | subnetId: test-subnet-00
34 | az: us-west-1a
35 | zoneId: test-zone-00
36 |
37 | delete-stack:
38 |
39 | template: |
40 | aws cloudformation delete-stack --stack-name {{stack}}
41 |
42 | elb:
43 |
44 | describe-load-balancers:
45 |
46 | template: |
47 | aws elb describe-load-balancers
48 | --load-balancer-name {{cluster}}
49 |
50 | processor: json
51 |
52 | attributes:
53 | - name: zoneId
54 | accessor: LoadBalancerDescriptions.0.CanonicalHostedZoneNameID
55 | - name: domain
56 | accessor: LoadBalancerDescriptions.0.CanonicalHostedZoneName
57 |
58 | test:
59 | zoneId: test-zone-00
60 | domain: '123.elb.test.com'
61 |
62 | register-instances-with-load-balancer:
63 |
64 | template: |
65 | aws elb register-instances-with-load-balancer
66 | --load-balancer-name {{cluster}}
67 | --instances {{instanceId}}
68 |
69 | ecr:
70 |
71 | create-repository:
72 |
73 | template: |
74 | aws ecr create-repository
75 | --repository-name {{repository}}
76 | --region us-east-1
77 |
78 | set-repository-policy:
79 |
80 | template: |
81 | aws ecr set-repository-policy
82 | --repository-name {{repository}}
83 | --region us-east-1
84 | --policy-text {{policy}}
85 |
86 |
87 | describe-repositories:
88 |
89 | template: |
90 | aws ecr describe-repositories
91 | --repository-name {{repository}}
92 | --region us-east-1
93 |
94 | processor: json
95 |
96 | attributes:
97 | - name: registryId
98 | accessor: repositories.0.registryId
99 |
100 | test:
101 | repositoryId: test-repo-00
102 |
103 |   # actually processed to get the registry URL
104 | get-authorization-token:
105 | template: |
106 | aws ecr get-authorization-token
107 | --region us-east-1
108 |
109 | processor: json
110 |
111 | attributes:
112 | - name: url
113 | accessor: authorizationData.0.proxyEndpoint
114 |
115 | test:
116 | url: 'https://123.registry.test.com'
117 |
118 | ec2:
119 |
120 | describe-instances:
121 |
122 | template: |
123 | aws ec2 describe-instances
124 | --filters Name=tag-value,Values={{instance}}
125 |
126 | processor: json
127 |
128 | attributes:
129 | - name: instanceId
130 | accessor: Reservations.0.Instances.0.InstanceId
131 | - name: ip
132 | accessor: Reservations.0.Instances.0.PrivateIpAddress
133 |
134 | test:
135 | instanceId: test-instance-00
136 | ip: '192.168.0.42'
137 |
138 | describe-security-groups:
139 |
140 | template: |
141 | aws ec2 describe-security-groups
142 | --filters
143 | Name=vpc-id,Values={{vpcId}}
144 | Name=group-name,Values={{group}}
145 |
146 | processor: json
147 |
148 | attributes:
149 | - name: groupId
150 | accessor: SecurityGroups.0.GroupId
151 |
152 | test:
153 | groupId: test-group-00
154 |
155 | modify-instance-attribute:
156 |
157 | template: |
158 | aws ec2 modify-instance-attribute
159 | --instance-id {{instanceId}}
160 | --groups {{join " " groupIds}}
161 |
162 | route53:
163 |
164 | list-hosted-zones-by-name:
165 |
166 | template: |
167 | aws route53 list-hosted-zones-by-name
168 | --dns-name {{domain}}
169 | --max-items 1
170 |
171 | processor: json
172 |
173 | attributes:
174 | - name: zoneId
175 | accessor: HostedZones.0.Id
176 |
177 | test:
178 | zoneId: test-zone-00
179 |
180 | change-resource-record-sets:
181 |
182 | template: |
183 | aws route53 change-resource-record-sets
184 | --hosted-zone-id {{zoneId}}
185 | --change-batch file://{{file}}
186 |
187 | docker:
188 |
189 | login:
190 |
191 | template: |
192 | eval $(aws ecr get-login --region us-east-1)
193 |
194 | machine:
195 |
196 | env:
197 |
198 | template: |
199 | eval $(docker-machine env {{name}})
200 |
201 | create:
202 |
203 | template: |
204 | docker-machine create {{name}}
205 | --driver amazonec2
206 | --amazonec2-region {{region}}
207 | --amazonec2-vpc-id {{vpcId}}
208 | --amazonec2-subnet-id {{subnetId}}
209 | --amazonec2-zone {{zone}}
210 |
211 | ls:
212 |
213 | template: >-
214 | docker-machine ls
215 | --format '\{{ .Name }}'
216 | --filter name={{ name }}
217 |
218 | processor: line
219 |
220 | test:
221 | - violent-aftermath-00
222 | - violent-aftermath-01
223 | - violent-aftermath-02
224 |
225 |
226 | stop:
227 | template: |
228 | docker-machine stop {{join " " nodes}}
229 |
230 | rm:
231 | template: |
232 | docker-machine rm {{join " " nodes}}
233 |
234 | swarm:
235 |
236 | env:
237 |
238 | template: |
239 | eval $(docker-machine env --swarm {{name}})
240 |
241 | create:
242 |
243 | template: |
244 | docker-machine create {{name}}
245 | --driver amazonec2
246 | --amazonec2-region {{region}}
247 | --amazonec2-vpc-id {{vpcId}}
248 | --amazonec2-subnet-id {{subnetId}}
249 | --amazonec2-zone {{zone}}
250 | --swarm
251 | --swarm-discovery nodes://10.0.[0:255].[0:255]:2375
252 | {{#master}}--swarm-master{{/master}}
253 |
254 | build:
255 |
256 | template: |
257 | docker build
258 | -t {{tag}}
259 | -f {{file}}
260 | .
261 |
262 | push:
263 |
264 | template: |
265 | docker push {{tag}}
266 |
267 | pull:
268 |
269 | template: |
270 | docker pull {{tag}}
271 |
272 | run:
273 |
274 |     # TODO: maybe parameterize keys/region too?
275 | # Doesn't seem like high-value coverage.
276 | template: |
277 | docker run
278 | {{options}}
279 | --name {{name}}
280 | --restart always
281 |       -e AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)"
282 | -e AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)"
283 | -e AWS_DEFAULT_REGION="$(aws configure get region)"
284 | -d {{tag}}
285 |
286 | inspect:
287 |
288 | template: |
289 | docker inspect {{name}}
290 |
291 | processor: json
292 |
293 | attributes:
294 |
295 | - name: name
296 | accessor: '.0.Node.Name'
297 | - name: ip
298 |       accessor: '.0.Node.IP'
299 | - name: port
300 | accessor: '.0.NetworkSettings.Ports["80/tcp"].0.HostPort'
301 |
302 | test:
303 | name: violent-aftermath-01
304 | ip: 192.168.0.42
305 | port: 32768
306 |
307 | ps:
308 |
309 | template: |
310 | docker ps --filter name={{cluster}} --format \{{ .ID }}
311 |
312 |
313 |
314 | #
315 | # find_available_name() {
316 | # local cluster="${1}"
317 | # local candidates=$(printf '%s\n' $(echo ${cluster}-{0..9}{0..9}))
318 | # local taken=$(list-nodes ${cluster})
319 | # # return the first element from candidates list that
320 | # # isn't in the taken list...
321 | # comm -23 <(echo "${candidates}") <(echo "${taken}") | head -n 1
322 | # }
323 | #
324 | # list_swarm_nodes() {
325 | # docker-machine ls \
326 | # --format '{{ .Name }}' \
327 | # --filter "name=${1}"
328 | # }
329 | #
330 | # remove_swarm_nodes() {
331 | # local cluster="${1}"
332 | # echo "Removing Swarm nodes for cluster <${cluster}>"
333 | # local machines=$(list_swarm_nodes ${cluster})
334 | # docker-machine stop $machines
335 | # docker-machine rm $machines
336 | # }
337 |
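338 | # Schema note: each leaf entry defines a Handlebars `template` for a shell
339 | # command. An optional `processor` (json or line) parses the command's
340 | # output, `attributes` map friendly names onto accessor paths into that
341 | # output, and `test` supplies canned values, apparently substituted for real
342 | # command output when running the test suite.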
--------------------------------------------------------------------------------
/share/interviews/init.yaml:
--------------------------------------------------------------------------------
1 | - name: name
2 | description: Application name
3 | type: string
4 | required: true
5 |
6 | - name: domain
7 | description: Domain name
8 | type: string
9 |   required: true
10 |
11 | - name: registry
12 | description: Private registry domain
13 | type: string
14 |   required: true
15 |
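16 | # These prompts drive `p42 init`. The field names appear to follow the
17 | # schema of the `prompt` npm package (hence `required`).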
--------------------------------------------------------------------------------
/share/messages.yaml:
--------------------------------------------------------------------------------
1 | unexpected-error: >-
2 | p42: unexpected error: {{message}}
3 |
4 | bad-command: |
5 | p42: '{{name}}' is not a valid command. Run `p42 help` for help.
6 |
7 | bad-subcommand: |
8 | p42: '{{name}}' is not a valid subcommand.
9 |
10 |   Run `p42 help <command>` for help on a specific command.
11 |
12 | not-implemented: "p42: Sorry, but this feature is not yet implemented."
13 |
14 | bad-option: |
15 | p42: '{{name}}' is not a valid option.
16 |
17 | help: |
18 |
19 |   usage: p42 <command>
20 |
21 | Run Docker containers in an AWS VPC.
22 |
23 | Available commands are:
24 |
25 | {{join ", " (pluck "name" (filter "key" "command" (values .)))}}
26 |
27 | Options valid for all commands:
28 |
29 | {{#filter "type" "switch" (values .)}}
30 | {{sprintf "%16s " (join ", " flags) ~}}
31 | {{hang 20 80 help}}
32 |
33 | {{/filter~}}
34 |
35 |   Run `p42 help <command>` for help on a specific command.
36 |
37 | cluster:
38 |
39 | # help: |
40 | #
41 | # usage: p42 cluster
42 | #
43 | # Create, manage, or remove clusters.
44 | #
45 | # Available subcommands are:
46 | #
47 | # {{options|values|filter>key>subcommand|pluck>name|join>, }}
48 | #
49 |   # Run 'p42 cluster help <subcommand>' for command-specific help.
50 |
51 | not-found: |
52 | p42: can't find cluster [{{name}}].
53 |
54 | create:
55 |
56 | starting: Creating cluster [{{name}}] ...
57 |
58 | complete: Cluster [{{name}}] created.
59 |
60 | help: |
61 | Usage: p42 cluster create
62 | Create a VPC and a master Swarm node.
63 |
64 | expand:
65 |
66 | starting: Expanding cluster [{{name}}] by {{count}} node(s) ...
67 |
68 | complete: Cluster [{{name}}] expanded by {{count}} node(s).
69 |
70 | rm:
71 |
72 | starting: Removing cluster [{{name}}] ...
73 |
74 | complete: Cluster [{{name}}] removed.
75 |
76 | build:
77 |
78 | starting: >-
79 | Building mixins
80 | {{#mixins~}} [{{.}}] {{/mixins~}}
81 | ...
82 |
83 | complete: >-
84 | Application mixins
85 | {{#mixins~}} [{{.}}] {{/mixins~}}
86 | built.
87 |
88 | init:
89 |
90 | help: p42 init
91 |
92 | determining-registry: >-
93 | Determining default registry domain ...
94 |
95 | application:
96 |
97 | no-configuration: >-
98 | p42: missing application configuration. Try running 'p42 init'.
99 |
100 | nothing-to-run: Missing run directory.
101 |
102 | bad-mixin: |
103 | [{{name}}] is not a valid mixin.
104 |
105 | no-target: >-
106 | p42: no target for this branch.
107 |     Try: [ p42 target add <branch> <cluster> ].
108 |
109 | start:
110 |
111 | starting: >-
112 | Starting [{{mixin}}] ...
113 |
114 | complete: >-
115 | [{{mixin}}] started.
116 |
117 | # Summary: Initialize an application for use with p42.
118 | # Help: Initialize an application for use with p42.
119 | # This interactive command will ask you a few questions
120 | # about your application and then write a corresponding
121 | # configuration file.
122 |
123 |
124 | # Usage: p42 mixin [add] [...]
125 | # Summary: Add/manage mixins for an application.
126 | # Help: To find out about a specific subcommand:
127 | #
128 | #   p42 mixin help <subcommand>
129 | #
130 |
131 | # Usage: p42 mixin add
132 | # Add a mixin based on a git repo path or URL.
133 | #
134 | # p42 mixin add www https://github.com/pandastrike/p42-mixin-nginx.git
135 | #
136 | # Mixins write to your application's launch directory. Mixins may define a
137 | # configuration based on interactive prompts.
138 | #
139 |
140 | # Usage: p42 name
141 | # Summary: Generate a name.
142 | # Help: Generate a name.
143 | # $ p42 name
144 | # yellow-ghost
145 |
146 | # Usage: p42 ps
147 | # Summary: List deployed containers and their IP/port.
148 | # Help: List deployed containers and their IP/port.
149 |
150 | # Usage: p42 run [<component>...]
151 | # Summary: Build images and start application containers.
152 | # Help: Build images and start application containers.
153 | # You can run the entire application or just a given set of components.
154 | #
155 | # p42 run
156 | # p42 run www redis
157 | #
158 |
159 | # Usage: p42 start [<component>...]
160 | # Summary: Start application components.
161 | # Help: Start application components.
162 | # You can start components for the entire application
163 | # or just a given set of components. Images for each
164 | # container must already exist.
165 | #
166 | # p42 start
167 | # p42 start www redis
168 | #
169 | # You can build images first using the build subcommand.
170 | # The run subcommand will build and start components for you.
171 | #
172 |
173 | # Usage: p42 stop [<component>...]
174 | # Summary: Stop application containers and remove them.
175 | # Help: Stop application containers and remove them.
176 | # You can stop an entire application or just a given set of components.
177 | #
178 | # p42 stop
179 | # p42 stop www redis
180 | #
181 | # Usage: p42 target [add|rm|remove|mv|rename]
182 | # Summary: Create/manage deploy targets.
183 | # Help: To find out about a specific subcommand:
184 | #
185 | #   p42 target help <subcommand>
186 | #
187 | #
188 | target:
189 |
190 | help: p42 target [add|rm|remove|mv|rename]
191 |
192 | errors:
193 |
194 | invalid-branch: invalid branch '{{branch}}'.
195 |
196 | invalid-cluster: invalid cluster '{{cluster}}'.
197 |
198 | invalid-config: invalid application configuration.
199 |
200 | error-writing-config: |
201 | unexpected error updating application configuration.
202 |
203 | target-add:
204 |
205 |     help: p42 target add <branch> <cluster>
206 |
207 | target-remove:
208 |
209 |     help: p42 target [remove|rm] <target>
210 |
211 | target-rename:
212 |
213 |     help: p42 target [rename|mv] <before> <after>
214 |
215 |
216 | cluster-add:
217 |
218 | help: |
219 |     Usage: p42 cluster add <cluster> [-n <count>]
220 | Add a node to the cluster.
221 |
222 | You can add multiple nodes using -n.
223 | To add 3 nodes to the cluster named "red-ghost":
224 |
225 | p42 cluster add red-ghost -n 3
226 |
227 | cluster-rm:
228 |
229 | help: |
230 |     Usage: p42 cluster rm <cluster>
231 | Stop a given cluster (Swarm) and remove the associated machines.
232 |
233 | cluster-ls:
234 |
235 | help: |
236 | Usage: p42 cluster ls
237 | List the available clusters.
238 |
239 | cluster-ps:
240 |
241 | help: |
242 |     Usage: p42 cluster ps <cluster>
243 | List all the nodes in a given cluster.
244 |
245 | cluster-env:
246 |
247 | help: |
248 |     Usage: p42 cluster env <cluster>
249 | Set up your environment to use Docker with your cluster.
250 |
251 | eval $(p42 cluster env red-ghost)
252 |
253 | cluster-get:
254 |
255 | help: |
256 |     Usage: p42 cluster get <cluster> <property>
257 | Get the value of a given property for the given cluster.
258 |
259 |     p42 cluster get red-ghost vpc
260 |
261 | dns:
262 |
263 | elb-alias: Adding alias for subdomain [{{subdomain}}]
264 |
265 |
266 | # don't delete this -- it's used to test messages interface
267 | fubar: this is a test {{name}}
268 |
--------------------------------------------------------------------------------
/share/mixins/coffee/interview.yaml:
--------------------------------------------------------------------------------
1 | - name: port
2 | description: Port
3 | type: string
4 |   required: true
5 |
6 | - name: count
7 | description: Number of containers to run
8 | type: number
9 |   required: true
10 |
11 | # TODO: array support is broken in prompt
12 | # - requires CTRL-C to end input!?
13 | # - can't handle array default...
14 | #
15 | # - name: discovery
16 | # description: Protocols
17 | # type: array
18 | # required: true
19 | #
20 | # - name: include
21 | # description: Directories to include
22 | # type: array
23 | # required: true
24 |
--------------------------------------------------------------------------------
/share/mixins/coffee/template/Dockerfile.tmpl:
--------------------------------------------------------------------------------
1 | FROM node:4.4
2 | RUN mkdir -p /usr/src/app
3 | WORKDIR /usr/src/app
4 |
5 | COPY package.json /usr/src/app/
6 |
7 | RUN npm install
8 | RUN npm install coffee-script -g
9 |
10 | {{#include}}
11 | COPY {{.}}/ /usr/src/app/{{.}}
12 | {{/include}}
13 |
14 | CMD [ "npm", "start" ]
15 |
16 | EXPOSE {{port}}
17 |
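18 | # With the defaults from template/config.yaml (port: 80, include: [src, bin]),
19 | # the {{#include}} block above expands to COPY lines for src/ and bin/ and
20 | # the image exposes port 80.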
--------------------------------------------------------------------------------
/share/mixins/coffee/template/config.yaml:
--------------------------------------------------------------------------------
1 | count: 2
2 | discovery:
3 | - http
4 | port: 80
5 | include:
6 | - src
7 | - bin
8 |
--------------------------------------------------------------------------------
/share/options.yaml:
--------------------------------------------------------------------------------
1 | help:
2 | type: word
3 | key: command
4 | default: help
5 |
6 | dryRun:
7 | type: switch
8 | flags:
9 | - dry-run
10 | help: >-
11 | Show the commands that would be run, but don't actually run them.
12 | default: false
13 |
14 | verbose:
15 | type: switch
16 | flags:
17 | - verbose
18 | - v
19 | help: >-
20 | Log debug statements to the console.
21 | default: false
22 |
23 | build:
24 | type: word
25 | key: command
26 | options:
27 | mixins: &mixins
28 | type: list
29 | subtype:
30 | type: parameter
31 | optional: true
32 |
33 | run:
34 | type: word
35 | key: command
36 | options:
37 | mixins: *mixins
38 |
39 | start:
40 | type: word
41 | key: command
42 | options:
43 | mixins: *mixins
44 |
45 | stop:
46 | type: word
47 | key: command
48 | options:
49 | mixins: *mixins
50 |
51 | init:
52 | type: word
53 | key: command
54 |
55 | target:
56 |
57 | type: word
58 | key: command
59 |
60 | options:
61 |
62 | add:
63 |
64 | type: word
65 | key: subcommand
66 |
67 | options:
68 |
69 | branch:
70 | type: parameter
71 |
72 | cluster:
73 | type: parameter
74 |
75 | remove: &target-remove
76 |
77 | type: word
78 | key: subcommand
79 | # fix the value so we can create rm alias
80 | value: remove
81 |
82 | options:
83 |
84 | target:
85 | type: parameter
86 |
87 | rm: *target-remove
88 |
89 | rename: &target-rename
90 |
91 | type: word
92 | key: subcommand
93 | # fix the value so we can create mv alias
94 | value: rename
95 |
96 | options:
97 |
98 | before:
99 | type: parameter
100 |
101 | after:
102 | type: parameter
103 |
104 | mv: *target-rename
105 |
106 |
107 | cluster:
108 |
109 | type: word
110 | key: command
111 |
112 | options:
113 |
114 | create:
115 | type: word
116 | key: subcommand
117 |
118 | expand:
119 | type: word
120 | key: subcommand
121 | options:
122 | cluster: &cluster
123 | type: parameter
124 | optional: true
125 | count:
126 | type: option
127 | flags:
128 | - count
129 | - n
130 | default: 1
131 |
132 | contract:
133 | type: word
134 | key: subcommand
135 |
136 | ls:
137 | type: word
138 | key: subcommand
139 |
140 | rm:
141 | type: word
142 | key: subcommand
143 | options:
144 | cluster: *cluster
145 |
146 | ps:
147 | type: word
148 | key: subcommand
149 | options:
150 | cluster: *cluster
151 |
152 | env:
153 | type: word
154 | key: subcommand
155 | options:
156 | cluster: *cluster
157 |
158 | get:
159 | type: word
160 | key: subcommand
161 | options:
162 | cluster: *cluster
163 | property:
164 | optional: true
165 | type: parameter
166 |
167 | mixin:
168 | type: word
169 | key: command
170 | options:
171 | add:
172 | type: word
173 | key: subcommand
174 | options:
175 | mixin:
176 | type: parameter
177 | name:
178 | type: parameter
179 | rm:
180 | type: word
181 | key: subcommand
182 | options:
183 | mixin:
184 | type: parameter
185 |
--------------------------------------------------------------------------------
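
Worth noting how this grammar behaves end to end: each `word` consumes a literal token and stores it under `key`, `parameter` consumes a bare argument, and flags may appear in any order (the `set` combinator in `src/options.coffee` below handles that). A minimal sketch of a parse, assuming dependencies are installed, a CoffeeScript-aware `require`, and the repository root as the working directory:

```coffee
{async, w} = require "fairmont"

do async ->
  Options = yield require "./src/options"
  # "cluster expand red-ghost -n 3" matches the cluster word, the
  # expand subcommand, the optional cluster parameter, and the -n flag
  console.log Options.parse w "cluster expand red-ghost -n 3"
  # => { command: 'cluster', subcommand: 'expand', cluster: 'red-ghost',
  #      count: '3' } plus the dryRun/verbose defaults
```
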
/share/words.yaml:
--------------------------------------------------------------------------------
1 | adjectives:
2 | - threatening
3 | - elegant
4 | - venomous
5 | - productive
6 | - halting
7 | - pastoral
8 | - tranquil
9 | - fearless
10 | - humdrum
11 | - mellow
12 | - splendid
13 | - broad
14 | - red
15 | - orange
16 | - yellow
17 | - green
18 | - blue
19 | - purple
20 | - violet
21 | - wise
22 | - sad
23 | - combative
24 | - dark
25 | - light
26 | - auspicious
27 | - outstanding
28 | - ornery
29 | - sage
30 | - excellent
31 | - energetic
32 | - super
33 | - happy
34 |
35 |
36 | nouns:
37 | - panda
38 | - apparel
39 | - atom
40 | - carpenter
41 | - jacket
42 | - surgeon
43 | - yacht
44 | - chef
45 | - ghost
46 | - night
47 | - shovel
48 | - mimosa
49 | - sled
50 | - vault
51 | - plains
52 | - sphere
53 | - cube
54 | - star
55 | - core
56 | - shield
57 | - sword
58 | - rhino
59 | - vacation
60 | - film
61 | - cat
62 | - dog
63 | - bear
64 | - vase
65 | - clock
66 | - table
67 | - bottle
68 | - car
69 | - nova
70 |
--------------------------------------------------------------------------------
/src/application.coffee:
--------------------------------------------------------------------------------
1 | {basename, join} = require "path"
2 | F = {async, include, isFile, isDirectory, glob,
3 | readdir, shell, empty} = require "fairmont"
4 | {read, write} = require "panda-rw"
5 | render = require "./template"
6 | raise = require "./raise"
7 |
8 | _exports = do async ->
9 |
10 | shared = yield require "./shared"
11 | Decorators = yield require "./decorators"
12 |
13 | Git =
14 |
15 | getBranch: async ->
16 | if shared.settings.dryRun
17 | "master"
18 | else
19 | (yield shell "git symbolic-ref --short -q HEAD")
20 | .stdout.trim()
21 |
22 |
23 | Application =
24 |
25 | create: (definition) -> Application.save definition
26 |
27 | load: async ->
28 |
29 | raise "application.no-configuration" if ! yield isFile "./p42.yaml"
30 | branch = yield Git.getBranch()
31 |
32 | raise "application.no-branch" if ! branch?
33 | {name, domain, registry, clusters} = yield read "./p42.yaml"
34 | clusters ?= {}
35 | cluster = clusters?[branch]
36 | {name, domain, registry, clusters, cluster}
37 |
38 | save: ({name, domain, registry, clusters}) ->
39 | clusters ?= {}
40 | write "./p42.yaml", {name, domain, registry, clusters}
41 |
42 | Mixins: Mixins =
43 |
44 | assert: async (name) ->
45 | if ! yield isFile "./run/#{name}/config.yaml"
46 | raise "application.bad-mixin"
47 |
48 | list: async ->
49 | raise "application.nothing-to-run" if ! yield isDirectory "./run"
50 | yield readdir "./run"
51 |
52 | load: async (name) ->
53 | path = join shared.run, name
54 | config = join path, "config.yaml"
55 | include (yield read config), {name, path}
56 |
57 | build: async ({mixins}) ->
58 | application = yield Application.load()
59 | for name in mixins
60 | yield Mixins.assert name
61 | mixin = yield Mixins.load name
62 | mixin.style ?= "docker"
63 | directory = join ".", "run", name
64 | for path in yield glob "**/*.tmpl", directory
65 | destination = join directory, basename path, ".tmpl"
66 | template = yield F.read path
67 | content = render template, mixin
68 | yield F.write destination, content
69 | yield Decorators[mixin.style]? application, mixin
70 |
71 | Targets:
72 |
73 | add: async ({branch, cluster}) ->
74 | application = yield Application.load()
75 | application.clusters[branch] = cluster
76 | Application.save application
77 |
78 | remove: async ({branch}) ->
79 | application = yield Application.load()
80 | delete application.clusters[branch]
81 | Application.save application
82 |
83 | rename: async ({before, after}) ->
84 | application = yield Application.load()
85 | application.clusters[after] = application.clusters[before]
86 | delete application.clusters[before]
87 | Application.save application
88 |
89 | module.exports = _exports
90 |
--------------------------------------------------------------------------------
/src/cli.coffee:
--------------------------------------------------------------------------------
1 | {async, include, isObject, isString, isArray, isDefined, isUndefined, isPromise, Method} = require "fairmont"
2 | {all} = require "when"
3 | {yaml} = require "./serialize"
4 |
5 | global.$P = -> console.log arguments...
6 | $P.p = (x = "hola")-> $P "----> #{x} <-----"
7 |
8 | module.exports = async (args) ->
9 |
10 | [
11 | shared
12 | Commands
13 | Options
14 | ] = yield all [
15 | require "./shared"
16 | require "./commands"
17 | require "./options"
18 | ]
19 |
20 | {bye, error, _error} = shared.loggers.status
21 | O = shared.loggers.output
22 |
23 | show = Method.create()
24 |
25 | Method.define show, isUndefined, ->
26 | Method.define show, isDefined, (x) -> show x.toString()
27 | Method.define show, isPromise, (p) -> p.then (x) -> show x
28 | Method.define show, isString, (s) -> O.info s
29 | Method.define show, isArray, (ax) -> show a for a in ax ;;
30 | # could support JSON flag here
31 | Method.define show, isObject, (o) -> show yaml o
32 |
33 |
34 | try
35 |
36 | options = Options.parse args
37 |
38 | include shared.settings, options
39 |
40 | if shared.settings.dryRun
41 | shared.loggers.command._self.loggers.stderr.level = "debug"
42 |
43 | if shared.settings.verbose
44 | shared.loggers.status._self.loggers.stderr.level = "debug"
45 |
46 | if (command = Commands[options.command])?
47 | show yield command options
48 | else
49 | bye "bad-command", name: options.command
50 |
51 | catch e
52 | # errors expected by p42
53 | # have a p42 attribute
54 | if isArray e.p42
55 | bye e.p42...
56 | else
57 | # otherwise, this is unexpected, just re-throw
58 | error "unexpected-error", e
59 | _error e.stack
60 | throw e
61 |
62 | finally
63 |
64 | # be sure to shut down the shell process
65 | if !shared.settings.dryRun
66 | process.exit 0
67 |
--------------------------------------------------------------------------------
/src/cluster.coffee:
--------------------------------------------------------------------------------
1 | {basename, join} = require "path"
2 | {all} = require "when"
3 | {async, curry, collect, flow, map, isFile, mkdirp, rm, glob, sleep} = require "fairmont"
4 | basename = do (basename) -> curry (extension, path) -> basename path, extension
5 | {read, write} = require "panda-rw"
6 | Tmp = require "./tmp"
7 | {yaml, json} = require "./serialize"
8 | raise = require "./raise"
9 |
10 | _exports = do async ->
11 |
12 | [
13 | shared
14 | AWSHelpers
15 | # TODO: somehow there's a circular dependency here
16 | # Application
17 | ] = yield all [
18 | require "./shared"
19 | require "./helpers/aws"
20 | # require "./application"
21 | ]
22 |
23 | {createStack, getStack, removeStack} = AWSHelpers
24 |
25 | shared.clusters = join shared.config, "clusters"
26 |   yield mkdirp shared.clusters
27 |
28 | Cluster =
29 |
30 | join: (name) -> join shared.clusters, "#{name}.yaml"
31 |
32 | load: async (name) ->
33 | path = Cluster.join name
34 | if yield isFile path
35 | read path
36 | else
37 | raise "cluster.not-found", {name}
38 |
39 | save: async (cluster) ->
40 | yield write (Cluster.join cluster.name), cluster
41 | cluster
42 |
43 | create: async (name) ->
44 | yield createStack name
45 | loop
46 | # wait 5 seconds before querying status
47 | yield sleep 5000 unless shared.settings.dryRun
48 | cluster = yield getStack name
49 | if cluster.status == "CREATE_COMPLETE"
50 | break
51 | else if cluster.status != "CREATE_IN_PROGRESS"
52 | raise "cluster.create-failed", {name}
53 |
54 | Cluster.save cluster
55 |
56 | resolve: async (name) ->
57 | name ?= yield do async ->
58 | # see above re inline require
59 | Application = yield require "./application"
60 | {cluster} = yield Application.load()
61 | cluster
62 | yield Cluster.load name
63 |
64 | remove: async (name) ->
65 | yield removeStack name
66 | # TODO: find a way to re-create the cluster
67 | # YAML file for tests that depend on it
68 |       (yield rm Cluster.join name) unless shared.settings.dryRun
69 |
70 | list: ->
71 | collect flow [
72 | glob "*.yaml", shared.clusters
73 | map basename ".yaml"
74 | ]
75 |
76 | module.exports = _exports
77 |
--------------------------------------------------------------------------------
/src/commands/build.coffee:
--------------------------------------------------------------------------------
1 | # {all} = require "when"
2 | {async, collect, pull} = require "fairmont"
3 |
4 | _exports = do async ->
5 |
6 | [
7 | shared
8 | Application
9 | DockerHelpers
10 | Cluster
11 | ] = yield collect pull [
12 | require "../shared"
13 | require "../application"
14 | require "../helpers/docker"
15 | require "../cluster"
16 | ]
17 |
18 | {info} = shared.loggers.status
19 | {Mixins} = Application
20 |
21 | build = async ({mixins}) ->
22 | mixins ?= yield Mixins.list()
23 | info "build.starting", {mixins}
24 | application = yield Application.load()
25 | cluster = yield Cluster.load application.cluster
26 | yield DockerHelpers.swarmEnv cluster.name
27 | yield DockerHelpers.login()
28 | yield Mixins.build {mixins}
29 | info "build.complete", {mixins}
30 |
31 | module.exports = _exports
32 |
--------------------------------------------------------------------------------
/src/commands/cluster.coffee:
--------------------------------------------------------------------------------
1 | {all} = require "when"
2 | {async, include, isArray} = require "fairmont"
3 | Application = require "../application"
4 | {yaml} = require "../serialize"
5 |
6 | _exports = do async ->
7 |
8 | [
9 | shared
10 | Options
11 | Cluster
12 | AWSHelpers
13 | DockerHelpers
14 | Name
15 | ] = yield all [
16 | require "../shared"
17 | require "../options"
18 | require "../cluster"
19 | require "../helpers/aws"
20 | require "../helpers/docker"
21 | require "../name"
22 | ]
23 |
24 | {bye, info, error} = shared.loggers.status
25 |
26 | Commands =
27 |
28 | create: async ->
29 | name = yield Name.generate()
30 | info "cluster.create.starting", {name}
31 | yield DockerHelpers.createSwarmInstance
32 | cluster: yield Cluster.create name
33 | name: "#{name}-00"
34 | master: true
35 | info "cluster.create.complete", {name}
36 |
37 | expand: async ({name, count}) ->
38 | {name} = cluster = yield Cluster.resolve name
39 | info "cluster.expand.starting", {name, count}
40 | names = yield DockerHelpers.findAvailableNames name, count
41 | # TODO: ideally we'd launch all three swarm instances in parallel
42 | # but that messes up the tests at the moment
43 | for _name in names
44 | yield DockerHelpers.createSwarmInstance
45 | name: _name
46 | cluster: cluster
47 | info "cluster.expand.complete", {name, count}
48 |
49 | contract: ({name, count}) ->
50 | bye "not-implemented"
51 |
52 | rm: async ({name}) ->
53 | {name} = yield Cluster.resolve name
54 | info "cluster.rm.starting", {name}
55 | yield DockerHelpers.removeSwarmNodes name
56 | yield Cluster.remove name
57 | info "cluster.rm.complete", {name}
58 |
59 | ls: -> Cluster.list()
60 |
61 | ps: async ({name}) ->
62 | {name} = yield Cluster.resolve name
63 | DockerHelpers.listSwarmNodes name
64 |
65 | env: async ({name}) ->
66 | {name} = yield Cluster.resolve name
67 | DockerHelpers.swarmEnv name
68 |
69 | get: async ({name, property}) ->
70 | cluster = yield Cluster.resolve name
71 | if property? then cluster[property] else cluster
72 |
73 | async (options) ->
74 |
75 | options.name = options.cluster
76 |
77 | if (command = Commands[options.subcommand])?
78 | try
79 | yield command options
80 | catch e
81 | # errors expected by p42
82 | # have a p42 attribute
83 | if isArray e.p42
84 | bye e.p42...
85 | else
86 | # otherwise, this is unexpected, just re-throw
87 | error "unexpected-error"
88 | throw e
89 | else
90 | bye "bad-subcommand", name: options.subcommand
91 |
92 |
93 | module.exports = _exports
94 |
--------------------------------------------------------------------------------
/src/commands/help.coffee:
--------------------------------------------------------------------------------
1 | {async} = require "fairmont"
2 | {all} = require "when"
3 |
4 | _exports = do async ->
5 |
6 | [
7 | shared
8 | Options
9 | ] = yield all [
10 | require "../shared"
11 | require "../options"
12 | ]
13 |
14 | {bye, _error} = shared.loggers.status
15 |
16 | ({name}) ->
17 | try
18 | _error Options.help name
19 | catch
20 | bye "bad-command", {name}
21 |
22 | module.exports = _exports
23 |
--------------------------------------------------------------------------------
/src/commands/index.coffee:
--------------------------------------------------------------------------------
1 | {async} = require "fairmont"
2 | {all} = require "when"
3 |
4 | module.exports = do async ->
5 |
6 | [
7 | build
8 | cluster
9 | help
10 | init
11 | mixin
12 | run
13 | start
14 | stop
15 | target
16 | ] = yield all [
17 | require "./build"
18 | require "./cluster"
19 | require "./help"
20 | require "./init"
21 | require "./mixin"
22 | require "./run"
23 | require "./start"
24 | require "./stop"
25 | require "./target"
26 | ]
27 |
28 | {build, cluster, help, init, run, mixin, start, stop, target}
29 |
--------------------------------------------------------------------------------
/src/commands/init.coffee:
--------------------------------------------------------------------------------
1 | {basename, join} = require "path"
2 | {async} = require "fairmont"
3 | {read} = require "panda-rw"
4 | Interview = require "../interview"
5 |
6 | # TODO: if a p42.yaml file already exists, use it
7 | # as the default instead
8 |
9 | module.exports = async ->
10 |
11 | shared = yield require "../shared"
12 | Application = yield require "../application"
13 | AWSHelpers = yield require "../helpers/aws"
14 |
15 | {info} = shared.loggers.status
16 |
17 | info "init.determining-registry"
18 | registry = yield AWSHelpers.getRegistryDomain()
19 |
20 | name = basename process.cwd()
21 | domain = "#{name}.com"
22 |
23 | interview = yield Interview.create
24 | questions: yield read shared.interviews.init
25 | defaults: {name, domain, registry}
26 |
27 | try
28 | answers = yield Interview.start interview
29 | # Node's read fn throws an exception on SIGINT
30 |   # TODO: is there a better way to deal with this?
31 | catch e
32 | if e.message == "canceled"
33 | process.exit 1
34 | else
35 | throw e
36 |
37 | Application.create answers
38 |
--------------------------------------------------------------------------------
/src/commands/mixin.coffee:
--------------------------------------------------------------------------------
1 | {basename, dirname, join} = require "path"
2 | {mkdirp, cp, lsR, async, collect, pull, isArray, include} = require "fairmont"
3 | {read, write} = require "panda-rw"
4 | {lift} = require "when/node"
5 | Interview = require "../interview"
6 |
7 | # TODO: add this to Fairmont
8 | # - what if source is a glob?
9 | cpR = do ([_cp] = []) ->
10 |
11 | _cp = (from..., to) ->
12 | for _from in from
13 | _to = join to, (basename _from)
14 | cp _from, _to
15 |
16 | async (from, to) ->
17 | _from = yield lsR from
18 | collect pull _cp _from..., to
19 |
20 | # TODO: why isn't this in Fairmont already
21 | rmRF = do ([f] = []) ->
22 | f = lift require "rimraf"
23 | (directory) -> f directory
24 |
25 | _exports = do async ->
26 |
27 | [
28 | shared
29 | ] = yield collect pull [
30 | require "../shared"
31 | ]
32 |
33 | {bye, error} = shared.loggers.status
34 |
35 | Commands =
36 |
37 | add: async ({mixin, name}) ->
38 |
39 | paths = shared.mixins[mixin]
40 |
41 | questions = yield read paths.interview
42 | defaults = yield read paths.template.config
43 | interview = yield Interview.create {questions, defaults}
44 | answers = yield Interview.start interview
45 |
46 | destination = join ".", "run", name
47 | yield mkdirp "0777", destination
48 |
49 | yield cpR (dirname paths.template.config), destination
50 |
51 | # overwrite config
52 | # - add the defaults in cases where there was no question
53 | include answers, defaults
54 | write (join destination, "config.yaml"), answers
55 |
56 | rm: ({mixin}) ->
57 | rmRF join ".", "run", mixin
58 |
59 | async (options) ->
60 |
61 | if (command = Commands[options.subcommand])?
62 | try
63 | yield command options
64 | catch e
65 | # errors expected by p42
66 | # have a p42 attribute
67 | if isArray e.p42
68 | bye e.p42...
69 | else
70 | # otherwise, this is unexpected, just re-throw
71 | error "unexpected-error"
72 | throw e
73 | else
74 | bye "bad-subcommand", name: options.subcommand
75 |
76 | module.exports = _exports
77 |
78 | # 1) load defaults from mixin directory
79 | # 2) load interviewer with defaults
80 | # 3) conduct interview
81 | # 4) copy result to the run directory
82 |
--------------------------------------------------------------------------------
/src/commands/run.coffee:
--------------------------------------------------------------------------------
1 | {all} = require "when"
2 | {async} = require "fairmont"
3 | _exports = do async ->
4 |
5 | [
6 | build
7 | start
8 | ] = yield all [
9 | require "./build"
10 | require "./start"
11 | ]
12 |
13 | async ->
14 | yield build arguments...
15 | start arguments...
16 |
17 | module.exports = _exports
18 |
--------------------------------------------------------------------------------
/src/commands/start.coffee:
--------------------------------------------------------------------------------
1 | {all} = require "when"
2 | {async, isArray, empty} = require "fairmont"
3 | sprintf = require "sprintf"
4 |
5 | _exports = do async ->
6 |
7 | [
8 | shared
9 | AWSHelpers
10 | DockerHelpers
11 | DNSHelpers
12 | Application
13 | Cluster
14 | ] = yield all [
15 | require "../shared"
16 | require "../helpers/aws"
17 | require "../helpers/docker"
18 | require "../helpers/dns"
19 | require "../application"
20 | require "../cluster"
21 | ]
22 |
23 | {info} = shared.loggers.status
24 |
25 | {Mixins} = Application
26 |
27 | pull = async (cluster, registry, tag) ->
28 | for node in yield DockerHelpers.listSwarmNodes cluster
29 | yield DockerHelpers.env node
30 | yield DockerHelpers.login()
31 | yield DockerHelpers.pull {registry, tag}
32 |
33 | start = async (mixin) ->
34 |
35 | info "start.starting", {mixin}
36 |
37 | {registry, domain} = application = yield Application.load()
38 | {subdomains, count, discovery} = yield Mixins.load mixin
39 | cluster = yield Cluster.load application.cluster
40 |
41 | # normalize the mixin config properties
42 | subdomains = if isArray subdomains then subdomains
43 |     # discovery is an array of protocols; default to none
44 |     discovery = if isArray discovery then discovery else []
45 |
46 | # comment for DNS records
47 | # comment = msg "dns.comment", application
48 | comment = ""
49 |
50 | tag = "#{application.name}-#{mixin}"
51 | yield pull cluster.name, registry, tag
52 |
53 | yield DockerHelpers.swarmEnv cluster.name
54 | yield DockerHelpers.login()
55 |
56 | # TODO: this is a hack. We know that if you're
57 | # setting up subdomains for a part, that you must
58 | # want to use standard ports. The ports we expose
59 | # should be based on some additional piece of
60 | # information (ex: protocols, like we do with
61 | # discovery), but for the moment, we're just
62 | # assuming you want port 80. Note that we're
63 | # terminating TLS at the ELB so you probably
64 | # don't want to expose 443.
65 | if subdomains?
66 | options = "-p 80:80"
67 | else
68 | options = "-P"
69 |
70 | # TODO: Refactoring this loop is tricky because
71 | # all the pieces here are pretty intertwined:
72 | #
73 | # - run the docker container
74 | #
75 | # - add the private A record for the container
76 | #
77 | # - if we want discovery for this container,
78 |     #     add the container and its port to a list
79 | # so we can later add a private SRV record
80 | #
81 | # - possibly add the node running the container
82 | # to the ELB if it's advertising a subdomain
83 | #
84 |
85 | targets = []
86 | for i in [1..count]
87 |
88 | # run the docker container
89 | name = "#{application.name}-#{mixin}-#{sprintf '%02d', i}"
90 | yield DockerHelpers.run {name, tag, options}
91 |
92 | # get the IP and instance for the newly launched container
93 | {name} = yield DockerHelpers.inspect name
94 | {ip, instanceId} = yield AWSHelpers.getInstance name
95 |
96 | # Add private DNS A record associating the name with the IP
97 | yield DNSHelpers.a {cluster, name, ip, comment}
98 |
99 | # save the list of containers so we can set up
100 | # DNS SRV records later / we only need to do this
101 | # if discovery is set in the config
102 |       targets.push name unless empty discovery
103 |
104 | # Add to ELB, if applicable, based on subdomains
105 | if subdomains?
106 | AWSHelpers.registerWithELB {cluster: cluster.name, instanceId}
107 |
108 | # no point in adding an instance to the ELB
109 | # if we can't find it via public DNS...
110 | for subdomain in subdomains
111 |           DNSHelpers.alias {cluster, domain, subdomain, comment}
112 |
113 | # Create SRV records for each target,
114 | # for each protocol the container supports
115 | for protocol in discovery
116 |       DNSHelpers.srv {cluster, protocol, subdomain: mixin, targets, comment}
117 |
118 | async ({mixins}) ->
119 | mixins ?= yield Mixins.list()
120 | (yield start mixin) for mixin in mixins
121 |
122 | module.exports = _exports
123 |
--------------------------------------------------------------------------------
/src/commands/stop.coffee:
--------------------------------------------------------------------------------
1 | {all} = require "when"
2 | {async, empty} = require "fairmont"
3 |
4 | _exports = do async ->
5 |
6 | [
7 | DockerHelpers
8 | Application
9 | ] = yield all [
10 | require "../helpers/docker"
11 | require "../application"
12 | ]
13 |
14 | {Mixins} = Application
15 |
16 |   stop = async (mixin) ->
17 |     # TODO: make sure we can do it this way
18 |     {name} = yield Application.load()
19 |     DockerHelpers.stopContainers "#{name}-#{mixin}"
20 |
21 |   async ({mixins}) ->
22 |     mixins ?= yield Mixins.list()
23 |     (yield stop mixin) for mixin in mixins
24 |
25 | module.exports = _exports
26 |
--------------------------------------------------------------------------------
/src/commands/target.coffee:
--------------------------------------------------------------------------------
1 | {async, isArray, collect, pull} = require "fairmont"
2 |
3 | _exports = do async ->
4 |
5 | [
6 | shared
7 | Application
8 | ] = yield collect pull [
9 | require "../shared"
10 | require "../application"
11 | ]
12 |
13 | {bye, error} = shared.loggers.status
14 |
15 | Commands =
16 |
17 | add: -> Application.Targets.add arguments...
18 |
19 | remove: -> Application.Targets.remove arguments...
20 |
21 | rename: -> Application.Targets.rename arguments...
22 |
23 |
24 | async (options) ->
25 |
26 | options.name = options.cluster
27 |
28 | if (command = Commands[options.subcommand])?
29 | try
30 | yield command options
31 | catch e
32 | # errors expected by p42
33 | # have a p42 attribute
34 | if isArray e.p42
35 | bye e.p42...
36 | else
37 | # otherwise, this is unexpected, just re-throw
38 | error "unexpected-error"
39 | throw e
40 | else
41 | bye "bad-subcommand", name: options.subcommand
42 |
43 | module.exports = _exports
44 |
--------------------------------------------------------------------------------
/src/decorators.coffee:
--------------------------------------------------------------------------------
1 | {async} = require "fairmont"
2 |
3 | _exports = do async ->
4 |
5 | {getRepository, createRepository} = yield require "./helpers/aws"
6 | {build, push} = yield require "./helpers/docker"
7 |
8 | # The beginnings of an extensible mechanism
9 | Decorators =
10 |
11 | docker: async (application, mixin) ->
12 | {registry} = application
13 | tag = "#{application.name}-#{mixin.name}"
14 | if !(yield getRepository tag)?
15 | yield createRepository tag
16 | yield build {registry, tag, mixin}
17 | push {registry, tag}
18 |
19 | module.exports = _exports
20 |
--------------------------------------------------------------------------------
/src/helpers/aws.coffee:
--------------------------------------------------------------------------------
1 | {async} = require "fairmont"
2 | {read, write} = require "panda-rw"
3 | {yaml, json} = require "../serialize"
4 | Tmp = require "../tmp"
5 |
6 | _exports = do async ->
7 |
8 | shared = yield require "../shared"
9 | run = yield require "../run"
10 |
11 | H =
12 |
13 | getInstance: (instance) -> run "aws.ec2.describe-instances", {instance}
14 |
15 | getSecurityGroup: ({vpcId, group}) ->
16 | run "aws.ec2.describe-security-groups", {vpcId, group}
17 |
18 | setSecurityGroups: async ({vpcId, instance, groups}) ->
19 | groupIds = for group in groups
20 | (yield H.getSecurityGroup {vpcId, group})
21 | .groupId
22 | {instanceId} = yield H.getInstance instance
23 | yield run "aws.ec2.modify-instance-attribute", {instanceId, groupIds}
24 |
25 | getELB: (cluster) -> run "aws.elb.describe-load-balancers", {cluster}
26 |
27 | registerWithELB: ({cluster, instanceId}) ->
28 | run "aws.elb.register-instances-with-load-balancer", {cluster, instanceId}
29 |
30 | getRegistryURL: async ->
31 | (yield run "aws.ecr.get-authorization-token").url
32 |
33 | getRegistryDomain: async ->
34 | (yield H.getRegistryURL()).replace /^https:\/\//, ''
35 |
36 | getRepository: (repository) ->
37 | run "aws.ecr.describe-repositories", {repository}
38 |
39 | createRepository: async (repository) ->
40 | yield run "aws.ecr.create-repository", {repository}
41 | policy = json yield read shared.aws.ecr.policy
42 | run "aws.ecr.set-repository-policy", {repository, policy}
43 |
44 | createStack: async (stack) ->
45 |       file = (yield Tmp.file()) + ".json"
46 | yield write file, yield read shared.aws.cf.vpc
47 | run "aws.cloudformation.create-stack", {stack, file}
48 |
49 | removeStack: (stack) ->
50 | run "aws.cloudformation.delete-stack", {stack}
51 |
52 | getStack: async (stack) ->
53 | stackData = yield run "aws.cloudformation.describe-stacks", {stack}
54 | stackData.name = stack
55 | if stackData.az?
56 | stackData.region = stackData.az[..-2]
57 | stackData.zone = stackData.az[-1..-1]
58 | stackData
59 |
60 | module.exports = _exports
61 |
--------------------------------------------------------------------------------
/src/helpers/dns.coffee:
--------------------------------------------------------------------------------
1 | {async, merge, read} = require "fairmont"
2 | {write} = require "panda-rw"
3 | {yaml, json} = require "../serialize"
4 | render = require "../template"
5 | Tmp = require "../tmp"
6 |
7 | _exports = do async ->
8 |
9 | shared = yield require "../shared"
10 | run = yield require "../run"
11 | {getELB} = yield require "./aws"
12 | Cluster = yield require "../cluster"
13 |
14 | build = async (name, data) ->
15 |
16 | update = async (type, cluster, data) ->
17 | # merge cluster with update-specific data
18 | data = merge cluster, data
19 | # extract the zoneId
20 | {zoneId} = data
21 | # create tempfile and write JSON string to it
22 |       file = (yield Tmp.file()) + ".json"
23 | template = yield read shared.aws.dns[type]
24 | yield write file, yaml render template, data
25 | # run the update
26 | yield run "aws.route53.change-resource-record-sets", {zoneId, file}
27 |
28 | H =
29 |
30 | a: ({cluster, name, ip, comment}) ->
31 | update "a", cluster, {name, ip, comment}
32 |
33 | alias: async ({cluster, domain, subdomain, comment}) ->
34 | elb = yield getELB cluster.name
35 | {zoneId} = yield run "aws.route53.list-hosted-zones-by-name", {domain}
36 | yield update "alias", cluster,
37 | domain: "#{subdomain}.#{domain}",
38 | zoneId: zoneId
39 | elbZoneId: elb.zoneId
40 | elbDomain: elb.domain
41 | comment: comment
42 |
43 | # Temporary hack--we assume www is also the apex record,
44 | # so we add a second alias for apex.
45 | if subdomain == "www"
46 |         yield H.alias {cluster, domain, subdomain: "", comment}
47 |
48 | srv: async ({cluster, protocol, subdomain, targets, comment}) ->
49 | yield update "srv", cluster, {protocol, subdomain, targets, comment}
50 |
51 | # Temporary hack--we assume www is also the apex record,
52 | # so we add a second SRV for the empty value, ex: _._http.
53 | if subdomain == "www"
54 | yield update "srv", cluster, {protocol, subdomain: "", targets, comment}
55 |
56 | module.exports = _exports
57 |
--------------------------------------------------------------------------------
/src/helpers/docker.coffee:
--------------------------------------------------------------------------------
1 | {join} = require "path"
2 | {async} = require "fairmont"
3 | sprintf = require "sprintf"
4 |
5 | _exports = do async ->
6 |
7 | shared = yield require "../shared"
8 | Cluster = yield require "../cluster"
9 | run = yield require "../run"
10 | AWSHelpers = yield require "../helpers/aws"
11 |
12 | H =
13 |
14 | # TODO: env, swarmEnv, and login need to be run alongside ensuing commands
15 |     # Solution: create a process to which we can pipe arbitrary bash
16 | # commands, using something like:
17 | #
18 | # while true ; do read -r line; eval $line; done
19 | #
20 |
21 | env: (name) -> run "docker.machine.env", {name}
22 |
23 | # TODO: dynamically determine the Swarm master
24 | swarmEnv: (name) ->
25 | run "docker.machine.swarm.env", name: "#{name}-00"
26 |
27 | login: -> run "docker.login"
28 |
29 | build: ({registry, tag, mixin}) ->
30 | file = join mixin.path, "Dockerfile"
31 | run "docker.build", {tag: "#{registry}/#{tag}", file}
32 |
33 | push: ({registry, tag}) ->
34 | run "docker.push", tag: "#{registry}/#{tag}"
35 |
36 | pull: ({registry, tag}) ->
37 | run "docker.pull", tag: "#{registry}/#{tag}"
38 |
39 | run: ({name, tag, options}) -> run "docker.run", {name, tag, options}
40 |
41 | inspect: (name) -> run "docker.inspect", {name}
42 |
43 | listContainers: (cluster) -> run "docker.ps", {cluster}
44 |
45 | stopContainers: (name) -> run "docker.stop", {name}
46 |
47 | # TODO: make "instance" consistent with "node"
48 | createInstance: ({name, cluster}) ->
49 | {region, vpcId, subnetId, zone} = cluster
50 | run "docker.machine.create",
51 | {name, region, vpcId, subnetId, zone}
52 |
53 | createSwarmInstance: async ({name, cluster, master}) ->
54 | master ?= false
55 | {region, vpcId, subnetId, zone} = cluster
56 | yield run "docker.machine.swarm.create",
57 | {name, region, vpcId, subnetId, zone, master}
58 | AWSHelpers.setSecurityGroups
59 | vpcId: vpcId
60 | instance: name
61 | groups: [ "default", "docker-machine" ]
62 |
63 | findAvailableNames: async (cluster, count = 1) ->
64 | taken = yield H.listSwarmNodes cluster
65 | counter = 0
66 | names = []
67 | while names.length < count
68 | candidate = sprintf "%s-%02d", cluster, counter++
69 | names.push candidate if ! (candidate in taken)
70 | names
71 |
72 | listSwarmNodes: (name) -> run "docker.machine.ls", {name}
73 |
74 | removeSwarmNodes: async (cluster) ->
75 | nodes = yield H.listSwarmNodes cluster
76 | yield run "docker.machine.stop", {nodes}
77 | yield run "docker.machine.rm", {nodes}
78 |
79 | module.exports = _exports
80 |
--------------------------------------------------------------------------------
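
The gap-filling behavior of `findAvailableNames` is easy to miss: it reuses the lowest free suffix rather than appending past the highest. A standalone sketch of the same numbering logic, with a hypothetical list of taken names:

```coffee
sprintf = require "sprintf"

# same logic as H.findAvailableNames above, inlined so it runs
# without a swarm; the `taken` list is hypothetical
findAvailableNames = (cluster, taken, count = 1) ->
  counter = 0
  names = []
  while names.length < count
    candidate = sprintf "%s-%02d", cluster, counter++
    names.push candidate unless candidate in taken
  names

console.log findAvailableNames "red-ghost", ["red-ghost-00", "red-ghost-02"], 2
# => [ 'red-ghost-01', 'red-ghost-03' ]
```
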
/src/interview.coffee:
--------------------------------------------------------------------------------
1 | {promise} = require "when"
2 | {async, read} = require "fairmont"
3 | prompt = require "prompt"
4 | render = require "./template"
5 | {yaml} = require "./serialize"
6 |
7 | Interview =
8 |
9 | create: ({questions, defaults}) ->
10 | for question in questions when defaults[question.name]?
11 | question.default = defaults[question.name]
12 | {questions}
13 |
14 | start: ({questions}) ->
15 |
16 | prompt.message = ""
17 | prompt.delimiter = ":"
18 | prompt.start stdout: process.stderr
19 |
20 | promise (resolve, reject) ->
21 | prompt.get questions, (error, answers) ->
22 | if error?
23 | reject error
24 | else
25 | resolve answers
26 |
27 | module.exports = Interview
28 |
--------------------------------------------------------------------------------
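
A sketch of how `create` and `start` compose, mirroring the flow in `src/commands/mixin.coffee`; the question literal here is illustrative:

```coffee
{async} = require "fairmont"
Interview = require "./src/interview"

do async ->
  questions = [
    name: "port"
    description: "Port"
    type: "string"
    required: true
  ]
  # a default whose key matches a question name becomes that
  # question's prompt default
  interview = Interview.create {questions, defaults: {port: "80"}}
  answers = yield Interview.start interview
  console.log answers.port
```
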
/src/logger.coffee:
--------------------------------------------------------------------------------
1 | FS = require "fs"
2 | {curry, go, map, pull, push, async, include, w, empty,
3 | Type, isType, isKind, isString, isArray, isWritable,
4 | toString, Method, read, write} = require "fairmont"
5 |
6 | # special version of include that won't
7 | # overwrite with a null value
8 | defaults = (target, objects...)->
9 | for object in objects
10 | for key, value of object when value?
11 | target[key] = value
12 | target
13 |
14 | Tmp = require "./tmp"
15 |
16 | Logger = Type.define()
17 |
18 | include Logger,
19 |
20 | defaults:
21 | level: "info"
22 |
23 | # RFC5424 syslog levels
24 | levels: do (levels={}) ->
25 | for level, index in w "emerg alert crit error warning notice info debug"
26 | do (level, index) -> levels[level] = index
27 | levels
28 |
29 | # using yield allows for async logger create fns
30 | create: async (type, options) ->
31 | defaults (yield Type.create type), Logger.defaults, options
32 |
33 | # TODO: formatter support
34 | # We don't use the level in the output, nor include a timestamp,
35 | # and so on. That's to avoid messing with the tests, which rely
36 | # on clean output. With a formatter, we could just set the
37 | # formatter when testing to be as below and otherwise more useful.
38 | Logger.log = log = Method.create()
39 |
40 | Method.define log, (isKind Logger), isString, (-> true),
41 | (logger, level, data...) ->
42 | if Logger.levels[logger.level] >= Logger.levels[level]
43 | try
44 | log logger.sink, level, data...
45 | catch
46 |         console.error "unwritable log sink?", isWritable logger.sink
47 |
48 | Method.define log, isWritable, isString, (-> true),
49 | (stream, level, data...) ->
50 | go [
51 | data
52 | map toString
53 | map (s) -> s + "\n"
54 | map write stream
55 | pull
56 | ]
57 |
58 | Logger.Stream = Type.define Logger
59 |
60 | include Logger.Stream,
61 |
62 | create: ({stream, level}) ->
63 | sink = stream
64 | Logger.create Logger.Stream, {stream, sink, level}
65 |
66 | Logger.File = Type.define Logger.Stream
67 |
68 | include Logger.File,
69 |
70 | create: ({path, level}) ->
71 | stream = sink = FS.createWriteStream path
72 | Logger.create Logger.File, {stream, sink, path, level}
73 |
74 | Logger.TmpFile =
75 |
76 | create: async ({name, level}) ->
77 |       Logger.File.create {path: (yield Tmp.file "#{name}.log"), level}
78 |
79 | Logger.Memory = Type.define Logger
80 |
81 | include Logger.Memory,
82 |
83 | create: ({level} = {}) ->
84 | content = sink = []
85 | Logger.create Logger.Memory, {sink, content, level}
86 |
87 | Method.define log, isArray, isString, (-> true),
88 | (array, level, data...) ->
89 | for item in data
90 | push array, toString item
91 |
92 | Logger.Composite = Type.define Logger
93 |
94 | include Logger.Composite,
95 |
96 | create: ({loggers}) ->
97 | Logger.create Logger.Composite, {loggers}
98 |
99 | Method.define log, (isKind Logger.Composite), isString, (-> true),
100 | ({loggers}, level, data...) ->
101 | (log logger, level, data...) for key, logger of loggers
102 |
103 | Logger.Helpers = do (helpers = {}) ->
104 | for level, index of Logger.levels
105 | do (level, index) ->
106 | helpers[level] = (logger, data...) ->
107 | log logger, level, data...
108 | helpers
109 |
110 | Logger.helpers = (logger, helpers = {}) ->
111 | for name, fn of Logger.Helpers
112 | helpers._self = logger
113 | do (name, fn) ->
114 | helpers[name] = -> fn logger, arguments...
115 | helpers
116 |
117 | module.exports = Logger
118 |
--------------------------------------------------------------------------------
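
A minimal sketch of these types in use, along the lines of what the CLI tests below do with `Logger.Memory` to capture output:

```coffee
{async} = require "fairmont"
Logger = require "./src/logger"

do async ->
  # an in-memory sink; `sink` and `content` are the same array
  memory = yield Logger.Memory.create level: "debug"
  {info, debug} = Logger.helpers memory
  info "cluster created"
  debug "raw command output"
  console.log memory.content
  # => [ 'cluster created', 'raw command output' ]
```
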
/src/name.coffee:
--------------------------------------------------------------------------------
1 | {read} = require "panda-rw"
2 | {async, pluck} = require "fairmont"
3 |
4 | [adjectives, nouns] = []
5 |
6 | _exports = do async ->
7 |
8 | shared = yield require "./shared"
9 | {adjectives, nouns} = yield read shared.words
10 |
11 | Name =
12 |
13 | # Generate a name from our list of adjectives and nouns.
14 | generate: ->
15 | if shared.settings.dryRun
16 | "violent-aftermath"
17 | else
18 | "#{pluck adjectives}-#{pluck nouns}"
19 |
20 | module.exports = _exports
21 |
--------------------------------------------------------------------------------
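
Usage is a one-liner; in dry-run mode the fixed name lines up with the `violent-aftermath` fixtures under `test/data`:

```coffee
{async} = require "fairmont"

do async ->
  Name = yield require "./src/name"
  # a random adjective-noun pair drawn from share/words.yaml
  console.log Name.generate()   # => e.g. "mellow-ghost"
```
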
/src/options.coffee:
--------------------------------------------------------------------------------
1 | {async} = require "fairmont"
2 | {all} = require "when"
3 | {read} = require "panda-rw"
4 | raise = require "./raise"
5 |
6 | _exports = do async ->
7 |
8 | [
9 | shared
10 | ] = yield all [
11 | require "./shared"
12 | ]
13 |
14 | definitions = yield read shared.options
15 |
16 | {all, any, many, optional, rule} = require "bartlett"
17 | {include, merge, first, second, empty, collect, map} = require "fairmont"
18 |
19 | # take a key and a value and return an object
20 | _kv = (k,v, o = {}) -> o[k] = v ; o
21 |
22 | # merge wrapper for use with rules
23 | _merge = ({value}) -> merge value...
24 |
25 | grammar = (r) ->
26 | (s) ->
27 | match = r(s)
28 | if match?
29 | {value, rest} = match
30 | if empty rest
31 | value
32 | else
33 | raise "bad-option", name: first rest
34 | else
35 | raise "bad-command", name: s.join ' '
36 |
37 | # match a set of rules in any order, but only
38 | # once per rule...
39 |
40 | set = (px...) ->
41 |
42 | (sx) ->
43 |
44 | qx = px # don't mess with the original ruleset
45 | rx = [] # as yet unmatched rules
46 | vx = [] # matched values
47 |
48 | # try until there are no rules left to try
49 | # or until there is nothing left to match
50 | until (empty qx) || (empty sx)
51 | [p, qrest...] = qx
52 | if (match = p(sx))? # .... found a match
53 | {value, rest} = match
54 | vx.push value # save the value
55 | sx = rest # continue matching
56 | qx = [qrest..., rx...] # reconsider unmatched rules
57 |           rx = []               # and empty the unmatched list
58 | else # ... no match yet
59 | qx = qrest # move to the next rule
60 | rx.push p # saving the unmatched rule
61 |
62 | {value: vx, rest: sx}
63 |
64 | # a parameter is anything that isn't a flag
65 |   parameter = ([value, rest...]) -> {value, rest} if value? and !value.match /^\-/
66 |
67 | # a word here means a specific string
68 | word = (w) -> ([value, rest...]) -> {value, rest} if value == w
69 |
70 | flag = (s) ->
71 | switch s.length
72 | when 0 then (throw new RangeError)
73 | when 1 then "-#{s}"
74 | else "--#{s}"
75 |
76 | normalize = (definitions) ->
77 |
78 | for name, d of definitions
79 |
80 |       # with YAML refs it's possible to see the same
81 | # definition more than once...
82 | if !(d.__processed)?
83 |
84 | d.__processed = true
85 |
86 | # The "key" is what we use to save the resulting value,
87 | # and we default it to the name (which is the property name).
88 | d.name = name
89 | d.key ?= name
90 |
91 | # add dashes in front of flags
92 | if d.flags?
93 | d.flags = collect map flag, d.flags
94 | if d.options?
95 | normalize d.options
96 | # TODO: less hacky way to generate supplementary help text
97 | if d.help?
98 | if d.default?
99 | d.help += " Defaults to #{d.default}."
100 | else if d.required || !d.optional?
101 | d.help += " Required."
102 |
103 | definitions
104 |
105 | build = (definitions) ->
106 |
107 | # As we process definitions, we're going to
108 | # compile a list of required options and defaults
109 | required = []
110 | defaults = {}
111 |
112 | # Process each definition and map the results into
113 | # an array of rules that we're going to place in `ax`
114 | px = for name, d of definitions
115 |
116 | # Create a base rule based on the definition type...
117 | p = build[d.type] d
118 |
119 | # Wrap it in a rule that converts the result into an
120 | # object with one property...
121 | p = do (d) ->
122 | # if there is a value override, use that
123 | if d.value?
124 | (rule p, -> _kv d.key, d.value)
125 | # otherwise just use whatever value we parsed
126 | else
127 | (rule p, ({value}) -> _kv d.key, value)
128 |
129 | # If this definition itself has an options definition,
130 | # recursively generate rules for those options,
131 | # and wrap that in another rule that requires both
132 | # the current option and the nested options to match,
133 | # merging the resulting objects...
134 | if d.options?
135 | p = rule (all p, (build d.options)), _merge
136 |
137 | # Save default values and required options...
138 | # Defaults take precedence.
139 | if d.default?
140 | defaults[d.key] = d.default
141 | else if d.required? || !d.optional?
142 | required.push d.key unless d.key in required
143 |
144 | # Return that rule, so it ends up in our rule array px
145 | p
146 |
147 | # Okay, now we have a list of rules. We'll use `set` to match
148 | # those in any order and then merge the resulting objects...
149 | q = rule (set px...), _merge
150 |
151 | # Now we have an object based on what we were able to parse out
152 | # of the options. We still need to layer in the defaults and
153 | # check for required options.
154 | do (defaults, required) ->
155 |
156 | (s) ->
157 |
158 | # If we don't match to begin with, nevermind...
159 | if (match = q(s))?
160 |
161 | {value, rest} = match
162 |
163 | # Layer in the defaults...
164 | value = merge defaults, value
165 |
166 | # Make sure we have all the required values...
167 | # The reason this works for a set is because each
168 | # item in the set will get marked as required
169 | # unless explicitly marked as optional, using the
170 | # same _key_ ...
171 | return undefined for key in required when !(value[key]?)
172 |
173 | # If we're still here, we have a valid result
174 | {value, rest}
175 |
176 |
177 | # helper for generating a rule for flags
178 | _flags = (flags) -> any (collect map word, flags)...
179 | _options = (flags) -> all (_flags flags), parameter
180 |
181 | # Generate rules based on the type attributes in the definitions
182 | include build,
183 | switch: (d) -> rule (_flags d.flags), -> true
184 | option: (d) -> rule (_options d.flags), ({value}) -> second value
185 | word: ({name}) -> word name
186 | list: ({subtype: {type, options}}) -> many build[type] options
187 | parameter: -> parameter
188 |
189 | parser = grammar build normalize definitions
190 | # parser = build normalize definitions
191 |
192 | parse = (args) -> parser args
193 |
194 | # {w} = require "fairmont"
195 | # $P parse w "cluster create"
196 | # $P parse w "cluster expand violent-aftermath -n 3"
197 | # $P parse w "cluster rm violent-aftermath"
198 | # $P parse w "cluster create"
199 |
200 | render = require "./template"
201 | messages = yield read shared.messages
202 |
203 | help = (name) ->
204 | if name?
205 | render messages[name].help, definitions[name]
206 | else
207 | render messages.help, definitions
208 |
209 | {parse, help}
210 |
211 | module.exports = _exports
212 |
--------------------------------------------------------------------------------
/src/raise.coffee:
--------------------------------------------------------------------------------
1 | {include} = require "fairmont"
2 |
3 | module.exports = (args...) -> throw include (new Error), p42: args
4 |
--------------------------------------------------------------------------------
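
This is the other half of the `e.p42` checks in the command dispatchers above; a sketch:

```coffee
raise = require "./src/raise"

try
  raise "cluster.not-found", name: "red-ghost"
catch e
  # the dispatchers test isArray e.p42 and hand the array to bye
  console.log e.p42   # => [ 'cluster.not-found', { name: 'red-ghost' } ]
```
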
/src/run.coffee:
--------------------------------------------------------------------------------
1 | {async, reduce, Method} = require "fairmont"
2 | messages = require "panda-messages"
3 | {yaml, json} = require "./serialize"
4 | render = require "./template"
5 | createShell = require "./sh"
6 |
7 | unquote = (s) ->
8 | s
9 | .replace /\s+/g, ' '
10 | .trim()
11 |
12 | _exports = do async ->
13 |
14 | shared = yield require "./shared"
15 | {lookup} = yield messages shared.commands
16 | C = shared.loggers.command
17 | S = shared.loggers.status
18 |
19 | build = (key, data={}) ->
20 | {template, processor, attributes, test} = lookup key
21 | string = unquote render template, data
22 | {string, processor, attributes, test}
23 |
24 | Processors =
25 |
26 | line: (command, response) -> response.split "\n"
27 |
28 | # TODO: Object reference parsing should be in Fairmont
29 | # or something...
30 | json: (command, response) ->
31 | data = json response
32 | result = {}
33 | for {name, accessor} in command.attributes
34 | current = data
35 | for key in accessor.split(".")
36 | current = current[key]
37 | break if ! current?
38 | result[name] = current
39 | result
40 |
41 | [sh] = []
42 | run = async (key, data={}) ->
43 | # TODO: elegant way to access logger streams?
44 | if !shared.settings.dryRun
45 | sh ?= do ([shell]=[]) ->
46 | shell = createShell
47 | stdout: S._self.loggers.stderr.stream
48 | stderr: S._self.loggers.stderr.stream
49 | process.on "exit", -> shell.close()
50 | shell.run
51 |
52 | command = build key, data
53 |
54 | if shared.settings.dryRun
55 | yield C.info command.string
56 | command.test
57 | else
58 | C.info command.string
59 | S._debug command.string
60 | response = yield sh command.string
61 | if response != ""
62 | Processors[command.processor]? command, response
63 |
64 | module.exports = _exports
65 |
--------------------------------------------------------------------------------
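
The `json` processor walks dotted accessor paths into the parsed response; a standalone sketch of that walk, with a hypothetical attribute list (the real ones live in `share/commands.yaml`):

```coffee
{json} = require "./src/serialize"

# same accessor walk as Processors.json above; the attributes here
# are hypothetical
command = attributes: [ {name: "vpcId", accessor: "Vpc.VpcId"} ]

walk = (command, response) ->
  data = json response
  result = {}
  for {name, accessor} in command.attributes
    current = data
    for key in accessor.split "."
      current = current[key]
      break if ! current?
    result[name] = current
  result

console.log walk command, '{"Vpc":{"VpcId":"vpc-123"}}'
# => { vpcId: 'vpc-123' }
```
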
/src/serialize.coffee:
--------------------------------------------------------------------------------
1 | {isString, isObject, fromJSON, toJSON, Method} = require "fairmont"
2 | YAML = require "js-yaml"
3 |
4 | toYAML = (data) -> YAML.safeDump data
5 | fromYAML = (yaml) -> YAML.safeLoad yaml
6 |
7 | yaml = Method.create()
8 |
9 | Method.define yaml, isString, fromYAML
10 | Method.define yaml, isObject, toYAML
11 |
12 | yaml.from = fromYAML
13 | yaml.to = toYAML
14 |
15 | json = Method.create()
16 |
17 | Method.define json, isString, fromJSON
18 | Method.define json, isObject, toJSON
19 |
20 | json.from = fromJSON
21 | json.to = toJSON
22 |
23 | module.exports = {json, yaml}
24 |
--------------------------------------------------------------------------------
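
Both serializers dispatch on argument type, so one function covers both directions; a sketch:

```coffee
{yaml, json} = require "./src/serialize"

# a string parses; an object dumps
data = yaml "name: red-ghost\ncount: 3\n"
console.log data          # => { name: 'red-ghost', count: 3 }
console.log yaml data     # => "name: red-ghost\ncount: 3\n"

# the direction can also be forced
console.log json.to data  # => '{"name":"red-ghost","count":3}'
```
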
/src/sh.coffee:
--------------------------------------------------------------------------------
1 | {promise} = require "when"
2 | {spawn} = require "child_process"
3 | {EventEmitter} = require "events"
4 |
5 | module.exports = ({stdout, stderr}) ->
6 |
7 | # Run commands from stdin within a single process, using the
8 |   # file separator character (\u001c) to indicate that a command has
9 | # completed
10 | p = spawn "bash",
11 | [
12 | "-c"
13 | "while true ; do read -r line; eval $line; printf '\u001c'; done"
14 | ]
15 |
16 | # p.stdout.pipe stdout if stdout?
17 | p.stderr.pipe stderr if stderr?
18 |
19 | p.on "error", (e) -> console.error e
20 |
21 | events = new EventEmitter()
22 | do (result = "") ->
23 | p.stdout.on "data", (buffer) ->
24 | string = buffer.toString()
25 | if (match = string.match /\u001c/)?
26 | {index} = match
27 | result += string[...index]
28 | events.emit "result", result
29 | result = string[(index+1)..]
30 | else
31 | result += string
32 |
33 | # Each call to run MUST WAIT on the promise to resolve
34 | # before the next call can be made. Otherwise, two
35 | # commands can potentially get the same result event.
36 | run: (s) ->
37 | promise (resolve, reject) ->
38 | p.stdin.write s + "\n"
39 | events.once "result", resolve
40 |
41 | close: -> p.kill()
42 |
--------------------------------------------------------------------------------
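
Because every command is eval'd inside one long-lived bash process, shell state (working directory, exported variables, docker-machine env) persists between calls, which is exactly what the docker helpers need. A sketch:

```coffee
{async} = require "fairmont"
createShell = require "./src/sh"

do async ->
  sh = createShell stderr: process.stderr
  # state persists across calls within the same bash process
  yield sh.run "cd /tmp"
  console.log yield sh.run "pwd"   # => "/tmp\n"
  sh.close()
```
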
/src/shared.coffee:
--------------------------------------------------------------------------------
1 | Path = require "path"
2 | {reduce, reject, async, isFunction, lsR, include, mkdirp} = require "fairmont"
3 | messages = require "panda-messages"
4 |
5 | expand = (current, part) -> current[part] ?= {}
6 | blank = (part) -> part == ''
7 |
8 | # Build an object whose properties correspond to paths
9 | paths = async (root) ->
10 | object = {root}
11 | # Go through all the files in root...
12 | for path in (yield lsR root)
13 | # get the directory and names for the relative paths
14 | {dir, name} = Path.parse Path.relative root, path
15 | # descend into the object based on the path...
16 | parent = reduce expand, object,
17 | # ... unless the relative path is itself a filename
18 | reject blank, dir.split Path.sep
19 | # set the corresponding property of the parent
20 | # (if path is a filename, parent will be object)
21 | parent[name] = path
22 | object
23 |
24 | loggers = async (shared, loggers = {}) ->
25 |
26 | {message} = yield messages shared.messages
27 |
28 | {helpers, TmpFile, Stream, Memory, Composite} = yield require "./logger"
29 |
30 | wrap = (helpers, wrapped = {}) ->
31 | for name, fn of helpers when isFunction fn
32 | do (name, fn) ->
33 | wrapped["_#{name}"] = fn
34 | wrapped[name] = (key, data = {}) ->
35 | fn message key, data
36 |
37 | wrapped.bye = (key, data = {}) ->
38 | wrapped.error key, data
39 | process.exit 1
40 |
41 | wrapped._self = helpers._self
42 | wrapped
43 |
44 | # composite loggers
45 | status = wrap helpers Composite.create loggers:
46 | debug: yield TmpFile.create name: "debug", level: "debug"
47 | stderr: Stream.create stream: process.stderr, level: "info"
48 |
49 | # command logger
50 | command = helpers Composite.create
51 | loggers:
52 | stderr: Stream.create stream: process.stderr, level: "emerg"
53 |
54 | # output logger for actual command output, ex: list of clusters
55 | output = helpers Composite.create
56 | loggers:
57 | stdout: Stream.create stream: process.stdout, level: "info"
58 |
59 | {output, status, command}
60 |
61 | _exports = do async ->
62 |
63 | # each p42 user has their own config directory
64 | config = Path.join process.env.HOME, ".config", "p42"
65 | yield mkdirp config
66 | # global settings
67 | settings = {}
68 | # each application has a run directory
69 | run = "run"
70 | # paths to various shared files
71 | share = yield paths Path.join __dirname, "..", "share"
72 | test = yield paths Path.join __dirname, "..", "test", "data"
73 | test.app.root = Path.join test.root, "app"
74 | # set up loggers
75 | loggers = yield loggers share
76 | # build the shared object
77 | include share, {config, run, settings, test, loggers}
78 |
79 | module.exports = _exports
80 |
--------------------------------------------------------------------------------
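
The `paths` builder is what turns the `share` tree into the property chains used throughout the source (`shared.words`, `shared.aws.dns.a`, and so on). A sketch, assuming the repository root as working directory:

```coffee
{async} = require "fairmont"

do async ->
  shared = yield require "./src/shared"
  # directories become nested objects; file basenames become keys
  console.log shared.words        # => .../share/words.yaml
  console.log shared.aws.dns.a    # => .../share/aws/dns/a.yaml
```
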
/src/template.coffee:
--------------------------------------------------------------------------------
1 | {identity} = require "fairmont"
2 | sprintf = require "sprintf"
3 | H = require "handlebars"
4 | S = require "swag"
5 | S.registerHelpers H
6 | join = (d, array) -> array.join d
7 |
8 | block = (f) ->
9 | (args..., options) ->
10 | if options.fn?
11 | join "", f args..., options.fn
12 | else
13 | f args..., identity
14 |
15 | H.registerHelper
16 |
17 | values: block (object, f) -> f value for key, value of object
18 |
19 | filter: block (property, value, objects, f) ->
20 | (f object) for object in objects when object[property] == value
21 |
22 | pluck: block (property, objects, f) ->
23 | (f object[property]) for object in objects
24 |
25 | join: (delimiter, array) -> join delimiter, array
26 |
27 | sprintf: (format, string) -> sprintf format, string
28 |
29 | hang: do ->
30 | indent = (x, s) -> (" ".repeat x) + s
31 | (i, w, s) ->
32 | m = w - i
33 | [first, rest...] = s.match ///.{1,#{m}}(\s+|$)///g
34 | [first, ((indent i, line) for line in rest)...].join "\n"
35 |
36 | module.exports = (template, context) ->
37 | (H.compile template, noEscape: true)(context)
38 |
--------------------------------------------------------------------------------
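
The export is plain Handlebars with `noEscape` plus the helpers registered above; it is what expands placeholders like `{{subdomain}}` in `share/messages.yaml`. A sketch:

```coffee
render = require "./src/template"

# plain interpolation, as in the messages.yaml entries
console.log render "Adding alias for subdomain [{{subdomain}}]",
  subdomain: "www"
# => Adding alias for subdomain [www]

# one of the registered helpers
console.log render "{{join ', ' mixins}}", mixins: ["api", "worker"]
# => api, worker
```
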
/src/tmp.coffee:
--------------------------------------------------------------------------------
1 | {join} = require "path"
2 | {async, shell, rmDir, mkdirp, exists} = require "fairmont"
3 |
4 | # TODO: check for error
5 | # TODO: use run instead? ex: run "mktemp"
6 | sh = async (command) ->
7 | (yield shell command)
8 | .stdout.trim()
9 |
10 | counter = 0
11 | Tmp =
12 |
13 | dir: do async ->
14 | (yield sh 'mktemp -d "${TMPDIR:-/tmp}p42-XXXXXXXXX"')
15 |
16 | file: async (name) ->
17 | name ?= "file-#{counter++}"
18 | path = yield Tmp.dir
19 | yield mkdirp path
20 | join path, name
21 |
22 | process.on "exit", async ->
23 | path = yield Tmp.dir
24 | rmDir path if yield exists path
25 |
26 | module.exports = Tmp
27 |
--------------------------------------------------------------------------------
/test/aws-helpers.coffee:
--------------------------------------------------------------------------------
1 | assert = require "assert"
2 | {async} = require "fairmont"
3 | {read} = require "panda-rw"
4 | {command} = require "./helpers"
5 |
6 | module.exports = (context) ->
7 |
8 | context.test "AWS", (context) ->
9 |
10 | AWSHelpers = yield require "../src/helpers/aws"
11 |
12 | command "AWS.createStack", context, ->
13 | AWSHelpers.createStack "preventative-malpractice"
14 |
15 | command "AWS.getStack", context, async ->
16 | stack = yield AWSHelpers.getStack "preventative-malpractice"
17 | assert stack.vpcId?
18 | assert stack.zoneId?
19 |
20 | # test the AZ parsing
21 | assert.equal stack.az, 'us-west-1a'
22 | assert.equal stack.region, 'us-west-1'
23 | assert.equal stack.zone, 'a'
24 |
25 | command "AWS.removeStack", context, ->
26 | AWSHelpers.removeStack "preventative-malpractice"
27 |
28 | command "AWS.setSecurityGroups", context, ->
29 | AWSHelpers.setSecurityGroups
30 | vpcId: "test-vpc-00"
31 | instance: "preventative-malpractice-01"
32 | groups: [
33 | "default"
34 | "docker-machine"
35 | ]
36 |
37 | command "AWS.getELB", context, async ->
38 | {zoneId} = yield AWSHelpers.getELB "violent-aftermath"
39 | assert.equal "test-zone-00", zoneId
40 |
41 | command "AWS.registerWithELB", context, ->
42 | AWSHelpers.registerWithELB
43 | instanceId: "test-instance-00"
44 | cluster: "vodka-martini"
45 |
46 | command "AWS.getRepository", context, async ->
47 | {repositoryId} = yield AWSHelpers.getRepository "blurb9-api"
48 | assert.equal repositoryId, "test-repo-00"
49 |
50 | # TODO: check generated policy JSON
51 | command "AWS.createRepository", context, ->
52 | AWSHelpers.createRepository "blurb9-api"
53 |
54 | command "AWS.getRegistryDomain", context, async ->
55 | assert.equal "123.registry.test.com",
56 | yield AWSHelpers.getRegistryDomain()
57 |
--------------------------------------------------------------------------------
/test/cli-helpers.coffee:
--------------------------------------------------------------------------------
1 | assert = require "assert"
2 | {all} = require "when"
3 | {async, isArray, chdir, w} = require "fairmont"
4 | Logger = require "../src/logger"
5 | {command} = require "./helpers"
6 |
7 | module.exports = (context) ->
8 |
9 | context.test "CLI", (context) ->
10 |
11 | [
12 | shared
13 | CLI
14 | ] = yield all [
15 | require "../src/shared"
16 | require "../src/cli"
17 | ]
18 |
19 |
20 | run = async (string) ->
21 | # Redirect command output so we can inspect it
22 | logger = Logger.Memory.create()
23 | shared.loggers.output._self.loggers.stdout = logger
24 | yield CLI w "#{string} --dry-run"
25 | logger.sink
26 |
27 | command "CLI.cluster.create", context, ->
28 | run "cluster create"
29 |
30 | command "CLI.cluster.expand", context, ->
31 | run "cluster expand violent-aftermath -n 3"
32 |
33 | command "CLI.cluster.contract", context #, ->
34 | # run "cluster contract violent-aftermath --count 2"
35 |
36 | command "CLI.cluster.rm", context, ->
37 | run "cluster rm violent-aftermath"
38 |
39 | command "CLI.cluster.ls", context, async ->
40 | assert (yield run "cluster ls").indexOf("violent-aftermath") != -1
41 |
42 | command "CLI.cluster.ps", context, ->
43 | run "cluster ps violent-aftermath"
44 |
45 | command "CLI.cluster.env", context, ->
46 | run "cluster env violent-aftermath"
47 |
48 | command "CLI.cluster.get", context, async ->
49 | assert.equal "us-west-1",
50 | yield run "cluster get violent-aftermath region"
51 |
52 | command "CLI.build", context, ->
53 | chdir shared.test.app.root
54 | run "build"
55 |
56 | command "CLI.start", context, ->
57 | chdir shared.test.app.root
58 | run "start"
59 |
60 | command "CLI.run", context, ->
61 | chdir shared.test.app.root
62 | run "run"
63 |
--------------------------------------------------------------------------------
/test/data/app/p42.yaml:
--------------------------------------------------------------------------------
1 | name: blurb9
2 | domain: blurb9.com
3 | registry: 123456789.registry.test.com
4 | clusters:
5 | master: violent-aftermath
6 |
--------------------------------------------------------------------------------
/test/data/app/run/api/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:4.3
2 | RUN mkdir -p /usr/src/app
3 | WORKDIR /usr/src/app
4 |
5 | COPY package.json /usr/src/app/
6 | RUN npm install
7 | RUN npm install coffee-script -g
8 | COPY src/ /usr/src/app/src
9 | COPY env/ /usr/src/app/env
10 | COPY bin/blurb-server /usr/src/app/bin/
11 | CMD [ "bin/blurb-server", "production" ]
12 |
13 | EXPOSE 80
14 |
--------------------------------------------------------------------------------
/test/data/app/run/api/config.yaml:
--------------------------------------------------------------------------------
1 | style: docker
2 | count: 2
3 | discovery:
4 | - http
5 |
--------------------------------------------------------------------------------
/test/data/clusters/violent-aftermath.yaml:
--------------------------------------------------------------------------------
1 | status: CREATE_COMPLETE
2 | vpcId: test-vpc-00
3 | subnetId: test-subnet-00
4 | az: us-west-1a
5 | zoneId: test-zone-00
6 | name: violent-aftermath
7 | region: us-west-1
8 | zone: a
9 |
--------------------------------------------------------------------------------
/test/data/expectations.yaml:
--------------------------------------------------------------------------------
1 | AWS:
2 |
3 | getRepository:
4 | commands: |-
5 | aws ecr describe-repositories --repository-name blurb9-api --region us-east-1
6 |
7 | createRepository:
8 | commands: |-
9 | aws ecr create-repository --repository-name blurb9-api --region us-east-1
10 | aws ecr set-repository-policy --repository-name blurb9-api --region us-east-1 --policy-text {"Version":"2008-10-17","Statement":[{"Sid":"Allow Any/All","Effect":"Allow","Principal":"*","Action":["ecr:*"]}]}
11 |
12 | setSecurityGroups:
13 | commands: |-
14 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=default
15 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=docker-machine
16 | aws ec2 describe-instances --filters Name=tag-value,Values=preventative-malpractice-01
17 | aws ec2 modify-instance-attribute --instance-id test-instance-00 --groups test-group-00 test-group-00
18 |
19 | getRegistryDomain:
20 | commands: |-
21 | aws ecr get-authorization-token --region us-east-1
22 |
23 | getELB:
24 | commands: |-
25 | aws elb describe-load-balancers --load-balancer-name violent-aftermath
26 |
27 | registerWithELB:
28 | commands: |-
29 | aws elb register-instances-with-load-balancer --load-balancer-name vodka-martini --instances test-instance-00
30 |
31 | createStack:
32 | commands: |-
33 | aws cloudformation create-stack --stack-name preventative-malpractice --template-body file://.json
34 |
35 | files: &vpc
36 | - |
37 | {"AWSTemplateFormatVersion":"2010-09-09","Description":"AWS CloudFormation Template for use with p42.","Parameters":{"SSHLocation":{"Description":"Lockdown SSH access to the bastion host (default can be accessed from anywhere)","Type":"String","MinLength":"9","MaxLength":"18","Default":"0.0.0.0/0","AllowedPattern":"(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})","ConstraintDescription":"must be a valid CIDR range of the form x.x.x.x/x."}},"Mappings":{"SubnetConfig":{"VPC":{"CIDR":"10.0.0.0/16"},"Public":{"CIDR":"10.0.0.0/24"}}},"Resources":{"VPC":{"Type":"AWS::EC2::VPC","Properties":{"EnableDnsSupport":"true","EnableDnsHostnames":"true","CidrBlock":{"Fn::FindInMap":["SubnetConfig","VPC","CIDR"]},"Tags":[{"Key":"Application","Value":{"Ref":"AWS::StackName"}},{"Key":"Network","Value":"Public"}]}},"PublicSubnet":{"Type":"AWS::EC2::Subnet","Properties":{"VpcId":{"Ref":"VPC"},"CidrBlock":{"Fn::FindInMap":["SubnetConfig","Public","CIDR"]},"Tags":[{"Key":"Application","Value":{"Ref":"AWS::StackName"}},{"Key":"Network","Value":"Public"}]}},"InternetGateway":{"Type":"AWS::EC2::InternetGateway","Properties":{"Tags":[{"Key":"Application","Value":{"Ref":"AWS::StackName"}},{"Key":"Network","Value":"Public"}]}},"GatewayToInternet":{"Type":"AWS::EC2::VPCGatewayAttachment","Properties":{"VpcId":{"Ref":"VPC"},"InternetGatewayId":{"Ref":"InternetGateway"}}},"PublicRouteTable":{"Type":"AWS::EC2::RouteTable","Properties":{"VpcId":{"Ref":"VPC"},"Tags":[{"Key":"Application","Value":{"Ref":"AWS::StackName"}},{"Key":"Network","Value":"Public"}]}},"PublicRoute":{"Type":"AWS::EC2::Route","DependsOn":"GatewayToInternet","Properties":{"RouteTableId":{"Ref":"PublicRouteTable"},"DestinationCidrBlock":"0.0.0.0/0","GatewayId":{"Ref":"InternetGateway"}}},"PublicSubnetRouteTableAssociation":{"Type":"AWS::EC2::SubnetRouteTableAssociation","Properties":{"SubnetId":{"Ref":"PublicSubnet"},"RouteTableId":{"Ref":"PublicRouteTable"}}},"PublicNetworkAcl":{"Type":"AWS::EC2::NetworkAcl","Properties":{"VpcId":{"Ref":"VPC"},"Tags":[{"Key":"Application","Value":{"Ref":"AWS::StackName"}},{"Key":"Network","Value":"Public"}]}},"InboundHTTPPublicNetworkAclEntry":{"Type":"AWS::EC2::NetworkAclEntry","Properties":{"NetworkAclId":{"Ref":"PublicNetworkAcl"},"RuleNumber":"100","Protocol":"6","RuleAction":"allow","Egress":"false","CidrBlock":"0.0.0.0/0","PortRange":{"From":"80","To":"80"}}},"InboundHTTPSPublicNetworkAclEntry":{"Type":"AWS::EC2::NetworkAclEntry","Properties":{"NetworkAclId":{"Ref":"PublicNetworkAcl"},"RuleNumber":"101","Protocol":"6","RuleAction":"allow","Egress":"false","CidrBlock":"0.0.0.0/0","PortRange":{"From":"443","To":"443"}}},"InboundSSHPublicNetworkAclEntry":{"Type":"AWS::EC2::NetworkAclEntry","Properties":{"NetworkAclId":{"Ref":"PublicNetworkAcl"},"RuleNumber":"102","Protocol":"6","RuleAction":"allow","Egress":"false","CidrBlock":{"Ref":"SSHLocation"},"PortRange":{"From":"22","To":"22"}}},"InboundEphemeralPublicNetworkAclEntry":{"Type":"AWS::EC2::NetworkAclEntry","Properties":{"NetworkAclId":{"Ref":"PublicNetworkAcl"},"RuleNumber":"103","Protocol":"6","RuleAction":"allow","Egress":"false","CidrBlock":"0.0.0.0/0","PortRange":{"From":"1024","To":"65535"}}},"OutboundPublicNetworkAclEntry":{"Type":"AWS::EC2::NetworkAclEntry","Properties":{"NetworkAclId":{"Ref":"PublicNetworkAcl"},"RuleNumber":"100","Protocol":"6","RuleAction":"allow","Egress":"true","CidrBlock":"0.0.0.0/0","PortRange":{"From":"0","To":"65535"}}},"PublicSubnetNetworkAclAssociation":{"Type":"AWS::EC2::SubnetNetworkAclAssociation","Properties"
:{"SubnetId":{"Ref":"PublicSubnet"},"NetworkAclId":{"Ref":"PublicNetworkAcl"}}},"PrivateDNS":{"Type":"AWS::Route53::HostedZone","DependsOn":"VPC","Properties":{"Name":"name.internal.","VPCs":[{"VPCId":{"Ref":"VPC"},"VPCRegion":{"Ref":"AWS::Region"}}]}},"DHCPOptions":{"Type":"AWS::EC2::DHCPOptions","Properties":{"DomainName":"name.internal","DomainNameServers":["AmazonProvidedDNS"]}},"ELBSecurityGroup":{"Type":"AWS::EC2::SecurityGroup","Properties":{"GroupDescription":"Allow ELB to accept traffic from the public Web","VpcId":{"Ref":"VPC"},"SecurityGroupIngress":[{"IpProtocol":"tcp","FromPort":"80","ToPort":"80","CidrIp":"0.0.0.0/0"}]}},"ELB":{"Type":"AWS::ElasticLoadBalancing::LoadBalancer","DependsOn":"GatewayToInternet","Properties":{"LoadBalancerName":{"Ref":"AWS::StackName"},"SecurityGroups":[{"Ref":"ELBSecurityGroup"},{"Fn::GetAtt":["VPC","DefaultSecurityGroup"]}],"Listeners":[{"LoadBalancerPort":80,"Protocol":"HTTP","InstancePort":80}],"Subnets":[{"Ref":"PublicSubnet"}],"HealthCheck":{"HealthyThreshold":2,"Interval":30,"Target":"TCP:80","Timeout":5,"UnhealthyThreshold":2}}}},"Outputs":{"VPCId":{"Description":"VPCId of the newly created VPC","Value":{"Ref":"VPC"}},"PublicSubnet":{"Description":"SubnetId of the public subnet","Value":{"Ref":"PublicSubnet"}},"AvailabilityZone":{"Description":"Availability Zone of the public subnet","Value":{"Fn::GetAtt":["PublicSubnet","AvailabilityZone"]}},"HostedZoneId":{"Description":"HostedZoneId of the private HostedZone","Value":{"Ref":"PrivateDNS"}},"DefaultSecurityGroupId":{"Description":"Default security group ID for the VPC","Value":{"Fn::GetAtt":["VPC","DefaultSecurityGroup"]}}}}
38 |
39 | getStack:
40 | commands: |-
41 | aws cloudformation describe-stacks --stack-name preventative-malpractice
42 |
43 | removeStack:
44 | commands: |-
45 | aws cloudformation delete-stack --stack-name preventative-malpractice
46 |
47 | DNS:
48 | A:
49 | commands: |-
50 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
51 |
52 | files:
53 | - |
54 | {"Comment":"this is a test","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"blurb9-www-00.name.internal.","Type":"A","TTL":60,"ResourceRecords":[{"Value":"192.168.0.42"}]}}]}
55 |
56 | Alias:
57 | commands: |-
58 | aws elb describe-load-balancers --load-balancer-name violent-aftermath
59 | aws route53 list-hosted-zones-by-name --dns-name bar.com --max-items 1
60 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
61 |
62 | files:
63 | - |
64 | {"Comment":"this is a test","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"foo.bar.com.","Type":"A","AliasTarget":{"HostedZoneId":"test-zone-00","DNSName":"violent-aftermath","EvaluateTargetHealth":false}}}]}
65 |
66 | SRV:
67 | commands: |-
68 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
69 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
70 |
71 | files:
72 | - |
73 | {"Comment":"this is a test","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"_www._http.name.internal.","Type":"SRV","TTL":60,"ResourceRecords":[{"Value":"0 0 32768 www-00.name.internal"}]}}]}
74 | - |
75 | {"Comment":"this is a test","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"_www._http.name.internal.","Type":"SRV","TTL":60,"ResourceRecords":[{"Value":"0 0 32768 www-00.name.internal"}]}}]}
76 |
77 | Docker:
78 |
79 | env:
80 | commands: |-
81 | eval $(docker-machine env violent-aftermath-01)
82 |
83 | swarmEnv:
84 | commands: |-
85 | eval $(docker-machine env --swarm violent-aftermath-00)
86 |
87 | login:
88 | commands: |-
89 | eval $(aws ecr get-login --region us-east-1)
90 |
91 | build:
92 | commands: |-
93 | docker build -t 123456789.registry.test.com/blurb9-api -f run/api/Dockerfile .
94 |
95 | push:
96 | commands: |-
97 | docker push 123456789.registry.test.com/blurb9-api
98 |
99 | run:
100 | commands: |-
101 | docker run -P --name api-00 --restart always -e AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" -e AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" -e AWS_DEFAULT_REGION="$(aws configure get region)" -d blurb9-api
102 |
103 | inspect:
104 | commands: |-
105 | docker inspect api-00
106 |
107 | listContainers:
108 | commands: |-
109 | docker ps --filter name=violent-aftermath --format {{ .ID }}
110 |
111 | createInstance:
112 | commands: |-
113 | docker-machine create violent-aftermath-00 --driver amazonec2 --amazonec2-region us-west-1 --amazonec2-vpc-id test-vpc-00 --amazonec2-subnet-id test-subnet-00 --amazonec2-zone a
114 |
115 | createSwarmInstance:
116 | commands: |-
117 | docker-machine create violent-aftermath-00 --driver amazonec2 --amazonec2-region us-west-1 --amazonec2-vpc-id test-vpc-00 --amazonec2-subnet-id test-subnet-00 --amazonec2-zone a --swarm --swarm-discovery nodes://10.0.[0:255].[0:255]:2375
118 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=default
119 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=docker-machine
120 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-00
121 | aws ec2 modify-instance-attribute --instance-id test-instance-00 --groups test-group-00 test-group-00
122 |
123 | listSwarmNodes:
124 | commands: |-
125 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
126 |
127 | findAvailableNames:
128 | commands: |-
129 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
130 |
131 | removeSwarmNodes:
132 | commands: |-
133 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
134 | docker-machine stop violent-aftermath-00 violent-aftermath-01 violent-aftermath-02
135 | docker-machine rm violent-aftermath-00 violent-aftermath-01 violent-aftermath-02
136 |
137 | CLI:
138 |
139 | cluster:
140 | create:
141 | commands: |-
142 | aws cloudformation create-stack --stack-name violent-aftermath --template-body file://.json
143 | aws cloudformation describe-stacks --stack-name violent-aftermath
144 | docker-machine create violent-aftermath-00 --driver amazonec2 --amazonec2-region us-west-1 --amazonec2-vpc-id test-vpc-00 --amazonec2-subnet-id test-subnet-00 --amazonec2-zone a --swarm --swarm-discovery nodes://10.0.[0:255].[0:255]:2375 --swarm-master
145 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=default
146 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=docker-machine
147 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-00
148 | aws ec2 modify-instance-attribute --instance-id test-instance-00 --groups test-group-00 test-group-00
149 |
150 | files: *vpc
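151 | # *vpc reuses the CloudFormation template body captured above under
152 | # AWS.createStack's "files: &vpc" anchor.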
151 |
152 | expand:
153 | commands: |-
154 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
155 | docker-machine create violent-aftermath-03 --driver amazonec2 --amazonec2-region us-west-1 --amazonec2-vpc-id test-vpc-00 --amazonec2-subnet-id test-subnet-00 --amazonec2-zone a --swarm --swarm-discovery nodes://10.0.[0:255].[0:255]:2375
156 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=default
157 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=docker-machine
158 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-03
159 | aws ec2 modify-instance-attribute --instance-id test-instance-00 --groups test-group-00 test-group-00
160 | docker-machine create violent-aftermath-04 --driver amazonec2 --amazonec2-region us-west-1 --amazonec2-vpc-id test-vpc-00 --amazonec2-subnet-id test-subnet-00 --amazonec2-zone a --swarm --swarm-discovery nodes://10.0.[0:255].[0:255]:2375
161 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=default
162 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=docker-machine
163 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-04
164 | aws ec2 modify-instance-attribute --instance-id test-instance-00 --groups test-group-00 test-group-00
165 | docker-machine create violent-aftermath-05 --driver amazonec2 --amazonec2-region us-west-1 --amazonec2-vpc-id test-vpc-00 --amazonec2-subnet-id test-subnet-00 --amazonec2-zone a --swarm --swarm-discovery nodes://10.0.[0:255].[0:255]:2375
166 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=default
167 | aws ec2 describe-security-groups --filters Name=vpc-id,Values=test-vpc-00 Name=group-name,Values=docker-machine
168 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-05
169 | aws ec2 modify-instance-attribute --instance-id test-instance-00 --groups test-group-00 test-group-00
170 |
171 | rm:
172 | commands: |-
173 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
174 | docker-machine stop violent-aftermath-00 violent-aftermath-01 violent-aftermath-02
175 | docker-machine rm violent-aftermath-00 violent-aftermath-01 violent-aftermath-02
176 | aws cloudformation delete-stack --stack-name violent-aftermath
177 |
178 | ls:
179 | commands: ''
180 |
181 | ps:
182 | commands: |-
183 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
184 |
185 | env:
186 | commands: |-
187 | eval $(docker-machine env --swarm violent-aftermath-00)
188 |
189 | get:
190 | commands: ''
191 |
192 | start:
193 | commands: |-
194 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
195 | eval $(docker-machine env violent-aftermath-00)
196 | eval $(aws ecr get-login --region us-east-1)
197 | docker pull 123456789.registry.test.com/blurb9-api
198 | eval $(docker-machine env violent-aftermath-01)
199 | eval $(aws ecr get-login --region us-east-1)
200 | docker pull 123456789.registry.test.com/blurb9-api
201 | eval $(docker-machine env violent-aftermath-02)
202 | eval $(aws ecr get-login --region us-east-1)
203 | docker pull 123456789.registry.test.com/blurb9-api
204 | eval $(docker-machine env --swarm violent-aftermath-00)
205 | eval $(aws ecr get-login --region us-east-1)
206 | docker run -P --name blurb9-api-01 --restart always -e AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" -e AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" -e AWS_DEFAULT_REGION="$(aws configure get region)" -d blurb9-api
207 | docker inspect blurb9-api-01
208 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-01
209 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
210 | docker run -P --name blurb9-api-02 --restart always -e AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" -e AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" -e AWS_DEFAULT_REGION="$(aws configure get region)" -d blurb9-api
211 | docker inspect blurb9-api-02
212 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-01
213 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
214 |
215 | files:
216 | - |
217 | {"Comment":"","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"violent-aftermath-01.name.internal.","Type":"A","TTL":60,"ResourceRecords":[{"Value":"192.168.0.42"}]}}]}
218 | - |
219 | {"Comment":"","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"violent-aftermath-01.name.internal.","Type":"A","TTL":60,"ResourceRecords":[{"Value":"192.168.0.42"}]}}]}
220 |
221 | build:
222 | commands: |-
223 | eval $(docker-machine env --swarm violent-aftermath-00)
224 | eval $(aws ecr get-login --region us-east-1)
225 | aws ecr describe-repositories --repository-name blurb9-api --region us-east-1
226 | docker build -t 123456789.registry.test.com/blurb9-api -f run/api/Dockerfile .
227 | docker push 123456789.registry.test.com/blurb9-api
228 |
229 | run:
230 | commands: |-
231 | eval $(docker-machine env --swarm violent-aftermath-00)
232 | eval $(aws ecr get-login --region us-east-1)
233 | aws ecr describe-repositories --repository-name blurb9-api --region us-east-1
234 | docker build -t 123456789.registry.test.com/blurb9-api -f run/api/Dockerfile .
235 | docker push 123456789.registry.test.com/blurb9-api
236 | docker-machine ls --format '{{ .Name }}' --filter name=violent-aftermath
237 | eval $(docker-machine env violent-aftermath-00)
238 | eval $(aws ecr get-login --region us-east-1)
239 | docker pull 123456789.registry.test.com/blurb9-api
240 | eval $(docker-machine env violent-aftermath-01)
241 | eval $(aws ecr get-login --region us-east-1)
242 | docker pull 123456789.registry.test.com/blurb9-api
243 | eval $(docker-machine env violent-aftermath-02)
244 | eval $(aws ecr get-login --region us-east-1)
245 | docker pull 123456789.registry.test.com/blurb9-api
246 | eval $(docker-machine env --swarm violent-aftermath-00)
247 | eval $(aws ecr get-login --region us-east-1)
248 | docker run -P --name blurb9-api-01 --restart always -e AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" -e AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" -e AWS_DEFAULT_REGION="$(aws configure get region)" -d blurb9-api
249 | docker inspect blurb9-api-01
250 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-01
251 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
252 | docker run -P --name blurb9-api-02 --restart always -e AWS_ACCESS_KEY_ID="$(aws configure get aws_access_key_id)" -e AWS_SECRET_ACCESS_KEY="$(aws configure get aws_secret_access_key)" -e AWS_DEFAULT_REGION="$(aws configure get region)" -d blurb9-api
253 | docker inspect blurb9-api-02
254 | aws ec2 describe-instances --filters Name=tag-value,Values=violent-aftermath-01
255 | aws route53 change-resource-record-sets --hosted-zone-id test-zone-00 --change-batch file://.json
256 |
257 | files:
258 | - |
259 | {"Comment":"","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"violent-aftermath-01.name.internal.","Type":"A","TTL":60,"ResourceRecords":[{"Value":"192.168.0.42"}]}}]}
260 | - |
261 | {"Comment":"","Changes":[{"Action":"UPSERT","ResourceRecordSet":{"Name":"violent-aftermath-01.name.internal.","Type":"A","TTL":60,"ResourceRecords":[{"Value":"192.168.0.42"}]}}]}
262 |
--------------------------------------------------------------------------------
/test/dns-helpers.coffee:
--------------------------------------------------------------------------------
1 | assert = require "assert"
2 | {async} = require "fairmont"
3 | {read} = require "panda-rw"
4 | {command} = require "./helpers"
5 |
6 | module.exports = (context) ->
7 |
8 | context.test "DNS", (context) ->
9 |
10 | DNSHelpers = yield require "../src/helpers/dns"
11 | shared = yield require "../src/shared"
12 | cluster = yield read shared.test.clusters["violent-aftermath"]
13 |
14 | # TODO: check generated update JSON files
15 |
16 | command "DNS.A", context, ->
17 | DNSHelpers.a
18 | cluster: cluster
19 | name: "blurb9-www-00"
20 | ip: "192.168.0.42"
21 | comment: "this is a test"
22 |
23 | command "DNS.Alias", context, ->
24 | DNSHelpers.alias
25 | cluster: cluster
26 | subdomain: "foo"
27 | domain: "bar.com"
28 | comment: "this is a test"
29 |
30 | command "DNS.SRV", context, ->
31 | DNSHelpers.srv
32 | cluster: cluster
33 | protocol: "http"
34 | subdomain: "www"
35 | targets:
36 | host: "www-00"
37 | port: "32768"
38 | comment: "this is a test"
39 |
--------------------------------------------------------------------------------
/test/docker-helpers.coffee:
--------------------------------------------------------------------------------
1 | assert = require "assert"
2 | {join} = require "path"
3 | {async, isArray} = require "fairmont"
4 | {command} = require "./helpers"
5 |
6 | module.exports = (context) ->
7 |
8 | context.test "Docker", (context) ->
9 |
10 | shared = yield require "../src/shared"
11 | DockerHelpers = yield require "../src/helpers/docker"
12 |
13 | command "Docker.env", context, ->
14 | DockerHelpers.env "violent-aftermath-01"
15 |
16 | command "Docker.swarmEnv", context, ->
17 | DockerHelpers.swarmEnv "violent-aftermath"
18 |
19 | command "Docker.login", context, ->
20 | DockerHelpers.login()
21 |
22 | command "Docker.build", context, ->
23 | DockerHelpers.build
24 | registry: '123456789.registry.test.com'
25 | tag: 'blurb9-api'
26 | mixin:
27 | name: 'api'
28 | path: join shared.run, "api"
29 |
30 | command "Docker.push", context, ->
31 | DockerHelpers.push
32 | registry: '123456789.registry.test.com'
33 | tag: 'blurb9-api'
34 |
35 | command "Docker.run", context, ->
36 | DockerHelpers.run
37 | name: "api-00"
38 | tag: "blurb9-api"
39 | options: "-P"
40 |
41 | command "Docker.inspect", context, async ->
42 | {name, ip, port} = yield DockerHelpers.inspect "api-00"
43 |
44 | assert name?
45 | assert ip?
46 | assert port?
47 |
48 | command "Docker.listContainers", context, ->
49 | DockerHelpers.listContainers "violent-aftermath"
50 |
51 |
52 | command "Docker.createInstance", context, ->
53 | DockerHelpers.createInstance
54 | cluster:
55 | vpcId: "test-vpc-00"
56 | subnetId: "test-subnet-00"
57 | region: "us-west-1"
58 | zone: "a"
59 | name: "violent-aftermath-00"
60 |
61 | command "Docker.createSwarmInstance", context, ->
62 | DockerHelpers.createSwarmInstance
63 | cluster:
64 | vpcId: "test-vpc-00"
65 | subnetId: "test-subnet-00"
66 | region: "us-west-1"
67 | zone: "a"
68 | name: "violent-aftermath-00"
69 |
70 | command "Docker.listSwarmNodes", context, async ->
71 | nodes = yield DockerHelpers.listSwarmNodes "violent-aftermath"
72 | assert isArray nodes
73 |
74 | command "Docker.findAvailableNames", context, async ->
75 | [name] = yield DockerHelpers.findAvailableNames "violent-aftermath", 1
76 | assert.equal name, "violent-aftermath-03"
77 |
78 | command "Docker.removeSwarmNodes", context, ->
79 | DockerHelpers.removeSwarmNodes "violent-aftermath"
80 |
--------------------------------------------------------------------------------
/test/foundation.coffee:
--------------------------------------------------------------------------------
1 | assert = require "assert"
2 | Path = require "path"
3 | {async, isDirectory, read} = require "fairmont"
4 | {synchronize} = require "./helpers"
5 |
6 | # These tests mostly just make sure all the underlying mechanics
7 | # of the app work. These include:
8 | #
9 | # - the shared configuration
10 | # - tmp file generation
11 | # - logging and logger helpers
12 | # - running shell commands
13 | #
14 | # These are by no means exhaustive, since that would in some cases
15 | # be pretty difficult. (Ex: verifying that the tmp files are cleaned
16 | # up on process exit.) It's more of a smoke test.
17 | #
18 | # They're also something of a scaffolding. Consider removing these
19 | # tests later once the app is stable. They don't add much, if anything,
20 | # to the coverage and are thus mostly useful for debugging.
21 |
22 | module.exports = (context) ->
23 |
24 | context.test "foundation", (context) ->
25 |
26 | context.test "share", ->
27 | shared = yield require "../src/shared"
28 | assert shared.test.expectations?
29 |
30 | context.test "tmp", ->
31 | Tmp = require "../src/tmp"
32 | {dir, base} = Path.parse (yield Tmp.file "test.txt")
33 | assert.equal base, "test.txt"
34 | assert.equal true, (yield isDirectory dir)
35 |
36 | context.test "logger", (context) ->
37 |
38 | Logger = yield require "../src/logger"
39 |
40 | context.test "memory", ->
41 | logger = Logger.Memory.create()
42 | {info} = Logger.helpers logger
43 | info "this is a test"
44 | info "this is not a test"
45 | {content} = logger
46 | assert.equal content.toString(), "this is a test,this is not a test"
47 |
48 | context.test "file", ->
49 | logger = yield Logger.TmpFile.create name: "test"
50 | {info} = Logger.helpers logger
51 | yield info "this is a test"
52 | yield info "this is not a test"
53 | content = yield read logger.path
54 | assert.equal content, "this is a test\nthis is not a test\n"
55 |
56 | context.test "stream"
57 | # context.test "stream", ->
58 | # logger = Logger.Stream.create stream: process.stdout
59 | # {info} = Logger.helpers logger
60 | # yield info "this is a test"
61 | # yield info "this is not a test"
62 |
63 | context.test "composite"
64 | # context.test "composite", ->
65 | # stdout = yield Logger.Stream.create stream: process.stdout
66 | # tmpfile = yield Logger.TmpFile.create name: "test"
67 | # logger = Logger.Composite.create loggers: [ stdout, tmpfile ]
68 | # {info} = Logger.helpers logger
69 | # info "this is a test"
70 | # info "this is not a test"
71 |
72 |
73 | context.test "message logger"
74 |
75 | context.test "shell subprocess",# ->
76 | # createShell = require "../src/sh"
77 | # shell = createShell process.stdout, process.stderr
78 | # yield shell.run "ls"
79 | # shell.close()
80 |
81 | context.test "shell runner", ->
82 | shared = yield require "../src/shared"
83 | yield synchronize async ->
84 | shared.settings.dryRun = true
85 | run = yield require "../src/run"
86 | {zoneId} = yield run "aws.route53.list-hosted-zones-by-name",
87 | domain: "fubar.com"
88 | assert.equal zoneId, "test-zone-00"
89 |
--------------------------------------------------------------------------------
/test/helpers.coffee:
--------------------------------------------------------------------------------
1 | assert = require "assert"
2 | {promise} = require "when"
3 | F = require "fairmont"
4 | {async, sleep, zip, pair} = F
5 | {read} = require "panda-rw"
6 | messages = require "panda-messages"
7 | Tmp = require "../src/tmp"
8 | Logger = require "../src/logger"
9 |
10 | # This ensures that when we're logging the commands for test A,
11 | # we don't interfere with the commands for test B.
12 | synchronize = do (waiting=[]) ->
13 |
14 | # Main run loop. We wait one second before we start processing
15 | # functions in the wait queue to ensure the tests are all queued.
16 | do async ->
17 | yield sleep 1000
18 | yield g() while g = waiting.shift()
19 |
20 | # Queuing function for test functions. We return a promise
21 | # the test can yield on, but all we do is queue a wrapper fn.
22 | # The wrapper propagates the result back here from the run loop,
23 | # resolving the promise the test code is waiting on.
24 | (f) ->
25 | promise (resolve, reject) ->
26 | waiting.push async ->
27 | try
28 | # Important to yield here so that the run loop will wait
29 | # until f completes before running the next fn.
30 | resolve yield f()
31 | catch error
32 | reject error
33 |
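34 | # Usage sketch: a test wraps its async body so queued functions run
35 | # one at a time; this mirrors `command` below and the "shell runner"
36 | # test in test/foundation.coffee. (`doWork` is a hypothetical body.)
37 | #
38 | #   yield synchronize async ->
39 | #     yield doWork()
40 |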
34 | # Clean up any variability in the command logging so we can
35 | # reliably compare to expectations
36 | readFiles = async (s) ->
37 | if (paths = s.match /file:\/\/\/[\w\/\-\.\_]+/g)?
38 | for path in paths
39 | JSON.stringify JSON.parse (yield F.read (path.replace /file:\/\//g, ""))
40 |
41 |
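42 | # For example, a command logged as (hypothetical path)
43 | #   aws ... --change-batch file:///tmp/p42-abc/batch.json
44 | # sanitizes to "aws ... --change-batch file://.json": the dot isn't
45 | # in the character class, so the extension survives. That's why
46 | # expectations.yaml records "file://.json".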
42 | sanitize = (s) ->
43 | s.replace /file:\/+[\w\/\-\_]+/g, "file://"
44 |
45 | # Run a test, comparing the command log to an expected command log
46 | command = (name, context, f) ->
47 |
48 | if !f?
49 |
50 | context.test name
51 |
52 | else
53 |
54 | # Define a test...
55 | context.test name, ->
56 |
57 | # Synchronize the test...
58 | yield synchronize async ->
59 |
60 | shared = yield require "../src/shared"
61 |
62 | # Redirect command logging so we can inspect it
63 | logger = Logger.Memory.create()
64 | shared.loggers.command._self.loggers.stderr = logger
65 |
66 | # silence output
67 | # TODO: come up with a nicer interface for this
68 | shared.loggers.status._self.loggers.stderr.level = "emerg"
69 |
70 | # Actually run the test, and wait for the results
71 | yield f()
72 |
73 | # Read the log and sanitize the results
74 | # TODO: ... and this
75 | actual = logger.content.join("\n")
76 | contents = yield readFiles actual
77 |
78 | # Get the expectations for this test
79 | {lookup} = yield messages shared.test.expectations
80 | expected = lookup name
81 |
82 | # Compare the expectation with the actual results
83 | # We catch failures and log them to the console in
84 | # detail to make it easier to debug.
85 |
86 | # TODO: maybe use JSDiff?
87 | # https://github.com/kpdecker/jsdiff
88 | try
89 | assert (sanitize actual) == expected.commands
90 | catch error
91 | _actual = sanitize actual
92 |
93 | console.error """
94 | [ #{name} ]
95 |
96 | ACTUAL
97 | #{_actual}
98 |
99 | EXPECTED
100 | #{expected.commands}
101 |
102 | """
103 | # rethrow the error so the test fails
104 | throw error
105 |
106 | # now compare files
107 | try
108 | assert (!(contents?) && !(expected.files?)) ||
109 | (contents.length == expected.files?.length)
110 |
111 | if contents?
112 | for [actual, _expected] in (zip pair, contents, expected.files)
113 | assert.equal actual, _expected
114 |
115 | catch error
116 |
117 | console.error """
118 |
119 | [ #{name} - files ]
120 |
121 | ACTUAL
122 | #{contents}
123 |
124 | EXPECTED
125 | #{expected.files}
126 | """
127 |
128 | throw error
129 |
130 |
131 | module.exports = {command, synchronize}
132 |
--------------------------------------------------------------------------------
/test/index.coffee:
--------------------------------------------------------------------------------
1 | Amen = require "amen"
2 | foundation = require "./foundation"
3 | AWSHelpers = require "./aws-helpers"
4 | DNSHelpers = require "./dns-helpers"
5 | DockerHelpers = require "./docker-helpers"
6 | CLIHelpers = require "./cli-helpers"
7 |
8 | Amen.describe "p42", (context) ->
9 |
10 | foundation context
11 |
12 | context.test "helpers", (context) ->
13 |
14 | # Make sure the dryRun flag is set
15 | shared = yield require "../src/shared"
16 | shared.settings.dryRun = true
17 |
18 | AWSHelpers context
19 | DNSHelpers context
20 | DockerHelpers context
21 |
22 | CLIHelpers context
23 |
--------------------------------------------------------------------------------
/test/test-sh.coffee:
--------------------------------------------------------------------------------
1 | {async} = require "fairmont"
2 |
3 | createShell = require "../src/sh"
4 | shell = createShell process
5 |
6 | readline = require "readline"
7 | rl = readline.createInterface process.stdin, process.stdout
8 | rl.setPrompt ": "
9 |
10 | rl.prompt()
11 |
12 | rl.on "line", async (line) ->
13 | result = yield shell.run line
14 | # process.stdout.write result
15 | rl.prompt()
16 |
17 | rl.on "close", ->
18 | shell.close()
19 | process.exit 0
20 |
--------------------------------------------------------------------------------