├── .cingi.yaml
├── .gitignore
├── .travis.yml
├── LICENSE
├── Makefile
├── README.md
├── c-ing-i-logo.svg
├── config
└── config.exs
├── lib
├── cingi.ex
└── cingi
│ ├── branch.ex
│ ├── cli.ex
│ ├── commander.ex
│ ├── field_agent.ex
│ ├── headquarters.ex
│ ├── mission.ex
│ ├── mission_report.ex
│ └── outpost.ex
├── mix.exs
├── mix.lock
├── priv
└── bin
│ └── wrapper.sh
└── test
├── bash_scripts
└── tmpdir.sh
├── cingi
├── branch_test.exs
├── extends_file_plan_test.exs
├── extends_template_plan_test.exs
├── fail_fast_plan_test.exs
├── field_agent_test.exs
├── headquarters_test.exs
├── mission_plan_test.exs
├── mission_report_test.exs
├── mission_test.exs
├── outpost_plan_test.exs
├── outpost_test.exs
├── when_plan_test.exs
└── wrapper_test.exs
├── cingi_test.exs
├── distributed_env_test.exs
├── gitclone.yaml
├── gitclone_cingi.yaml
├── helper_modules_test.exs
├── mission_plans
├── example.yaml
├── exits.yaml
├── extends
│ ├── file.yaml
│ ├── file_1.yaml
│ ├── file_2.yaml
│ └── template.yaml
├── fail_fast.yaml
├── inputs
│ ├── parallel.yaml
│ └── sequential.yaml
├── nested.yaml
├── outposts
│ ├── env_and_dir.yaml
│ ├── multinode.yaml
│ ├── setup.yaml
│ ├── setup_fail.yaml
│ ├── simple.yaml
│ └── teardown.yaml
├── outputs.yaml
└── when.yaml
├── mockgenserver_test.exs
└── test_helper.exs
/.cingi.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - echo "starting cingi tests"
3 | - outpost:
4 | setup:
5 | - mix deps.get
6 | - make build-cli
7 | - mix test --only nonexistenttag # Only so that dependencies are forced to compile
8 | missions:
9 | normal_test: echo "Running mix test for $UUID" && mix test
10 | distributed_test:
11 | outpost:
12 | setup: epmd -daemon
13 | missions: echo "Running mix distributed test for $UUID" && mix test --only distributed
14 | cli:
15 | outpost:
16 | setup: epmd -daemon
17 | missions:
18 | - echo "Running cli tests for $UUID"
19 | - ./cingi --file test/mission_plans/example.yaml
20 | - make two-cli FILE="--file test/mission_plans/example.yaml"
21 | - make three-cli FILE="--file test/mission_plans/example.yaml"
22 | - echo "cingi tests successful"
23 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.swo
2 | *.swp
3 |
4 | # The directory Mix will write compiled artifacts to.
5 | /_build/
6 |
7 | # If you run "mix test --cover", coverage assets end up here.
8 | /cover/
9 |
10 | # The directory Mix downloads your dependencies sources to.
11 | /deps/
12 |
13 | # Where 3rd-party dependencies like ExDoc output generated docs.
14 | /doc/
15 |
16 | # Ignore .fetch files in case you like to edit your project deps locally.
17 | /.fetch
18 |
19 | # If the VM crashes, it generates a dump, let's ignore it too.
20 | erl_crash.dump
21 |
22 | # Also ignore archive artifacts (built via "mix archive.build").
23 | *.ez
24 |
25 | /cingi
26 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: elixir
2 | elixir:
3 | - 1.5.1
4 | before_install:
5 | - sudo apt-get -qq update
6 | - sudo apt-get install nmap
7 | install:
8 | - mix local.rebar --force # for Elixir 1.3.0 and up
9 | - mix local.hex --force
10 | - mix deps.get
11 | - mix escript.build
12 | script:
13 | - ./cingi --file .cingi.yaml
14 | branches:
15 | only:
16 | - master
17 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Ramon Sandoval
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | MIN_BRANCHES = 2
2 | BRANCH_NAME = two
3 | PRINT_BRANCH_OUTPUT =
4 | FILE = --file test/gitclone_cingi.yaml
5 | FORCE:
6 |
7 | deps: FORCE
8 | mix deps.get
9 |
10 | publish: FORCE
11 | mix hex.publish
12 |
13 | download: FORCE
14 | mix escript.install hex cingi
15 |
16 | test: FORCE
17 | mix test
18 | make test-distributed
19 |
20 | test-distributed: FORCE
21 | epmd -daemon
22 | mix test --only distributed
23 | make kill-all-epmd
24 |
25 | build-cli: FORCE
26 | mix escript.build
27 |
28 | epmd-daemon: FORCE
29 | epmd -daemon
30 |
31 | test-cli: build-cli
32 | ./cingi $(FILE); echo "exited with $$?"
33 |
34 | test-two-cli: build-cli epmd-daemon
35 | make two-cli
36 | make kill-all-epmd
37 |
38 | test-three-cli: build-cli epmd-daemon
39 | make three-cli
40 | make kill-all-epmd
41 |
42 | test-hq-cli: build-cli epmd-daemon
43 | make hq-cli
44 |
45 | test-branch-cli: build-cli epmd-daemon
46 | make branch-cli
47 |
48 | test-submit-file: build-cli epmd-daemon
49 | ./cingi $(FILE) --connectto one@localhost --sname file@localhost --cookie test
50 |
51 | test-close: build-cli epmd-daemon
52 | ./cingi --closehq --connectto one@localhost --sname close@localhost --cookie test
53 |
54 | hq-cli:
55 | ./cingi $(FILE) --minbranches $(MIN_BRANCHES) --sname one@localhost --cookie test $(if $(PRINT_BRANCH_OUTPUT), "--printbranchoutput")
56 |
57 | branch-cli:
58 | ./cingi --connectto one@localhost --sname $(BRANCH_NAME)@localhost --cookie test $(if $(PRINT_BRANCH_OUTPUT), "--printbranchoutput")
59 |
60 | two-cli:
61 | make branch-cli &
62 | make hq-cli
63 |
64 | three-cli:
65 | make branch-cli &
66 | make branch-cli BRANCH_NAME=three &
67 | make hq-cli MIN_BRANCHES=3
68 |
69 | kill-all-epmd: FORCE
70 | for pid in $$(ps -ef | grep -v "grep" | grep "epmd -daemon" | awk '{print $$2}'); do kill -9 $$pid; done
71 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # CingI [](https://travis-ci.org/Rhathe/CingI)
2 |
3 | 
4 |
5 |
6 | Continuous-ing Integration
7 |
8 | CingI is currently a distributed task ("mission") runner.
9 | It introduces the concepts of "Missions", which can be considered both pipelines and tasks.
10 | They either run "Submissions" or a basic bash command. Missions with submissions pipe the output
11 | of one submission to another if run sequentially, but submissions can also be run in parallel.
12 |
13 | It can be used as a simple command line task runner locally or set up to
14 | run missions spread out among different machines (A main "Headquarters" and several "Branch" nodes).
15 | CingI uses yaml files ("Mission Plans") to define and execute missions.
16 |
17 | Future work is to build a CI server on top of its core to put the
18 | "Continuous Integration" in "Continuous-ing Integration"... or CingI.
19 |
20 |
21 | ## Installation
22 |
23 | CingI is used via the command line. The easiest way to install is through mix.
24 |
25 |
26 | ```bash
27 | $ mix escript.install hex cingi
28 | ```
29 |
30 | Or you can build from source
31 |
32 |
33 | ```bash
34 | $ mix deps.get
35 | $ mix escript.build
36 | ```
37 |
38 | ## Command Line Examples
39 |
40 | ### Local Task Running
41 |
42 | You can run CingI as a local task runner by just passing a valid yaml file.
43 |
44 | ```bash
45 | $ cingi --file example.yaml
46 | ```
47 |
48 | ### Distributed Task Running
49 |
50 | You can also run it as a distributed task runner for a valid yaml file, stopping when the given file is finished.
51 | (NOTE: You may need to start the epmd daemon on each machine so they can connect by running epmd -daemon)
52 |
53 | 1. Start the Headquarters Branch node, waiting for 2 other branches to connect
54 |
55 | ```bash
56 | $ cingi --file example.yaml --minbranches 3 --name one@FIRST_IP --cookie test
57 | ```
58 |
59 | 2. Start the second branch node in a different machine and connect to the first node, having a different name but the same cookie
60 |
61 | ```bash
62 | $ cingi --connectto one@FIRST_IP --name two@SECOND_IP --cookie test
63 | ```
64 |
65 | 3. Start the third branch node in a third machine
66 |
67 | ```bash
68 | $ cingi --connectto one@FIRST_IP --name three@THIRD_IP --cookie test
69 | ```
70 |
71 | If you want to leave them constantly running instead:
72 |
73 | 1. Don't pass a file to the Headquarters node.
74 |
75 | ```bash
76 | $ cingi --minbranches 3 --name one@FIRST_IP --cookie test
77 | ```
78 |
79 | 2. Submit files to the Headquarters node.
80 |
81 | ```bash
82 | $ cingi --file example.yaml --connectto one@FIRST_IP --name file@ANY_IP --cookie test
83 | ```
84 |
85 | 3. You can stop the task runner by submitting a stop command.
86 |
87 | ```bash
88 | $ cingi --closehq --connectto one@FIRST_IP --name close@ANY_IP --cookie test
89 | ```
90 |
91 |
92 | ## Mission Plans
93 |
94 | A mission plan is a yaml file that defines a single mission.
95 |
96 | A mission can be a bash command:
97 |
98 | ```yaml
99 | echo "This line is a valid mission plan"
100 | ```
101 |
102 | Or a map to configure the mission:
103 |
104 | ```yaml
105 | name: Some mission
106 | missions: echo "This map is a valid mission plan"
107 | ```
108 |
109 | ### Sequential/Parallel Missions
110 |
111 | Although a mission plan is a single mission, all missions either run
112 | a single bash command, or are composed of other smaller missions, or submissions.
113 |
114 | You can do a sequential list of submissions, run one after the other, with one mission encompassing them all:
115 |
116 | ```yaml
117 | - echo "This list"
118 | - echo "is a valid"
119 | - echo "mission plan"
120 | ```
121 |
122 | ```yaml
123 | missions:
124 | - echo "This list"
125 | - echo "is also a valid"
126 | - echo "mission plan"
127 | ```
128 |
129 | Or a parallel map of submissions, which are all run at the same time
130 | (NOTE: parallel missions can only be defined under the missions key):
131 |
132 | ```yaml
133 | missions:
134 | one: echo "This map"
135 | two: echo "is also a valid"
136 | three: echo "mission plan"
137 | ```
138 |
139 | Submissions are just missions, so they too can have submissions of their own:
140 |
141 | ```yaml
142 | name: Top Missions
143 | missions:
144 |   one: echo "Missions can be just a bash command"
145 | two:
146 | - echo "Missions"
147 | - - echo "can be"
148 | - echo "a list"
149 | - name: Inside List
150 | missions: echo "of bash commands"
151 | three:
152 | name: Sub Parallel Group
153 | missions:
154 | threeone: echo "Or another map of bash commands"
155 | threetwo: echo "These are in parallel, so they may be executed out of order"
156 | ```
157 |
158 |
159 | #### Failing Fast
160 |
161 | When a submission fails in a sequential list of submissions,
162 | no further submissions of the mission will run
163 | and the exit code will propagate up to the supermission.
164 |
165 | ```yaml
166 | - echo "will run"
167 | - exit 5
168 | - echo "won't run"
169 | ```
170 |
171 | However, setting `fail_fast: false` will keep running submissions,
172 | and the exit code of the mission will be the last submission's exit code.
173 |
174 | ```yaml
175 | fail_fast: false
176 | missions:
177 | - echo "will run"
178 | - exit 5
179 | - echo "will still run"
180 | ```
181 |
182 | On the other hand, by default, a mission will wait for all parallel submissions
183 | to finish, and will use the largest exit code from its submissions.
184 |
185 | ```yaml
186 | missions:
187 | one: sleep 2; echo "will run"
188 | two: sleep 1; exit 5
189 | three: sleep 2; echo "will also run"
190 | ```
191 |
192 | However, setting `fail_fast: true` will kill all parallel submissions when one fails,
193 | and the exit code of the mission will still be the largest submission's exit code.
194 |
195 | ```yaml
196 | fail_fast: true
197 | missions:
198 | one: echo "will run"
199 | two: sleep 1; exit 5
200 | three: sleep 2; echo "will not run"
201 | ```
202 |
203 |
204 | #### Inputs and Outputs
205 |
206 | A list of sequential submissions, by default, will pipe their output to the next submission.
207 |
208 | ```yaml
209 | - echo "mission input"
210 | - read INPUT; echo "input is ($INPUT)" # will print "input is (mission input)"
211 | ```
212 |
213 | If a mission is composed of sequential submissions, the first submission will use its supermission's input as its own input.
214 |
215 | ```yaml
216 | - echo "mission input"
217 | - missions:
218 | - read INPUT; echo "first input is ($INPUT)" # will print "first input is (mission input)"
219 | - read INPUT; echo "second input is [$INPUT]" # will print "second input is [first input is (mission input)]"
220 | ```
221 |
222 | If a mission is composed of parallel submissions, all submissions will use its supermission's input as its own input.
223 |
224 | ```yaml
225 | - echo "mission input"
226 | - missions:
227 | one: read INPUT; echo "first input is ($INPUT)" # will print "first input is (mission input)"
228 | two: read INPUT; echo "second input is [$INPUT]" # will print "second input is [mission input]"
229 | ```
230 |
231 | A mission's output will be the output of its submissions
232 | (in order if they are sequential submissions,
233 | interleaved depending on execution time if they are parallel submissions).
234 |
235 | ```yaml
236 | - - echo one
237 | - echo two
238 | - echo three
239 |
240 | # will print:
241 | # input: one
242 | # input: two
243 | # input: three
244 | - "while read line; do echo \"input: $line\"; done"
245 | ```
246 |
247 | ```yaml
248 | - missions:
249 | one: echo one
250 | two: echo two
251 |
252 | # will print:
253 | # input: one
254 | # input: two
255 | # or print:
256 | # input: two
257 | # input: one
258 | - "while read line; do echo \"input: $line\"; done"
259 | ```
260 |
261 | You can filter the output based on index if sequential submissions or key if parallel submissions
262 | with a special "$OUT" string or list of "$OUT" strings in the `output` field.
263 | (NOTE: filter order doesn't matter in the list)
264 |
265 | ```yaml
266 | # output will be "two"
267 | - output: $OUT[2]
268 | missions:
269 | - echo one
270 | - echo two
271 | - echo three
272 |
273 | # output will be "one\nthree"
274 | - output:
275 | - $OUT[$LAST]
276 | - $OUT[0]
277 | missions:
278 | - echo one
279 | - echo two
280 | - echo three
281 |
282 | # output will be "one\ntwo" or "two\none"
283 | - output:
284 | - $OUT['one']
285 | - $OUT["two"]
286 | missions:
287 | one: echo one
288 | two: echo two
289 | three: echo three
290 | ```
291 |
292 | A submission can select its input by index or key if the previous mission is composed of submissions
293 | with a special "$IN" string or list of "$IN" strings in the `input` field.
294 | (NOTE: Unlike the `output` field, input order DOES matter)
295 |
296 | ```yaml
297 | - - echo one
298 | - echo two
299 | - echo three
300 | - missions:
301 |
302 | # will print "a: one"
303 | a:
304 | input: $IN[0]
305 | missions: "while read line; do echo \"a: $line\"; done"
306 |
307 | # will print "b: three\nb: two"
308 | b:
309 | input:
310 | - $IN[$LAST]
311 | - $IN[1]
312 | missions: "while read line; do echo \"b: $line\"; done"
313 | ```
314 |
315 | ```yaml
316 | - missions:
317 | one: echo one
318 | two: echo two
319 | three: echo three
320 | - missions:
321 |
322 | # will print "a: one"
323 | a:
324 | input: $IN['one']
325 | missions: "while read line; do echo \"a: $line\"; done"
326 |
327 | # will print "b: three\nb: two"
328 | b:
329 | input:
330 | - $IN["three"]
331 | - $IN['two']
332 | missions: "while read line; do echo \"b: $line\"; done"
333 | ```
334 |
335 | ### Outposts
336 |
337 | Since CingI can be run in a distributed manner, each Branch could have been run in different
338 | directories with different environment variables. It may be necessary to set up the environment
339 | to run the missions in.
340 |
341 |
342 | #### Environment Variables and Working Directory
343 |
344 | "Outposts" serve as a way to setup the environment in a branch before running a mission.
345 | They are defined in the `outpost` field, with further configuration using the
346 | `dir`, `env`, and `setup` fields. Outpost directories and environment variables are
347 | carried through all submissions of the mission where it's defined in,
348 | unless overridden by a submission's own outpost.
349 |
350 | ```yaml
351 | outpost:
352 | dir: /tmp
353 | env:
354 | ENV_1: one
355 | ENV_2: two
356 | missions:
357 | - pwd # Will print "/tmp"
358 | - outpost:
359 | dir: /tmp/tmp2
360 | env:
361 | ENV_2: another_two
362 | missions:
363 | - pwd # Will print "/tmp/tmp2"
364 | - echo "$ENV_1, $ENV_2" # Will print "one, another_two"
365 | - echo "$ENV_1, $ENV_2" # Will print "one, two"
366 | ```
367 |
368 |
369 | #### Setup
370 |
371 | You can also specify a `setup`. An outpost setup is essentially a mission that's run
372 | whenever an outpost needs to be setup for a mission. Setups and any parent setups are run only when
373 | a submission of that particular branch is running a bash command.
374 |
375 | ```yaml
376 | outpost:
377 | setup:
378 | - mkdir test
379 | - echo "{}"
380 | missions:
381 | - outpost:
382 | setup:
383 | - cat "echo testecho" > test/script.sh
384 | - echo "{}"
385 | missions:
386 | - bash test/script.sh # Will print "testecho"
387 | ```
388 |
389 | If you're wondering what the `echo "{}"` is for, it's because the `dir` and `env` fields
390 | can be configured using the last line of the `setup` output if the last line
391 | is a valid json string, and can be selected with the special "$SETUP" string.
392 |
393 | ```yaml
394 | outpost:
395 | dir: $SETUP['a']
396 | env:
397 | SOME_ENV: $SETUP['b']
398 | setup:
399 | - "echo \"{\\\"a\\\": \\\"/tmp\\\", \\\"b\\\": \\\"someval\\\"}\""
400 | missions:
401 | - pwd # Will print "/tmp"
402 | - echo "$SOME_ENV" # Will print someval
403 | ```
404 |
405 |
406 | #### Teardown
407 |
408 | If you want to clean up the effects of `setup` steps in a node, you can specify a `teardown`.
409 | Like a `setup`, a `teardown` is a mission that's run whenever the mission that created the outpost
410 | has finished. Teardowns run in every node that had that specific outpost, and run as missions
411 | in that outpost, so the `env` and `dir` values apply in the teardown step.
412 |
413 | ```yaml
414 | outpost:
415 | dir: $SETUP['a']
416 | setup:
417 | - mkdir /tmp/cingi_tmp_folder
418 | - "echo \"{\\\"a\\\": \\\"/tmp/cingi_tmp_folder\\\"}\""
419 | teardown:
420 | - pwd | xargs echo "teardown mission in" # Will print "teardown mission in /tmp/cingi_tmp_folder"
421 | - cd ../ && rmdir cingi_tmp_folder # Will go to tmp and rm the tmp directory we created
422 | missions:
423 | - pwd | xargs echo "main mission in" # Will print "main mission in /tmp/cingi_tmp_folder"
424 | ```
425 |
426 | ### When: Conditional Missions
427 |
428 | You can have missions run conditionally if run sequentially by setting the `when` field.
429 | You can conditionally run a mission based on `outputs`, `exit_codes`, and `success`.
430 | (NOTE: if you want to condition based on `exit_codes` or `success`,
431 | then make sure the supermission has `fail_fast: false`)
432 |
433 | ```yaml
434 | fail_fast: false
435 | missions:
436 | - echo test; exit 5;
437 | - missions:
438 | run1:
439 | when:
440 | - outputs: test
441 | missions: echo "runs with output 'test'"
442 | skipped1:
443 | when:
444 | - exit_codes: 4
445 | missions: echo "doesn't run since exit code is not 4"
446 | run2:
447 | when:
448 | - success: false
449 | missions: echo "runs with failure"
450 | ```
451 |
452 | The `when` field takes a list, so all the elements of the list need to pass.
453 |
454 | ```yaml
455 | fail_fast: false
456 | missions:
457 | - echo test; exit 5;
458 | - missions:
459 | runs:
460 | when:
461 | - outputs: test
462 | - exit_codes: 5
463 | missions: echo "runs with output 'test' and exit_code 5"
464 | skips:
465 | when:
466 | - outputs: test
467 | - exit_codes: 4
468 | missions: echo "doesn't run since exit code is still not 4"
469 | ```
470 |
471 | However, only one condition within the element of the list needs to pass for the entire element to pass.
472 |
473 | ```yaml
474 | fail_fast: false
475 | missions:
476 | - echo test; exit 5;
477 | - missions:
478 | runs1:
479 | when:
480 | - outputs:
481 | - test
482 | - nottest
483 | missions: echo "runs with output 'test'"
484 | runs2:
485 | when:
486 | - outputs: test
487 | exit_codes: 4
488 | missions: echo "runs this time because of extra condition outputs: test"
489 | ```
490 |
491 |
492 | ### Extends
493 |
494 |
495 | #### Extending a template
496 |
497 | To prevent duplication of code, you can define the field `mission_plan_templates` in a mission
498 | and extend the templates defined there in its submissions with the field `extends_template`.
499 | A mission will search for a template up the supermission hierarchy until it finds the matching template key.
500 |
501 | ```yaml
502 | mission_plan_templates:
503 | one:
504 | missions: echo one
505 | missions:
506 | - extends_template: one # Will print one
507 | - mission_plan_templates:
508 | one:
509 | missions: echo another_one
510 | missions:
511 | - extends_template: one # Will print another_one
512 | - missions:
513 | - extends_template: one # Will also print one
514 | ```
515 |
516 |
517 | #### Extending a file
518 |
519 | You can also define a mission plan in another file and extend it with the `extends_file` field.
520 | (NOTE: The directory used for the file is the same directory defined in the outpost)
521 |
522 | ```yaml
523 | missions:
524 | - extends_file: one.yaml # Will extend the file found in the directory cingi was started in
525 | - outpost:
526 | dir: /tmp
527 | missions:
528 | - extends_file: one.yaml # Will extend /tmp/one.yaml instead
529 | ```
530 |
531 |
532 | ## Roadmap
533 |
534 | - Add support for saving state information to some database (mnesia or sqlite?)
535 | - Build a frontend to interact with the core
536 | - Build a frontend to easily create mission plans
537 | - Support specifying which nodes to run on
538 | - Support running each command in specified docker images?
539 |
540 |
541 | ## License
542 |
543 | CingI is licensed under the [MIT license](LICENSE).
544 |
--------------------------------------------------------------------------------
/c-ing-i-logo.svg:
--------------------------------------------------------------------------------
1 |
7 |
--------------------------------------------------------------------------------
/config/config.exs:
--------------------------------------------------------------------------------
1 | # This file is responsible for configuring your application
2 | # and its dependencies with the aid of the Mix.Config module.
3 | use Mix.Config
4 |
5 | # This configuration is loaded before any dependency and is restricted
6 | # to this project. If another project depends on this project, this
7 | # file won't be loaded nor affect the parent project. For this reason,
8 | # if you want to provide default values for your application for
9 | # 3rd-party users, it should be done in your "mix.exs" file.
10 |
11 | # You can configure your application as:
12 | #
13 | # config :cingi, key: :value
14 | #
15 | # and access this configuration in your application as:
16 | #
17 | # Application.get_env(:cingi, :key)
18 | #
19 | # You can also configure a 3rd-party app:
20 | #
21 | # config :logger, level: :info
22 | #
23 |
24 | # It is also possible to import configuration files, relative to this
25 | # directory. For example, you can emulate configuration per environment
26 | # by uncommenting the line below and defining dev.exs, test.exs and such.
27 | # Configuration from the imported file will override the ones defined
28 | # here (which is why it is important to import them last).
29 | #
30 | # import_config "#{Mix.env}.exs"
31 |
32 | config :porcelain, driver: Porcelain.Driver.Basic
33 |
--------------------------------------------------------------------------------
/lib/cingi.ex:
--------------------------------------------------------------------------------
1 | defmodule Cingi do
2 | 	# See https://hexdocs.pm/elixir/Application.html
3 | 	# for more information on OTP Applications
4 | 	@moduledoc false
5 | 
6 | 	use Application
7 | 
8 | 	def start(_type, _args) do
9 | 		# List all child processes to be supervised
10 | 		children = [
11 | 			# Starts a worker by calling: Cingi.Worker.start_link(arg)
12 | 			{Cingi.Branch, name: :local_branch},
13 | 		]
14 | 
15 | 		# See https://hexdocs.pm/elixir/Supervisor.html
16 | 		# for other strategies and supported options
17 | 		opts = [strategy: :one_for_one, name: Cingi.Supervisor]
18 | 		send(self(), :register_name) # NOTE(review): sends to the process calling start/2; no handle_info(:register_name) is visible in this file — confirm the starting process (e.g. the CLI) consumes this message, or this is dead code
19 | 
20 | 		Supervisor.start_link(children, opts)
21 | 	end
22 | end
23 | 
--------------------------------------------------------------------------------
/lib/cingi/branch.ex:
--------------------------------------------------------------------------------
1 | defmodule Cingi.Branch do
2 | @moduledoc """
3 | Branches manage the missions for a single node.
4 | 	They initialize missions and mission reports,
5 | and assign or create the outposts for missions to be sent
6 | to. Although they initialize missions, they send missions
7 | over to a single Headquarters which reassigns the missions
8 | to an appropriate branch based on capacity.
9 | """
10 |
11 | alias Cingi.Branch
12 | alias Cingi.Headquarters
13 | alias Cingi.Outpost
14 | alias Cingi.Mission
15 | alias Cingi.MissionReport
16 | use GenServer
17 |
18 | defstruct [
19 | node: nil,
20 | pid: nil,
21 | name: nil,
22 |
23 | hq_pid: nil,
24 | cli_pid: nil, # Get cli pid if run through cli
25 |
26 | running: true,
27 | mission_reports: [],
28 | started_missions: [],
29 | running_missions: [],
30 | finished_missions: [],
31 | ]
32 |
33 | def start_link(args \\ []) do
34 | GenServer.start_link(__MODULE__, args, [name: args[:name]])
35 | end
36 |
37 | def create_report(pid, yaml_tuple) do
38 | GenServer.call(pid, {:yaml, yaml_tuple})
39 | end
40 |
41 | def queue_report(pid, yaml_tuple) do
42 | GenServer.cast(pid, {:yaml, yaml_tuple})
43 | end
44 |
45 | def init_mission(pid, opts) do
46 | GenServer.cast(pid, {:init_mission, opts})
47 | end
48 |
49 | def run_mission(pid, mission) do
50 | GenServer.cast(pid, {:run_mission, mission, Node.self})
51 | end
52 |
53 | def send_mission_to_outpost(pid, mission_pid, alternates_node) do
54 | GenServer.cast(pid, {:outpost_for_mission, mission_pid, alternates_node})
55 | end
56 |
57 | def mission_has_run(pid, mission_pid) do
58 | GenServer.cast(pid, {:mission_has_run, mission_pid})
59 | end
60 |
61 | def mission_has_finished(pid, mission_pid, result) do
62 | GenServer.cast(pid, {:mission_has_finished, mission_pid, result})
63 | end
64 |
65 | def report_has_finished(pid, report_pid, mission_pid) do
66 | GenServer.cast(pid, {:report_has_finished, report_pid, mission_pid})
67 | end
68 |
69 | def outpost_data(pid, outpost_pid, data) do
70 | GenServer.cast(pid, {:outpost_data, outpost_pid, data})
71 | end
72 |
73 | def report_data(pid, report_pid, data) do
74 | GenServer.cast(pid, {:report_data, report_pid, data})
75 | end
76 |
77 | def pause(pid) do
78 | GenServer.call(pid, :pause)
79 | end
80 |
81 | def resume(pid) do
82 | GenServer.call(pid, :resume)
83 | end
84 |
85 | def get(pid) do
86 | GenServer.call(pid, :get)
87 | end
88 |
89 | def terminate(pid) do
90 | GenServer.call(pid, :terminate)
91 | end
92 |
93 | def link_headquarters(pid, hq_pid) do
94 | GenServer.call(pid, {:link_headquarters, hq_pid})
95 | end
96 |
97 | def link_cli(pid, cli_pid) do
98 | GenServer.call(pid, {:link_cli, cli_pid})
99 | end
100 |
101 | # Server Callbacks
102 |
103 | def init(opts) do
104 | branch = %Branch{
105 | node: Node.self,
106 | pid: self(),
107 | name: opts[:name],
108 | hq_pid: nil,
109 | }
110 | {:ok, branch}
111 | end
112 |
113 | def handle_call({:yaml, yaml_tuple}, _from, branch) do
114 | {missionReport, branch} = get_branch_and_new_report(branch, yaml_tuple)
115 | {:reply, missionReport, branch}
116 | end
117 |
118 | def handle_call(:pause, _from, branch) do
119 | branch = %Branch{branch | running: false}
120 | for m <- branch.running_missions do Mission.pause(m) end
121 | {:reply, branch, branch}
122 | end
123 |
124 | def handle_call(:resume, _from, branch) do
125 | branch = %Branch{branch | running: true}
126 | for m <- branch.running_missions do Mission.resume(m) end
127 | Headquarters.run_missions(branch.hq_pid)
128 | {:reply, branch, branch}
129 | end
130 |
131 | def handle_call(:get, _from, branch) do
132 | {:reply, branch, branch}
133 | end
134 |
135 | def handle_call({:link_headquarters, hq_pid}, _from, branch) do
136 | branch = %Branch{branch | hq_pid: hq_pid}
137 | {:reply, branch, branch}
138 | end
139 |
140 | def handle_call({:link_cli, cli_pid}, _from, branch) do
141 | branch = %Branch{branch | cli_pid: cli_pid}
142 | {:reply, branch, branch}
143 | end
144 |
145 | def handle_call(:terminate, _from, branch) do
146 | if (branch.cli_pid) do
147 | send branch.cli_pid, :terminate
148 | end
149 | {:reply, branch, branch}
150 | end
151 |
152 | def handle_cast({:yaml, yaml_tuple}, branch) do
153 | {_, branch} = get_branch_and_new_report(branch, yaml_tuple)
154 | {:noreply, branch}
155 | end
156 |
157 | 	def handle_cast({:init_mission, opts}, branch) do
158 | 		{:ok, mission} = Mission.start_link(opts)
159 | 
160 | 		# Report passes in opts of the report_pid and outpost_pid
161 | 		# If there is an outpost_pid, then an outpost sent the report
162 | 		case opts[:outpost_pid] do
163 | 			# No outpost_pid, send to hq for distribution
164 | 			nil -> Headquarters.queue_mission(branch.hq_pid, mission)
165 | 
166 | 			# outpost_pid present, bypass hq and run on this branch
167 | 			_ -> Branch.run_mission(self(), mission)
168 | 		end
169 | 		{:noreply, branch}
170 | 	end
171 |
172 | def handle_cast({:run_mission, mission, alternates_node}, branch) do
173 | Branch.send_mission_to_outpost(self(), mission, alternates_node)
174 | branch = %Branch{branch | started_missions: branch.started_missions ++ [mission]}
175 | {:noreply, branch}
176 | end
177 |
178 | 	# Getting of the outpost should be handled by the specific Branch
179 | 	# Because a Mission could have initialized at a different Branch
180 | 	# than the one currently running it, so the outpost that's retrieved
181 | 	# should be the one on the same node as the Branch running the mission
182 | 	def handle_cast({:outpost_for_mission, mission_pid, alternates_node}, branch) do
183 | 		mission = Mission.get(mission_pid)
184 | 
185 | 		# The parent outpost process is either the outpost of its supermission
186 | 		# or potentially the parent of the outpost that started the mission_report,
187 | 		# as that outpost would be for setting up and needs its parent environment to do so
188 | 		base_outpost = case mission.supermission_pid do
189 | 			nil ->
190 | 				case MissionReport.get(mission.report_pid).outpost_pid do
191 | 					nil -> nil
192 | 					opid ->
193 | 						o = Outpost.get(opid)
194 | 						# Use the outpost itself if it's already setup,
195 | 						# otherwise use its parent so its setup can be run on
196 | 						# an already setup outpost
197 | 						if o.is_setup do opid else o.parent_pid end
198 | 				end
199 | 			supermission -> Mission.get_outpost(supermission)
200 | 		end
201 | 
202 | 		outpost_opts = [
203 | 			branch_pid: self(),
204 | 			plan: Mission.get_outpost_plan(mission_pid),
205 | 			parent_pid: base_outpost,
206 | 			root_mission_pid: mission_pid,
207 | 			alternates: :rpc.call(alternates_node, Outpost, :start_alternates, [mission_pid]),
208 | 		]
209 | 
210 | 		# See if mission has an outpost configuration;
211 | 		# if so, use that to initialize a new outpost,
212 | 		# otherwise use an outpost from this mission's supermission,
213 | 		# constructing on this node if necessary
214 | 		{:ok, outpost} = case {outpost_opts[:plan], base_outpost} do
215 | 			{nil, nil} -> Outpost.start_link(outpost_opts)
216 | 			{nil, base_outpost} -> Outpost.get_or_create_version_on_branch(base_outpost, self())
217 | 			_ -> Outpost.start_link(outpost_opts)
218 | 		end
219 | 
220 | 		Outpost.run_mission(outpost, mission_pid)
221 | 		{:noreply, branch}
222 | 	end
223 | 
224 | def handle_cast({:mission_has_run, mission_pid}, branch) do
225 | started_missions = cond do
226 | mission_pid in branch.started_missions -> List.delete(branch.started_missions, mission_pid)
227 | true -> raise "Mission ran but not started"
228 | end
229 | Headquarters.run_missions(branch.hq_pid)
230 | {:noreply, %Branch{branch |
231 | started_missions: started_missions,
232 | running_missions: branch.running_missions ++ [mission_pid],
233 | }}
234 | end
235 |
236 | def handle_cast({:mission_has_finished, mission_pid, result}, branch) do
237 | running_missions = cond do
238 | mission_pid in branch.running_missions ->
239 | List.delete(branch.running_missions, mission_pid)
240 | true ->
241 | IO.puts :stderr, "Mission finished but not ran #{inspect(Mission.get(mission_pid))}"
242 | branch.running_missions
243 | end
244 |
245 | Headquarters.finished_mission(branch.hq_pid, mission_pid, result, self())
246 |
247 | {:noreply, %Branch{branch |
248 | running_missions: running_missions,
249 | finished_missions: branch.finished_missions ++ [mission_pid],
250 | }}
251 | end
252 |
253 | def handle_cast({:report_has_finished, report_pid, mission_pid}, branch) do
254 | if (branch.cli_pid) do
255 | send branch.cli_pid, {:report, report_pid, mission_pid}
256 | end
257 | {:noreply, branch}
258 | end
259 |
260 | def handle_cast({:outpost_data, _outpost_pid, data}, branch) do
261 | if (branch.cli_pid) do
262 | send branch.cli_pid, {:branch_outpost_data, data}
263 | end
264 | {:noreply, branch}
265 | end
266 |
267 | def handle_cast({:report_data, _report_pid, data}, branch) do
268 | if (branch.cli_pid) do
269 | send branch.cli_pid, {:branch_report_data, data}
270 | end
271 | {:noreply, branch}
272 | end
273 |
274 | def get_branch_and_new_report(branch, yaml_tuple) do
275 | {:ok, missionReport} = MissionReport.start_link(yaml_tuple ++ [branch_pid: self()])
276 | reports = branch.mission_reports ++ [missionReport]
277 | {missionReport, %Branch{branch | mission_reports: reports}}
278 | end
279 | end
280 |
--------------------------------------------------------------------------------
/lib/cingi/cli.ex:
--------------------------------------------------------------------------------
1 | defmodule Cingi.CLI do
2 | @moduledoc """
3 | The CLI handles all the command line functionality.
4 | They are linked to the local branch started by the application.
5 | They also create the global Headquarters all branches connect to.
	The CLI manages the network and connections,
7 | and also handles disconnect signals.
8 | """
9 |
10 | def main(args) do
11 | Process.register self(), :local_cli
12 | args |> parse_args |> process
13 | end
14 |
15 | def process([]) do
16 | IO.puts "No arguments given"
17 | end
18 |
19 | def process(options) do
20 | Cingi.Branch.link_cli(:local_branch, self())
21 | mbn = options[:minbranches]
22 | connect_to = options[:connectto]
23 | connect_or_headquarters(connect_to, mbn, options)
24 | end
25 |
	# No --connectto given: this node hosts the global Headquarters itself.
	def connect_or_headquarters(nil, min_branch_num, options) do
		min_branch_num = min_branch_num || 0
		Cingi.Headquarters.start_link(name: {:global, :hq})
		Cingi.Headquarters.link_branch({:global, :hq}, :local_branch)

		set_up_network(min_branch_num, options)
		# The local branch counts as one, so wait for one fewer remote branch
		wait_for_branches(min_branch_num - 1)
		start_missions(options[:file], options)
	end
35 |
	# --connectto given: join the Headquarters running on `host`.
	def connect_or_headquarters(host, nil, options) do
		# `true` just forces network setup regardless of branch count
		set_up_network(true, options)
		# NOTE(review): String.to_atom on CLI input creates atoms dynamically
		host = String.to_atom host
		wait_for_hq(host)

		cond do
			options[:file] -> connect_to_hq(:file, options[:file], host)
			options[:closehq] -> connect_to_hq(:closehq, host)
			true -> connect_to_hq(:branch, host, options)
		end
	end
47 |
48 | def connect_or_headquarters(_, _, _) do
49 | raise "Cannot have both connect_to and min_branch_num options"
50 | end
51 |
	# Run as a worker branch: link into the global hq, then loop relaying
	# outpost output until the hq node goes down or terminates us.
	def connect_to_hq(:branch, host, options) do
		Cingi.Headquarters.link_branch({:global, :hq}, :local_branch)
		IO.puts "Connected local branch to global headquarters"
		Process.send({:local_cli, host}, {:branch_connect, self()}, [])

		# Get a :nodedown message if the hq host disappears
		Node.monitor host, true

		receive_loop = fn(loop) ->
			receive do
				{:branch_outpost_data, data} ->
					print_output(data, options[:printbranchoutput])
					loop.(loop)
				{:nodedown, _} -> :error
				:terminate -> :ok
				_ -> loop.(loop)
			end
		end
		receive_loop.(receive_loop)
	end
71 |
	# Submit a mission plan file to the cluster.
	def connect_to_hq(:file, file, host) do
		yaml_opts = [file: file, cli_pid: self()]
		# NOTE(review): this creates the report both locally and on the
		# remote host — looks like it would run the plan twice; confirm intended
		Cingi.Branch.create_report :local_branch, yaml_opts
		:rpc.call(host, Cingi.Branch, :create_report, [:local_branch, yaml_opts])
	end
77 |
78 | def connect_to_hq(:closehq, host) do
79 | Process.send({:local_cli, host}, :terminate, [])
80 | end
81 |
	# Kick off the mission plan (if a file was given) and loop, printing
	# report/outpost output until the root mission's report comes back,
	# then exit the VM with the root mission's exit code.
	def start_missions(file, options) do
		report_pid = case file do
			nil -> nil
			file ->
				yaml_opts = [file: file, cli_pid: self()]
				Cingi.Branch.create_report :local_branch, yaml_opts
		end

		receive_loop = fn(loop) ->
			receive do
				{:branch_report_data, data} ->
					print_output(data, true)
					loop.(loop)
				{:branch_outpost_data, data} ->
					print_output(data, options[:printbranchoutput])
					loop.(loop)
				{:report, ^report_pid, mission_pid} ->
					Cingi.Headquarters.terminate_branches({:global, :hq})
					# Mirror the root mission's exit code as our own
					case Cingi.Mission.get(mission_pid).exit_code do
						0 -> :ok
						nil ->
							IO.puts :stderr, "No missions ran, everything was skipped"
							System.halt(1)
						exit_code -> System.halt(exit_code)
					end
				:terminate ->
					Cingi.Headquarters.terminate_branches({:global, :hq})
				_ -> loop.(loop)
			end
		end
		receive_loop.(receive_loop)
	end
114 |
115 | defp parse_args(args) do
116 | {options, _, _} = OptionParser.parse(args,
117 | switches: [
118 | minbranches: :integer,
119 | file: :string,
120 | name: :string,
121 | sname: :string,
122 | cookie: :string,
123 | connectto: :string,
124 | branchoutput: :boolean,
125 | printbranchoutput: :boolean,
126 | closehq: :boolean,
127 | ]
128 | )
129 | options
130 | end
131 |
132 | def wait_for_branches(countdown) do
133 | case countdown do
134 | n when n <= 0 -> :ok
135 | n ->
136 | IO.puts "Waiting for #{n} branches to connect"
137 | receive do
138 | {:branch_connect, _} ->
139 | IO.puts "branch connected"
140 | wait_for_branches(n - 1)
141 | end
142 | end
143 | end
144 |
145 | def set_up_network(0, _) do end
146 |
147 | def set_up_network(_, options) do
148 | # Determine either short name or long name
149 | {type, name} = case {options[:name], options[:sname]} do
150 | {nil, sname} -> {:shortnames, sname}
151 | {lname, nil} -> {:longnames, lname}
152 | {nil, nil} -> {nil, nil}
153 | _ -> raise "Can't have both a long name and short name"
154 | end
155 |
156 | case {name, options[:cookie]} do
157 | {nil, nil} -> raise "Requires name and cookie for networking"
158 | {nil, _} -> raise "Requires name for networking"
159 | {_, nil} -> raise "Requires cookie for networking"
160 | {name, cookie} ->
161 | case Node.start(String.to_atom(name), type) do
162 | {:ok, _pid} -> Node.set_cookie(String.to_atom(cookie))
163 | {:error, term} -> raise term
164 | end
165 | end
166 | end
167 |
	# Poll (up to `countdown` attempts, 100ms apart) until the global :hq
	# process is registered, then return its state. Raises on timeout.
	def wait_for_hq(host, countdown \\ 100) do
		Node.connect(host)
		case GenServer.whereis({:global, :hq}) do
			nil ->
				Process.sleep 100
				case countdown do
					n when n <= 0 -> raise "Took too long connecting to headquarters"
					n -> wait_for_hq(host, n - 1)
				end
			_ -> Cingi.Headquarters.get({:global, :hq})
		end
	end
180 |
	# Print each line of mission output prefixed with "[node|key|...]"
	# labels. No-op unless `print` is truthy.
	def print_output(data, print) do
		print = print || false
		case print do
			false -> :ok
			true ->
				field_agent = Cingi.FieldAgent.get(data[:field_agent_pid])

				data[:data]
				|> String.split("\n")
				|> Enum.map(fn(line) ->
					# Label with the node name, unless not distributed
					keys = case field_agent.node do
						:nonode@nohost -> []
						x -> [" #{x} "]
					end

					# Add each mission key in the pid trail, truncated to 7 chars
					# NOTE(review): no catch-all — assumes data[:pid] is always a list
					keys = keys ++ case data[:pid] do
						[] -> []
						[_|_] -> data[:pid]
						|> Enum.map(fn(pid) ->
							key = Cingi.Mission.get(pid).key
							shortened = String.slice(key, 0, 7)
							case shortened == key do
								true -> key
								false -> shortened <> "..."
							end
						end)
					end

					keys = Enum.join(keys, "|")

					"[#{keys}] #{line}"
				end)
				|> Enum.map(&IO.puts/1)
		end
	end
216 | end
217 |
--------------------------------------------------------------------------------
/lib/cingi/commander.ex:
--------------------------------------------------------------------------------
defmodule Cingi.Commander do
	@moduledoc """
	Commanders are long running processes that are used to start missions.
	They have a main_mission and a transforming_mission.
	main_missions are the long running script process that output to standard out.
	transforming_missions are optional missions that take each line of main_mission's output,
	parse it, and return it back to the commander in a suitable format.
	If the commander gets a line in an appropriate format, it'll
	start up a MissionReport and send it to its headquarters.
	"""

	alias Cingi.Commander
	use GenServer

	defstruct [
		orders: nil,                    # Commander configuration
		main_mission_pid: nil,          # Long-running mission producing output
		transforming_mission_pid: nil,  # Optional mission parsing each output line
		headquarters_pid: nil,          # Headquarters to send MissionReports to
	]

	# Client API

	def start_link(opts) do
		GenServer.start_link(__MODULE__, opts)
	end

	# Server Callbacks

	def init(_) do
		{:ok, %Commander{}}
	end
end
34 |
--------------------------------------------------------------------------------
/lib/cingi/field_agent.ex:
--------------------------------------------------------------------------------
1 | defmodule Cingi.FieldAgent do
2 | @moduledoc """
3 | Field agents are processes that are assigned a mission by an outpost
4 | Typically they run the bash command in the same environment as the outpost
5 | They run on the same node as the outpost but report the output to the mission
6 | """
7 |
8 | alias Cingi.FieldAgent
9 | alias Cingi.Outpost
10 | alias Cingi.Mission
11 | alias Cingi.MissionReport
12 | alias Porcelain.Process, as: Proc
13 | use GenServer
14 |
	defstruct [
		mission_pid: nil,       # Mission this agent executes
		outpost_pid: nil,       # Outpost that assigned this agent
		node: nil,              # Node the agent runs on (set in init/1)
		proc: nil,              # Porcelain process handle while a bash cmd runs
		constructed_plan: %{},  # Plan accumulated while resolving extends_*
	]


	# Embed wrapper.sh at compile time; escripts cannot ship a priv folder
	@external_resource "priv/bin/wrapper.sh"
	@wrapper_contents File.read! "priv/bin/wrapper.sh"
26 |
27 | # Client API
28 |
	def start_link(args \\ []) do
		GenServer.start_link(__MODULE__, args, [])
	end

	# Synchronously fetch the agent's full state
	def get(pid) do
		GenServer.call(pid, :get)
	end

	# Kill the agent's running process (if any); reports a 137 result
	def stop(pid) do
		GenServer.cast(pid, :stop)
	end

	# Begin executing the assigned mission
	def run_mission(pid) do
		GenServer.cast(pid, :run_mission)
	end

	# Spawn the mission's bash command (queued through the outpost)
	def run_bash_process(pid) do
		GenServer.cast(pid, :run_bash_process)
	end

	# Deliver a (possibly extending) mission plan fragment to this agent
	def send_mission_plan(pid, plan, from_pid, next_mpid \\ nil) do
		GenServer.cast(pid, {:received_mission_plan, plan, from_pid, next_mpid})
	end

	# Finalize the constructed plan and hand it to the mission
	def finish_mission_plan(pid) do
		GenServer.cast(pid, :finish_mission_plan)
	end

	# Notify the outpost that this agent's mission finished with `result`
	def mission_has_finished(pid, result) do
		GenServer.cast(pid, {:mission_has_finished, result})
	end

	# Queue a plan-file field agent on this agent's outpost for `branch_pid`
	def queue_other_field_agent_on_outpost_of_branch(pid, file, callback_fa_pid, branch_pid) do
		GenServer.cast(pid, {:queue_other_field_agent_on_outpost_of_branch, file, callback_fa_pid, branch_pid})
	end

	# Forward a result for `finished_mpid` to this agent's mission
	def send_result(pid, result, finished_mpid) do
		GenServer.cast(pid, {:result, result, finished_mpid})
	end
67 | end
68 |
69 | # Server Callbacks
70 |
71 | def init(opts) do
72 | field_agent = struct(FieldAgent, opts)
73 | mpid = field_agent.mission_pid
74 | Mission.set_field_agent(mpid, self())
75 | {:ok, %FieldAgent{field_agent | node: Node.self}}
76 | end
77 |
78 | def handle_call(:get, _from, field_agent) do
79 | {:reply, field_agent, field_agent}
80 | end
81 |
	# Merge an incoming plan fragment into the plan under construction,
	# then either keep resolving "extends_*" references or finish the plan.
	def handle_cast({:received_mission_plan, plan, from_pid, next_mpid}, field_agent) do
		# Normalize: maps merge under what's built so far; empty/nil become
		# an empty map; any other value becomes a "missions" entry
		new_plan = case plan do
			%{} -> plan |> Map.merge(field_agent.constructed_plan)
			[] -> %{}
			nil -> %{}
			_ -> %{"missions" => plan}
		end

		case {next_mpid, new_plan} do
			# Plan extends a file; load it via the outpost on the right branch
			{_, %{"extends_file" => file}} ->
				outpost = Outpost.get(field_agent.outpost_pid)
				branch_pid = outpost.branch_pid
				mission = Mission.get(from_pid)
				FieldAgent.queue_other_field_agent_on_outpost_of_branch(mission.field_agent_pid, file, self(), branch_pid)

			# No more mpids to request from, construct from new_plan regardless
			{nil, _} -> FieldAgent.finish_mission_plan(self())

			# If a key does exist, request for the template with given key from the given mpid
			{mpid, %{"extends_template" => key}} -> Mission.request_mission_plan(mpid, key, self())

			# No more extending, construct from new_plan
			_ -> FieldAgent.finish_mission_plan(self())

		end

		# The extends keys were consumed above; drop them from the stored plan
		new_plan = new_plan
		|> Map.delete("extends_template")
		|> Map.delete("extends_file")

		{:noreply, %FieldAgent{field_agent | constructed_plan: new_plan}}
	end
114 |
115 | def handle_cast({:queue_other_field_agent_on_outpost_of_branch, file, callback_fa_pid, branch_pid}, field_agent) do
116 | outpost_pid = Outpost.get_version_on_branch(field_agent.outpost_pid, branch_pid)
117 | Outpost.queue_field_agent_for_plan(outpost_pid, file, callback_fa_pid)
118 | {:noreply, field_agent}
119 | end
120 |
121 | def handle_cast(:finish_mission_plan, field_agent) do
122 | Mission.construct_from_plan(field_agent.mission_pid, field_agent.constructed_plan)
123 | Outpost.mission_plan_has_finished(field_agent.outpost_pid, self())
124 | {:noreply, field_agent}
125 | end
126 |
	# Start executing the mission: skipped missions report a nil status,
	# bash commands get queued on the outpost, submissions are kicked off.
	def handle_cast(:run_mission, field_agent) do
		mpid = field_agent.mission_pid
		mission = Mission.get(mpid)
		Outpost.mission_has_run(field_agent.outpost_pid, mpid)

		# NOTE(review): no fallback clause — a mission with neither cmd nor
		# submissions (and not skipped) would raise CondClauseError here
		cond do
			mission.skipped -> FieldAgent.send_result(self(), %{status: nil}, mpid)
			mission.cmd -> Outpost.queue_field_agent_for_bash(field_agent.outpost_pid, self())
			mission.submissions -> Mission.run_submissions(mpid, mission.prev_mission_pid)
		end

		{:noreply, field_agent}
	end
140 |
141 | def handle_cast(:stop, field_agent) do
142 | case field_agent.proc do
143 | nil -> :ok
144 | _ -> Proc.send_input field_agent.proc, "kill\n"
145 | end
146 | FieldAgent.send_result(self(), %{status: 137}, field_agent.mission_pid)
147 | {:noreply, field_agent}
148 | end
149 |
	# Spawn the mission's bash command through the wrapper script in the
	# outpost's directory/environment. No-op if the mission already finished.
	def handle_cast(:run_bash_process, field_agent) do
		mpid = field_agent.mission_pid
		mission = Mission.get(mpid)

		proc = case mission.finished do
			true -> nil
			false ->
				{input_file, is_tmp} = init_input_file(mission)

				# wrapper.sh argv: cmd [input_file is_tmp]
				cmds = [mission.cmd] ++ case input_file do
					nil -> []
					false -> []
					_ -> [input_file, is_tmp]
				end

				# Porcelain's basic driver only takes nil or :out for err
				err = case mission.output_with_stderr do
					true -> :out
					false -> nil
				end

				outpost = Outpost.get(field_agent.outpost_pid)
				env = convert_env(outpost.env)
				dir = outpost.dir

				# Create wrapper file temporarily,
				# will get cleaned up by itself
				# Necessary in distribution of escript,
				# escripts don't support priv folder
				{:ok, fd, script} = Temp.open
				IO.write fd, @wrapper_contents
				File.close fd
				System.cmd "chmod", ["+x", script]

				try do
					Porcelain.spawn(script, cmds, dir: dir, env: env, in: :receive, out: {:send, self()}, err: err)
				rescue
					# Error, send result as a 137 sigkill
					# NOTE(review): this rescue branch evaluates to File.rm's
					# return, so `proc` is not nil on failure — confirm :stop
					# copes with a non-process value in `proc`
					_ ->
						FieldAgent.send_result(self(), %{status: 137}, mpid)
						File.rm script
				end
		end
		{:noreply, %FieldAgent{field_agent | proc: proc}}
	end
195 |
196 | def handle_cast({:result, result, finished_mpid}, field_agent) do
197 | mpid = field_agent.mission_pid
198 | Mission.send_result(mpid, result, finished_mpid)
199 | {:noreply, field_agent}
200 | end
201 |
202 | def handle_cast({:mission_has_finished, result}, field_agent) do
203 | Outpost.mission_has_finished(field_agent.outpost_pid, field_agent.mission_pid, result)
204 | {:noreply, field_agent}
205 | end
206 |
	#########
	# INFOS #
	#########

	# Stdout data from the running Porcelain process
	def handle_info({_pid, :data, :out, data}, field_agent) do
		add_to_output(field_agent, data: data, type: :out)
	end

	# Stderr data from the running Porcelain process
	def handle_info({_pid, :data, :err, data}, field_agent) do
		add_to_output(field_agent, data: data, type: :err)
	end

	# Process exited; forward its result to the mission
	def handle_info({_pid, :result, result}, field_agent) do
		FieldAgent.send_result(self(), result, field_agent.mission_pid)
		{:noreply, field_agent}
	end
223 |
224 | ###########
225 | # HELPERS #
226 | ###########
227 |
228 | defp add_to_output(field_agent, opts) do
229 | time = System.system_time(:millisecond)
230 | data = opts ++ [timestamp: time, field_agent_pid: self(), pid: []]
231 | Mission.send(field_agent.mission_pid, data)
232 | Outpost.field_agent_data(field_agent.outpost_pid, self(), data)
233 | {:noreply, field_agent}
234 | end
235 |
	# Return {path_of_file, _boolean_indicating_whether_its_a_tmp_file}
	# Resolves the mission's input_file spec ($IN variables) against the
	# previous mission's output, writing it to a temp file when non-empty.
	def init_input_file(mission) do
		input = case mission.input_file do
			n when n in [nil, false, []] -> []
			[_|_] -> mission.input_file
			input -> [input]
		end

		input = input
		|> Enum.map(fn (x) ->
			case MissionReport.parse_variable(x, last_index: mission.submissions_num - 1) do
				[error: _] -> :error
				[type: "IN"] -> Mission.get_output(mission.prev_mission_pid)
				[type: "IN", key: key] -> Mission.get_output(mission.prev_mission_pid, key)
				[type: "IN", index: index] -> Mission.get_output(mission.prev_mission_pid, index)
			end
		end)

		case input do
			[] -> {nil, false}
			input ->
				input = Enum.join(input)
				{:ok, fd, path} = Temp.open
				IO.write fd, input
				File.close fd
				{path, true}
		end
	end
264 |
265 | def convert_env(env_map) do
266 | Enum.map(env_map || %{}, &(&1))
267 | end
268 | end
269 |
--------------------------------------------------------------------------------
/lib/cingi/headquarters.ex:
--------------------------------------------------------------------------------
1 | defmodule Cingi.Headquarters do
2 | @moduledoc """
3 | Headquarters manage all the branches within the cluster
	and assign missions to branches based on capacity.
5 | There should only be one Headquarters at each cluster.
6 | If a branch is started without a Headquarters, and
7 | doesn't intend to connect to an existing cluster,
8 | a Headquarters should be created for it.
9 | """
10 |
11 | alias Cingi.Headquarters
12 | alias Cingi.Branch
13 | alias Cingi.Mission
14 | use GenServer
15 |
	defstruct [
		node: nil,              # Node this Headquarters runs on
		running: true,          # When false, queued missions are not dispatched
		branch_pids: [],        # All linked branch processes
		queued_missions: [],    # Missions waiting for a branch
		running_missions: %{},  # branch_pid => missions currently running there
		finished_missions: %{}, # branch_pid => missions that finished there
	]
24 |
	def start_link(opts \\ []) do
		GenServer.start_link(__MODULE__, [], opts)
	end

	# Synchronously fetch the headquarters state
	def get(pid) do
		GenServer.call pid, :get
	end

	# Stop dispatching queued missions and pause all branches
	def pause(pid) do
		GenServer.call pid, :pause
	end

	# Resume dispatching and all branches
	def resume(pid) do
		GenServer.call pid, :resume
	end

	def link_branch(pid, branch_pid) do
		# May be passed in name, so get real pid while still in same node
		branch = Branch.get(branch_pid)
		true_branch_pid = branch.pid
		GenServer.call pid, {:link_branch, true_branch_pid, Node.self}
	end

	# Terminate every linked branch
	def terminate_branches(pid) do
		GenServer.call pid, :terminate_branches
	end

	# Enqueue a mission and trigger a dispatch pass
	def queue_mission(pid, mission_pid) do
		GenServer.cast pid, {:queue_mission, mission_pid}
	end

	# Dispatch one queued mission, if running and any are queued
	def run_missions(pid) do
		GenServer.cast pid, :run_missions
	end

	# Record a finished mission and report its result up the hierarchy
	def finished_mission(pid, mission_pid, result, branch_pid) do
		GenServer.cast pid, {:finished_mission, mission_pid, result, branch_pid}
	end
63 |
64 | # Server Callbacks
65 |
66 | def init(_) do
67 | headquarters = %Headquarters{node: Node.self}
68 | {:ok, headquarters}
69 | end
70 |
71 | def handle_call(:get, _from, hq) do
72 | {:reply, hq, hq}
73 | end
74 |
75 | def handle_call(:pause, _from, hq) do
76 | hq = %Headquarters{hq | running: false}
77 | for b <- get_all_branches(hq) do Branch.pause(b) end
78 | {:reply, hq, hq}
79 | end
80 |
81 | def handle_call(:resume, _from, hq) do
82 | hq = %Headquarters{hq | running: true}
83 | for b <- get_all_branches(hq) do Branch.resume(b) end
84 | Headquarters.run_missions(self())
85 | {:reply, hq, hq}
86 | end
87 |
88 | def handle_call({:link_branch, branch_pid, branch_node}, _from, hq) do
89 | Node.monitor branch_node, true
90 | hq = %Headquarters{hq | branch_pids: hq.branch_pids ++ [branch_pid]}
91 | Branch.link_headquarters(branch_pid, self())
92 | {:reply, hq, hq}
93 | end
94 |
95 | def handle_call(:terminate_branches, _from, hq) do
96 | get_all_branches(hq) |> Enum.map(&Branch.terminate/1)
97 | {:reply, hq, hq}
98 | end
99 |
100 | def handle_cast({:queue_mission, mission_pid}, hq) do
101 | missions = hq.queued_missions ++ [mission_pid]
102 | Headquarters.run_missions(self())
103 | {:noreply, %Headquarters{hq | queued_missions: missions}}
104 | end
105 |
106 | def handle_cast(:run_missions, hq) do
107 | hq = try do
108 | if not hq.running do raise "Not running" end
109 |
110 | [mission | queued_missions] = hq.queued_missions
111 |
112 | branch_pid = get_branch(hq)
113 | branch_missions = Map.get(hq.running_missions, branch_pid, []) ++ [mission]
114 | Branch.run_mission(branch_pid, mission)
115 |
116 | %Headquarters{hq |
117 | queued_missions: queued_missions,
118 | running_missions: Map.put(hq.running_missions, branch_pid, branch_missions),
119 | }
120 | rescue
121 | MatchError -> hq
122 | RuntimeError -> hq
123 | end
124 | {:noreply, hq}
125 | end
126 |
127 | def handle_cast({:finished_mission, mission_pid, result, branch_pid}, hq) do
128 | Mission.report_result_up(mission_pid, result)
129 |
130 | running = hq.running_missions
131 | |> Map.get(branch_pid, [])
132 | |> List.delete(mission_pid)
133 |
134 | finished = Map.get(hq.running_missions, branch_pid, []) ++ [mission_pid]
135 |
136 | {:noreply, %Headquarters{hq |
137 | running_missions: Map.put(hq.running_missions, branch_pid, running),
138 | finished_missions: Map.put(hq.finished_missions, branch_pid, finished),
139 | }}
140 | end
141 |
142 | def handle_info({:nodedown, _}, hq) do
143 | self_pid = self()
144 | {up, _} = get_all_branches(hq, false)
145 | {running, stopped} = hq.running_missions |> Map.split(up)
146 | stopped
147 | |> Enum.map(&(elem(&1, 1)))
148 | |> List.flatten
149 | |> Enum.map(fn (m) ->
150 | Mission.send_result(m, %{status: 221}, m)
151 | Headquarters.finished_mission(self_pid, m, %{status: 221}, nil)
152 | end)
153 |
154 | {:noreply, %Headquarters{
155 | branch_pids: up,
156 | running_missions: running,
157 | }}
158 | end
159 |
160 | # Get branch with lowerst number of current missions to pass a mission along to
161 | def get_branch(hq) do
162 | get_all_branches(hq)
163 | |> Enum.map(&Branch.get/1)
164 | |> Enum.min_by(&(length(&1.running_missions) + length(&1.started_missions)))
165 | |> (fn(b) -> b.pid end).()
166 | end
167 |
168 | # Get all branches that are currently still alive
169 | def get_all_branches(hq, get_running_only \\ true) do
170 | hq.branch_pids
171 | |> Enum.split_with(fn b ->
172 | case :rpc.pinfo(b) do
173 | {:badrpc, _} -> false
174 | _ -> true
175 | end
176 | end)
177 | |> (fn({up, down}) ->
178 | case get_running_only do
179 | true -> up
180 | false -> {up, down}
181 | end
182 | end).()
183 | end
184 | end
185 |
--------------------------------------------------------------------------------
/lib/cingi/mission.ex:
--------------------------------------------------------------------------------
1 | defmodule Cingi.Mission do
2 | @moduledoc """
	Missions are the actual processes that record the information
4 | necessary to run submissions or bash commands. They function as the task
5 | or pipeline needed to be queued by headquarters, sent to branches, sent to outposts,
	and run by the field agent assigned by the outpost. They record
	the output and exit code of the submissions or bash commands when run.
8 | """
9 |
10 | alias Cingi.Mission
11 | alias Cingi.MissionReport
12 | alias Cingi.FieldAgent
13 | alias Cingi.Outpost
14 | use GenServer
15 |
	defstruct [
		# Identity
		pid: nil,
		key: "",
		index: nil,
		name: nil,

		# Hierarchy / ownership
		report_pid: nil,
		prev_mission_pid: nil,
		supermission_pid: nil,
		submission_holds: [],
		field_agent_pid: nil,

		# Plans
		mission_plan_templates: %{},
		mission_plan: nil,
		original_mission_plan: nil,

		# What to run: a bash command or nested submissions
		cmd: nil,
		submissions: nil,
		submissions_num: nil,

		input_file: "$IN", # Get input by default
		output_filter: [], # Don't filter anything by default
		output: [],

		output_with_stderr: false, # Stderr will be printed to output if false, redirected to output if true
		fail_fast: true, # fail_fast true by default, but if parallel will default to false
		skipped: false,

		# Lifecycle flags
		running: false,
		finished: false,

		when: nil,
		exit_code: nil,
	]
50 |
	# Client API

	def start_link(opts) do
		GenServer.start_link(__MODULE__, opts)
	end

	# Append output data (with metadata) to this mission
	def send(pid, data) do
		GenServer.cast(pid, {:data_and_metadata, data})
	end

	# A submission process finished initializing
	def initialized_submission(pid, submission_pid) do
		GenServer.cast(pid, {:init_submission, submission_pid})
	end

	# Record that `finished_mpid` finished with `result`
	def send_result(pid, result, finished_mpid) do
		GenServer.cast(pid, {:finished, result, finished_mpid})
	end

	# Start the next batch of submissions
	def run_submissions(pid, prev_pid \\ nil) do
		GenServer.cast(pid, {:run_submissions, prev_pid})
	end

	# Populate this mission's fields from a fully-resolved plan
	def construct_from_plan(pid, new_plan) do
		GenServer.cast(pid, {:construct_from_plan, new_plan})
	end

	def set_field_agent(pid, field_agent_pid) do
		GenServer.cast(pid, {:set_field_agent, field_agent_pid})
	end

	# Ask this mission (walking up its ancestors) for a plan template
	def request_mission_plan(pid, key, fa_pid) do
		GenServer.cast(pid, {:request_mission_plan, key, fa_pid})
	end

	def report_result_up(pid, result) do
		GenServer.cast(pid, {:report_result_up, result})
	end

	def stop(pid) do
		GenServer.cast(pid, :stop)
	end

	def pause(pid) do
		GenServer.call(pid, :pause)
	end

	def resume(pid) do
		GenServer.call(pid, :resume)
	end

	def get(pid) do
		GenServer.call(pid, :get)
	end

	def get_outpost(pid) do
		GenServer.call(pid, :get_outpost)
	end

	def get_outpost_plan(pid) do
		GenServer.call(pid, :get_outpost_plan)
	end

	# Get output (optionally filtered by key or index); [] for a nil pid
	def get_output(pid, selector \\ nil) do
		case pid do
			nil -> []
			_ -> GenServer.call(pid, {:get_output, selector})
		end
	end
119 |
120 | # Server Callbacks
121 |
122 | def init(opts) do
123 | opts = opts ++ [
124 | pid: self(),
125 | original_mission_plan: opts[:mission_plan],
126 | ]
127 | mission = struct(Mission, opts)
128 | {:ok, mission}
129 | end
130 |
131 | #########
132 | # CASTS #
133 | #########
134 |
	# Resolve a template key against this mission's templates, walking up
	# the supermission chain until found; report to stderr when missing.
	def handle_cast({:request_mission_plan, key, fa_pid}, mission) do
		templates = mission.mission_plan_templates || %{}
		case {templates[key], mission.supermission_pid} do
			{nil, nil} ->
				IO.puts :stderr, "Template key #{key} doesn't exist in the hierarchy"
				FieldAgent.send_mission_plan(fa_pid, %{}, self())
			{nil, spid} -> Mission.request_mission_plan(spid, key, fa_pid)
			{template, spid} -> FieldAgent.send_mission_plan(fa_pid, template, self(), spid)
		end
		{:noreply, mission}
	end
146 |
	# Build the mission's runtime fields (cmd/submissions/key/skipped/...)
	# from the resolved plan, then notify the supermission and the report.
	def handle_cast({:construct_from_plan, new_plan}, mission) do
		mission = Map.merge(mission, construct_plan(new_plan))

		mission = %Mission{mission |
			submissions: case mission.submissions do
				[] -> nil
				s -> s
			end,
			submissions_num: case mission.submissions do
				%{} -> length(Map.keys(mission.submissions))
				[_|_] -> length(mission.submissions)
				_ -> 0
			end,
			key: case mission.key do
				"" -> construct_key(mission.name || mission.cmd)
				_ -> mission.key
			end,
			skipped: determine_skipped_status(mission),
		}

		# Reconstruct mission after getting submissions_num
		mission = %Mission{mission |
			output_filter: get_output_filter(
				mission.output_filter,
				last_index: mission.submissions_num - 1
			),
		}

		# A mission with neither cmd nor submissions is invalid; force a
		# failing command so the error surfaces as exit code 199
		mission = case mission do
			%{cmd: nil, submissions: nil} ->
				IO.puts :stderr, "Must have cmd or submissions, got #{inspect(mission.mission_plan)}"
				%Mission{mission | cmd: "exit 199"}
			_ -> mission
		end

		mission_pid = mission.supermission_pid

		Mission.initialized_submission(mission_pid, self())
		MissionReport.initialized_mission(mission.report_pid, self())

		{:noreply, mission}
	end
189 |
	# A submission (or this mission itself) finished: update submission
	# state, decide whether to fail fast, run more submissions, or finish.
	def handle_cast({:finished, result, finished_mpid}, mission) do
		# Indicate that finished_mpid has finished
		submission_holds = update_in_list(
			mission.submission_holds,
			fn({h, _}) -> h.pid == finished_mpid end,
			fn(h) -> Map.replace(h, :finished, true) end
		)

		# Submission might not have initialized yet, filter out nil
		sub_pids = submission_holds
		|> Enum.map(&(&1.pid))
		|> Enum.filter(&(&1))

		exit_codes = sub_pids
		|> Enum.map(&(Mission.get(&1)))
		|> Enum.filter(&(&1.finished))
		|> Enum.map(&(&1.exit_code))

		# Check if a failure should trigger a fail_fast behavior
		fail_fast? =
			self() == finished_mpid or (
				length(exit_codes) > 0
				and Enum.max(exit_codes) > 0
				and mission.fail_fast
			)

		# stop all submissions if fail_fast is necessary
		if fail_fast? do Enum.map(sub_pids, &Mission.stop/1) end

		# Boolean to check if more submissions need to run
		more_submissions? = not mission.skipped
			and not fail_fast?
			and (length(exit_codes) != mission.submissions_num)

		exit_code = cond do
			# Must not have any submissions, use whatever result is given
			length(exit_codes) == 0 -> result.status

			# Get last non-nil exit code if missions are sequential
			is_list(mission.submissions) ->
				exit_codes |> Enum.reverse |> Enum.find(&(&1))

			# Get largest exit code if parallel
			true ->
				exit_codes
				|> Enum.filter(&(&1))
				|> (fn(x) ->
					case x do
						[] -> nil
						x -> Enum.max(x)
					end
				end).()
		end

		# If submissions have not finished then more should be queued up
		# Else tell the field agent that the mission is finished
		{finished, running, exit_code} = cond do
			mission.finished ->
				{true, false, mission.exit_code}
			more_submissions? ->
				Mission.run_submissions(self(), finished_mpid)
				{false, true, nil}
			true ->
				FieldAgent.mission_has_finished(mission.field_agent_pid, result)
				{true, false, exit_code}
		end

		{:noreply, %Mission{mission |
			exit_code: exit_code,
			finished: finished,
			running: running,
			submission_holds: submission_holds,
		}}
	end
264 |
	# Output bubbling up from a submission. `data` is a keyword list carrying
	# at least :data and :pid, where :pid is the chain of mission pids the
	# output has passed through (originating submission first).
	def handle_cast({:data_and_metadata, data}, mission) do
		submission_pid = Enum.at(data[:pid], 0)
		submission_index = Enum.find_index(mission.submission_holds, &(&1.pid == submission_pid))

		# Partition output filters into key-based and index-based filters
		splits = Enum.split_with(mission.output_filter, &(&1[:key]))

		new_data = case splits do
			# All empty lists, no filter; pass the data through
			{[], []} -> [data]
			{keys, indices} ->
				indices = Enum.map(indices, &(&1[:index]))
				keys = Enum.map(keys, &(&1[:key]))

				# Keep the data only when the originating submission matches
				# one of the configured indices or keys
				cond do
					is_nil(submission_pid) -> []
					submission_index in indices -> [data]
					length(keys) == 0 -> []
					Mission.get(submission_pid).key in keys -> [data]
					true -> []
				end
		end

		case new_data do
			[] -> :ok
			_ ->
				# Prepend self() to the pid chain before forwarding upward
				pids = [self()] ++ data[:pid]
				data_without_pid = Keyword.delete(data, :pid)
				send_data = data_without_pid ++ [pid: pids]

				# Forward to the supermission if there is one,
				# otherwise to the mission report (top of the tree)
				if mission.supermission_pid do
					Mission.send(mission.supermission_pid, send_data)
				else
					MissionReport.send_data(mission.report_pid, send_data)
				end
		end

		{:noreply, %Mission{mission | output: mission.output ++ new_data}}
	end
303 |
304 | def handle_cast({:init_submission, pid}, mission) do
305 | sh = update_in_list(
306 | mission.submission_holds,
307 | fn({h, _}) -> is_nil(h.pid) end,
308 | fn(h) -> Map.replace(h, :pid, pid) end
309 | )
310 |
311 | # Send stop message
312 | if (mission.finished) do Mission.stop(pid) end
313 | {:noreply, %Mission{mission | submission_holds: sh}}
314 | end
315 |
	# Queue up the next round of submissions.
	# NOTE: the `%{}` pattern matches ANY map, so map-shaped submissions are
	# all launched at once (parallel), while list-shaped submissions are taken
	# one at a time (sequential), leaving the rest in `remaining`.
	def handle_cast({:run_submissions, prev_pid}, mission) do
		{running, remaining} = case mission.submissions do
			%{} -> {Enum.map(mission.submissions, fn({k, v}) -> [mission_plan: v, key: k] end), %{}}
			[{submission, index}|b] -> {[[mission_plan: submission, index: index]], b}
			[] -> {[], []}
			nil -> {[], nil}
		end

		# Each launched submission gets a placeholder hold; its pid is filled
		# in later by the :init_submission cast.
		sh = mission.submission_holds
		sh = sh ++ for submission <- running do
			opts = submission ++ [supermission_pid: self(), prev_mission_pid: prev_pid]
			MissionReport.init_mission(mission.report_pid, opts)
			%{pid: nil, finished: false}
		end

		{:noreply, %Mission{mission | submissions: remaining, submission_holds: sh}}
	end
333 |
	# Stop this mission by stopping its field agent; marking fail_fast ensures
	# any submissions that report back afterwards are also stopped.
	def handle_cast(:stop, mission) do
		FieldAgent.stop(mission.field_agent_pid)
		{:noreply, %Mission{mission | fail_fast: true}}
	end
338 |
339 | def handle_cast({:set_field_agent, field_agent}, mission) do
340 | mission = %Mission{mission | running: true, field_agent_pid: field_agent}
341 | plan = mission.mission_plan
342 | spid = mission.supermission_pid
343 | FieldAgent.send_mission_plan(field_agent, plan, self(), spid)
344 | {:noreply, mission}
345 | end
346 |
	def handle_cast({:report_result_up, result}, mission) do
		# Get the alternates agent, make sure all alternate outposts
		# that have this mission as its root mission are torn down.
		# The agent is registered in gproc under this mission's pid
		# (see Outpost.start_alternates).
		teardowns = :gproc.where({:n, :l, {:outpost_agent_by_mission, self()}})
			|> Agent.get(&(&1))
			|> Enum.map(fn ({_, outpost_pid}) ->
				Task.async(fn -> Outpost.teardown outpost_pid end)
			end)

		# Wait for all teardowns
		Task.yield_many teardowns

		super_pid = mission.supermission_pid
		report_pid = mission.report_pid

		# Propagate the result to the supermission if there is one, otherwise
		# notify the report that the whole mission tree is done
		cond do
			super_pid -> Mission.send_result(super_pid, result, self())
			report_pid -> MissionReport.finished_mission(report_pid, self())
			true -> :ok
		end
		{:noreply, mission}
	end
369 |
370 | #########
371 | # CALLS #
372 | #########
373 |
374 |
375 | def handle_call(:pause, _from, mission) do
376 | mission = %Mission{mission | running: false}
377 | {:reply, mission, mission}
378 | end
379 |
380 | def handle_call(:resume, _from, mission) do
381 | mission = %Mission{mission | running: true}
382 | {:reply, mission, mission}
383 | end
384 |
	# Return the full mission struct to the caller.
	def handle_call(:get, _from, mission) do
		{:reply, mission, mission}
	end
388 |
	# Return this mission's output (a list of :data values), optionally
	# narrowed to a single submission. Any lookup failure (missing key,
	# bad index, dead submission) yields [].
	def handle_call({:get_output, selector}, _from, mission) do
		output =
			try do
				case selector do
					# Empty selector means just get normal output
					nil -> mission.output

					# String selector means get submission output with same key
					"" <> output_key ->
						mission.submission_holds
						|> Enum.map(&(&1.pid))
						|> Enum.map(&Mission.get/1)
						|> Enum.find(&(&1.key == output_key))
						|> (fn(s) -> s.output end).()

					# Default/integer selector means get submissions at index
					index ->
						mission.submission_holds
						|> Enum.at(index)
						|> (fn(s) -> Mission.get(s.pid).output end).()
				end
			rescue
				# Covers nil.output / nil.pid when the selector matches nothing
				_ -> []
			end |> Enum.map(&(&1[:data]))

		{:reply, output, mission}
	end
416 |
417 | def handle_call(:get_outpost, _from, mission) do
418 | outpost_pid = try do
419 | field_agent = FieldAgent.get(mission.field_agent_pid)
420 | field_agent.outpost_pid
421 | catch
422 | :exit, _ -> nil
423 | end
424 |
425 | {:reply, outpost_pid, mission}
426 | end
427 |
	# Return the outpost plan embedded in this mission's plan, if any.
	def handle_call(:get_outpost_plan, _from, mission) do
		# FIXME: Currently does not work correctly on edge case
		# where mission extends a template or file
		# and the mission_template defines an outpost plan
		plan = case mission.mission_plan do
			%{"outpost" => plan} -> plan
			_ -> nil
		end
		{:reply, plan, mission}
	end
438 |
439 | ##################
440 | # MISC FUNCTIONS #
441 | ##################
442 |
443 | defp update_in_list(list, filter, update) do
444 | case list do
445 | [] -> []
446 | _ ->
447 | found = list
448 | |> Enum.with_index
449 | |> Enum.find(filter)
450 |
451 | case found do
452 | nil -> list
453 | {el, index} ->
454 | el = update.(el)
455 | List.replace_at(list, index, el)
456 | end
457 | end
458 | end
459 |
	# Decide whether a mission should be skipped based on its "when"
	# conditions and its previous mission's outcome. Returns true to skip.
	def determine_skipped_status(mission) do
		w = mission.when

		case {w, mission.prev_mission_pid} do
			# Don't skip if no when conditions
			{nil, _} -> false

			# Don't skip if conditions are empty
			{[], _} -> false

			# Skip if there are conditions but no previous mission to base it on
			{_, nil} -> true

			# Check when when conditions are list
			{[_|_], prev_pid} ->
				prev = Mission.get(prev_pid)
				# Join all previous output chunks into one trimmed string
				output = prev.output
					|> Enum.map(&(&1[:data]))
					|> Enum.join("")
					|> String.trim()

				# Every condition in the list must be satisfied; the first
				# unsatisfied one halts with true (skip). Each condition's
				# exit_codes/outputs values are normalized to lists.
				Enum.reduce_while(w, false, fn wcond, acc ->
					[exit_codes, outputs] = ["exit_codes", "outputs"]
						|> Enum.map(&(Map.get(wcond, &1, [])))
						|> Enum.map(&(if is_list(&1) do &1 else [&1] end))

					# check? is true when NO clause of this condition matched,
					# i.e. the condition failed and the mission must be skipped
					check? = acc or cond do
						prev.exit_code in exit_codes -> false
						output in outputs -> false
						prev.exit_code == 0 and Map.get(wcond, "success") == true -> false
						prev.exit_code > 0 and Map.get(wcond, "success") == false -> false
						true -> true
					end

					if check? do {:halt, true} else {:cont, false} end
				end)
			# Any other shape of "when" (e.g. a bare map/string) skips
			_ -> true
		end
	end
499 |
500 | def get_output_filter(output_plan, opts) do
501 | case output_plan do
502 | nil -> []
503 | [] -> []
504 | [_|_] -> output_plan
505 | x -> [x]
506 | end
507 | |> Enum.map(fn(x) ->
508 | case MissionReport.parse_variable(x, opts) do
509 | [error: _] -> nil
510 | y -> y
511 | end
512 | end)
513 | |> Enum.filter(&(&1))
514 | end
515 |
516 | defp construct_key(name) do
517 | name = name || ""
518 | name = String.replace(name, ~r/ /, "_")
519 | name = String.replace(name, ~r/[^_a-zA-Z0-9]/, "")
520 | String.downcase(name)
521 | end
522 |
523 | def construct_plan(plan) do
524 | missions = plan["missions"]
525 |
526 | Map.merge(
527 | %{
528 | name: plan["name"] || nil,
529 | when: plan["when"] || nil,
530 | mission_plan: plan,
531 | mission_plan_templates: plan["mission_plan_templates"] || nil,
532 | input_file: case Map.has_key?(plan, "input") do
533 | false -> "$IN"
534 | true -> plan["input"]
535 | end,
536 | output_filter: plan["output"],
537 | },
538 | cond do
539 | is_map(missions) -> %{
540 | submissions: missions,
541 | fail_fast: Map.get(plan, "fail_fast", false) || false # By default parallel missions don't fail fast
542 | }
543 | is_list(missions) -> %{
544 | submissions: missions |> Enum.with_index,
545 | fail_fast: Map.get(plan, "fail_fast", true) || false # By default sequential missions fail fast
546 | }
547 | true -> %{cmd: missions}
548 | end
549 | )
550 | end
551 | end
552 |
--------------------------------------------------------------------------------
/lib/cingi/mission_report.ex:
--------------------------------------------------------------------------------
defmodule Cingi.MissionReport do
	@moduledoc """
	Mission Reports are created when a mission plan is sent to a branch.
	Mission Reports hold the mission plan that ultimately creates a mission to be run.
	They are given a yaml file/string/or map and convert that into a usable elixir map.
	"""

	alias Cingi.MissionReport
	alias Cingi.Branch
	alias Cingi.Outpost
	use GenServer

	defstruct [
		plan: %{},
		branch_pid: nil,
		prev_mission_pid: nil,
		outpost_pid: nil, # Used when submitted by an outpost trying to setup
		missions: []
	]

	# Client API

	def start_link(opts) do
		GenServer.start_link(__MODULE__, opts)
	end

	# Called by a mission once it has initialized so it gets tracked here
	def initialized_mission(pid, mission_pid) do
		GenServer.cast(pid, {:mission_has_init, mission_pid})
	end

	# Called when the root mission of this report has finished
	def finished_mission(pid, mission_pid) do
		GenServer.cast(pid, {:mission_finished, mission_pid})
	end

	# Queue up a new mission described by opts (a keyword list)
	def init_mission(pid, opts) do
		GenServer.cast(pid, {:init_mission, opts})
	end

	# Forward output data up to the branch
	def send_data(pid, data) do
		GenServer.cast(pid, {:data, data})
	end

	def get(pid) do
		GenServer.call(pid, :get)
	end

	# Server Callbacks

	def init(opts) do
		# The plan can come in as a ready-made map, a yaml string, or a yaml file
		report = cond do
			opts[:map] -> opts[:map] |> start_missions(opts)
			opts[:string] -> opts[:string] |> get_map_from_yaml(&YamlElixir.read_from_string/1) |> start_missions(opts)
			opts[:file] -> opts[:file] |> get_map_from_yaml(&YamlElixir.read_from_file/1) |> start_missions(opts)
		end
		{:ok, report}
	end

	# Kick off the root mission for the parsed plan and build the report struct
	def start_missions(map, opts) do
		opts = opts |> Keyword.delete(:string) |> Keyword.delete(:file) |> Keyword.delete(:map)
		MissionReport.init_mission(self(), [mission_plan: map, prev_mission_pid: opts[:prev_mission_pid]])
		struct(MissionReport, Keyword.put(opts, :plan, map))
	end

	def handle_cast({:init_mission, opts}, report) do
		opts = opts ++ [report_pid: self(), outpost_pid: report.outpost_pid]
		Branch.init_mission(report.branch_pid, opts)
		{:noreply, report}
	end

	def handle_cast({:mission_has_init, mission_pid}, report) do
		missions = report.missions ++ [mission_pid]
		{:noreply, %MissionReport{report | missions: missions}}
	end

	def handle_cast({:mission_finished, mission_pid}, report) do
		Branch.report_has_finished(report.branch_pid, self(), mission_pid)
		# Outpost-submitted reports (setup/teardown) also notify the outpost
		if report.outpost_pid do
			Outpost.report_has_finished(report.outpost_pid, self(), mission_pid)
		end
		{:noreply, report}
	end

	def handle_cast({:data, data}, report) do
		Branch.report_data(report.branch_pid, self(), data)
		{:noreply, report}
	end

	def handle_call(:get, _from, report) do
		{:reply, report, report}
	end

	# Parse yaml with the given parser, returning %{} (and logging) on failure
	def get_map_from_yaml(yaml, parser) do
		try do
			parser.(yaml)
		catch
			err ->
				IO.puts :stderr, "Error parsing yaml: #{yaml}"
				IO.puts :stderr, inspect(err)
				%{}
		end
	end

	# Parse a variable reference like "$IN", "$IN[0]", "$IN['key']" or
	# "$IN[$LAST]" into [type: ...], [type: ..., key: ...],
	# [type: ..., index: ...] or [error: reason].
	def parse_variable(v, opts \\ []) do
		v = v || ""
		# NOTE: the named-capture tags (<vartype>, <key>, etc.) were lost in a
		# previous transformation of this file; they are required by the
		# Regex.named_captures patterns below and have been restored.
		reg = ~r/\$(?<vartype>[a-zA-Z]+)(?<invalids>[^\[]*)(?<bracket1>\[?)(?<quote1>['"]?)(?<key>\$?[a-zA-Z_0-9]*)(?<quote2>['"]?)(?<bracket2>\]?)/
		captured = Regex.named_captures(reg, v)
		case captured do
			nil -> [error: "Unrecognized pattern #{v}"]
			%{"vartype" => nil} -> [error: "Unrecognized pattern #{v}"]
			%{
				"vartype" => type,
				"key" => "",
				"invalids" => "",
				"bracket1" => "",
				"bracket2" => "",
				"quote1" => "",
				"quote2" => "",
			} -> [type: type]
			%{
				"vartype" => type,
				"key" => key,
				"invalids" => "",
				"bracket1" => "[",
				"bracket2" => "]",
			} ->
				case captured do
					%{"key" => ""} -> [error: "Empty/bad key"]
					%{"quote1" => "'", "quote2" => "'"} -> [type: type, key: key]
					%{"quote1" => "\"", "quote2" => "\""} -> [type: type, key: key]
					%{"quote1" => "", "quote2" => ""} ->
						# Unquoted keys may be integer indices or the special $LAST
						case {key, Integer.parse(key)} do
							{"$LAST", _} -> [type: type, index: opts[:last_index]]
							{_, :error} -> [type: type, key: key]
							{_, {i, ""}} -> [type: type, index: i]
							{_, {_, _}} -> [error: "Invalid index"]
						end
					_ -> [error: "Nonmatching quotes"]
				end
			%{"invalids" => ""} -> [error: "Nonmatching brackets"]
			_ -> [error: "Invalid characters"]
		end
	end
end
144 |
--------------------------------------------------------------------------------
/lib/cingi/outpost.ex:
--------------------------------------------------------------------------------
defmodule Cingi.Outpost do
	@moduledoc """
	Outposts are processes set up by commanders to connect to their branch
	and receive missions. Outposts have to set up the environment,
	like a workspace folder, by setting the environment variables,
	working directory, and any setup steps that need to run when a mission
	is being run by any node.
	"""

	alias Cingi.Outpost
	alias Cingi.Branch
	alias Cingi.FieldAgent
	alias Cingi.Mission
	alias Cingi.MissionReport
	use GenServer

	defstruct [
		name: nil,
		node: nil,

		pid: nil,
		branch_pid: nil,
		root_mission_pid: nil,
		parent_pid: nil,
		child_pids: [],

		# Agent pid holding a %{branch_pid => outpost_pid} map of this
		# outpost's versions across branches
		alternates: nil,

		plan: %{},

		setup: nil,
		is_setup: false,
		setting_up: false,
		setup_failed: false,

		teardown: nil,
		is_torndown: false,
		tearing_down: false,
		teardown_failed: false,
		teardown_callback_pid: nil,

		dir: ".",
		env: %{},

		# Field agents queued while setup steps are still running
		queued_field_agents: [],
		missions: [],
	]

	# Client API

	def start_link(args \\ []) do
		# Only register a name when one was given
		opts = case args[:name] do
			nil -> []
			name -> [name: name]
		end
		GenServer.start_link(__MODULE__, args, opts)
	end

	def get(pid) do
		GenServer.call(pid, :get)
	end

	# Returns the version of this outpost on the given branch, or nil when
	# the outpost process is dead or no version exists there
	def get_version_on_branch(pid, branch_pid) do
		try do
			GenServer.call(pid, {:outpost_on_branch, branch_pid})
		catch
			:exit, _ -> nil
		end
	end

	def create_version_on_branch(pid, branch_pid) do
		start_link(original: pid, branch_pid: branch_pid)
	end

	def get_or_create_version_on_branch(pid, branch_pid) do
		case get_version_on_branch(pid, branch_pid) do
			nil -> create_version_on_branch(pid, branch_pid)
			x -> {:ok, x}
		end
	end

	def field_agent_data(pid, fa_pid, data) do
		GenServer.cast(pid, {:field_agent_data, fa_pid, data})
	end

	def run_mission(pid, mission) do
		GenServer.cast(pid, {:run_mission, mission})
	end

	def mission_has_run(pid, mission_pid) do
		GenServer.cast(pid, {:mission_has_run, mission_pid})
	end

	def mission_plan_has_finished(pid, fa_pid) do
		GenServer.cast(pid, {:mission_plan_has_finished, fa_pid})
	end

	def mission_has_finished(pid, mission_pid, result) do
		GenServer.cast(pid, {:mission_has_finished, mission_pid, result})
	end

	def report_has_finished(pid, report_pid, mission_pid) do
		GenServer.cast(pid, {:report_has_finished, report_pid, mission_pid})
	end

	def setup_with_steps(pid) do
		GenServer.cast(pid, :setup_with_steps)
	end

	def queue_field_agent_for_plan(pid, file, fa_pid) do
		GenServer.cast(pid, {:queue_field_agent, {:mission_plan, fa_pid, file}})
	end

	def queue_field_agent_for_bash(pid, fa_pid) do
		GenServer.cast(pid, {:queue_field_agent, {:bash_process, fa_pid}})
	end

	# Call explicitly, don't use Agent module with anonymous functions
	# See section on "A word on distributed agents"
	# https://github.com/elixir-lang/elixir/blob/cddc99b1d393e99a45db239334aba7bcbff3b218/lib/elixir/lib/agent.ex#L102
	def get_alternates(pid) do
		GenServer.call(pid, :get_alternates)
	end

	def update_alternates(pid) do
		GenServer.cast(pid, :update_alternates)
	end

	def inform_parent_of_child(parent_pid, child_pid) do
		GenServer.cast(parent_pid, {:inform_parent_of_child, child_pid})
	end

	# Synchronously tear the outpost down; nil when it is already dead
	def teardown(pid) do
		try do
			GenServer.call(pid, :teardown)
		catch
			:exit, _ -> nil
		end
	end

	# Server Callbacks

	def init(opts) do
		# When spawned as a version of an original outpost, copy the shared
		# attributes from the original; otherwise build from opts directly
		outpost = case opts[:original] do
			nil -> struct(Outpost, opts)
			opid ->
				try do
					o = Outpost.get opid
					%Outpost{
						name: o.name,
						alternates: o.alternates,
						parent_pid: o.parent_pid,
						plan: o.plan,
						root_mission_pid: o.root_mission_pid,
						setup: o.setup,
						teardown: o.teardown,
					}
				catch
					# FIXME: Make a blank outpost with bad setup steps instead to fail fast
					:exit, _ -> struct(Outpost, opts)
				end
		end

		outpost = %Outpost{outpost |
			node: Node.self,
			branch_pid: opts[:branch_pid],
			pid: self(),
			setup: outpost.plan["setup"],
			teardown: outpost.plan["teardown"],

			# Branch synchronously creates new outposts and their versions,
			# So outposts on a branch are created one at a time
			# So if new version of parent needs to be created,
			# it needs to be created atomically along with the new outpost itself
			# to prevent race condition with outpost creation on branch
			parent_pid: case {outpost.parent_pid, opts[:branch_pid]} do
				{nil, _} -> nil
				{_, nil} -> nil
				{ppid, bpid} -> elem(Outpost.get_or_create_version_on_branch(ppid, bpid), 1)
			end,
		}

		Outpost.inform_parent_of_child(outpost.parent_pid, self())
		Outpost.update_alternates(self())
		{:ok, outpost}
	end

	def handle_call(:get, _from, outpost) do
		{:reply, outpost, outpost}
	end

	def handle_call({:outpost_on_branch, branch_pid}, _from, outpost) do
		alternate = Agent.get(outpost.alternates, &(&1))[branch_pid]
		{:reply, alternate, outpost}
	end

	def handle_call(:get_alternates, _from, outpost) do
		alternates = Agent.get(outpost.alternates, &(&1))
		{:reply, alternates, outpost}
	end

	def handle_call(:teardown, from_pid, outpost) do
		case outpost.teardown do
			# No teardown steps: reply immediately
			nil ->
				outpost = %Outpost{outpost | is_torndown: true}
				{:reply, outpost, outpost}
			# Teardown steps run asynchronously; the reply is deferred until
			# teardown_report_has_finished calls GenServer.reply
			teardown ->
				run_setup_or_teardown(outpost, teardown)
				{:noreply, %Outpost{outpost |
					tearing_down: true,
					teardown_callback_pid: from_pid,
				}}
		end
	end

	def handle_cast({:field_agent_data, _fa_pid, data}, outpost) do
		Branch.outpost_data(outpost.branch_pid, self(), data)
		{:noreply, outpost}
	end

	def handle_cast(:update_alternates, outpost) do
		# Lazily create the alternates agent on first use
		alternates = case outpost.alternates do
			nil -> Outpost.start_alternates(outpost.root_mission_pid)
			x -> x
		end

		self_pid = self()
		Agent.update(alternates, &(Map.put_new(&1, outpost.branch_pid, self_pid)))

		{:noreply, %Outpost{outpost | alternates: alternates}}
	end

	def handle_cast({:inform_parent_of_child, child_pid}, outpost) do
		child_pids = outpost.child_pids ++ [%{pid: child_pid}]
		{:noreply, %Outpost{outpost | child_pids: child_pids}}
	end

	def handle_cast({:run_mission, mission}, outpost) do
		FieldAgent.start_link(mission_pid: mission, outpost_pid: self())
		{:noreply, %Outpost{outpost | missions: outpost.missions ++ [mission]}}
	end

	def handle_cast({:mission_has_run, mission_pid}, outpost) do
		Branch.mission_has_run(outpost.branch_pid, mission_pid)
		{:noreply, outpost}
	end

	def handle_cast({:mission_plan_has_finished, fa_pid}, outpost) do
		FieldAgent.run_mission(fa_pid)
		{:noreply, outpost}
	end

	def handle_cast({:mission_has_finished, mission_pid, result}, outpost) do
		Branch.mission_has_finished(outpost.branch_pid, mission_pid, result)
		{:noreply, outpost}
	end

	def handle_cast(:setup_with_steps, outpost) do
		# Only start setup once, no matter how many field agents queue up
		outpost = case outpost.setting_up do
			true -> outpost
			false ->
				case {outpost.setup, outpost.parent_pid} do
					# Nothing to set up and no parent; finish immediately
					{nil, nil} -> Outpost.report_has_finished(self(), nil, nil)
					{osetup, _} ->
						root_mission = Mission.get(outpost.root_mission_pid)
						run_setup_or_teardown(outpost, osetup, root_mission.prev_mission_pid)
				end
				%Outpost{outpost | setting_up: true}
		end
		{:noreply, outpost}
	end

	def handle_cast({:queue_field_agent, queued_fa_tup}, outpost) do
		outpost = case outpost.is_setup do
			# Already set up: run the field agent right away
			true ->
				run_field_agent(queued_fa_tup, outpost)
				outpost
			# Not set up yet: trigger setup and queue the field agent
			false ->
				Outpost.setup_with_steps self()
				queue = outpost.queued_field_agents ++ [queued_fa_tup]
				%Outpost{outpost | queued_field_agents: queue}
		end
		{:noreply, outpost}
	end

	def handle_cast({:report_has_finished, _report_pid, mission_pid}, outpost) do
		# A setup or teardown report finished; dispatch to the right handler
		outpost = case {outpost.setting_up, outpost.tearing_down} do
			{false, false} -> outpost
			{true, _} -> setup_report_has_finished(mission_pid, outpost)
			{_, true} -> teardown_report_has_finished(mission_pid, outpost)
			_ -> outpost
		end
		{:noreply, outpost}
	end

	###########
	# HELPERS #
	###########

	# Finalize setup: parse the setup's last output line as yaml so its values
	# can substitute $SETUP[...] references in dir/env, then run (or stop, on
	# setup failure) all queued field agents.
	def setup_report_has_finished(mission_pid, outpost) do
		# Get last line of output from setup and see if it is in a proper format
		output = try do
			Mission.get_output(mission_pid)
			|> Enum.join("\n")
			|> String.split("\n", trim: true)
			|> Enum.take(-1)
			|> Enum.at(0)
		rescue
			_ -> ""
		end

		output = try do
			YamlElixir.read_from_string(output || "")
		catch
			err ->
				IO.puts :stderr, "Error parsing setup steps line: #{output}"
				IO.puts :stderr, inspect(err)
				%{}
		end

		# output needs to be a map
		output = case output do
			%{} -> output
			_ -> %{}
		end

		# Replaces "$SETUP['key']"-style variables with values from the
		# parsed setup output; other values pass through unchanged
		replace_with = fn(var) ->
			case MissionReport.parse_variable(var) do
				[type: "SETUP", key: key] -> output[key]
				_ -> var
			end
		end

		# Inherit dir/env from the parent outpost when one exists
		base_outpost = case outpost.parent_pid do
			nil -> outpost
			ppid -> Outpost.get(ppid)
		end

		base_dir = outpost.plan["dir"] || base_outpost.dir
		base_env = Map.merge(base_outpost.env, outpost.plan["env"] || %{})

		dir = replace_with.(base_dir)
		env = base_env
			|>
			Enum.map(fn({k, v}) ->
				{replace_with.(k), replace_with.(v)}
			end)
			|>
			Enum.filter(fn(x) ->
				# Drop entries where substitution produced nil on either side
				case x do
					{nil, _} -> false
					{_, nil} -> false
					_ -> true
				end
			end)
			|> Enum.into(%{})

		# A nil mission_pid (no setup steps were run) counts as success
		exit_code = case mission_pid do
			nil -> 0
			_ -> Mission.get(mission_pid).exit_code
		end

		# On failure, stop all queued field agents instead of running them
		setup_failed = case exit_code do
			0 -> false
			_ -> Enum.map(outpost.queued_field_agents, &(FieldAgent.stop(elem(&1, 1))))
				true
		end

		outpost = %Outpost{outpost |
			dir: dir || ".",
			env: env,
			is_setup: true,
			setting_up: false,
			setup_failed: setup_failed,
		}

		Enum.map(outpost.queued_field_agents, &(run_field_agent(&1, outpost)))

		%Outpost{outpost | queued_field_agents: []}
	end

	# Finalize teardown and deliver the deferred reply to the caller of
	# the :teardown GenServer call
	def teardown_report_has_finished(_mission_pid, outpost) do
		outpost = %Outpost{outpost |
			is_torndown: true,
			tearing_down: false,
		}
		GenServer.reply(outpost.teardown_callback_pid, outpost)
		outpost
	end

	# Run (or stop, when setup failed) a queued bash-process field agent
	def run_field_agent({:bash_process, fa_pid}, outpost) do
		case outpost.setup_failed do
			false -> FieldAgent.run_bash_process fa_pid
			true -> FieldAgent.stop fa_pid
		end
	end

	# Load a mission-plan yaml file relative to the outpost dir and hand the
	# plan to the field agent; missing/unparsable files become empty plans
	def run_field_agent({:mission_plan, fa_pid, file}, outpost) do
		path = Path.join(outpost.dir, file)
		plan = try do
			YamlElixir.read_from_file path
		catch
			_ ->
				IO.puts :stderr, "File #{path} does not exist here in #{System.cwd}"
				%{}
		end

		FieldAgent.send_mission_plan(fa_pid, plan, nil, fa_pid)
	end

	# Queue the given setup/teardown steps as a mission report on the branch;
	# nil steps become the no-op command ":"
	def run_setup_or_teardown(outpost, missions, prev_mission_pid \\ nil) do
		missions = missions || [":"]

		yaml_opts = [
			prev_mission_pid: prev_mission_pid,
			map: %{"missions" => missions},
			outpost_pid: self(),
		]
		Branch.queue_report outpost.branch_pid, yaml_opts
	end

	# Should not be called on its own,
	# Need to start alternates in the headquarters node
	# So call this with :rpc.call in the hq node
	# So gproc registers agent on hq node
	def start_alternates(root_mission_pid) do
		{:ok, agent_pid} = Agent.start_link(fn ->
			case root_mission_pid do
				nil -> :ok
				mpid -> :gproc.reg {:n, :l, {:outpost_agent_by_mission, mpid}}
			end
			%{}
		end)
		agent_pid
	end
end
437 |
--------------------------------------------------------------------------------
/mix.exs:
--------------------------------------------------------------------------------
defmodule Cingi.Mixfile do
	use Mix.Project

	def project do
		[
			app: :cingi,
			name: "CingI",
			version: "0.1.3",
			elixir: "~> 1.5",
			# Built as an escript; -noinput keeps the VM from reading stdin
			# so piped input reaches the missions instead
			escript: [
				main_module: Cingi.CLI,
				emu_args: "-noinput",
			],
			source_url: "https://github.com/Rhathe/CingI",
			start_permanent: Mix.env == :prod,
			deps: deps(),
			package: package(),
			description: """
			Continuous-ing Integration (...core). A distributed pipeline-based
			command line task runner providing the core functionality for a CI server.
			""",
		]
	end

	# Run "mix help compile.app" to learn about applications.
	def application do
		[
			# Runtime applications that must be started before this one
			applications: [
				:porcelain,
				:yaml_elixir,
				:gproc,
			],
			extra_applications: [:logger],
			mod: {Cingi, []},
		]
	end

	# Run "mix help deps" to learn about dependencies.
	defp deps do
		[
			{:porcelain, "~> 2.0"},   # external process management
			{:yaml_elixir, "~> 1.3.1"}, # mission plan parsing
			{:temp, "~> 0.4"},        # temp files for the bash wrapper script
			{:gproc, "~> 0.5.0"},     # distributed process registry
			{:ex_doc, ">= 0.0.0", only: :dev},
		]
	end

	# Hex package metadata
	defp package do
		[
			maintainers: ["Ramon Sandoval"],
			licenses: ["MIT"],
			links: %{github: "https://github.com/Rhathe/CingI"},
			files: ~w(mix.exs lib priv README.md config c-ing-i-logo.svg LICENSE),
		]
	end
end
58 |
--------------------------------------------------------------------------------
/mix.lock:
--------------------------------------------------------------------------------
1 | %{"earmark": {:hex, :earmark, "1.2.3", "206eb2e2ac1a794aa5256f3982de7a76bf4579ff91cb28d0e17ea2c9491e46a4", [], [], "hexpm"},
2 | "ex_doc": {:hex, :ex_doc, "0.16.4", "4bf6b82d4f0a643b500366ed7134896e8cccdbab4d1a7a35524951b25b1ec9f0", [], [{:earmark, "~> 1.1", [hex: :earmark, repo: "hexpm", optional: false]}], "hexpm"},
3 | "gproc": {:hex, :gproc, "0.5.0", "2df2d886f8f8a7b81a4b04aa17972b5965bbc5bf0100ea6d8e8ac6a0e7389afe", [:rebar], [], "hexpm"},
4 | "porcelain": {:hex, :porcelain, "2.0.3", "2d77b17d1f21fed875b8c5ecba72a01533db2013bd2e5e62c6d286c029150fdc", [], [], "hexpm"},
5 | "temp": {:hex, :temp, "0.4.3", "b641c3ce46094839bff110fdb64162536d640d9d47ca2c37add9104a2fa3bd81", [], [], "hexpm"},
6 | "yamerl": {:hex, :yamerl, "0.5.0", "6ec55a5d830f6f0d65a4030f5c5db24b0e72b813dfbde32fea44b4951ed9417c", [], [], "hexpm"},
7 | "yaml_elixir": {:hex, :yaml_elixir, "1.3.1", "b84b6333343b0cba176c43c463e622f838825e7476e35334f719b83df4535bff", [], [{:yamerl, "~> 0.5", [hex: :yamerl, repo: "hexpm", optional: false]}], "hexpm"}}
8 |
--------------------------------------------------------------------------------
/priv/bin/wrapper.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Wrapper around a single mission command. It runs the command (optionally
# fed from an input file), tracks its process group, and guarantees the
# whole group is killed when this script exits or receives "kill" on stdin.

set -m

# Had to make this script a temp file because
# escripts aren't packaged with the priv directory
# yet erlang still requires a wrapper to prevent zombie processes
# So this current script is always made temporarily
# and also needs to be cleaned up, unless it's being
# used in a test directly, in which case,
# don't delete
if [[ "$0" =~ "priv/bin/wrapper.sh" ]]; then
	:
else
	rm "$0" > /dev/null 2>&1
fi

cmd=$1
file="$2"
del_tmp_file="$3"
PID=""
PGID=""
STDIN_PID=""

# Kill the command, its process group, and the stdin watcher, preserving
# the exit code that was current when cleanup was triggered
cleanup() {
	exit_code=$?

	# Indicated to be a tmp_file, delete here so that exit traps clean it up
	if [ "$del_tmp_file" = "true" ]; then
		rm "$file" > /dev/null 2>&1
	fi

	kill -KILL "$PID" > /dev/null 2>&1
	kill -KILL -"$PID" > /dev/null 2>&1
	kill -KILL -"$PGID" > /dev/null 2>&1
	kill -KILL $STDIN_PID > /dev/null 2>&1
	exit $exit_code
}

trap cleanup EXIT

if [ "$#" -eq "1" ]; then
	bash -c "$cmd"&
	PID=$!
else
	# Pipe input file to cmd with cat, suppress stderr since
	# pipe can be broken but we don't care
	(cat "$file" 2> /dev/null) | (bash -c "$cmd") &
	PID=$!
fi

# Get PGID to kill all child processes
# https://stackoverflow.com/questions/392022/best-way-to-kill-all-child-processes
PGID=$(ps opgid= "$PID")

# Needed wrapper because Erlang VM sends EOF when process dies, but
# some programs don't respect the EOF signal, so a kill is necessary
# NOTE: Don't use /bin/sh, <&0 redirection does not work
{
	while read line ; do
		if [ "$line" = "kill" ]; then
			cleanup
		fi
	done
	cleanup
} <&0 &
STDIN_PID=$!

wait $PID > /dev/null 2>&1
70 |
--------------------------------------------------------------------------------
/test/bash_scripts/tmpdir.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Create a temporary working directory (optionally under the directory given
# as $1), copy this script into it, and print the new and old directories as
# a JSON line for outpost setup steps to parse.
#
# Fixes: all expansions are quoted so paths containing spaces work, and
# deprecated backticks are replaced with $() command substitution.

if [ -z "$1" ]; then
	WORK_DIR=$(mktemp -d)
else
	WORK_DIR=$(mktemp -d -p "$1")
fi

cp "$0" "$WORK_DIR"

OLD_DIR=$(pwd)
echo "{\"dir\": \"$WORK_DIR\", \"old_dir\": \"$OLD_DIR\"}"
13 |
--------------------------------------------------------------------------------
/test/cingi/branch_test.exs:
--------------------------------------------------------------------------------
1 | defmodule CingiBranchTest do
2 | use ExUnit.Case
3 | alias Cingi.Branch
4 | alias Cingi.Headquarters
5 | alias Cingi.Outpost
6 | alias Cingi.Mission
7 | doctest Branch
8 |
	test "creates branch" do
		# A freshly started branch is running with no reports or missions yet
		{:ok, pid} = Branch.start_link()
		assert %{
			running: true,
			mission_reports: [],
			started_missions: [],
			running_missions: [],
			finished_missions: []
		} = Branch.get(pid)
	end
19 |
20 | test "can create mission report" do
21 | res = Helper.create_mission_report([string: "missions: echo 1"])
22 | assert %{"missions" => "echo 1"} = res[:report].plan
23 | assert res[:report_pid] in res[:branch].mission_reports
24 | end
25 |
26 | test "runs queued missions" do
27 | res = Helper.create_mission_report([string: "missions: echo 1"])
28 | bpid = res[:branch_pid]
29 | mpid = res[:mission_pid]
30 | Headquarters.resume(res[:hq_pid])
31 | Helper.check_exit_code mpid
32 |
33 | branch = Helper.wait_for_finished_missions(bpid, 1)
34 | assert length(branch.started_missions) == 0
35 | assert length(branch.running_missions) == 0
36 | assert length(branch.finished_missions) == 1
37 | mission = Helper.check_exit_code(mpid)
38 | assert [[data: "1\n", type: :out, timestamp: _, field_agent_pid: _, pid: []]] = mission.output
39 | end
40 |
41 | test "runs missions with outputs" do
42 | cmd_1 = " - echo -e \"match1\\nignored2\\nmatch3\""
43 | grep_cmd = " - missions: grep match\n input: $IN"
44 |
45 | res = Helper.create_mission_report([string: "\nmissions:\n#{cmd_1}\n#{grep_cmd}\n - echo end"])
46 | Headquarters.resume(res[:hq_pid])
47 | mission = Helper.check_exit_code(res[:mission_pid])
48 |
49 | outputs = mission.output
50 | |> Enum.map(&(String.split(&1[:data], "\n", trim: true)))
51 | |> List.flatten
52 |
53 | assert ["match1", "ignored2", "match3", "match1", "match3", "end"] = outputs
54 | end
55 |
56 | test "runs sequential submissions" do
57 | cmd8000 = "ncat -l -i 1 8000"
58 | cmd8001 = "ncat -l -i 1 8001"
59 |
60 | yaml = "missions:\n - #{cmd8000}\n - #{cmd8001}"
61 | res = Helper.create_mission_report([string: yaml])
62 | Headquarters.resume(res[:hq_pid])
63 | bpid = res[:branch_pid]
64 |
65 | branch = Helper.wait_for_running_missions(bpid, 2)
66 | assert length(branch.started_missions) == 0
67 | assert length(branch.running_missions) == 2
68 | assert length(branch.finished_missions) == 0
69 |
70 | mission = Mission.get(res[:mission_pid])
71 | assert %{output: [], exit_code: nil, submission_holds: [sm1]} = mission
72 | submission1 = Mission.get(sm1.pid)
73 |
74 | assert %{cmd: ^cmd8000, running: true, finished: false} = submission1
75 |
76 | Helper.wait_for_process cmd8000
77 | Porcelain.exec("bash", [ "-c", "echo -n blah1 | ncat localhost 8000"])
78 |
79 | Helper.wait_for_finished_missions(bpid, 1)
80 | branch = Helper.wait_for_running_missions(bpid, 2)
81 |
82 | assert length(branch.started_missions) == 0
83 | assert length(branch.running_missions) == 2
84 | assert length(branch.finished_missions) == 1
85 |
86 | mission = Mission.get(res[:mission_pid])
87 | assert %{output: output, exit_code: nil, submission_holds: [sm1, sm2]} = mission
88 | sm1pid = sm1.pid
89 | assert [[data: "blah1", type: :out, timestamp: _, field_agent_pid: _, pid: [^sm1pid]]] = output
90 |
91 | submission1 = Mission.get(sm1.pid)
92 | submission2 = Mission.get(sm2.pid)
93 |
94 | assert %{cmd: ^cmd8000, running: false, finished: true} = submission1
95 | assert %{cmd: ^cmd8001, running: true, finished: false} = submission2
96 |
97 | Helper.wait_for_process cmd8001
98 | Porcelain.spawn("bash", [ "-c", "echo -n blah2 | ncat localhost 8001"])
99 | mission = Helper.check_exit_code(res[:mission_pid])
100 |
101 | sm1pid = sm1.pid
102 | sm2pid = sm2.pid
103 | assert %{output: output, exit_code: 0} = mission
104 | assert [
105 | [data: "blah1", type: :out, timestamp: _, field_agent_pid: _, pid: [^sm1pid]],
106 | [data: "blah2", type: :out, timestamp: _, field_agent_pid: _, pid: [^sm2pid]]
107 | ] = output
108 |
109 | submission2 = Mission.get(sm2.pid)
110 | assert %{cmd: ^cmd8001, running: false, finished: true} = submission2
111 |
112 | branch = Helper.wait_for_finished_missions(bpid, 3)
113 | assert length(branch.started_missions) == 0
114 | assert length(branch.running_missions) == 0
115 | assert length(branch.finished_missions) == 3
116 | end
117 |
118 | test "runs parallel submissions" do
119 | yaml = Enum.map [1,2,3,4], &(" s#{&1}: ncat -l -i 1 900#{&1}")
120 | yaml = ["missions:"] ++ yaml
121 | yaml = Enum.join yaml, "\n"
122 |
123 | res = Helper.create_mission_report([string: yaml])
124 | Headquarters.resume(res[:hq_pid])
125 |
126 | branch = Helper.wait_for_running_missions(res[:branch_pid], 5)
127 | assert length(branch.started_missions) == 0
128 | assert length(branch.running_missions) == 5
129 |
130 | Enum.map [1,2,3,4], &(Helper.wait_for_process("ncat -l -i 1 900#{&1}"))
131 | finish = &(Porcelain.exec("bash", [ "-c", "echo -n blah#{&1} | ncat localhost 900#{&1}"]))
132 |
133 | finish.(3)
134 | Helper.wait_for_submissions_finish(res[:mission_pid], 1)
135 | finish.(2)
136 | Helper.wait_for_submissions_finish(res[:mission_pid], 2)
137 | finish.(4)
138 | Helper.wait_for_submissions_finish(res[:mission_pid], 3)
139 | finish.(1)
140 | Helper.wait_for_submissions_finish(res[:mission_pid], 4)
141 |
142 | mission = Helper.check_exit_code(res[:mission_pid])
143 | assert %{output: [
144 | [data: "blah3", type: :out, timestamp: _, field_agent_pid: _, pid: [pid1]],
145 | [data: "blah2", type: :out, timestamp: _, field_agent_pid: _, pid: [pid2]],
146 | [data: "blah4", type: :out, timestamp: _, field_agent_pid: _, pid: [pid3]],
147 | [data: "blah1", type: :out, timestamp: _, field_agent_pid: _, pid: [pid4]]
148 | ], exit_code: 0} = mission
149 |
150 | pids = mission.submission_holds |> Enum.map(&(&1.pid))
151 | assert pid1 != pid2 != pid3 != pid4
152 | assert pid1 in pids
153 | assert pid2 in pids
154 | assert pid3 in pids
155 | assert pid4 in pids
156 | end
157 |
158 | test "runs example file" do
159 | res = Helper.create_mission_report([file: "test/mission_plans/example.yaml"])
160 | hpid = res[:hq_pid]
161 | Headquarters.resume(hpid)
162 | mission = Helper.check_exit_code(res[:mission_pid])
163 | output = mission.output |> Enum.map(&(&1[:data]))
164 | assert ["beginning\n", a, b, c, d, e, f, g, grepped, "end\n"] = output
165 |
166 | l1 = Enum.sort(["match 1\n", "ignored 2\n", "match 3\n", "ignored 4\n", "match 5\n", "ignored 6\n", "match 7\n"])
167 | l2 = Enum.sort([a, b, c, d, e, f, g])
168 | assert ^l1 = l2
169 |
170 | matches = grepped |> String.split("\n") |> Enum.sort
171 | assert length(matches) == 5
172 | match_check = Enum.sort(["match 1", "match 3", "match 5", "match 7", ""])
173 | assert ^match_check = matches
174 | end
175 |
176 | test "make sure inputs are passed correctly to nested missions" do
177 | res = Helper.create_mission_report([file: "test/mission_plans/nested.yaml"])
178 | Headquarters.resume(res[:hq_pid])
179 | mission = Helper.check_exit_code(res[:mission_pid])
180 | output = mission.output |> Enum.map(&(&1[:data]))
181 |
182 | assert [
183 | "blah1\n",
184 | "blah1\n",
185 | "1match1\n",
186 | "2match2\n",
187 | "1match3\n",
188 | "2match1\n",
189 | "ignored\n",
190 | "1match4\n",
191 | "2match5\n",
192 | "1match1\n2match2\n1match3\n2match1\n1match4\n2match5\n",
193 | "2match2\n2match1\n2match5\n",
194 | a,
195 | b,
196 | ] = output
197 |
198 | sublist = [a, b]
199 | assert "2match1\n" in sublist
200 | assert "2match5\n" in sublist
201 | end
202 |
203 | test "generates correct outposts" do
204 | res = Helper.create_mission_report([file: "test/mission_plans/outposts/simple.yaml"])
205 | bpid = res[:branch_pid]
206 | mpid = res[:mission_pid]
207 | Headquarters.resume(res[:hq_pid])
208 | Helper.check_exit_code mpid
209 |
210 | opids = Branch.get(bpid).finished_missions
211 | |> Enum.map(&Mission.get_outpost/1)
212 | |> Enum.uniq
213 |
214 | assert length(opids) == 2
215 | outposts = opids |> Enum.map(&Outpost.get/1)
216 |
217 | assert %{
218 | alternates: _,
219 | node: :nonode@nohost,
220 | } = Enum.at(outposts, 0)
221 | end
222 |
223 | test "gets correct exit codes fails fast when necessary" do
224 | res = Helper.create_mission_report([file: "test/mission_plans/exits.yaml"])
225 | bpid = res[:branch_pid]
226 | mpid = res[:mission_pid]
227 | Headquarters.resume(res[:hq_pid])
228 |
229 | branch = Helper.wait_for_finished_missions(bpid, 12)
230 | assert length(branch.started_missions) == 0
231 |
232 | # non-fail fast ncat task, its parent,
233 | # the whole parallel mission, and the mission itself
234 | assert length(branch.running_missions) == 4
235 |
236 | # 1 sequential supermission
237 | # 2 submissions below that
238 | # 5 sequential missions
239 | # 1 fail fast parallel supermission
240 | # 2 fail fast parallel missions
241 | # 1 non-fail fast parallel mission
242 | assert length(branch.finished_missions) == 12
243 |
244 | Porcelain.exec("bash", [ "-c", "echo -n endncat | ncat localhost 9991"])
245 | Helper.check_exit_code mpid
246 |
247 | mission = Mission.get(mpid)
248 | assert 7 = mission.exit_code
249 |
250 | output = mission.output |>
251 | Enum.map(&(&1[:data]))
252 |
253 | assert [a, b, c, "endncat"] = output
254 | l1 = Enum.sort(["seq_continue\n", "Should still be in seq_continue\n", "seq_fail_fast\n"])
255 | assert ^l1 = Enum.sort([a, b, c])
256 | end
257 | end
258 |
--------------------------------------------------------------------------------
/test/cingi/extends_file_plan_test.exs:
--------------------------------------------------------------------------------
defmodule CingiExtendsFilePlanTest do
	@moduledoc """
	Verifies that mission plans can extend other plan files, including
	files created during an outpost's setup phase.
	"""
	use ExUnit.Case

	describe "extends file" do
		# Run the plan once; the returned context map (with :output) is
		# merged into each test's context below.
		setup do
			Helper.run_mission_report("test/mission_plans/extends/file.yaml")
		end

		test "right amount of output", context do
			assert length(context.output) == 3
		end

		test "can extend file created in outpost setup", context do
			assert Enum.at(context.output, 0) == "in_extends_file_2"
		end

		test "can extend template that extends file in template", context do
			assert Enum.at(context.output, 1) == "in_extends_file_1"
		end

		test "can still extend template along with extending file", context do
			assert Enum.at(context.output, 2) == "two"
		end
	end
end
26 |
--------------------------------------------------------------------------------
/test/cingi/extends_template_plan_test.exs:
--------------------------------------------------------------------------------
defmodule CingiExtendsTemplatePlanTest do
	@moduledoc """
	Verifies template extension between missions: nesting, lookup through
	supermissions, and the error path when a template key is missing.
	"""
	use ExUnit.Case

	import ExUnit.CaptureIO

	describe "extends template" do
		# The plan intentionally references a missing template key, so run it
		# while capturing stderr, assert on the warning message, and deliver
		# the mission context back to the setup via the process mailbox.
		setup do
			run_plan = fn ->
				result = Helper.run_mission_report("test/mission_plans/extends/template.yaml")
				send self(), {:ctx, result}
			end

			assert capture_io(:stderr, run_plan) =~ "Template key no_mission_plan doesn't exist in the hierarchy"

			receive do
				{:ctx, result} -> result
			end
		end

		test "right amount of output", context do
			assert length(context.output) == 7
		end

		test "extends correct template in supermission", context do
			assert Enum.at(context.output, 0) == "one"
		end

		test "extends template within template", context do
			assert Enum.at(context.output, 1) == "onetwo"
		end

		test "once extended, can extend templates in new context", context do
			assert Enum.at(context.output, 2) == "three"
		end

		test "extends template two missions up", context do
			assert Enum.at(context.output, 3) == "two"
		end

		test "extends supermission template, not template in same mission", context do
			assert Enum.at(context.output, 4) == "four"
			refute "four shouldn't be here" in context.output
		end

		test "extends template that extends another template", context do
			assert Enum.at(context.output, 5) == "nested_complete"
		end

		test "if no template found, exit", context do
			assert Enum.at(context.output, 6) == "premature end"
			refute "unreachable end" in context.output
			assert context.exit_code == 199
		end
	end
end
56 |
--------------------------------------------------------------------------------
/test/cingi/fail_fast_plan_test.exs:
--------------------------------------------------------------------------------
defmodule CingiFailFastPlanTest do
	@moduledoc """
	Verifies that a fail-fast mission plan stops after the failing step
	and propagates the failing exit code.
	"""
	use ExUnit.Case

	describe "fail fast" do
		# Run the plan once; :output and :exit_code land in each test context.
		setup do
			Helper.run_mission_report("test/mission_plans/fail_fast.yaml")
		end

		test "right output", context do
			assert context.output == ["two"]
		end

		test "right exit_code", context do
			assert context.exit_code == 5
		end
	end
end
18 |
--------------------------------------------------------------------------------
/test/cingi/field_agent_test.exs:
--------------------------------------------------------------------------------
defmodule CingiFieldAgentTest do
	@moduledoc """
	Tests for Cingi.FieldAgent, the process that executes a single
	mission. Plan-parsing tests use a mock outpost (no execution);
	execution tests (running, killing, exit codes) use a real blank
	outpost.
	"""
	use ExUnit.Case
	alias Cingi.FieldAgent
	alias Cingi.Mission
	alias Cingi.Outpost
	doctest FieldAgent

	import ExUnit.CaptureIO

	describe "with a mock outpost" do
		setup [:mock_outpost]

		test "constructs with yaml command", ctx do
			# A bare string plan becomes the cmd; key is derived from the cmd
			{_, mpid} = fa_with_plan("echo 1", ctx.outpost_pid)
			mission = Mission.get(mpid)
			assert mission.key == "echo_1"
			assert mission.cmd == "echo 1"
			assert mission.submissions == nil
		end

		test "constructs with yaml map", ctx do
			# An explicit "name" overrides the derived key
			{_, mpid} = fa_with_plan(%{
				"name" => "mission_1",
				"missions" => "echo 1",
			}, ctx.outpost_pid)
			mission = Mission.get(mpid)
			assert mission.key == "mission_1"
			assert mission.cmd == "echo 1"
			assert mission.submissions == nil
		end

		test "constructs with yaml map and just command, key is missions", ctx do
			{_, mpid} = fa_with_plan(%{
				"missions" => "echo 1",
			}, ctx.outpost_pid)
			mission = Mission.get(mpid)
			assert mission.key == "echo_1"
			assert mission.cmd == "echo 1"
			assert mission.submissions == nil
		end

		test "constructs with yaml map and just command, keys are missions", ctx do
			# A nested "missions" map becomes submissions, not a cmd
			{_, mpid} = fa_with_plan(%{
				"missions" => %{
					"missions" => "echo 1"
				}
			}, ctx.outpost_pid)
			mission = Mission.get(mpid)
			assert mission.key == ""
			assert mission.cmd == nil
			assert mission.submissions == %{"missions" => "echo 1"}
		end

		test "constructs with yaml map and array of commands", ctx do
			# List submissions are indexed to preserve sequential order
			{_, mpid} = fa_with_plan(%{
				"name" => "mission_1",
				"missions" => ["echo 1", "echo 2"],
			}, ctx.outpost_pid)
			mission = Mission.get(mpid)
			assert mission.key == "mission_1"
			assert mission.cmd == nil
			assert mission.submissions == [{"echo 1", 0}, {"echo 2", 1}]
		end

		test "constructs with yaml map and map of commands", ctx do
			# Map submissions are kept as-is (parallel, unordered)
			{_, mpid} = fa_with_plan(%{
				"name" => "mission_1",
				"missions" => %{
					"submission 1" => "echo 1",
					"submission 2" => %{
						"name" => "new submission 2",
						"missions" => "echo 2"
					}
				},
			}, ctx.outpost_pid)
			mission = Mission.get(mpid)
			assert mission.key == "mission_1"
			assert mission.cmd == nil
			assert mission.submissions == %{
				"submission 1" => "echo 1",
				"submission 2" => %{
					"name" => "new submission 2",
					"missions" => "echo 2"
				}
			}
		end
	end

	describe "with a blank outpost, running mission" do
		setup [:blank_outpost]

		test "runs with mission args", ctx do
			{fpid, mpid} = fa_with_plan("echo blah", ctx.outpost_pid)
			Helper.check_exit_code mpid

			assert %{
				cmd: "echo blah",
				output: [[data: "blah\n", type: :out, timestamp: _, field_agent_pid: _, pid: []]],
				finished: true,
				running: false,
				exit_code: 0,
				field_agent_pid: ^fpid,
			} = Mission.get(mpid)
		end

		test "runs no mission args", ctx do
			{fpid, mpid} = fa_with_plan("echo", ctx.outpost_pid)
			Helper.check_exit_code mpid

			assert %{
				cmd: "echo",
				output: [[data: "\n", type: :out, timestamp: _, field_agent_pid: _, pid: []]],
				finished: true,
				running: false,
				exit_code: 0,
				field_agent_pid: ^fpid,
			} = Mission.get(mpid)
		end

		test "runs mission with appropriate running/finished flag", ctx do
			# ncat blocks until data arrives, so the mission stays running
			cmd = "ncat -l -i 1 9000"
			{fpid, mpid} = fa_with_plan(cmd, ctx.outpost_pid)
			FieldAgent.get(fpid) # flush

			assert %{
				cmd: ^cmd,
				output: [],
				finished: false,
				running: true,
				exit_code: nil,
				field_agent_pid: ^fpid,
			} = Mission.get(mpid)

			Helper.wait_for_process cmd
			Porcelain.spawn("bash", [ "-c", "echo -n blah | ncat localhost 9000"])
			Helper.check_exit_code mpid

			assert %{
				cmd: ^cmd,
				output: [[data: "blah", type: :out, timestamp: _, field_agent_pid: _, pid: []]],
				finished: true,
				running: false,
				exit_code: 0,
				field_agent_pid: ^fpid,
			} = Mission.get(mpid)
		end

		test "runs mission with args and ampersands", ctx do
			# A compound shell command still produces separate output entries
			{fpid, mpid} = fa_with_plan("echo blah1 && sleep 0.1 && echo blah2", ctx.outpost_pid)
			Helper.check_exit_code mpid

			assert %{
				cmd: "echo blah1 && sleep 0.1 && echo blah2",
				output: [
					[data: "blah1\n", type: :out, timestamp: _, field_agent_pid: _, pid: []],
					[data: "blah2\n", type: :out, timestamp: _, field_agent_pid: _, pid: []]
				],
				finished: true,
				running: false,
				exit_code: 0,
				field_agent_pid: ^fpid,
			} = Mission.get(mpid)
		end

		test "replaces empty mission with an easy exit command", ctx do
			# A nil plan is replaced by "exit 199" and a warning on stderr
			execute = fn ->
				{_, mpid} = fa_with_plan(nil, ctx.outpost_pid)
				Helper.check_exit_code mpid

				assert %{
					cmd: "exit 199",
					mission_plan: %{},
					output: [],
					input_file: "$IN",
					submissions_num: 0,
					running: false,
				} = Mission.get(mpid)
			end

			assert capture_io(:stderr, execute) =~ "Must have cmd or submissions, got %{}"
		end

		test "kills bash process", ctx do
			{fpid, mpid} = fa_with_plan("ncat -l -i 2 19009", ctx.outpost_pid)
			FieldAgent.stop fpid
			Helper.check_exit_code mpid

			# 137 = 128 + 9, i.e. the process was terminated by SIGKILL
			assert %{
				cmd: "ncat -l -i 2 19009",
				output: [],
				finished: true,
				running: false,
				exit_code: 137,
				field_agent_pid: ^fpid,
			} = Mission.get(mpid)
		end

		test "killing mission kills submission process", ctx do
			opid = ctx.outpost_pid
			{:ok, mpid1} = Mission.start_link [mission_plan: %{"missions" => ["echo 1"]}]
			{:ok, fpid1} = FieldAgent.start_link(mission_pid: mpid1, outpost_pid: opid)

			# Stop the supermission's field agent first...
			Mission.run_submissions(mpid1)
			FieldAgent.stop fpid1

			# ...then a submission attached to it should be killed too
			{:ok, mpid2} = Mission.start_link [mission_plan: "sleep 1; exit 198", supermission_pid: mpid1]
			{:ok, fpid2} = FieldAgent.start_link(mission_pid: mpid2, outpost_pid: opid)

			Helper.check_exit_code mpid2

			# Exit 137 (SIGKILL) instead of the script's own 198
			assert %{
				cmd: "sleep 1; exit 198",
				output: [],
				finished: true,
				running: false,
				exit_code: 137,
				field_agent_pid: ^fpid2,
			} = Mission.get(mpid2)
		end
	end

	# Outpost stand-in that records calls without executing anything
	defp mock_outpost(_) do
		{:ok, pid} = MockGenServer.start_link
		[outpost_pid: pid]
	end

	# Real outpost with default (blank) configuration
	defp blank_outpost(_) do
		{:ok, pid} = Outpost.start_link
		[outpost_pid: pid]
	end

	# Start a mission + field agent for the given plan and wait until the
	# mission has parsed its plan; returns {field_agent_pid, mission_pid}.
	defp fa_with_plan(plan, opid) do
		{:ok, mpid} = Mission.start_link [mission_plan: plan]
		{:ok, fpid} = FieldAgent.start_link(mission_pid: mpid, outpost_pid: opid)
		Helper.wait_for_valid_mission(mpid)
		{fpid, mpid}
	end
end
239 |
--------------------------------------------------------------------------------
/test/cingi/headquarters_test.exs:
--------------------------------------------------------------------------------
defmodule CingiHeadquartersTest do
	@moduledoc """
	Tests for Cingi.Headquarters: mission queuing while paused, resuming,
	linking extra branches, and distributing parallel missions across
	linked branches.
	"""
	use ExUnit.Case
	alias Cingi.Headquarters
	alias Cingi.Branch
	doctest Headquarters

	test "creating mission report queued mission, but no plan yet" do
		# Until headquarters resumes, the mission is queued and unparsed
		res = Helper.create_mission_report([string: "missions: echo 1"])
		assert length(Headquarters.get(res[:hq_pid]).queued_missions) == 1
		assert nil == res[:mission].cmd
		assert nil == res[:mission].submissions
	end

	test "can resume" do
		res = Helper.create_mission_report([string: "missions: echo 1"])
		hpid = res[:hq_pid]
		assert length(Headquarters.get(hpid).queued_missions) == 1
		Headquarters.resume(hpid)
		Helper.wait_for_finished(res[:mission_pid])
		# Resuming drains the queue
		assert length(Headquarters.get(hpid).queued_missions) == 0
	end

	test "links branch" do
		res = Helper.create_mission_report([string: "missions: echo 1"])
		hpid = res[:hq_pid]
		bpid = res[:branch_pid]
		# Link is bidirectional: hq lists the branch, branch knows its hq
		assert [^bpid] = Headquarters.get(hpid).branch_pids
		assert ^hpid = Branch.get(bpid).hq_pid

		{:ok, bpid2} = Branch.start_link()
		Headquarters.link_branch(hpid, bpid2)

		assert [^bpid, ^bpid2] = Headquarters.get(hpid).branch_pids
		assert ^hpid = Branch.get(bpid2).hq_pid
	end

	test "distributes parallel missions" do
		# Four parallel ncat listeners, spread across two linked branches
		yaml = Enum.map [1,2,3,4], &("  s#{&1}: ncat -l -i 1 902#{&1}")
		yaml = ["missions:"] ++ yaml
		yaml = Enum.join yaml, "\n"

		res = Helper.create_mission_report([string: yaml])
		hpid = res[:hq_pid]
		bpid = res[:branch_pid]
		{:ok, bpid2} = Branch.start_link()
		Headquarters.link_branch(hpid, bpid2)
		Headquarters.resume(hpid)

		# 5 missions total (supermission + 4): 3 on branch 1, 2 on branch 2
		branch1 = Helper.wait_for_running_missions(bpid, 3)
		branch2 = Helper.wait_for_running_missions(bpid2, 2)
		assert length(branch1.started_missions) == 0
		assert length(branch1.running_missions) == 3
		assert length(branch2.started_missions) == 0
		assert length(branch2.running_missions) == 2

		Enum.map [1,2,3,4], &(Helper.wait_for_process("ncat -l -i 1 902#{&1}"))
		finish = &(Porcelain.exec("bash", [ "-c", "echo -n blah#{&1} | ncat localhost 902#{&1}"]))

		# Finish out of order; output order should follow completion time
		finish.(3)
		finish.(2)
		finish.(4)
		finish.(1)

		mission = Helper.check_exit_code(res[:mission_pid])
		assert %{output: [
			[data: "blah3", type: :out, timestamp: _, field_agent_pid: _, pid: [pid1]],
			[data: "blah2", type: :out, timestamp: _, field_agent_pid: _, pid: [pid2]],
			[data: "blah4", type: :out, timestamp: _, field_agent_pid: _, pid: [pid3]],
			[data: "blah1", type: :out, timestamp: _, field_agent_pid: _, pid: [pid4]]
		], exit_code: 0} = mission

		# Each output line belongs to a distinct submission
		pids = mission.submission_holds |> Enum.map(&(&1.pid))
		assert pid1 != pid2 != pid3 != pid4
		assert pid1 in pids
		assert pid2 in pids
		assert pid3 in pids
		assert pid4 in pids
	end
end
80 |
--------------------------------------------------------------------------------
/test/cingi/mission_plan_test.exs:
--------------------------------------------------------------------------------
defmodule CingiMissionPlansTest do
	@moduledoc """
	Tests for input/output routing between missions in a plan: parallel
	inputs, sequential inputs with index/$LAST selectors, and output
	filtering.
	"""
	use ExUnit.Case
	alias Cingi.Headquarters

	test "runs parallel inputs file" do
		res = Helper.create_mission_report([file: "test/mission_plans/inputs/parallel.yaml"])
		Headquarters.resume(res[:hq_pid])
		mission = Helper.check_exit_code(res[:mission_pid])
		output = mission.output
			|> Enum.map(&(&1[:data]))
			|> Enum.join("\n")
			|> String.split("\n", trim: true)

		# First 9 lines are the producers; the rest are consumers, peeled off
		# by prefix. Enum.partition/2 is deprecated since Elixir 1.4;
		# Enum.split_with/2 is its drop-in replacement.
		{initial, next} = Enum.split output, 9
		{firstthird, next} = Enum.split_with(next, &(case &1 do; "first, third: " <> _ -> true; _ -> false; end))
		{second, next} = Enum.split_with(next, &(case &1 do; "second: " <> _ -> true; _ -> false; end))
		{within, next} = Enum.split_with(next, &(case &1 do; "with in: " <> _ -> true; _ -> false; end))
		{without, next} = Enum.split_with(next, &(case &1 do; "without in: " <> _ -> true; _ -> false; end))

		assert [
			"first1",
			"first2",
			"first3",
			"second1",
			"second2",
			"third1",
			"third2",
			"third3",
			"third4",
		] = Enum.sort(initial)

		assert [
			"first, third: first1",
			"first, third: first2",
			"first, third: first3",
			"first, third: third1",
			"first, third: third2",
			"first, third: third3",
			"first, third: third4",
		] = firstthird

		assert [
			"second: second1",
			"second: second2",
		] = second

		assert [
			"with in: first1",
			"with in: first2",
			"with in: first3",
			"with in: second1",
			"with in: second2",
			"with in: third1",
			"with in: third2",
			"with in: third3",
			"with in: third4",
		] = Enum.sort(within)

		assert [
			"without in: first1",
			"without in: first2",
			"without in: first3",
			"without in: second1",
			"without in: second2",
			"without in: third1",
			"without in: third2",
			"without in: third3",
			"without in: third4",
		] = Enum.sort(without)

		# Every line must have been claimed by exactly one prefix group
		assert [] = next
	end

	describe "runs sequential inputs file" do
		setup do
			res = Helper.create_mission_report([file: "test/mission_plans/inputs/sequential.yaml"])
			Headquarters.resume(res[:hq_pid])
			mission = Helper.wait_for_finished(res[:mission_pid])
			output = mission.output
				|> Enum.map(&(&1[:data]))
				|> Enum.join("\n")
				|> String.split("\n", trim: true)
			[output: output]
		end

		test "right amount of output", ctx do
			assert 7 = length(ctx.output)
		end

		test "first blahs", ctx do
			assert ["blah1", "blah2", "blah3"] = Enum.slice(ctx.output, 0, 3)
		end

		test "gets by integer index", ctx do
			outputs = Enum.filter(ctx.output, &(case &1 do; "0: " <> _ -> true; _ -> false end))
			assert ["0: blah1"] = outputs
		end

		test "gets by $LAST index", ctx do
			outputs = Enum.filter(ctx.output, &(case &1 do; "last: " <> _ -> true; _ -> false end))
			assert ["last: blah3"] = outputs
		end

		test "gets by $LAST and index", ctx do
			outputs = Enum.filter(ctx.output, &(case &1 do; "last, 1: " <> _ -> true; _ -> false end))
			assert ["last, 1: blah3", "last, 1: blah2"] = outputs
		end
	end

	describe "runs outputs file" do
		setup do
			res = Helper.create_mission_report([file: "test/mission_plans/outputs.yaml"])
			Headquarters.resume(res[:hq_pid])
			mission = Helper.wait_for_finished(res[:mission_pid])
			output = mission.output
				|> Enum.map(&(&1[:data]))
				|> Enum.join("\n")
				|> String.split("\n", trim: true)
			[output: output]
		end

		test "right amount of output", ctx do
			assert 13 = length(ctx.output)
		end

		test "first does not go through", ctx do
			assert "first1" not in ctx.output
			assert "first2" not in ctx.output
			assert "first3" not in ctx.output
		end

		test "second and third goes through", ctx do
			assert [
				"second1",
				"second2",
				"third2",
				"third3"
			] = ctx.output |> Enum.slice(0, 4) |> Enum.sort()
		end

		test "third filters indices", ctx do
			assert "third1" not in ctx.output
			assert "third4" not in ctx.output
		end

		test "hidden inputs can still be taken", ctx do
			firsts = Enum.filter(ctx.output, &(case &1 do; "first: " <> _ -> true; _ -> false end))
			assert "first: first3" in firsts
			assert "first: first1" not in firsts
			assert "first: first2" not in firsts
		end

		test "selective input has thirds first", ctx do
			outputs = Enum.filter(ctx.output, &(case &1 do; "third, second: " <> _ -> true; _ -> false end))
			assert [
				"third, second: third2",
				"third, second: third3",
				"third, second: second1",
				"third, second: second2",
			] = outputs
		end

		test "normal input", ctx do
			outputs = Enum.filter(ctx.output, &(case &1 do; "normal: " <> _ -> true; _ -> false end))
			assert [
				"normal: second1",
				"normal: second2",
				"normal: third2",
				"normal: third3",
			] = Enum.sort(outputs)
		end
	end
end
174 |
--------------------------------------------------------------------------------
/test/cingi/mission_report_test.exs:
--------------------------------------------------------------------------------
defmodule CingiMissionReportTest do
	@moduledoc """
	Unit tests for MissionReport.parse_variable/1,2, which parses input
	selectors such as `$IN`, `$IN[2]`, `$IN['key']`, and `$IN[$LAST]`
	into keyword lists, or returns `[error: reason]` for bad input.
	"""
	use ExUnit.Case
	alias Cingi.MissionReport, as: Report
	doctest Report

	describe "MissionReport.parse_variable/1" do
		test "parses $IN" do
			assert Report.parse_variable("$IN") == [type: "IN"]
		end

		test "parses $IN[2]" do
			# Bare integers are indices
			assert Report.parse_variable("$IN[2]") == [type: "IN", index: 2]
		end

		test "parses $IN['2']" do
			# Quoted integers are string keys, not indices
			assert Report.parse_variable("$IN['2']") == [type: "IN", key: "2"]
		end

		test "parses $IN[str]" do
			assert Report.parse_variable("$IN[str]") == [type: "IN", key: "str"]
		end

		test "parses $IN['str']" do
			assert Report.parse_variable("$IN['str']") == [type: "IN", key: "str"]
		end

		test "parses $IN[\"str\"]" do
			assert Report.parse_variable("$IN[\"str\"]") == [type: "IN", key: "str"]
		end

		test "parses $IN[$LAST] gives nil" do
			# Without a last_index option, $LAST resolves to nil
			assert Report.parse_variable("$IN[$LAST]") == [type: "IN", index: nil]
		end

		test "parses $IN[$LAST] gives last_index" do
			assert Report.parse_variable("$IN[$LAST]", last_index: 5) == [type: "IN", index: 5]
		end

		test "fails to parse nil" do
			assert Report.parse_variable(nil) == [error: "Unrecognized pattern "]
		end

		test "fails to parse empty string" do
			assert Report.parse_variable("") == [error: "Unrecognized pattern "]
		end

		test "fails to parse arbitrary string" do
			assert Report.parse_variable("blah") == [error: "Unrecognized pattern blah"]
		end

		test "fails to parse invalids in type" do
			assert Report.parse_variable("$af09") == [error: "Invalid characters"]
			assert Report.parse_variable("$af09[s]") == [error: "Invalid characters"]
		end

		test "fails to parse if invalid after $" do
			assert Report.parse_variable("$09af") == [error: "Unrecognized pattern $09af"]
		end

		test "fails to parse with bad brackets" do
			assert Report.parse_variable("$IN[") == [error: "Nonmatching brackets"]
			assert Report.parse_variable("$IN]") == [error: "Invalid characters"]
			assert Report.parse_variable("$IN[s") == [error: "Nonmatching brackets"]
		end

		test "fails to parse with no key" do
			assert Report.parse_variable("$IN[]") == [error: "Empty/bad key"]
			assert Report.parse_variable("$IN['']") == [error: "Empty/bad key"]
			assert Report.parse_variable("$IN[\"\"]") == [error: "Empty/bad key"]
			assert Report.parse_variable("$IN[\"]") == [error: "Empty/bad key"]
			assert Report.parse_variable("$IN[']") == [error: "Empty/bad key"]
		end

		test "fails to parse with nonmatching strings" do
			assert Report.parse_variable("$IN['blah\"]") == [error: "Nonmatching quotes"]
			assert Report.parse_variable("$IN[\"blah']") == [error: "Nonmatching quotes"]
			assert Report.parse_variable("$IN['blah]") == [error: "Nonmatching quotes"]
			assert Report.parse_variable("$IN[blah']") == [error: "Nonmatching quotes"]
			assert Report.parse_variable("$IN[\"blah]") == [error: "Nonmatching quotes"]
			assert Report.parse_variable("$IN[blah\"]") == [error: "Nonmatching quotes"]
		end
	end
end
84 |
--------------------------------------------------------------------------------
/test/cingi/mission_test.exs:
--------------------------------------------------------------------------------
defmodule CingiMissionTest do
	@moduledoc """
	Basic construction test for Cingi.Mission.
	"""
	use ExUnit.Case
	alias Cingi.Mission
	doctest Mission

	# A freshly started mission has not parsed its plan yet: no cmd,
	# empty output, default "$IN" input file, and not running.
	test "creates mission" do
		{:ok, pid} = Mission.start_link([mission_plan: "echo"])
		state = Mission.get(pid)
		assert state.cmd == nil
		assert state.mission_plan == "echo"
		assert state.output == []
		assert state.input_file == "$IN"
		assert state.running == false
	end
end
17 |
--------------------------------------------------------------------------------
/test/cingi/outpost_plan_test.exs:
--------------------------------------------------------------------------------
# End-to-end tests for outpost behavior in mission plans (working
# directory changes, env propagation/overrides, setup/teardown ordering).
# Each describe block runs one YAML plan through Helper.run_mission_report/1
# and asserts on the collected output lines and exit code.
defmodule CingiOutpostPlansTest do
	use ExUnit.Case

	describe "simple outpost plan" do
		setup do
			Helper.run_mission_report("test/mission_plans/outposts/simple.yaml")
		end

		test "right amount of output", ctx do
			assert 8 = length(ctx.output)
		end

		test "directory changes from setup", ctx do
			assert "dir /tmp" in ctx.output
		end

		test "static env works", ctx do
			assert "TEST: test_value" in ctx.output
		end

		test "env key is set", ctx do
			assert "ENV1: env1_value" in ctx.output
		end

		test "env val is set", ctx do
			assert "ENV3: VAL2" in ctx.output
		end

		test "env key and val is set", ctx do
			assert "ENV2: VAL1" in ctx.output
		end

		test "missing key is not set", ctx do
			assert "MISSING_KEY: " in ctx.output
		end
	end

	describe "env and dir outpost plan" do
		setup do
			Helper.run_mission_report("test/mission_plans/outposts/env_and_dir.yaml")
		end

		test "right amount of output", ctx do
			assert 9 = length(ctx.output)
		end

		test "directory changes from outpost", ctx do
			assert "start pwd: /" in ctx.output
			assert "newdir pwd: /tmp" in ctx.output
			assert "end pwd: /" in ctx.output
		end

		test "env set by outpost", ctx do
			assert "START, TEST_OUTPOSTS: test_outposts_value" in ctx.output
		end

		test "env carries through in submissions", ctx do
			assert "TEST_OUTPOSTS 1: test_outposts_value" in ctx.output
		end

		test "env is added in sub outposts", ctx do
			assert "TEST_OUTPOSTS 2: test_outposts_2_value" in ctx.output
			assert "TEST_OUTPOSTS 3: test_outposts_3_value" in ctx.output
		end

		test "env is overriden in sub outpost", ctx do
			assert "TEST_OUTPOSTS 4: test_outposts_override" in ctx.output
		end

		test "env does not get reset by sub outposts", ctx do
			assert "END, TEST_OUTPOSTS: test_outposts_value" in ctx.output
		end
	end

	describe "nested setup outpost plan" do
		setup do
			Helper.run_mission_report("test/mission_plans/outposts/setup.yaml")
		end

		# Collect every mission report registered on the branch; outpost
		# setups run as their own reports, separate from the main plan.
		setup ctx do
			reports = ctx.res[:branch_pid]
			|> Cingi.Branch.get
			|> (fn(b) -> b.mission_reports end).()
			|> Enum.map(&Cingi.MissionReport.get/1)
			[reports: reports]
		end

		# Grab the first mission's output of reports 1..3 — presumably the
		# three outpost-setup reports (report 0 being the main plan);
		# the ordering assertions below rely on this.
		setup ctx do
			setup_output = ctx.reports
			|> Enum.slice(1, 3)
			|> Enum.map(&(Enum.at(&1.missions, 0)))
			|> Enum.map(&Cingi.Mission.get/1)
			|> Enum.map(&Helper.get_output/1)
			[setup_output: setup_output]
		end

		test "right amount of output", ctx do
			assert 13 = length(ctx.output)
		end

		test "right amount of mission plans", ctx do
			assert 4 = length(ctx.reports)
		end

		test "top outpost starts first, because of the mission that runs before the outpost mission", ctx do
			assert "top setup" = ctx.setup_output |> Enum.at(0) |> Enum.at(0)
		end

		test "bottom outpost starts second, outposts are not started until run by a command", ctx do
			assert "bottom setup" = ctx.setup_output |> Enum.at(1) |> Enum.at(0)
		end

		test "middle outpost runs last, triggered by bottom outpost needing environement to run in", ctx do
			assert "middle setup" = ctx.setup_output |> Enum.at(2) |> Enum.at(0)
		end

		test "top setup has no envs set", ctx do
			assert [
				"top setup",
				"top setup TMP_DIR_1: ",
				"top setup TMP_DIR_2: ",
				"top setup TMP_DIR_3: ",
				"{\"dir\": \"/tmp/tmp." <> _,
			] = Enum.at(ctx.setup_output, 0)
		end

		test "middle setup inherited envs and dir from top", ctx do
			# keyN are the random suffixes of the mktemp-created directories
			assert [
				"middle setup",
				"middle setup pwd: /tmp/tmp." <> key1,
				"middle setup TMP_DIR_1: /tmp/tmp." <> key2,
				"middle setup TMP_DIR_2: /tmp/tmp." <> key3,
				"middle setup TMP_DIR_3: /tmp/tmp." <> key4,
				"{\"dir\": \"/tmp/tmp." <> key5,
			] = Enum.at(ctx.setup_output, 2)

			# Top set all TMP_DIR envs as the working directory
			assert key1 == key2
			assert key1 == key3
			assert key1 == key4

			# Middle setting new working directory as subdirectory
			assert key5 =~ key1
			assert key5 != key1
		end

		test "bottom setup inherited envs and dir from middle", ctx do
			assert [
				"bottom setup",
				"bottom setup pwd: /tmp/tmp." <> key1,
				"bottom setup TMP_DIR_1: /tmp/tmp." <> key2,
				"bottom setup TMP_DIR_2: /tmp/tmp." <> key3,
				"bottom setup TMP_DIR_3: first_override",
			] = Enum.at(ctx.setup_output, 1)

			assert key1 == key3 # Current directory matches env override
			assert key1 =~ key2 # Current directory is subdirectory of original directory
			assert key1 != key2 # Exclusively subdirectory
		end

		test "top missions set in tmp directory from top outpost", ctx do
			output = Enum.at(ctx.output, 0)
			assert "top pwd: /tmp/tmp." <> key = output
			assert Regex.match?(~r/^[a-zA-Z0-9]+$/, key)
		end

		test "middle missions set in tmp directory from middle outpost", ctx do
			output = Enum.at(ctx.output, 5)
			assert "middle pwd: /tmp/tmp." <> key = output
			# Nested tmp dir: middle runs in a subdirectory of top's tmp dir
			assert Regex.match?(~r/^[a-zA-Z0-9]+\/tmp\.[a-zA-Z0-9]+$/, key)
		end

		test "bottom missions set in tmp directory from middle outpost", ctx do
			output = Enum.at(ctx.output, 1)
			assert "bottom pwd: /tmp/tmp." <> key = output
			assert Regex.match?(~r/^[a-zA-Z0-9]+\/tmp\.[a-zA-Z0-9]+$/, key)
		end

		test "top missions get envs set in outpost", ctx do
			assert [
				"top TMP_DIR_1: /tmp/tmp." <> k1,
				"top TMP_DIR_2: /tmp/tmp." <> k2,
				"top TMP_DIR_3: /tmp/tmp." <> k3,
			] = Enum.slice(ctx.output, 10, 3)

			assert k1 == k2
			assert k1 == k3
		end

		test "middle missions get envs set in outpost", ctx do
			assert [
				"middle TMP_DIR_1: /tmp/tmp." <> k1,
				"middle TMP_DIR_2: /tmp/tmp." <> k2,
				"middle TMP_DIR_3: first_override",
			] = Enum.slice(ctx.output, 6, 3)

			# Middle setting env as new subdirectory
			assert k2 =~ k1
			assert k1 != k2
		end

		test "bottom missions get envs set in outpost", ctx do
			assert [
				"bottom TMP_DIR_1: /tmp/tmp." <> k1,
				"bottom TMP_DIR_2: /tmp/tmp." <> k2,
				"bottom TMP_DIR_3: second_override",
			] = Enum.slice(ctx.output, 2, 3)

			# Middle setting env as new subdirectory
			assert k2 =~ k1
			assert k1 != k2
		end

		test "can run files made in outpost setup", ctx do
			assert "inside_tmp_dir" = Enum.at(ctx.output, 9)
		end
	end

	describe "setup fail outpost plan" do
		setup do
			Helper.run_mission_report("test/mission_plans/outposts/setup_fail.yaml")
		end

		test "right output", ctx do
			assert ["should run"] = ctx.output
		end

		test "right exit code", ctx do
			# 137 = 128 + SIGKILL(9); the plan is killed after setup fails
			assert 137 = ctx.exit_code
		end
	end

	describe "teardown outpost plan" do
		setup do
			Helper.run_mission_report("test/mission_plans/outposts/teardown.yaml")
		end

		# Same report-collection pattern as the nested-setup block above.
		setup ctx do
			reports = ctx.res[:branch_pid]
			|> Cingi.Branch.get
			|> (fn(b) -> b.mission_reports end).()
			|> Enum.map(&Cingi.MissionReport.get/1)
			[reports: reports]
		end

		# Teardowns are appended last, so the final three reports hold them.
		setup ctx do
			setup_output = ctx.reports
			|> Enum.slice(-3, 3)
			|> Enum.map(&(Enum.at(&1.missions, 0)))
			|> Enum.map(&Cingi.Mission.get/1)
			|> Enum.map(&Helper.get_output/1)
			[setup_output: setup_output]
		end

		test "right output", ctx do
			assert [
				"bottom mission one",
				"bottom mission three",
				"bottom mission two",
				"middle mission",
				"top mission"
			] = Enum.sort(ctx.output)
		end

		test "right teardown output, teardown in own outpost", ctx do
			assert [
				["bottom teardown BOTTOMTHREE"],
				["middle teardown MIDDLETWO"],
				["top teardown TOPONE"],
			] = ctx.setup_output
		end
	end
end
274 |
--------------------------------------------------------------------------------
/test/cingi/outpost_test.exs:
--------------------------------------------------------------------------------
# Unit tests for Cingi.Outpost: creation defaults, the shared "alternates"
# agent that tracks an outpost's per-branch versions, parent/child
# bookkeeping across branches, teardown, and multi-node behavior.
defmodule CingiOutpostTest do
	use ExUnit.Case
	alias Cingi.Outpost
	alias Cingi.Branch
	alias Cingi.Headquarters
	doctest Outpost

	test "creates outpost" do
		{:ok, pid} = Outpost.start_link()
		assert %{
			name: nil,
			is_setup: false,
			setup: nil,
			branch_pid: nil,
			node: :"nonode@nohost",
		} = Outpost.get(pid)
	end

	# The alternates agent maps branch (nil here) -> outpost pid.
	test "alternates includes outpost" do
		{:ok, pid} = Outpost.start_link()
		assert %{nil: ^pid} = Outpost.get(pid).alternates |> Agent.get(&(&1))
	end

	# The alternates agent is gproc-registered (local name) under the
	# outpost's root mission pid, so it can be found by mission.
	test "alternates is registered under root mission" do
		mpid = spawn fn -> :ok end
		{:ok, pid} = Outpost.start_link(root_mission_pid: mpid)
		alternates = Outpost.get(pid).alternates
		key = {:n, :l, {:outpost_agent_by_mission, mpid}}
		assert ^alternates = :gproc.where(key)
	end

	# An outpost created as a copy (original:) shares the original's
	# alternates agent rather than registering itself in it.
	test "alternates do not update with new outposts" do
		{:ok, pid1} = Outpost.start_link()
		{:ok, pid2} = Outpost.start_link(original: pid1)

		a1 = Outpost.get pid1
		a2 = Outpost.get pid2

		assert pid1 != pid2
		assert a1.alternates == a2.alternates
		assert %{nil: ^pid1} = a1.alternates |> Agent.get(&(&1))
	end

	test "alternates gets first outpost on same branch" do
		{:ok, bpid} = Branch.start_link()
		{:ok, pid1} = Outpost.start_link(branch_pid: bpid)
		{:ok, pid2} = Outpost.start_link(original: pid1, branch_pid: bpid)

		outpost1 = Outpost.get_version_on_branch pid1, bpid
		outpost2 = Outpost.get_version_on_branch pid2, bpid

		assert pid1 != pid2
		assert pid1 == outpost1
		assert pid1 == outpost2
	end

	test "alternates gets outpost on different branch" do
		{:ok, bpid1} = Branch.start_link()
		{:ok, bpid2} = Branch.start_link()
		{:ok, pid1} = Outpost.start_link(branch_pid: bpid1)
		{:ok, pid2} = Outpost.create_version_on_branch(pid1, bpid2)
		assert pid1 != pid2

		# Either pid resolves to the branch-local version of the outpost
		outpost1 = Outpost.get_version_on_branch pid1, bpid1
		outpost2 = Outpost.get_version_on_branch pid2, bpid1

		assert pid1 == outpost1
		assert pid1 == outpost2

		outpost1 = Outpost.get_version_on_branch pid1, bpid2
		outpost2 = Outpost.get_version_on_branch pid2, bpid2

		assert pid2 == outpost1
		assert pid2 == outpost2
	end

	test "parents are updated with children" do
		{:ok, bpid} = Branch.start_link()
		{:ok, pid1} = Outpost.start_link(branch_pid: bpid)
		{:ok, pid2} = Outpost.start_link(parent_pid: pid1, branch_pid: bpid)

		assert [%{pid: ^pid2}] = Outpost.get(pid1).child_pids

		{:ok, pid3} = Outpost.start_link(parent_pid: pid1, branch_pid: bpid)
		assert [%{pid: ^pid2}, %{pid: ^pid3}] = Outpost.get(pid1).child_pids
	end

	test "parents on branch are updated with correct children" do
		{:ok, bpid1} = Branch.start_link()
		{:ok, bpid2} = Branch.start_link()
		{:ok, pid1} = Outpost.start_link(branch_pid: bpid1)
		{:ok, pid2} = Outpost.create_version_on_branch(pid1, bpid2)

		# child outpost started on same branch
		{:ok, pid3} = Outpost.start_link(parent_pid: pid1, branch_pid: bpid1)

		# parent outpost on same branch has the child outpost
		assert %{parent_pid: ^pid1} = Outpost.get(pid3)
		assert [%{pid: ^pid3}] = Outpost.get(pid1).child_pids
		assert [] = Outpost.get(pid2).child_pids

		# child outpost started on different branch than given parent outpost
		{:ok, pid4} = Outpost.start_link(parent_pid: pid1, branch_pid: bpid2)

		# parent on the second branch has the child instead
		assert %{parent_pid: ^pid2} = Outpost.get(pid4)
		assert [%{pid: ^pid3}] = Outpost.get(pid1).child_pids
		assert [%{pid: ^pid4}] = Outpost.get(pid2).child_pids
	end

	@tag :ncat
	@tag :ncat8100
	test "teardown" do
		# ncat listens until a client connects, so teardown blocks and the
		# outpost must report tearing_down: true until we connect below.
		cmd = "ncat -l -i 1 8100"

		{:ok, bpid} = Branch.start_link()
		{:ok, hpid} = Headquarters.start_link()
		Headquarters.link_branch(hpid, bpid)

		{:ok, opid} = Outpost.start_link(
			branch_pid: bpid,
			plan: %{"teardown" => cmd},
		)
		t = Task.async(fn -> Outpost.teardown(opid) end)

		Helper.wait_for_process cmd
		outpost = Outpost.get opid
		assert %{
			teardown: ^cmd,
			teardown_callback_pid: {_, _},
			tearing_down: true,
			teardown_failed: false,
			is_torndown: false,
		} = outpost

		# Connecting lets ncat exit, which completes the teardown
		Porcelain.exec("bash", [ "-c", "echo -n blah1 | ncat localhost 8100"])

		outpost = Task.await t
		assert %{
			teardown: ^cmd,
			teardown_callback_pid: {_, _},
			tearing_down: false,
			teardown_failed: false,
			is_torndown: true,
		} = outpost
	end

	@tag distributed: true
	test "distributed outposts" do
		count = 3

		DistributedEnv.start(count)
		assert length(Node.list()) === count

		nodes = [:"slave1@127.0.0.1", :"slave2@127.0.0.1", :"slave3@127.0.0.1"]
		[node1, node2, node3] = nodes
		{:ok, bpid1} = :rpc.block_call(node1, Branch, :start_link, [[name: :testb]])
		{:ok, bpid2} = :rpc.block_call(node2, Branch, :start_link, [[name: :testb]])
		{:ok, bpid3} = :rpc.block_call(node3, Branch, :start_link, [[name: :testb]])

		:rpc.block_call(node1, Outpost, :start_link, [[name: {:global, :test}, branch_pid: bpid1]])

		original = Outpost.get {:global, :test}

		assert %{
			name: {:global, :test},
			node: :"slave1@127.0.0.1",
			branch_pid: ^bpid1,
		} = original

		# Resolve the outpost's version on each node's :testb branch
		[first, second, third] = nodes
		|> Enum.map(&(:rpc.block_call(&1, Branch, :get, [:testb])))
		|> Enum.map(&(:rpc.block_call(&1.node, Outpost, :get_version_on_branch, [{:global, :test}, &1.pid])))

		# Assert that the outpost is only showing up on the first node
		assert original == Outpost.get first
		assert nil == second
		assert nil == third

		# Assert that the second outpost is a clone of the first on the second node,
		# and that they know that they are alternates of each other
		{:ok, second} = :rpc.block_call(node2, Outpost, :create_version_on_branch, [{:global, :test}, bpid2])

		assert %{
			name: {:global, :test},
			node: :"slave2@127.0.0.1",
			branch_pid: ^bpid2,
		} = Outpost.get second

		assert original.alternates == Outpost.get(second).alternates

		alts = {:global, :test}
		|> Outpost.get_alternates
		|> Map.values

		assert length(alts) == 2
		assert first in alts
		assert second in alts

		# Assert that node3 still does not have an associated outpost
		third = :rpc.block_call(node3, Outpost, :get_version_on_branch, [{:global, :test}, bpid3])
		assert nil == third

		DistributedEnv.stop()
		assert length(Node.list()) === 0
	end
end
208 |
--------------------------------------------------------------------------------
/test/cingi/when_plan_test.exs:
--------------------------------------------------------------------------------
# Tests for the `when` condition in mission plans: runs when.yaml once and
# checks which conditional missions produced output and which were skipped.
defmodule CingiWhenTest do
	use ExUnit.Case

	describe "when" do
		setup do
			Helper.run_mission_report("test/mission_plans/when.yaml")
		end

		test "runs correct amount of output", ctx do
			assert length(ctx.output) == 10
		end

		test "things that should not run don't run", ctx do
			for line <- ctx.output do
				refute line =~ "should not run"
			end
		end

		test "runs first few commands", ctx do
			assert Enum.slice(ctx.output, 0, 2) == ["first", "second"]
		end

		test "runs regardless, since fail_fast is false", ctx do
			assert Enum.member?(ctx.output, "runs regardless")
		end

		test "runs correct output for exit code", ctx do
			assert Enum.member?(ctx.output, "runs because of exit code 1")
			refute Enum.member?(ctx.output, "should not run because not exit code 0")
		end

		test "runs correct output for failure", ctx do
			assert Enum.member?(ctx.output, "runs because of failure")
			refute Enum.member?(ctx.output, "should not run because not success")
		end

		test "runs correct output for output", ctx do
			assert Enum.member?(ctx.output, "runs because of second in outputs")
			refute Enum.member?(ctx.output, "should not run because of no first in outputs")
		end

		test "runs correct output for multiple conditions", ctx do
			assert Enum.member?(ctx.output, "runs because of second in outputs and exit code of 1")
			refute Enum.member?(ctx.output, "should not run because although second in outputs, exit_code is not 2")
		end

		test "runs correct output for parallel group", ctx do
			assert Enum.member?(ctx.output, "runs because parallel group exited with 0")
			refute Enum.member?(ctx.output, "should not run because parallel group was success")
		end

		test "runs correct output meaning last submission does not make a nil exit code", ctx do
			assert Enum.member?(ctx.output, "runs because exit code is not nil with last mission being skipped")
		end

		test "runs end mission because of false fail_fast", ctx do
			assert Enum.take(ctx.output, -1) == ["end"]
		end
	end
end
59 |
--------------------------------------------------------------------------------
/test/cingi/wrapper_test.exs:
--------------------------------------------------------------------------------
# Tests for priv/bin/wrapper.sh, the shell wrapper CingI runs commands
# through: it must forward stdout and exit codes, kill the whole process
# tree when terminated, optionally pipe a file to the command's stdin
# (and optionally auto-remove it), and kill the command when a literal
# "kill" line arrives on stdin.
defmodule WrapperTest do
	use ExUnit.Case

	test "runs echo" do
		proc = exec(["echo blah"])
		assert "blah\n" = proc.out
		assert 0 = proc.status
	end

	test "gets exit_code" do
		proc = exec(["exit 5"])
		assert "" = proc.out
		assert 5 = proc.status
	end

	test "runs ncat" do
		cmd = "ncat -l -i 1 8500"
		t = async_exec [cmd]
		is_running cmd
		exec ["echo finished | ncat localhost 8500"]
		res = Task.await t
		isnt_running cmd
		assert res.out == "finished\n"
	end

	test "runs ncat, kills ncat process" do
		cmd = "ncat -l -i 1 8501"
		t = _spawn [cmd]
		is_running cmd
		Process.exit t.pid, "test"
		isnt_running cmd
	end

	test "background processes get killed" do
		cmd = "sleep 9"
		# Fix: shebang was "#/bin/sh" (missing "!"); harmless here since the
		# script is invoked via `bash`, but corrected for correctness.
		path = tmp_file("#!/bin/sh\nsleep 9 &\npid=$!\nwait $pid")
		t = _spawn ["bash #{path}"]

		# Wait until sleep actually shows up as a process
		Helper.wait_for_process cmd

		is_running cmd
		Process.exit t.pid, "test"
		isnt_running cmd
		File.rm path
	end


	test "runs ncat, kills ncat process, also deletes tmp_file" do
		cmd = "ncat -l -i 1 8502"
		path = tmp_file("")
		t = _spawn [cmd, path, "true"]
		is_running cmd
		Process.exit t.pid, "test"
		isnt_running cmd
		assert false == File.exists? path
	end

	test "file piping works" do
		path = tmp_file("match1\nignored\nmatch2")
		assert "match1\nmatch2\n" = exec(["grep match", path]).out
		assert File.exists? path
		File.rm path
	end

	test "file piping autoremoves file" do
		# Third "true" argument asks the wrapper to delete the file afterwards
		path = tmp_file("match1\nignored\nmatch2")
		assert "match1\nmatch2\n" = exec(["grep match", path, "true"]).out
		assert false == File.exists? path
	end

	test "file piping works even without needing it" do
		path = tmp_file("match1\nignored\nmatch2")
		assert "blah\n" = exec(["echo blah", path, "true"]).out
	end

	test "stdin receiving kill kills process" do
		# 137 = 128 + SIGKILL(9)
		path = tmp_file("one\ntwo\nkill\nfour")
		cmd = "sleep 2"
		proc = exec [cmd], {:path, path}
		assert 137 = proc.status
	end

	test "stdin receiving anything else doesn't kill process" do
		path = tmp_file("one\ntwo\nthree\nfour")
		cmd = "exit 5"
		proc = exec [cmd], {:path, path}
		assert 5 = proc.status
	end

	# Run the wrapper synchronously; `input` is nil or {:path, file}.
	defp exec(cmds, input \\ nil) do
		Porcelain.exec("./priv/bin/wrapper.sh", cmds, in: input)
	end

	# Spawn the wrapper and block until its first command appears in the
	# process table. Underscore prefix avoids clashing with Kernel.spawn/1.
	defp _spawn(cmds, input \\ nil) do
		pid = Porcelain.spawn("./priv/bin/wrapper.sh", cmds, in: input)
		Helper.wait_for_process Enum.at(cmds, 0)
		pid
	end

	defp is_running(cmd) do
		assert Helper.get_process_lines(cmd) > 0
	end

	defp isnt_running(cmd) do
		# Might have checked too fast, wait for a quarter of a second before checking again
		# Since process dying from signal may not happen immediately
		n = Helper.get_process_lines(cmd)
		if n > 0 do Process.sleep(250) end
		assert Helper.get_process_lines(cmd) == 0
	end

	# Run the wrapper in a Task and wait for its command to start.
	defp async_exec(cmds, input \\ nil) do
		pid = Task.async(fn() -> exec(cmds, input) end)
		Helper.wait_for_process Enum.at(cmds, 0)
		pid
	end

	# Write `content` to a tracked temp file and return its path.
	defp tmp_file(content) do
		Temp.track!
		{:ok, fd, path} = Temp.open
		IO.write fd, content
		File.close fd
		path
	end
end
127 |
--------------------------------------------------------------------------------
/test/cingi_test.exs:
--------------------------------------------------------------------------------
# Top-level smoke test: only runs the doctests defined in the Cingi module.
defmodule CingiTest do
	use ExUnit.Case
	doctest Cingi
end
5 |
--------------------------------------------------------------------------------
/test/distributed_env_test.exs:
--------------------------------------------------------------------------------
# Boots extra Erlang nodes for the @tag distributed: true tests and tears
# them down again. The master node starts an inet boot server so slave
# nodes can load code over the network, then each slave gets the master's
# code paths, application config, and running applications.
defmodule DistributedEnv do
	@moduledoc false

	# Start the local master node plus `num_nodes` slave nodes; returns the
	# list of {:ok, node} results from spawn_slaves/1.
	def start(num_nodes) do
		spawn_master()
		num_nodes
		|> spawn_slaves()
	end

	# Stop every connected slave node and shut down distribution.
	def stop() do
		Node.list()
		|> Enum.map(&:slave.stop/1)
		:net_kernel.stop()
	end

	# Turn this VM into a distributed node and allow 127.0.0.1 to netboot.
	# NOTE(review): :slave / :erl_boot_server-based setup is deprecated in
	# recent OTP releases in favor of :peer — consider migrating when the
	# minimum OTP version is raised.
	defp spawn_master() do
		:net_kernel.start([:"primary@127.0.0.1"])
		:erl_boot_server.start([])
		allow_boot(~c"127.0.0.1")
	end

	# Boot slave1..slaveN concurrently; each boot can take a while, hence
	# the generous 30s await.
	defp spawn_slaves(num_nodes) do
		1..num_nodes
		|> Enum.map(fn index -> ~c"slave#{index}@127.0.0.1" end)
		|> Enum.map(&Task.async(fn -> spawn_slave(&1) end))
		|> Enum.map(&Task.await(&1, 30_000))
	end

	# Boot one slave and replicate this node's code/config/apps onto it.
	defp spawn_slave(node_host) do
		{:ok, node} = :slave.start(~c"127.0.0.1", node_name(node_host), inet_loader_args())
		add_code_paths(node)
		transfer_configuration(node)
		ensure_applications_started(node)
		{:ok, node}
	end

	# Synchronous RPC helper (block_call waits for the result).
	defp rpc(node, module, function, args) do
		:rpc.block_call(node, module, function, args)
	end

	# Slave VM args: load code over inet from the master, same cookie.
	defp inet_loader_args do
		~c"-loader inet -hosts 127.0.0.1 -setcookie #{:erlang.get_cookie()}"
	end

	defp allow_boot(host) do
		{:ok, ipv4} = :inet.parse_ipv4_address(host)
		:erl_boot_server.add_slave(ipv4)
	end

	defp add_code_paths(node) do
		rpc(node, :code, :add_paths, [:code.get_path()])
	end

	# Copy every loaded application's env vars onto the slave.
	defp transfer_configuration(node) do
		for {app_name, _, _} <- Application.loaded_applications do
			for {key, val} <- Application.get_all_env(app_name) do
				rpc(node, Application, :put_env, [app_name, key, val])
			end
		end
	end

	# Start Mix first (and set its env) so app startup behaves like locally.
	defp ensure_applications_started(node) do
		rpc(node, Application, :ensure_all_started, [:mix])
		rpc(node, Mix, :env, [Mix.env()])
		for {app_name, _, _} <- Application.loaded_applications do
			rpc(node, Application, :ensure_all_started, [app_name])
		end
	end

	# "slaveN@127.0.0.1" -> :slaveN (short name expected by :slave.start/3).
	defp node_name(node_host) do
		node_host
		|> to_string()
		|> String.split("@")
		|> Enum.at(0)
		|> String.to_atom()
	end
end
78 |
--------------------------------------------------------------------------------
/test/gitclone.yaml:
--------------------------------------------------------------------------------
1 | # Make sure uuid is generated outside of
2 | # outpost, since outposts are regenerated at each node
3 | - >
4 | read REMOTE_REPO;
5 | echo "{
6 | \"uuid\": \"$(cat /proc/sys/kernel/random/uuid)\",
7 | \"repo\": \"$REMOTE_REPO\"
8 | }"
9 | - outpost:
10 | setup: cat
11 | env:
12 | UUID: $SETUP['uuid']
13 | REMOTE_REPO: $SETUP['repo']
14 | missions:
15 | - outpost:
16 | dir: $SETUP['dir']
17 | setup:
18 | - mktemp -d --suffix GITCLONE
19 | - read TMP_DIR; git clone "$REMOTE_REPO" "$TMP_DIR/repo"; echo $TMP_DIR
20 | - "read TMP_DIR; echo \"{\\\"dir\\\": \\\"$TMP_DIR/repo\\\"}\""
21 | missions:
22 | - extends_file: .cingi.yaml
23 |
--------------------------------------------------------------------------------
/test/gitclone_cingi.yaml:
--------------------------------------------------------------------------------
1 | - echo "git@github.com:Rhathe/CingI.git"
2 | - extends_file: test/gitclone.yaml
3 |
--------------------------------------------------------------------------------
/test/helper_modules_test.exs:
--------------------------------------------------------------------------------
# Shared test helpers: polling "wait_for_*" functions built on timing/3,
# plus utilities to create and run a mission report end to end.
defmodule Helper do
	alias Cingi.Branch
	alias Cingi.Headquarters
	alias Cingi.Mission
	alias Cingi.MissionReport

	# Poll until the mission's exit_code is non-nil; returns the mission.
	def check_exit_code(pid) do
		timing(fn () ->
			mission = Mission.get pid
			ec = mission.exit_code
			[ec, mission]
		end)
	end

	# Poll until `cmd` shows up in the process table; returns nil.
	def wait_for_process(cmd) do
		timing(fn() ->
			[get_process_lines(cmd) > 0, nil]
		end)
	end

	# Poll until the mission resolved to either a command or submissions.
	def wait_for_valid_mission(pid) do
		timing(fn () ->
			mission = Mission.get pid
			[mission.cmd != nil or mission.submissions != nil, mission]
		end)
	end

	# Poll until the mission reports finished; returns the mission.
	def wait_for_finished(pid) do
		timing(fn () ->
			mission = Mission.get pid
			[mission.finished, mission]
		end)
	end

	# Poll until at least n missions are queued on the headquarters.
	def wait_for_queued(pid, n) do
		timing(fn () ->
			hq = Headquarters.get(pid)
			[n <= length(hq.queued_missions), hq]
		end)
	end

	# Poll until at least n missions are running on the branch.
	def wait_for_running_missions(pid, n) do
		timing(fn () ->
			branch = Branch.get(pid)
			[n <= length(branch.running_missions), branch]
		end)
	end

	# Poll until at least n missions have finished on the branch.
	def wait_for_finished_missions(pid, n) do
		timing(fn () ->
			branch = Branch.get(pid)
			[n <= length(branch.finished_missions), branch]
		end)
	end

	# Poll until at least n of the mission's submissions have an exit code.
	def wait_for_submissions_finish(pid, n) do
		timing(fn () ->
			mission = Mission.get(pid)
			pids = Enum.map(mission.submission_holds, &(&1.pid))
			sum = length(Enum.filter(pids, &(not is_nil(Mission.get(&1).exit_code))))
			[n <= sum, mission]
		end)
	end

	# Repeatedly call fnc until it signals done or `limit` seconds elapse.
	# fnc returns [done, value]; done of false or nil means keep polling
	# (busy loop, no sleep). The case matches on [timed_out?, done, value];
	# once done is truthy (or timed_out is false on the last clause), value
	# is returned. Raises after the time limit.
	def timing(fnc, limit \\ 10, start \\ nil) do
		start = start || Time.utc_now
		diff = Time.diff(Time.utc_now, start)

		ret = [diff > limit] ++ fnc.()
		case ret do
			[true, _, _] ->
				IO.inspect {"Wait exceeded #{limit} seconds", ret}
				raise "Waiting exceeded #{limit} seconds"
			[false, false, _] -> timing(fnc, limit, start)
			[false, nil, _] -> timing(fnc, limit, start)
			[_, _, val] -> val
		end
	end

	# Build a paused headquarters + branch, queue the report described by
	# opts (e.g. [file: path]), and return snapshots plus the pids without
	# starting execution — callers resume the HQ themselves.
	def create_mission_report(opts) do
		{:ok, bpid} = Branch.start_link()
		{:ok, hpid} = Headquarters.start_link()
		Headquarters.pause(hpid)
		Headquarters.link_branch(hpid, bpid)

		report_pid = Branch.create_report(bpid, opts)
		hq = wait_for_queued(hpid, 1)
		mission_pid = Enum.at(hq.queued_missions, 0)

		[
			hq: hq,
			branch: Branch.get(bpid),
			report: MissionReport.get(report_pid),
			mission: Mission.get(mission_pid),

			hq_pid: hpid,
			branch_pid: bpid,
			report_pid: report_pid,
			mission_pid: mission_pid,
		]
	end

	# Run the plan file to completion; returns output lines, exit code, and
	# the raw create_mission_report/1 result under :res.
	def run_mission_report(plan) do
		res = Helper.create_mission_report([file: plan])
		Headquarters.resume(res[:hq_pid])
		mission = Helper.wait_for_finished(res[:mission_pid])
		[output: get_output(mission), exit_code: mission.exit_code, res: res]
	end

	# Flatten a mission's output chunks into a list of non-empty lines.
	def get_output(mission) do
		mission.output
		|> Enum.map(&(&1[:data]))
		|> Enum.join("\n")
		|> String.split("\n", trim: true)
	end

	# Count processes whose full command line equals `cmd` exactly.
	# NOTE(review): cmd is interpolated into a shell string — fine for the
	# fixed commands used in tests, but don't reuse with untrusted input.
	def get_process_lines(cmd) do
		res = Porcelain.exec("bash", ["-c", "ps -o command | awk '$0==\"#{cmd}\"' | wc -l"])
		{n, _} = Integer.parse res.out
		n
	end
end
123 |
--------------------------------------------------------------------------------
/test/mission_plans/example.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - echo "beginning"
3 | - missions:
4 | first_echo: echo "match 1"
5 | second_echo:
6 | - echo "ignored 2"
7 | - echo "match 3"
8 | third_echo:
9 | missions:
10 | - echo "ignored 4"
11 | - - echo "match 5"
12 | - echo "ignored 6"
13 | fourth_echo:
14 | missions: echo "match 7"
15 | - missions: grep match
16 | - missions:
17 | - echo "end"
18 |
--------------------------------------------------------------------------------
/test/mission_plans/exits.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | sequential:
3 | missions:
4 | fail_fast:
5 | missions:
6 | - echo "seq_fail_fast"
7 | - exit 7
8 | - echo "Should not be in seq_fail_fast"
9 | continue:
10 | fail_fast: false
11 | missions:
12 | - echo "seq_continue"
13 | - exit 9
14 | - echo "Should still be in seq_continue"
15 |
16 | parallel:
17 | missions:
18 | fail_fast:
19 | fail_fast: true
20 | missions:
21 | 1: ncat -l -i 2 9990
22 | 2: exit 3
23 | continue:
24 | missions:
25 | 1: ncat -l -i 2 9991
26 | 2: exit 4
27 |
--------------------------------------------------------------------------------
/test/mission_plans/extends/file.yaml:
--------------------------------------------------------------------------------
1 | mission_plan_templates:
2 | one:
3 | extends_file: test/mission_plans/extends/file_1.yaml
4 | two: echo two
5 | missions:
6 | - outpost:
7 | setup:
8 | - bash test/bash_scripts/tmpdir.sh
9 | dir: $SETUP['dir']
10 | env:
11 | OLD_DIR: $SETUP['old_dir']
12 | missions:
13 | - outpost:
14 | setup:
15 | - "cp \"$OLD_DIR/test/mission_plans/extends/file_2.yaml\" ./test_tmp_extends.yaml"
16 | missions:
17 | - extends_file: test_tmp_extends.yaml
18 | - extends_template: one
19 | - extends_template: two
20 | - rm test_tmp_extends.yaml
21 | - rm tmpdir.sh
22 | - rmdir `pwd`
23 |
--------------------------------------------------------------------------------
/test/mission_plans/extends/file_1.yaml:
--------------------------------------------------------------------------------
1 | echo in_extends_file_1
2 |
--------------------------------------------------------------------------------
/test/mission_plans/extends/file_2.yaml:
--------------------------------------------------------------------------------
1 | echo in_extends_file_2
2 |
--------------------------------------------------------------------------------
/test/mission_plans/extends/template.yaml:
--------------------------------------------------------------------------------
1 | mission_plan_templates:
2 | one:
3 | mission_plan_templates:
4 | two:
5 | missions: echo onetwo
6 | missions:
7 | - echo one
8 | - extends_template: two
9 | - extends_template: three
10 | two:
11 | missions: echo two
12 | three:
13 | missions: echo three
14 | four: echo four
15 | finished_plan:
16 | missions: echo nested_complete
17 | missions:
18 | - extends_template: one
19 | - missions:
20 | - extends_template: two
21 | - extends_template: four
22 | mission_plan_templates:
23 | four: echo "four shouldn't be here"
24 | - mission_plan_templates:
25 | nested_plan:
26 | extends_template: finished_plan
27 | missions:
28 | - extends_template: nested_plan
29 | - echo "premature end"
30 | - extends_template: no_mission_plan
31 | - echo "unreachable end"
32 |
--------------------------------------------------------------------------------
/test/mission_plans/fail_fast.yaml:
--------------------------------------------------------------------------------
1 | fail_fast: true
2 | missions:
3 | one:
4 | fail_fast: false
5 | missions:
6 | three:
7 | - sleep 1
8 | - echo onethree
9 | four: sleep 1
10 | two:
11 | - echo two
12 | - exit 5
13 | five: sleep 1
14 |
--------------------------------------------------------------------------------
/test/mission_plans/inputs/parallel.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - missions:
3 | first:
4 | missions:
5 | - echo first1
6 | - echo first2
7 | - echo first3
8 | second:
9 | missions:
10 | - echo second1
11 | - echo second2
12 | third:
13 | missions:
14 | - echo third1
15 | - echo third2
16 | - echo third3
17 | - echo third4
18 | - missions:
19 | a:
20 | input: $IN['second']
21 | missions: "while read line; do echo \"second: $line\"; done"
22 | b:
23 | input:
24 | - $IN['first']
25 | - $IN['third']
26 | missions: "while read line; do echo \"first, third: $line\"; done"
27 | c:
28 | input: $IN
29 | missions: "while read line; do echo \"with in: $line\"; done"
30 | d:
31 | missions: "while read line; do echo \"without in: $line\"; done"
32 |
--------------------------------------------------------------------------------
/test/mission_plans/inputs/sequential.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - missions:
3 | - echo blah1
4 | - echo blah2
5 | - echo blah3
6 | - missions:
7 | a:
8 | input: $IN[0]
9 | missions: "while read line; do echo \"0: $line\"; done"
10 | b:
11 | input: $IN[$LAST]
12 | missions: "while read line; do echo \"last: $line\"; done"
13 | c:
14 | input:
15 | - $IN[$LAST]
16 | - $IN[1]
17 | missions: "while read line; do echo \"last, 1: $line\"; done"
18 |
--------------------------------------------------------------------------------
/test/mission_plans/nested.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - missions:
3 | - echo blah1
4 | - grep blah
5 | - missions:
6 | - echo 1match1
7 | - echo 2match2
8 | - echo 1match3
9 | - echo 2match1
10 | - echo ignored
11 | - echo 1match4
12 | - echo 2match5
13 | - missions:
14 | - missions:
15 | - grep match
16 | - missions:
17 | - grep 2match
18 | - missions:
19 | - missions:
20 | 5: grep 5
21 | 1: grep 1
22 |
--------------------------------------------------------------------------------
/test/mission_plans/outposts/env_and_dir.yaml:
--------------------------------------------------------------------------------
1 | outpost:
2 | dir: "/"
3 | env:
4 | TEST_OUTPOSTS: test_outposts_value
5 | missions:
6 | - "pwd | xargs echo \"start pwd:\""
7 | - "echo \"START, TEST_OUTPOSTS: $TEST_OUTPOSTS\""
8 | - outpost:
9 | env:
10 | TEST_OUTPOSTS_2: test_outposts_2_value
11 | missions:
12 | - "echo \"TEST_OUTPOSTS 1: $TEST_OUTPOSTS\""
13 | - "echo \"TEST_OUTPOSTS 2: $TEST_OUTPOSTS_2\""
14 | - outpost:
15 | env:
16 | TEST_OUTPOSTS: test_outposts_override
17 | TEST_OUTPOSTS_3: test_outposts_3_value
18 | missions:
19 | - "echo \"TEST_OUTPOSTS 3: $TEST_OUTPOSTS_3\""
20 | - "echo \"TEST_OUTPOSTS 4: $TEST_OUTPOSTS\""
21 | - outpost:
22 | dir: "/tmp"
23 | missions: "pwd | xargs echo \"newdir pwd:\""
24 | - "echo \"END, TEST_OUTPOSTS: $TEST_OUTPOSTS\""
25 | - "pwd | xargs echo \"end pwd:\""
26 |
--------------------------------------------------------------------------------
/test/mission_plans/outposts/multinode.yaml:
--------------------------------------------------------------------------------
1 | outpost:
2 | setup:
3 | - echo "top setup"
4 | - "echo \"top setup TMP_DIR_1: $TMP_DIR_1\""
5 | - "echo \"top setup TMP_DIR_2: $TMP_DIR_2\""
6 | - "echo \"top setup TMP_DIR_3: $TMP_DIR_3\""
7 | - bash test/bash_scripts/tmpdir.sh
8 | dir: $SETUP['dir']
9 | env:
10 | TMP_DIR_1: $SETUP['dir']
11 | TMP_DIR_2: $SETUP['dir']
12 | TMP_DIR_3: $SETUP['dir']
13 | TEST_OUTPOSTS: test_outposts_value
14 | missions:
15 | - "pwd | xargs echo 'top pwd:' "
16 | - outpost:
17 | dir: $SETUP['dir']
18 | env:
19 | TMP_DIR_2: $SETUP['dir']
20 | TMP_DIR_3: first_override
21 | setup:
22 | - echo "middle setup"
23 | - "pwd | xargs echo 'middle setup pwd:' "
24 | - "echo \"echo inside_tmp_dir\" > tmpecho.sh"
25 | - "echo \"middle setup TMP_DIR_1: $TMP_DIR_1\""
26 | - "echo \"middle setup TMP_DIR_2: $TMP_DIR_2\""
27 | - "echo \"middle setup TMP_DIR_3: $TMP_DIR_3\""
28 | - pwd | xargs bash tmpdir.sh
29 | missions:
30 | - outpost:
31 | env:
32 | TMP_DIR_3: second_override
33 | setup:
34 | - echo "bottom setup"
35 | - "pwd | xargs echo 'bottom setup pwd:' "
36 | - "echo \"bottom setup TMP_DIR_1: $TMP_DIR_1\""
37 | - "echo \"bottom setup TMP_DIR_2: $TMP_DIR_2\""
38 | - "echo \"bottom setup TMP_DIR_3: $TMP_DIR_3\""
39 | missions:
40 | one:
41 | - "pwd | xargs echo 'one bottom pwd:' "
42 | - "echo \"one bottom TMP_DIR_1: $TMP_DIR_1\""
43 | - "echo \"one bottom TMP_DIR_2: $TMP_DIR_2\""
44 | - "echo \"one bottom TMP_DIR_3: $TMP_DIR_3\""
45 | two:
46 | - "pwd | xargs echo 'two bottom pwd:' "
47 | - "echo \"two bottom TMP_DIR_1: $TMP_DIR_1\""
48 | - "echo \"two bottom TMP_DIR_2: $TMP_DIR_2\""
49 | - "echo \"two bottom TMP_DIR_3: $TMP_DIR_3\""
50 | three:
51 | missions:
52 | a: "pwd | xargs echo 'three bottom pwd:' "
53 | b: "echo \"three bottom TMP_DIR_1: $TMP_DIR_1\""
54 | c: "echo \"three bottom TMP_DIR_2: $TMP_DIR_2\""
55 | d: "echo \"three bottom TMP_DIR_3: $TMP_DIR_3\""
56 | four:
57 | missions:
58 | a: "pwd | xargs echo 'four bottom pwd:' "
59 | b: "echo \"four bottom TMP_DIR_1: $TMP_DIR_1\""
60 | c: "echo \"four bottom TMP_DIR_2: $TMP_DIR_2\""
61 | d: "echo \"four bottom TMP_DIR_3: $TMP_DIR_3\""
62 |
--------------------------------------------------------------------------------
/test/mission_plans/outposts/setup.yaml:
--------------------------------------------------------------------------------
1 | outpost:
2 | setup:
3 | - echo "top setup"
4 | - "echo \"top setup TMP_DIR_1: $TMP_DIR_1\""
5 | - "echo \"top setup TMP_DIR_2: $TMP_DIR_2\""
6 | - "echo \"top setup TMP_DIR_3: $TMP_DIR_3\""
7 | - bash test/bash_scripts/tmpdir.sh
8 | dir: $SETUP['dir']
9 | env:
10 | TMP_DIR_1: $SETUP['dir']
11 | TMP_DIR_2: $SETUP['dir']
12 | TMP_DIR_3: $SETUP['dir']
13 | TEST_OUTPOSTS: test_outposts_value
14 | missions:
15 | - "pwd | xargs echo 'top pwd:' "
16 | - outpost:
17 | dir: $SETUP['dir']
18 | env:
19 | TMP_DIR_2: $SETUP['dir']
20 | TMP_DIR_3: first_override
21 | setup:
22 | - echo "middle setup"
23 | - "pwd | xargs echo 'middle setup pwd:' "
24 | - "echo \"echo inside_tmp_dir\" > tmpecho.sh"
25 | - "echo \"middle setup TMP_DIR_1: $TMP_DIR_1\""
26 | - "echo \"middle setup TMP_DIR_2: $TMP_DIR_2\""
27 | - "echo \"middle setup TMP_DIR_3: $TMP_DIR_3\""
28 | - pwd | xargs bash tmpdir.sh
29 | missions:
30 | - outpost:
31 | env:
32 | TMP_DIR_3: second_override
33 | setup:
34 | - echo "bottom setup"
35 | - "pwd | xargs echo 'bottom setup pwd:' "
36 | - "echo \"bottom setup TMP_DIR_1: $TMP_DIR_1\""
37 | - "echo \"bottom setup TMP_DIR_2: $TMP_DIR_2\""
38 | - "echo \"bottom setup TMP_DIR_3: $TMP_DIR_3\""
39 | missions:
40 | - "pwd | xargs echo 'bottom pwd:' "
41 | - "echo \"bottom TMP_DIR_1: $TMP_DIR_1\""
42 | - "echo \"bottom TMP_DIR_2: $TMP_DIR_2\""
43 | - "echo \"bottom TMP_DIR_3: $TMP_DIR_3\""
44 | - "pwd | xargs echo 'middle pwd:' "
45 | - "echo \"middle TMP_DIR_1: $TMP_DIR_1\""
46 | - "echo \"middle TMP_DIR_2: $TMP_DIR_2\""
47 | - "echo \"middle TMP_DIR_3: $TMP_DIR_3\""
48 |
49 | # Remove tmp files and dir
50 | - "rm \"$TMP_DIR_2/tmpdir.sh\""
51 | - "rmdir \"$TMP_DIR_2\""
52 | - bash tmpecho.sh
53 | - "echo \"top TMP_DIR_1: $TMP_DIR_1\""
54 | - "echo \"top TMP_DIR_2: $TMP_DIR_2\""
55 | - "echo \"top TMP_DIR_3: $TMP_DIR_3\""
56 |
57 | # Remove tmp files and dir
58 | - "rm \"$TMP_DIR_1/tmpecho.sh\""
59 | - "rm \"$TMP_DIR_1/tmpdir.sh\""
60 | - "rmdir \"$TMP_DIR_1\""
61 |
--------------------------------------------------------------------------------
/test/mission_plans/outposts/setup_fail.yaml:
--------------------------------------------------------------------------------
1 | outpost:
2 | setup:
3 | - echo "{}"; exit 0
4 | missions:
5 | - echo "should run"
6 | - outpost:
7 | setup:
8 | - echo "{}"; exit 7
9 | missions: echo "should not run"
10 | - echo "should also not run"
11 |
--------------------------------------------------------------------------------
/test/mission_plans/outposts/simple.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - echo "beginning"
3 | - missions:
4 | - missions:
5 | - pwd | xargs echo "dir"
6 | - "echo \"TEST: $TEST\""
7 | - "echo \"ENV1: $ENV1\""
8 | - "echo \"ENV2: $ENV2\""
9 | - "echo \"ENV3: $ENV3\""
10 | - "echo \"MISSING_KEY: $MISSING_KEY\""
11 | outpost:
12 | dir: $SETUP['dir']
13 | env:
14 | TEST: test_value
15 | $SETUP['env1key']: env1_value
16 | $SETUP['env2key']: $SETUP['env1val']
17 | ENV3: $SETUP['env2val']
18 | $SETUP['missingkey']: missing_val
19 | MISSING_KEY: $SETUP['missingval']
20 | setup:
21 | - >
22 | echo "{
23 | \"dir\": \"/tmp\",
24 | \"env1key\": \"ENV1\",
25 | \"env2key\": \"ENV2\",
26 | \"env1val\": \"VAL1\",
27 | \"env2val\": \"VAL2\"
28 | }"
29 | - missions:
30 | - echo "end"
31 |
--------------------------------------------------------------------------------
/test/mission_plans/outposts/teardown.yaml:
--------------------------------------------------------------------------------
1 | - outpost:
2 | env:
3 | SETUP_ENV: "TOPONE"
4 | teardown:
5 | - echo "top teardown $SETUP_ENV"
6 | missions:
7 | - echo "top mission"
8 | - outpost:
9 | env:
10 | SETUP_ENV: "MIDDLETWO"
11 | teardown:
12 | - echo "middle teardown $SETUP_ENV"
13 | missions:
14 | - echo "middle mission"
15 | - outpost:
16 | env:
17 | SETUP_ENV: "BOTTOMTHREE"
18 | teardown: echo "bottom teardown $SETUP_ENV"
19 | missions:
20 | one: echo "bottom mission one"
21 | two: echo "bottom mission two"
22 | three: echo "bottom mission three"
23 |
--------------------------------------------------------------------------------
/test/mission_plans/outputs.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - output:
3 | - $OUT[$LAST]
4 | - $OUT['second']
5 | missions:
6 | first:
7 | output: $OUT[2]
8 | missions:
9 | - echo first1
10 | - echo first2
11 | - echo first3
12 | second:
13 | missions:
14 | - echo second1
15 | - echo second2
16 | third:
17 | output:
18 | - $OUT[4]
19 | - $OUT[2]
20 | - $OUT[1]
21 | missions:
22 | - echo third1
23 | - echo third2
24 | - echo third3
25 | - echo third4
26 | - missions:
27 | normal: "while read line; do echo \"normal: $line\"; done"
28 | selective_input:
29 | input:
30 | - $IN['third']
31 | - $IN['second']
32 | missions: "while read line; do echo \"third, second: $line\"; done"
33 | hidden_input:
34 | input:
35 | - $IN['first']
36 | missions: "while read line; do echo \"first: $line\"; done"
37 |
--------------------------------------------------------------------------------
/test/mission_plans/when.yaml:
--------------------------------------------------------------------------------
1 | missions:
2 | - fail_fast: false
3 | missions:
4 | - echo first
5 | - echo second; exit 1
6 | - missions:
7 | success_regardless:
8 | missions: echo "runs regardless"
9 |
10 | success_with_exit_code:
11 | when:
12 | - exit_codes:
13 | - 1
14 | missions: echo "runs because of exit code 1"
15 |
16 | fail_with_exit_code:
17 | when:
18 | - exit_codes:
19 | - 0
20 | missions: echo "should not run because not exit code 0"
21 |
22 | success_with_success:
23 | when:
24 | - success: true
25 | missions: echo "should not run because not success"
26 |
27 | fail_without_success:
28 | when:
29 | - success: false
30 | missions: echo "runs because of failure"
31 |
32 | success_with_output:
33 | when:
34 | - outputs:
35 | - second
36 | missions: echo "runs because of second in outputs"
37 |
38 | fail_without_outputs:
39 | when:
40 | - outputs:
41 | - first
42 | missions: echo "should not run because of no first in outputs"
43 |
44 | success_with_multiple_conditions:
45 | when:
46 | - outputs: second
47 | - exit_codes: 1
48 | missions: echo "runs because of second in outputs and exit code of 1"
49 |
50 | fail_with_multiple_conditions:
51 | when:
52 | - outputs: second
53 | - exit_codes: 2
54 | missions: echo "should not run because although second in outputs, exit_code is not 2"
55 | - missions:
56 | success_of_whens:
57 | when:
58 | - exit_codes: 0
59 | missions: echo "runs because parallel group exited with 0"
60 |
61 | failure_of_whens:
62 | when:
63 | - success: false
64 | missions: echo "should not run because parallel group was success"
65 | - when:
66 | - outputs: NON_EXISTENT_OUTPUT
67 | missions: echo "should not run because of non-existent output"
68 | - when:
69 | - exit_codes: 0
70 | missions: echo "runs because exit code is not nil with last mission being skipped"
71 | - echo end
72 |
--------------------------------------------------------------------------------
/test/mockgenserver_test.exs:
--------------------------------------------------------------------------------
1 | defmodule MockGenServer do
2 | use GenServer
3 |
4 | # Client API
5 |
6 | def start_link(args \\ []) do
7 | GenServer.start_link(__MODULE__, args, [])
8 | end
9 |
10 | # Server Callbacks
11 |
12 | def init(opts) do
13 | {:ok, %{opts: opts, calls: []}}
14 | end
15 |
16 | def handle_cast(args, mock) do
17 | calls = mock.calls ++ [{:cast, args}]
18 | {:noreply, Map.merge(mock, %{calls: calls})}
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/test/test_helper.exs:
--------------------------------------------------------------------------------
1 | ExUnit.configure exclude: [distributed: true]
2 | ExUnit.start()
3 |
--------------------------------------------------------------------------------