├── .github
└── workflows
│ └── concourse.yml
├── .gitignore
├── .kitchen-cluster.yml
├── .kitchen.yml
├── .travis.yml
├── Gemfile
├── Gemfile.lock
├── LICENSE
├── README.md
├── Vagrantfile
├── ansible.cfg
├── defaults
└── main.yml
├── handlers
└── main.yml
├── keys
└── key.sh
├── meta
└── main.yml
├── tasks
├── auth.yml
├── checks.yml
├── common_nix.yml
├── install_nix_4.yml
├── install_nix_5.yml
├── kernel_update.yml
├── main.yml
├── manage
│ ├── main.yml
│ └── teams.yml
├── web_nix.yml
└── worker_nix.yml
├── templates
├── com.github.ahelal.concourse.web.plist.j2
├── com.github.ahelal.concourse.worker.plist.j2
├── concourse-web-init.sh.j2
├── concourse-web.j2
├── concourse-worker-init.sh.j2
└── concourse-worker.j2
├── test
├── ansible-setup.sh
├── ansible.cfg
├── helper_roles
│ ├── fly
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── macos.yml
│ │ │ ├── simple_failure.yml
│ │ │ └── simple_success.yml
│ │ └── tasks
│ │ │ ├── fly.yml
│ │ │ ├── main.yml
│ │ │ └── packages.yml
│ ├── hosts
│ │ └── tasks
│ │ │ └── main.yml
│ └── roles_requirements.yml
├── integration
│ ├── Gemfile
│ ├── helper_spec.rb
│ ├── simple
│ │ ├── concourse-vars.yml
│ │ ├── serverspec
│ │ │ ├── a_user_spec.rb
│ │ │ ├── a_web_spec.rb
│ │ │ ├── b_worker_spec.rb
│ │ │ ├── c_binary_spec.rb
│ │ │ ├── ca_binary_web_spec.rb
│ │ │ ├── cb_binary_worker_spec.rb
│ │ │ ├── d_job_spec.rb
│ │ │ ├── e_manage_spec.rb
│ │ │ ├── ssh_spec.rb
│ │ │ └── z_control_scripts_spec.rb
│ │ └── simple.yml
│ ├── web
│ │ ├── serverspec
│ │ │ ├── a_user_spec.rb
│ │ │ ├── a_web_spec.rb
│ │ │ ├── b_worker_not_running_spec.rb
│ │ │ ├── c_binary_spec.rb
│ │ │ ├── ca_binary_web_spec.rb
│ │ │ ├── d_job_spec.rb
│ │ │ ├── e_manage_spec.rb
│ │ │ ├── ssh_spec.rb
│ │ │ └── z_web_control_scripts_spec.rb
│ │ └── web.yml
│ └── worker
│ │ ├── serverspec
│ │ ├── a_user_spec.rb
│ │ ├── b_worker_spec.rb
│ │ ├── c_binary_spec.rb
│ │ ├── cb_binary_worker_spec.rb
│ │ ├── d-web_not_running_spec.rb
│ │ ├── ssh_spec.rb
│ │ └── z_worker_control_scripts_spec.rb
│ │ └── worker.yml
├── setup_roles.sh
├── test-cluster.sh
├── test-pipeline.yml
└── uploaded.sh
├── vars
└── main.yml
├── web_arguments.txt
└── worker_arguments.txt
/.github/workflows/concourse.yml:
--------------------------------------------------------------------------------
1 | name: Concourse
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 |
9 | jobs:
10 | test:
11 |
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | ruby-version: ['3.0']
16 | ansible-version: ['2.8.17', '2.9.17', '2.10.3']
17 |
18 | steps:
19 | - uses: actions/checkout@v2
20 | - name: Set up Ruby
21 | # To automatically get bug fixes and new Ruby versions for ruby/setup-ruby,
22 | # change this to (see https://github.com/ruby/setup-ruby#versioning):
23 | # uses: ruby/setup-ruby@v1
24 | uses: ruby/setup-ruby@473e4d8fe5dd94ee328fdfca9f8c9c7afc9dae5e
25 | with:
26 | ruby-version: ${{ matrix.ruby-version }}
27 | bundler-cache: true # runs 'bundle install' and caches installed gems automatically
28 | - name: Bundle install
29 | run: bundle install
30 | - name: Setup ansible
31 | run: |
32 | sudo apt-get install -y python3-setuptools
33 | pip3 install ansible==${{matrix.ansible-version}}
34 |         echo "/home/runner/.local/bin" >> "$GITHUB_PATH"
35 |
36 | - name: Setup ansible roles
37 | run: |
38 | export DEFAULT_LOCAL_TMP=/home/runner/ansible-tmp
39 | ./test/setup_roles.sh
40 | - name: simple integration tests
41 | run: |
42 | echo "${{ matrix.ruby-version }} ${{matrix.ansible-version}}"
43 | export DEFAULT_LOCAL_TMP=/home/runner/ansible-tmp
44 | bundle check
45 | which ansible-playbook
46 | echo "**** ANSIBLE VERSION ****"
47 | ansible-playbook --version
48 | echo "**** ****"
49 | bundle exec kitchen test
50 | - name: cluster integration tests
51 | run: ./test/test-cluster.sh
52 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .kitchen
2 | keys/vars
3 | *.retry
4 | .vagrant
5 | *.log
6 | *.pyc
7 | password
8 | test/helper_roles/postgresql
9 |
--------------------------------------------------------------------------------
/.kitchen-cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | driver :
4 | name : docker
5 | socket : unix:///var/run/docker.sock
6 | use_sudo : false
7 | privileged : true
8 | volume : /opt/concourseci/worker
9 | provision_command : mkdir -p /run/sshd
10 |
11 | verifier :
12 | name : serverspec
13 | remote_exec : false
14 | default_pattern : true
15 | bundler_path : <%= '/opt/hellofresh/kitchen/embedded/bin' if File.exist?('/opt/hellofresh/kitchen/embedded/bin/bundler') %>
16 | rspec_path : <%= '/opt/hellofresh/kitchen/embedded/bin' if File.exist?('/opt/hellofresh/kitchen/embedded/bin/rspec') %>
17 | gemfile : ./test/integration/Gemfile
18 |
19 | provisioner :
20 | name : ansible_push
21 | ansible_config : "tests/ansible.cfg"
22 | extra_vars : "@./test/integration/simple/concourse-vars.yml"
23 | raw_arguments : "--extra-vars='ansible_python_interpreter=/usr/bin/python3'"
24 |
25 | groups :
26 | concourse-web : ['web-ubuntu1804']
27 | concourse-worker : ['worker-ubuntu1804']
28 | chef_bootstrap_url : nil
29 | use_instance_name : True
30 | idempotency_test : false
31 | diff : True
32 | sudo : True
33 | verbose : "v"
34 |
35 | platforms :
36 | - name : "ubuntu1804"
37 | driver_config :
38 | image : ubuntu:18.04
39 | platform : ubuntu
40 |
41 | suites:
42 | - name : web
43 | provisioner :
44 | playbook : "test/integration/web/web.yml"
45 | driver_config :
46 | instance_name : "web-ubuntu1804"
47 | hostname : "web-ubuntu1804"
48 |
49 | - name : worker
50 | provisioner :
51 | playbook : "test/integration/worker/worker.yml"
52 | driver_config :
53 | hostname : "worker-ubuntu1804"
54 | instance_name : "worker-ubuntu1804"
55 | links : "web-ubuntu1804"
56 |
--------------------------------------------------------------------------------
/.kitchen.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | driver :
4 | name : docker
5 | socket : unix:///var/run/docker.sock
6 | use_sudo : false
7 | privileged : true
8 | volume : /opt/concourseci/worker
9 | provision_command : mkdir -p /run/sshd
10 |
11 | verifier :
12 | name : serverspec
13 | remote_exec : false
14 | default_pattern : true
15 | bundler_path : <%= '/opt/hellofresh/kitchen/embedded/bin' if File.exist?('/opt/hellofresh/kitchen/embedded/bin/bundler') %>
16 | rspec_path : <%= '/opt/hellofresh/kitchen/embedded/bin' if File.exist?('/opt/hellofresh/kitchen/embedded/bin/rspec') %>
17 | gemfile : ./test/integration/Gemfile
18 |
19 | provisioner :
20 | name : ansible_push
21 | ansible_config : "tests/ansible.cfg"
22 | chef_bootstrap_url : nil
23 | use_instance_name : True
24 | idempotency_test : True
25 | diff : True
26 | sudo : True
27 | extra_vars : "@./test/integration/simple/concourse-vars.yml"
28 | # verbose : "vvvvv"
29 |
30 | platforms :
31 | ### Ubuntu 1604
32 | - name : "ubuntu1604"
33 | driver_config :
34 | image : ubuntu:16.04
35 | platform : ubuntu
36 | provisioner :
37 | groups :
38 | concourse-web :
39 | - 'simple-ubuntu1604'
40 | concourse-worker :
41 | - 'simple-ubuntu1604'
42 | - 'worker-ubuntu1604'
43 | ### Ubuntu 1804
44 | - name : "ubuntu1804"
45 | driver_config :
46 | image : ubuntu:18.04
47 | platform : ubuntu
48 | provisioner :
49 | groups :
50 | concourse-web :
51 | - 'simple-ubuntu1804'
52 | concourse-worker :
53 | - 'simple-ubuntu1804'
54 | - 'worker-ubuntu1804'
55 | ### Ubuntu 2004
56 | # - name : "ubuntu2004"
57 | # driver_config :
58 | # image : ubuntu:20.04
59 | # platform : ubuntu
60 | # provisioner :
61 | # groups :
62 | # concourse-web :
63 | # - 'simple-ubuntu2004'
64 | # concourse-worker :
65 | # - 'simple-ubuntu2004'
66 | # - 'worker-ubuntu2004'
67 | suites:
68 | - name : simple
69 | provisioner :
70 | playbook : "test/integration/simple/simple.yml"
71 | driver_config :
72 | hostname : "simple"
73 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 | dist: trusty
5 | sudo: required
6 | services:
7 | - docker
8 |
9 | env:
10 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1404-one"
11 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1404-three"
12 |
13 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1604-one"
14 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1604-two"
15 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1604-three"
16 |
17 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1804-one"
18 | - TEST_COMMAND="bundle exec kitchen test simple-ubuntu1804-three"
19 |
20 | - TEST_COMMAND="./test/test-cluster.sh"
21 |
22 | before_install:
23 | # Make sure everything's up to date.
24 | - sudo apt-get update -qq
25 | - sudo apt-get install -qq python-apt python-pycurl git python-pip ruby ruby-dev build-essential autoconf
26 | - gem install bundler
27 |
28 | install:
29 | - sudo locale-gen en_US.UTF-8
30 | - sudo dpkg-reconfigure locales
31 | - bash ./test/ansible-setup.sh
32 | - bash ./test/setup_roles.sh
33 | - bundle install
34 |
35 | script:
36 | - LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 $TEST_COMMAND
37 |
38 | after_success:
39 | - echo "Success"
40 |
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 |
3 | group :development do
4 | gem 'test-kitchen'
5 | gem 'kitchen-ansiblepush'
6 | gem 'kitchen-docker'
7 | gem 'kitchen-verifier-serverspec'
8 | gem 'thor'
9 | gem 'net-ssh'
10 | gem 'serverspec'
11 | end
12 |
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | remote: https://rubygems.org/
3 | specs:
4 | builder (3.2.4)
5 | diff-lcs (1.3)
6 | erubi (1.9.0)
7 | ffi (1.13.1)
8 | gssapi (1.3.0)
9 | ffi (>= 1.0.1)
10 | gyoku (1.3.1)
11 | builder (>= 2.1.2)
12 | httpclient (2.8.3)
13 | kitchen-ansiblepush (0.9.1)
14 | test-kitchen (~> 1.4)
15 | kitchen-docker (2.7.0)
16 | test-kitchen (>= 1.0.0)
17 | kitchen-verifier-serverspec (0.6.11)
18 | net-ssh (>= 3)
19 | test-kitchen (~> 1.4)
20 | little-plugger (1.1.4)
21 | logging (2.3.0)
22 | little-plugger (~> 1.1)
23 | multi_json (~> 1.14)
24 | mixlib-install (3.11.5)
25 | mixlib-shellout
26 | mixlib-versioning
27 | thor
28 | mixlib-shellout (2.4.0)
29 | mixlib-versioning (1.2.2)
30 | multi_json (1.15.0)
31 | net-scp (1.2.1)
32 | net-ssh (>= 2.6.5)
33 | net-ssh (4.2.0)
34 | net-ssh-gateway (1.3.0)
35 | net-ssh (>= 2.6.5)
36 | net-telnet (0.1.1)
37 | nori (2.6.0)
38 | rspec (3.8.0)
39 | rspec-core (~> 3.8.0)
40 | rspec-expectations (~> 3.8.0)
41 | rspec-mocks (~> 3.8.0)
42 | rspec-core (3.8.0)
43 | rspec-support (~> 3.8.0)
44 | rspec-expectations (3.8.2)
45 | diff-lcs (>= 1.2.0, < 2.0)
46 | rspec-support (~> 3.8.0)
47 | rspec-its (1.2.0)
48 | rspec-core (>= 3.0.0)
49 | rspec-expectations (>= 3.0.0)
50 | rspec-mocks (3.8.0)
51 | diff-lcs (>= 1.2.0, < 2.0)
52 | rspec-support (~> 3.8.0)
53 | rspec-support (3.8.0)
54 | rubyntlm (0.6.2)
55 | rubyzip (2.3.0)
56 | serverspec (2.41.3)
57 | multi_json
58 | rspec (~> 3.0)
59 | rspec-its
60 | specinfra (~> 2.72)
61 | sfl (2.3)
62 | specinfra (2.76.3)
63 | net-scp
64 | net-ssh (>= 2.7)
65 | net-telnet (= 0.1.1)
66 | sfl
67 | test-kitchen (1.23.2)
68 | mixlib-install (~> 3.6)
69 | mixlib-shellout (>= 1.2, < 3.0)
70 | net-scp (~> 1.1)
71 | net-ssh (>= 2.9, < 5.0)
72 | net-ssh-gateway (~> 1.2)
73 | thor (~> 0.19)
74 | winrm (~> 2.0)
75 | winrm-elevated (~> 1.0)
76 | winrm-fs (~> 1.1)
77 | thor (0.20.3)
78 | winrm (2.3.4)
79 | builder (>= 2.1.2)
80 | erubi (~> 1.8)
81 | gssapi (~> 1.2)
82 | gyoku (~> 1.0)
83 | httpclient (~> 2.2, >= 2.2.0.2)
84 | logging (>= 1.6.1, < 3.0)
85 | nori (~> 2.0)
86 | rubyntlm (~> 0.6.0, >= 0.6.1)
87 | winrm-elevated (1.1.0)
88 | winrm (~> 2.0)
89 | winrm-fs (~> 1.0)
90 | winrm-fs (1.3.4)
91 | erubi (~> 1.8)
92 | logging (>= 1.6.1, < 3.0)
93 | rubyzip (~> 2.0)
94 | winrm (~> 2.0)
95 |
96 | PLATFORMS
97 | ruby
98 |
99 | DEPENDENCIES
100 | kitchen-ansiblepush
101 | kitchen-docker
102 | kitchen-verifier-serverspec
103 | net-ssh
104 | serverspec
105 | test-kitchen
106 | thor
107 |
108 | BUNDLED WITH
109 | 1.16.0
110 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016 Adham Helal
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ansible-concourse
2 |
3 | [](https://travis-ci.org/ahelal/ansible-concourse)
4 |
5 | An easy way to deploy and manage a [Concourse CI](https://concourse-ci.org/) with a cluster of workers using ansible
6 |
7 | ## Requirements
8 |
9 | * Ansible 2.6 or higher
10 | * PostgreSQL I recommend [ansible postgresql role](https://github.com/ANXS/postgresql)
11 |
12 | Supported concourse:
13 |
14 | * v4.x
15 | * v5.x
16 |
17 | Supported platforms:
18 |
19 | * Ubuntu 16.04 and 18.04
20 | * MacOS (Early support. Accepting PRs)
21 | * Windows (not supported yet. Accepting PRs)
22 |
23 | Optional TLS termination
24 |
25 | * Use concourse web argument to configure TLS (recommended)
26 | * [ansible nginx role](https://github.com/AutomationWithAnsible/ansible-nginx)
27 |
28 | ## Overview
29 |
30 | I am a big fan of concourse. This role will install and manage concourse using Ansible. A more robust solution is to use Bosh
31 |
32 | ## Examples
33 |
34 | ### Single node
35 |
36 | ```yaml
37 | ---
38 | - name: Create Single node host
39 | hosts: ci.example.com
40 | become: True
41 | vars:
42 | # Set your own password and save it securely in vault
43 | concourse_local_users:
44 | - {user: "user1", pass: "pass1"}
45 | concourse_web_options:
46 | CONCOURSE_POSTGRES_DATABASE : "concourse"
47 | CONCOURSE_POSTGRES_HOST : "127.0.0.1"
48 | CONCOURSE_POSTGRES_PASSWORD : "conpass"
49 | CONCOURSE_POSTGRES_SSLMODE : "disable"
50 | CONCOURSE_POSTGRES_USER : "concourseci"
51 | # ********************* Example Keys (YOU MUST OVERRIDE THEM) *********************
52 | # This keys are demo keys. generate your own and store them safely i.e. ansible-vault
53 | # Check the key section on how to auto generate keys.
54 | # **********************************************************************************
55 | concourseci_key_session_public : ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6tKH.....
56 | concourseci_key_session_private : |
57 | -----BEGIN RSA PRIVATE KEY-----
58 | MIIEowIBAAKCAQEAurSh5kbUadGuUgHqm1ct6SUrqFkH5kyJNdOjHdWxoxCzw5I9
59 | ................................
60 | N1EQdIhtxo4mgHXjF/8L32SqinAJb5ErNXQQwT5k9G22mZkHZY7Y
61 | -----END RSA PRIVATE KEY-----
62 |
63 | concourseci_key_tsa_public : ssh-rsa AAAAB3NzaC1yc2EAAAADAQ......
64 | concourseci_key_tsa_private : |
65 | -----BEGIN RSA PRIVATE KEY-----
66 | MIIEogIBAAKCAQEAo3XY74qhdwY1Z8a5XnTbCjNMJu28CcEYJ1KJi1a8B143wKxM
67 | .........
68 | uPTcE+vQzvMV3lJo0CHTlNMo1JgHOO5UsFZ1cBxO7MZXCzChGE8=
69 | -----END RSA PRIVATE KEY-----
70 | concourseci_worker_keys :
71 | - public : ssh-rsa AAAAB3N.....
72 | private : |
73 | -----BEGIN RSA PRIVATE KEY-----
74 | MIIEpQIBAAKCAQEAylt9UCFnAkdhofItX6HQzx6r4kFeXgFu2b9+x87NUiiEr2Hi
75 | .......
76 | ZNJ69MjK2HDIBIpqFJ7jnp32Dp8wviHXQ5e1PJQxoaXNyubfOs1Cpa0=
77 | -----END RSA PRIVATE KEY-----
78 | roles:
79 | - { name: "postgresql", tags: "postgresql" }
80 | - { name: "ansible-concourse", tags: "concourse" }
81 | ```
82 |
83 | ```ìni
84 | [concourse-web]
85 | ci.example.com
86 | [concourse-worker]
87 | ci.example.com
88 | ```
89 |
90 | ## Breaking changes as of version v4.0.0
91 |
92 | As of version 4.x of this role the user management has changed to reflect changes in Concourse 4.x the new team auth https://concourse-ci.org/authentication.html.
93 |
94 | I would recommend reading the new authentication before proceeding. A new top level list can be used `concourse_local_users` to add local user.
95 | example
96 |
97 | ```yaml
98 | concourse_local_users:
99 | - user: "user1"
100 | pass: "pass1"
101 | - user: "user2"
102 | pass: "pass2"
103 | ```
104 |
105 | ## Clustered nodes 2x web & 4x worker
106 |
107 | In order to make a cluster of servers you can easily add the host to groups
108 |
109 | ```ini
110 | [concourse-web]
111 | ci-web01.example.com
112 | ci-web02.example.com
113 | [concourse-worker]
114 | ci-worker01.example.com
115 | ci-worker02.example.com
116 | ci-worker03.example.com
117 | ci-worker04.example.com
118 | ```
119 |
120 | You would also need to generate keys for workers check [key section](https://github.com/ahelal/ansible-concourse#keys)
121 |
122 | ## Configuration
123 |
124 | All command line options are now supported as of ansible-concourse version 4.x in *Web* and *worker* as a dictionary.
125 | **Note:** *if you are upgrade from a version prior to 3.0.0 you would need to accommodate for changes*
126 |
127 | The configuration is split between two dictionaries *concourse_web_options* and *concourse_worker_options* all key values defined will be exported as an environmental variable to concourse process. There are some ansible-concourse flags that can be defined outside `concourse_web_options` and `concourse_worker_options` fpr more info check defaults.yml
128 |
129 | ```yaml
130 | concourse_local_users:
131 | - {user: "user1", pass: "pass1"}
132 | - {user: "user2", pass: "pass2"}
133 | concourse_web_options :
134 | CONCOURSE_POSTGRES_DATABASE : "concourse"
135 | CONCOURSE_POSTGRES_HOST : "127.0.0.1"
136 | CONCOURSE_POSTGRES_PASSWORD : "NO_PLAIN_TEXT_USE_VAULT"
137 | CONCOURSE_POSTGRES_SSLMODE : "disable"
138 | CONCOURSE_POSTGRES_USER : "concourseci"
139 |
140 | concourse_worker_options :
141 | CONCOURSE_GARDEN_NETWORK_POOL : "10.254.0.0/22"
142 | CONCOURSE_GARDEN_MAX_CONTAINERS : 150
143 | ```
144 |
145 | To view all environmental options please check
146 | [web options](web_arguments.txt) and [worker options](worker_arguments.txt).
147 |
148 | ansible-concourse has some sane defaults defined `concourse_web_options_default` and `concourse_worker_options_default` in [default.yml](default.yml) those default will merge with `concourse_web_option` and `concourse_worker_option`. `concourse_web_option` and `concourse_worker_option`has higher precedence.
149 |
150 |
151 | ## Concourse versions
152 |
153 | This role supports installation of release candidate and final releases. Simply overriding **concourseci_version** with desired version.
154 |
155 | * Fpr [rc](https://github.com/concourse/bin/releases/). `concourseci_version : "vx.x.x-rc.xx"` that will install release candidate.
156 | * For [final release](https://github.com/concourse/concourse/releases). ```concourseci_version : "vx.x.x"```
157 |
158 | By default this role will try to have the latest stable release look at [defaults/main.yml](https://github.com/ahelal/ansible-concourse/blob/master/defaults/main.yml#L2-L3)
159 |
160 | ## Default variables
161 |
162 | Check [defaults/main.yml](/defaults/main.yml) for all bells and whistles.
163 |
164 | ## Keys
165 |
166 | **Warning** the role comes with default keys. This keys are used for demo only you should generate your own and store them **safely** i.e. ansible-vault
167 |
168 | You would need to generate 2 keys for web and one key for each worker node.
169 | An easy way to generate your keys to use a script in ```keys/key.sh``` or you can reuse the same keys for all workers.
170 |
171 | The bash script will ask you for the number of workers you require. It will then generate ansible compatible yaml files in ```keys/vars```
172 | You can than copy the content in your group vars or any other method you prefer.
173 |
174 | ## Managing teams
175 |
176 | This role supports Managing teams :
177 |
178 | *NOTE* if you use manage _DO NOT USE DEFAULT PASSWORD_ you should set your own password and save it securely in vault. or you can look it up from web options
179 |
180 |
181 | ```yaml
182 | concourseci_manage_teams : True
183 | ## User must be added first concourse_local_users
184 | concourseci_manage_credential_user : "api"
185 | concourseci_manage_credential_password : "apiPassword"
186 |
187 |
188 | concourseci_teams :
189 | - name: "team_1"
190 | state: "present"
191 | flags:
192 | local-user : user1
193 | - name: "team_2"
194 | state: "absent"
195 | - name: "team_3"
196 | state: "present"
197 | flags:
198 | # See [web options](web_arguments.txt) for how to integrate Concourse Web with GitHub for auth
199 | github-organization: ORG
200 | github-team: ORG:TEAM
201 | github-user: LOGIN
202 | - name: "team_4"
203 | state: "present"
204 | flags:
205 | no-really-i-dont-want-any-auth: ""
206 | - name: "x5"
207 | state: "absent"
208 | flags:
209 | local-user : user5
210 | ```
211 |
212 | The role supports all arguments passed to fly for more info `fly set-team --help`.
213 | *Please note if you delete a team you remove all the pipelines in that team*
214 |
215 | ## Auto scaling
216 |
217 | * Scaling out: Simply just add a new instance :)
218 | * Scaling in: You would need to drain the worker first by running `service concourse-worker stop`
219 |
220 | ## Vagrant demo
221 |
222 | You can use vagrant to spin a test machine.
223 |
224 | ```bash
225 | # Install postgresql role in test/helper_roles
226 | ./test/setup_roles.sh
227 | vagrant up
228 | ```
229 |
230 | The vagrant machine will have an IP of **192.168.50.150** you can access the web `http://192.168.50.150:8080`
231 |
232 | You can access the web and API on port 8080 with username **myuser** and **mypass**
233 |
234 | Once your done
235 |
236 | ```
237 | vagrant destroy
238 | ```
239 |
240 | ## Contribution
241 |
242 | Pull requests on GitHub are welcome on any issue.
243 |
244 | Thanks for all the [contrubtors](https://github.com/ahelal/ansible-concourse/graphs/contributors)
245 |
246 |
247 | ## TODO
248 |
249 | * Support pipeline upload
250 | * Full MacOS support
251 | * Add distributed cluster tests
252 | * Windows support
253 |
254 | ## License
255 |
256 | MIT
257 |
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure('2') do |config|
5 | config.vm.box = 'ubuntu/xenial64'
6 | config.vm.define 'vagrantci' do |vagrantci|
7 | end
8 |
9 | config.vm.network "private_network", ip: '192.168.50.150'
10 |
11 | config.vm.provision 'ansible' do |ansible|
12 | ansible.playbook = 'test/integration/simple/simple.yml'
13 | ansible.groups = {
14 | "concourse-web" => ["vagrantci"],
15 | "concourse-worker" => ["vagrantci"],
16 | }
17 |
18 | ansible.extra_vars = {
19 | CONCOURSE_EXTERNAL_URL_VAGRANT: "http://192.168.50.150:8080/"
20 | }
21 | end
22 | end
23 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | ./test/ansible.cfg
--------------------------------------------------------------------------------
/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Concourse version
3 | concourseci_version : "v5.4.0"
4 | ## Dir structure
5 | concourseci_base_dir : "/opt/concourseci"
6 | concourseci_bin_dir : "{{ concourseci_base_dir }}/bin"
7 | concourseci_worker_dir : "{{ concourseci_base_dir }}/worker"
8 | concourseci_ssh_dir : "{{ concourseci_base_dir }}/.ssh"
9 | concourseci_archive_dir : "{{ concourseci_base_dir }}/archive"
10 |
11 | ## Concourse log
12 | concourseci_log_dir : "/var/log/concourse"
13 | concourseci_log_worker : "{{ concourseci_log_dir }}/concourseci_worker.log"
14 | concourseci_log_web : "{{ concourseci_log_dir }}/concourseci_web.log"
15 |
16 | ## Concourse User
17 | concourseci_user : "concourseci"
18 | concourseci_group : "concourseci"
19 |
20 | ## Amount of tries to retire running worker when process management stops daemon
21 | concourseci_worker_retire_tries : 10
22 |
23 | ## Timeout in seconds defining how long do we wait for worker process exit after retire attempt
24 | concourseci_worker_process_exit_timeout : 150
25 |
26 | ## New config dictionary based.
27 | ## This dictionary is merged with 'concourse_web_options' dictionary, make your overrides there!
28 | ## e.g.
29 | ## concourse_web_options:
30 | ## CONCOURSE_BIND_IP : "10.0.0.1"
31 | ##
32 | concourse_web_options : { }
33 |
34 | concourse_web_options_default :
35 | CONCOURSE_BIND_IP : "0.0.0.0"
36 | CONCOURSE_TSA_HOST : "{{ groups.get(concourseci_web_group, [''])[0] }}" # By default we pick the first host in web group
37 | CONCOURSE_TSA_BIND_IP : "0.0.0.0"
38 | CONCOURSE_TSA_BIND_PORT : "2222"
39 | CONCOURSE_TSA_AUTHORIZED_KEYS : "{{ concourseci_ssh_dir }}/tsa_authorization"
40 | CONCOURSE_TSA_HOST_KEY : "{{ concourseci_ssh_dir }}/tsa"
41 | CONCOURSE_SESSION_SIGNING_KEY : "{{ concourseci_ssh_dir }}/session_signing"
42 |
43 | ##
44 | ## local users that will be added on ATC startup, i.e.
45 | ## concourse_local_users:
46 | ## - user: "user1", pass: "pass1"
47 | ## - user: "user2", pass: "pass2"
48 | ##
49 | concourse_local_users : []
50 |
51 | ## This dictionary is merged with 'concourse_worker_options' dictionary, make your overrides there!
52 | ## e.g.
53 | ## concourse_worker_options:
54 | ## CONCOURSE_NAME : "my-worker"
55 | ##
56 | concourse_worker_options : { }
57 |
58 | concourse_worker_options_default :
59 | CONCOURSE_WORK_DIR : "{{ concourseci_worker_dir }}"
60 | CONCOURSE_TSA_WORKER_PRIVATE_KEY : "{{ concourseci_ssh_dir }}/worker"
61 | CONCOURSE_SESSION_SIGNING_KEY : "{{ concourseci_ssh_dir }}/session_signing"
62 | CONCOURSE_TSA_HOST_KEY : "{{ concourseci_ssh_dir }}/tsa"
63 | CONCOURSE_TSA_HOST : "{{ concourse_web_options['CONCOURSE_TSA_HOST'] | default(concourse_web_options_default['CONCOURSE_TSA_HOST']) }}"
64 | CONCOURSE_TSA_PORT : "{{ concourse_web_options['CONCOURSE_TSA_BIND_PORT'] | default(concourse_web_options_default['CONCOURSE_TSA_BIND_PORT']) }}"
65 | CONCOURSE_TSA_PUBLIC_KEY : "{{ concourse_web_options['CONCOURSE_TSA_HOST_KEY'] | default(concourse_web_options_default['CONCOURSE_TSA_HOST_KEY']) }}.pub"
66 |
67 |
68 | ## Ansible Groups to form clusters
69 | concourseci_web_group : "concourse-web"
70 | concourseci_worker_group : "concourse-worker"
71 |
72 | ## See docs https://concourse-ci.org/concourse-web.html#resource-defaults
73 | ## The value will land into the YAML file as is - after the usual string
74 | ## interpolation is done.
75 | ## The location of the file can be overridden by setting the appropriate
76 | ## environment variable for the Concourse web node(s), e.g.:
77 | ##
78 | ## concourse_web_options:
79 | ## CONCOURSE_BASE_RESOURCE_TYPE_DEFAULTS: '/some/path/to/defaults.yml'
80 | ##
81 | concourseci_web_resource_type_defaults : { }
82 |
83 | ## Management
84 | concourseci_manage_url : "{{ concourse_web_options_combined['CONCOURSE_EXTERNAL_URL'] | default('http://127.0.0.1:8080') }}"
85 |
86 | # ## Manage creds
87 | concourseci_manage_credential_user : "{{ concourse_local_users[0].user }}"
88 | concourseci_manage_credential_password : "{{ concourse_local_users[0].pass }}"
89 |
90 | ### Management Teams
91 | concourseci_manage_teams : False
92 | concourseci_teams : []
93 |
94 | ### Management Pipelines ( still not working )
95 | concourseci_manage_pipelines : False
96 |
97 | ## Reboot when updating kernel (we disable this for test :( on docker)
98 | concourseci_reboot : True
99 |
100 | ## SSH
101 | concourseci_worker_position : "{{ groups[concourseci_worker_group].index(inventory_hostname)| default(0) }}"
102 | concourseci_key_worker_public : "{{ concourseci_worker_keys[concourseci_worker_position | int ].public}}"
103 | concourseci_key_worker_private : "{{ concourseci_worker_keys[concourseci_worker_position | int ].private}}"
104 |
105 | # temp solution to systemV issue and ansible :(
106 | concourse_ignore_errors : "{{ (ansible_lsb['codename'] | default('')) == 'xenial' }}"
107 |
108 | concourse_web_rbac_config:
109 | CONCOURSE_CONFIG_RBAC: "{{concourseci_base_dir}}/bin/rbac_config.yml"
110 |
--------------------------------------------------------------------------------
/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: restart concourse-web
4 | debug:
5 | msg="Notifier concourse-web restart"
6 | changed_when: True
7 | notify:
8 | - restart concourse-web linux
9 | - restart concourse-web macosx
10 |
11 | - name: restart concourse-worker
12 | debug:
13 |     msg="Notifier concourse-worker restart"
14 | changed_when: True
15 | notify:
16 | - restart concourse-worker linux
17 | - restart concourse-worker macosx
18 |
19 | - name: restart concourse-web linux
20 | service:
21 | name="concourse-web"
22 | state="restarted"
23 | when: "groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group] and ansible_system == 'Linux'"
24 | ignore_errors: "{{ concourse_ignore_errors }}"
25 |
26 | - name: restart concourse-web macosx
27 | shell: launchctl stop {{ concourseci_launchd_web }} && launchctl start {{ concourseci_launchd_web }}
28 | when: "groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group] and ansible_system == 'Darwin'"
29 |
30 | - name: restart concourse-worker linux
31 | service:
32 | name="concourse-worker"
33 | state="restarted"
34 | when: "groups[concourseci_worker_group] is defined and inventory_hostname in groups[concourseci_worker_group]"
35 | ignore_errors: "{{ concourse_ignore_errors }}"
36 |
37 | - name: restart concourse-worker macosx
38 | shell: launchctl stop {{ concourseci_launchd_worker }} && launchctl start {{ concourseci_launchd_worker }}
39 | when: "groups[concourseci_worker_group] is defined and inventory_hostname in groups[concourseci_worker_group] and ansible_system == 'Darwin'"
40 |
41 | - name: reload systemd
42 | command: systemctl daemon-reload
43 | when: ansible_service_mgr == "systemd"
44 |
--------------------------------------------------------------------------------
/keys/key.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Generate SSH key pairs for concourse workers/web and emit them as Ansible
# var files (vars/workers_ssh.yml and vars/web_ssh.yml).
set -e

# Workers to generate ('read -p' is a bashism; printf+read is POSIX sh)
printf "How many workers do you want to generate keys for ? : "
read workers

MY_PATH="`dirname \"$0\"`" # relative
DIR="`( cd \"$MY_PATH\" && pwd )`" # absolutized and normalized
OUTPUT_DIR="${DIR}/vars"
mkdir -p "${OUTPUT_DIR}"
temp_dir=$(mktemp -d)
worker_yaml_output="${OUTPUT_DIR}/workers_ssh.yml"
web_yaml_output="${OUTPUT_DIR}/web_ssh.yml"

# Python helper that indents each stdin line so private keys nest under a
# YAML 'private: |' block scalar. Spaces only: the old version emitted tabs,
# which are invalid YAML indentation. sys.stdout.write keeps this compatible
# with both Python 2 and 3 (the old bare 'print ""' was Python-2 only).
python_indent='import sys
for line in sys.stdin:
    sys.stdout.write("      %s" % line)
sys.stdout.write("\n")
'

cd "${temp_dir}"
# Remove previous files
rm -rf "${worker_yaml_output}"
rm -rf "${web_yaml_output}"
# NOTE: plain echo of "\t..." is non-portable across shells and produced
# literal tabs in the YAML on some of them -- use printf with spaces instead.
printf '%s\n' "concourseci_worker_keys:" > "${worker_yaml_output}"
for i in $(seq 1 "$workers"); do
    ssh-keygen -b 4096 -f "id_${i}_rsa" -N ""
    public="$(cat "id_${i}_rsa.pub")"
    private="$(cat "id_${i}_rsa")"
    printf '%s\n' "  - public : ${public}" >> "${worker_yaml_output}"
    printf '%s\n' "    private: |" >> "${worker_yaml_output}"
    echo "${private}" | python -c "${python_indent}" >> "${worker_yaml_output}"
done

ssh-keygen -b 4096 -f id_session_rsa -N ""
public="$(cat id_session_rsa.pub)"
private="$(cat id_session_rsa)"
printf '%s\n' "concourseci_key_session_public : ${public}" >> "${web_yaml_output}"
printf '%s\n' "concourseci_key_session_private : |" >> "${web_yaml_output}"
echo "${private}" | python -c "${python_indent}" >> "${web_yaml_output}"

ssh-keygen -b 4096 -f id_web_rsa -N ""
public="$(cat id_web_rsa.pub)"
private="$(cat id_web_rsa)"
printf '%s\n' "concourseci_key_tsa_public : ${public}" >> "${web_yaml_output}"
printf '%s\n' "concourseci_key_tsa_private : |" >> "${web_yaml_output}"
echo "${private}" | python -c "${python_indent}" >> "${web_yaml_output}"

rm -rf "${temp_dir}"
--------------------------------------------------------------------------------
/meta/main.yml:
--------------------------------------------------------------------------------
---
# Galaxy metadata for the Concourse CI role.
galaxy_info:
  author: Adham Helal
  description: Install Concourse CI
  license: MIT
  # Quoted: an unquoted 2.0 is loaded as a float and loses trailing zeros.
  min_ansible_version: "2.0"
  platforms:
    - name: Ubuntu
      versions:
        - trusty
        - xenial
    - name: MacOSX
      versions:
        # Quoted: unquoted 10.12 would be parsed as the float 10.12.
        - "10.12"
  galaxy_tags:
    - CI
dependencies: []
--------------------------------------------------------------------------------
/tasks/auth.yml:
--------------------------------------------------------------------------------
---

# Libraries required by the htpasswd module below (Debian family only).
- name: auth | Install htpasswd dependencies
  apt:
    name: "python-passlib,python-bcrypt"
    state: "present"
  when: ansible_os_family == 'Debian'

- name: auth | Check that one local user is added
  fail:
    # Native YAML args instead of the old inline msg="..." k=v string.
    msg: "At least one local user is needed"
  when: concourse_local_users | length == 0

- name: auth | Create bcrypt password for local users
  htpasswd:
    path: "{{ concourseci_base_dir }}/concourse_users"
    name: "{{ item.user }}"
    password: "{{ item.pass }}"
    crypt_scheme: "bcrypt"
    owner: root
    # Quoted: an unquoted 0640 is read as the octal integer 416 by YAML.
    mode: "0640"
  with_items: "{{ concourse_local_users }}"
  loop_control:
    # Only the user name is shown in output, not the password.
    label: "{{ item.user }}"

- name: auth | Get bcrypt passwords
  shell: "cat {{ concourseci_base_dir }}/concourse_users"
  changed_when: false
  register: list_users

# Expose the user:hash pairs as a single comma-separated env option.
- name: auth | Add concourse_add_local_user
  set_fact:
    concourse_facts_local_users:
      CONCOURSE_ADD_LOCAL_USER: "{{ list_users.stdout_lines | join(',') }}"

- name: auth | Set empty dict concourse_facts_main_users
  set_fact:
    concourse_facts_main_users: {}

# Unless the caller pinned CONCOURSE_MAIN_TEAM_LOCAL_USER explicitly, grant
# every local user access to the main team.
- name: auth | Add all local users to concourse main team if CONCOURSE_MAIN_TEAM_LOCAL_USER is not defined
  set_fact:
    concourse_facts_main_users:
      CONCOURSE_MAIN_TEAM_LOCAL_USER: "{% for local_user in concourse_local_users %}{{local_user.user}}{% if not loop.last %},{% endif %}{% endfor %}"
  when: concourse_web_options['CONCOURSE_MAIN_TEAM_LOCAL_USER'] is not defined
46 |
--------------------------------------------------------------------------------
/tasks/checks.yml:
--------------------------------------------------------------------------------
---

- name: checks | Fail if not running 64bit
  fail:
    msg: " Only 64 bit arch is supported."
  when: ansible_userspace_bits != '64'

- name: checks | Fail if using old style config < 3.0.0
  fail:
    msg: "Configuration for this role has changed. Please read the Readme.md"
  when: CONCOURSE_WEB_BIND_IP is defined or CONCOURSE_WEB_TSA_HOST is defined

# Releases are split at v5.0.0 (packaging changed from a single binary to a
# tar.gz archive). BUG FIX: the old condition 'is version("v4.2.1", "<=")'
# left versions v4.2.2..v4.x with no download URL at all; '< v5.0.0' closes
# that gap.
- name: checks | Set downloaded url for concourse version 4.x or lower
  set_fact:
    concourseci_download_url: "{{ concourseci_download_fr_url }}/{{ version_4_filename }}"
  when: concourseci_version is version('v5.0.0', '<')

- name: checks | Set downloaded url for concourse version 5.x and above
  set_fact:
    concourseci_download_url: "{{ concourseci_download_fr_url }}/{{ version_5_filename }}"
  when: concourseci_version is version('v5.0.0', '>=')
--------------------------------------------------------------------------------
/tasks/common_nix.yml:
--------------------------------------------------------------------------------
---

# Point CONCOURSE_BASE_RESOURCE_TYPE_DEFAULTS at the generated file only
# when the user actually supplied resource-type defaults.
- name: common | Set default for 'CONCOURSE_BASE_RESOURCE_TYPE_DEFAULTS'
  set_fact:
    concourse_web_options_resource_type_defaults_file_path:
      CONCOURSE_BASE_RESOURCE_TYPE_DEFAULTS: '{{ concourseci_base_dir }}/resource-type-defaults.yml'
  # BUG FIX: 'when' is already an implicit Jinja context; wrapping the test
  # in "{{ }}" is deprecated and triggers warnings/errors on modern Ansible.
  when: concourseci_web_resource_type_defaults | dict2items | length > 0

# Later dicts win on conflict: user-supplied options override the defaults,
# and the computed auth facts override both.
- name: common | Combine dictionary options (web)
  set_fact:
    concourse_web_options_combined: >-
      {{
        concourse_web_options_default
        | combine(concourse_web_options_resource_type_defaults_file_path | default({}))
        | combine(concourse_web_options)
        | combine(concourse_facts_local_users)
        | combine(concourse_facts_main_users)
      }}
  when: "groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group]"

- name: common | Combine option for concourse_rbac
  set_fact:
    concourse_web_options_combined: '{{ concourse_web_options_combined | combine(concourse_web_rbac_config) }}'
  when: "groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group] and concourse_rbac is defined"

- name: common | Combine dictionary options (worker)
  set_fact:
    concourse_worker_options_combined: '{{ concourse_worker_options_default | combine(concourse_worker_options) }}'
  when: "groups[concourseci_worker_group] is defined and inventory_hostname in groups[concourseci_worker_group]"

- name: common | Ensure Concourse group exists
  group:
    name: '{{ concourseci_group }}'
    state: present

- name: common | Ensure Concourse user exists
  user:
    home: '{{ concourseci_base_dir }}'
    name: '{{ concourseci_user }}'
    group: '{{ concourseci_group }}'
    system: yes

- name: common | Ensure Concourse directory exists
  file:
    path: '{{ item }}'
    state: directory
    owner: '{{ concourseci_user }}'
    group: '{{ concourseci_group }}'
    mode: '0750'
  with_items:
    - '{{ concourseci_bin_dir }}'
    - '{{ concourseci_log_dir }}'
    - '{{ concourseci_archive_dir }}'
    - '{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}'

- name: common | Ensure Concourse .ssh dir
  file:
    path: '{{ concourseci_ssh_dir }}'
    state: directory
    mode: '0750'
    owner: '{{ concourseci_user }}'
    group: '{{ concourseci_group }}'
63 |
--------------------------------------------------------------------------------
/tasks/install_nix_4.yml:
--------------------------------------------------------------------------------
---

# v4 and earlier ship as a single self-contained binary.
- name: Install | Check Concourse archive stat
  stat:
    path: "{{ concourseci_bin_dir }}/{{ concourseci_binary_file }}"
  register: concourseci_binary_file_stat

- name: Install | Download Concourse CI
  get_url:
    url: "{{ concourseci_download_url }}"
    dest: "{{ concourseci_bin_dir }}/{{ concourseci_binary_file }}"
    timeout: 200
    # Quoted: an unquoted 0755 is read as an octal integer by YAML.
    mode: "0755"
    tmp_dest: "{{ concourseci_bin_dir }}"
    # NOTE(review): TLS verification is disabled for this binary download.
    # Unless a proxy/self-signed-CA issue forces this off, it should be
    # enabled -- confirm before changing, as it may break existing setups.
    validate_certs: false
  become_user: "{{ concourseci_user }}"
  register: concourseci_get
  when: not concourseci_binary_file_stat.stat.exists
  notify:
    - restart concourse-worker
    - restart concourse-web

- name: Install | Concourse link binary
  file:
    src: "{{ concourseci_bin_dir }}/{{ concourseci_binary_file }}"
    dest: "{{ concourseci_bin_dir }}/concourse"
    state: "link"
  notify:
    - restart concourse-worker
    - restart concourse-web

# Best-effort: filesystems without xattr support make this fail, hence
# ignore_errors.
- name: Install | Allow Concourse to bind to privileged ports
  capabilities:
    path: "{{ concourseci_bin_dir }}/{{ concourseci_binary_file }}"
    capability: cap_net_bind_service+ep
    state: present
  ignore_errors: true
38 |
--------------------------------------------------------------------------------
/tasks/install_nix_5.yml:
--------------------------------------------------------------------------------
---

# v5+ ships as a tar.gz archive containing bin/concourse.
- name: Install | Check Concourse archive stat
  stat:
    path: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}.tar.gz"
  register: concourseci_binary_file_stat

- name: common | Ensure Concourse user exists
  user:
    home: '{{ concourseci_base_dir }}'
    name: '{{ concourseci_user }}'
    group: '{{ concourseci_group }}'
    system: yes

- name: Install | Download Concourse CI
  get_url:
    url: "{{ concourseci_download_url }}"
    dest: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}.tar.gz"
    timeout: 200
    # Quoted: an unquoted 0755 is read as an octal integer by YAML.
    mode: "0755"
    tmp_dest: "{{ concourseci_archive_dir }}"
    # NOTE(review): TLS verification is disabled for this download; confirm
    # whether it can be enabled in your environment.
    validate_certs: false
  become_user: "{{ concourseci_user }}"
  register: concourseci_get
  when: not concourseci_binary_file_stat.stat.exists
  notify:
    - restart concourse-worker
    - restart concourse-web

- name: Install | untar the archive
  unarchive:
    src: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}.tar.gz"
    dest: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}"
    creates: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}/bin/concourse"
    remote_src: yes
    extra_opts:
      - --strip-components=1

- name: Install | Concourse link binary
  file:
    src: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}/bin/concourse"
    dest: "{{ concourseci_bin_dir }}/concourse"
    state: "link"
  notify:
    - restart concourse-worker
    - restart concourse-web

- name: Install | Allow Concourse to bind to privileged ports
  capabilities:
    # BUG FIX: the old path ({{ concourseci_bin_dir }}/{{ concourseci_binary_file }})
    # does not exist in the v5 layout -- the real binary sits inside the
    # unpacked archive, which is what the symlink above points at.
    path: "{{ concourseci_archive_dir }}/{{ concourseci_binary_file }}/bin/concourse"
    capability: cap_net_bind_service+ep
    state: present
  ignore_errors: true
54 |
--------------------------------------------------------------------------------
/tasks/kernel_update.yml:
--------------------------------------------------------------------------------
---

# Install the HWE kernel on older Ubuntu releases, then reboot and wait for
# the host to come back (only when the package changed and rebooting is
# allowed via concourseci_reboot).
- name: kernel update | Install latest kernel
  apt:
    name: "linux-generic-lts-vivid"
    state: "present"
  register: kernel_update
  when: ansible_os_family == 'Debian'

- name: kernel update | Reboot now
  shell: sleep 2 && shutdown -r now "Ansible updates triggered"
  async: 1
  poll: 0
  # List entries are AND-ed, equivalent to the original single expression.
  when:
    - kernel_update is changed
    - concourseci_reboot

- name: kernel update | Wait for ssh port to open again
  wait_for:
    port: "{{ apt_port | default('22') }}"
    host: "{{ apt_hostname | default(inventory_hostname) }}"
    delay: 30
    timeout: 180
    search_regex: "OpenSSH"
  connection: local
  become: false
  when:
    - kernel_update is changed
    - concourseci_reboot

- name: kernel update | pause a little to ensure everything is running
  pause:
    seconds: "5"
  when:
    - kernel_update is changed
    - concourseci_reboot
31 |
--------------------------------------------------------------------------------
/tasks/main.yml:
--------------------------------------------------------------------------------
---

- name: Do some checks
  import_tasks: checks.yml

- name: Kernel upgrade
  import_tasks: kernel_update.yml
  when: "ansible_os_family == 'Debian' and ( ansible_distribution_release == 'precise' or ansible_distribution_release == 'trusty')"

- name: Manage user auth web node
  import_tasks: auth.yml
  when: "groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group] and (ansible_system == 'Linux' or ansible_system == 'Darwin')"

- name: Common tasks
  import_tasks: common_nix.yml
  when: "ansible_system == 'Linux' or ansible_system == 'Darwin'"

- name: Install tasks for concourse version <= 4
  import_tasks: install_nix_4.yml
  # BUG FIX: the OS check was written as a quoted string literal inside the
  # expression -- ("ansible_system == ...") -- and a non-empty string is
  # always truthy, so the OS filter was never applied.
  when: (ansible_system == 'Linux' or ansible_system == 'Darwin') and concourseci_version is version('v4.2.1', '<=')

- name: Install tasks for concourse version >= 5
  import_tasks: install_nix_5.yml
  # BUG FIX: same always-truthy quoted-string defect as above.
  when: (ansible_system == 'Linux' or ansible_system == 'Darwin') and concourseci_version is version('v5.0.0', '>=')

- name: Manage web node
  import_tasks: web_nix.yml
  when: "groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group] and (ansible_system == 'Linux' or ansible_system == 'Darwin')"

- name: Manage worker node
  import_tasks: worker_nix.yml
  when: "groups[concourseci_worker_group] is defined and inventory_hostname in groups[concourseci_worker_group] and (ansible_system == 'Linux' or ansible_system == 'Darwin')"

# Run any pending restart handlers before managing teams/pipelines so the
# web API is up with the latest configuration.
- name: Flush handlers
  meta: flush_handlers

- name: Manage Teams & Pipelines
  import_tasks: manage/main.yml
  when: (concourseci_manage_pipelines or concourseci_manage_teams) and groups[concourseci_web_group] is defined and inventory_hostname in groups[concourseci_web_group] and (ansible_system == 'Linux' or ansible_system == 'Darwin')
  tags: ["manage"]
41 |
--------------------------------------------------------------------------------
/tasks/manage/main.yml:
--------------------------------------------------------------------------------
---

# Fetch the fly CLI matching the running web node (per-version filename so
# upgrades fetch a fresh binary).
- name: manage | main | Download fly binary
  get_url:
    url: "{{ concourseci_manage_url }}/api/v1/cli?arch=amd64&platform={{ ansible_system | lower }}"
    dest: "{{ concourseci_bin_dir }}/fly-{{ concourseci_version }}"
    # Quoted: an unquoted 0755 is read as an octal integer by YAML.
    mode: "0755"

- name: manage | main | Link fly binary
  file:
    src: "{{ concourseci_bin_dir }}/fly-{{ concourseci_version }}"
    dest: "{{ concourseci_bin_dir }}/fly"
    state: link

- import_tasks: teams.yml
16 |
--------------------------------------------------------------------------------
/tasks/manage/teams.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: manage | team | login
4 | command: "{{ concourse_fly }} login -n main -c {{ concourseci_manage_url }} -u {{ concourseci_manage_credential_user }} -p {{ concourseci_manage_credential_password }}"
5 | changed_when: false
6 |
7 | - name: manage | team | Get registered teams
8 | shell: "{{ concourse_fly }} teams | grep -vwE 'main' | awk '{$1=$1;print}'"
9 | register: registered_teams
10 | changed_when: false
11 |
12 | - name: manage | team | Extract teams that must be present and absent
13 | set_fact:
14 | teams_present : "{{ concourseci_teams | selectattr('state', 'equalto', 'present') | list | map(attribute='name') | list }}"
15 | teams_absent : "{{ concourseci_teams | selectattr('state', 'equalto', 'absent') | list | map(attribute='name') | list }}"
16 |
17 | - name: manage | team | Compile list of diff of teams to be added and removed
18 | set_fact:
19 | teams_to_add : "{{ teams_present | difference(registered_teams.stdout_lines) }}"
20 | teams_to_remove : "{{ teams_absent | intersect(registered_teams.stdout_lines) }}"
21 |
22 | - name: manage | team | Print teams that will be added
23 | debug:
24 | var: teams_to_add
25 | when: teams_to_add | length > 0
26 |
27 | - name: manage | team | Ensure teams that are present exists
28 | shell : "yes | {{ concourse_fly }} set-team -n {{ item }}{% set attr=concourseci_teams | selectattr('name', 'equalto', item) | list | map(attribute='flags') | list | first%} {% for key, value in attr.items() %} --{{key}}={{value}}{% endfor %}"
29 | with_items: "{{ teams_to_add }}"
30 | when: teams_to_add | length > 0
31 |
32 | - name: manage | team | Print teams that will be removed
33 | debug:
34 | var: teams_to_remove
35 | when: teams_to_remove | length > 0
36 |
37 | - name: manage | team | Ensure teams that are absent does not exists
38 | shell: "echo {{ item }} | {{ concourse_fly }} destroy-team -n {{ item }}"
39 | with_items: "{{ teams_to_remove }}"
40 | when: teams_to_remove | length > 0
41 |
--------------------------------------------------------------------------------
/tasks/web_nix.yml:
--------------------------------------------------------------------------------
---
# Converted from the legacy key=value module-arg strings to native YAML
# args for consistency with worker_nix.yml; octal modes quoted.

- name: web | Concourse web start script config
  template:
    src: "concourse-web.j2"
    dest: "{{ concourseci_bin_dir }}/concourse-web"
    mode: "0755"
  notify:
    - restart concourse-web

- name: web | Copy session public key
  copy:
    content: "{{ concourseci_key_session_public }}"
    dest: "{{ concourse_web_options_combined['CONCOURSE_SESSION_SIGNING_KEY'] }}.pub"
    mode: "0644"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  notify:
    - restart concourse-web

- name: web | Copy session private key
  copy:
    content: "{{ concourseci_key_session_private }}"
    dest: "{{ concourse_web_options_combined['CONCOURSE_SESSION_SIGNING_KEY'] }}"
    mode: "0600"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  # Private key material must never appear in logs.
  no_log: true
  notify:
    - restart concourse-web

- name: web | Copy tsa private key
  copy:
    content: "{{ concourseci_key_tsa_private }}"
    dest: "{{ concourse_web_options_combined['CONCOURSE_TSA_HOST_KEY'] }}"
    mode: "0600"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  no_log: true
  notify:
    - restart concourse-web

# Each worker's public key is added to the TSA authorized_keys file.
- name: web | Create authorized key for tsa
  authorized_key:
    user: "{{ concourseci_user }}"
    key: "{{ item.public }}"
    path: "{{ concourse_web_options_combined['CONCOURSE_TSA_AUTHORIZED_KEYS'] }}"
    manage_dir: no
  with_items: "{{ concourseci_worker_keys }}"
  notify:
    - restart concourse-web

- name: web | Write RBAC_CONFIG
  copy:
    content: "{{ concourse_rbac | to_yaml }}"
    dest: "{{ concourse_web_options_combined['CONCOURSE_CONFIG_RBAC'] }}"
    mode: "0755"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  when: "concourse_web_options_combined['CONCOURSE_CONFIG_RBAC'] is defined"
  notify:
    - restart concourse-web

- name: web | Templating concourse web start init script (linux)
  template:
    src: "concourse-web-init.sh.j2"
    dest: "/etc/init.d/concourse-web"
    mode: "0755"
  notify:
    - reload systemd
    - restart concourse-web
  when: "ansible_system == 'Linux'"

- name: web | Ensure Concourse web is running and Starts on boot (linux)
  service:
    name: "concourse-web"
    state: "started"
    enabled: true
  ignore_errors: "{{ concourse_ignore_errors }}"
  when: "ansible_system == 'Linux'"

- name: web | Templating concourse web start launchd plist (macOSx)
  template:
    src: "{{ concourseci_launchd_web }}.plist.j2"
    dest: "{{ concourseci_launchd_path }}/{{ concourseci_launchd_web }}.plist"
    mode: "0750"
    owner: "root"
    group: "wheel"
    # plutil syntax-checks the rendered plist before it is installed.
    validate: 'plutil %s'
  notify:
    - restart concourse-web
  register: launchd_template
  when: "ansible_system == 'Darwin'"

- name: web | Ensure Concourse web is running and Starts on boot (macOSx)
  shell: launchctl load -w {{ concourseci_launchd_path }}/{{ concourseci_launchd_web }}.plist && launchctl start {{ concourseci_launchd_web }}
  changed_when: false  # since no way to detect if it started or not
  when: "ansible_system == 'Darwin'"

- name: web | Create base resource type defaults configuration file
  copy:
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
    dest: "{{ concourse_web_options_combined['CONCOURSE_BASE_RESOURCE_TYPE_DEFAULTS'] }}"
    content: "{{ concourseci_web_resource_type_defaults | to_nice_yaml(indent=2) }}"
    mode: '644'
  # BUG FIX: 'when' is an implicit Jinja context; wrapping the test in
  # "{{ }}" is deprecated and warns/errors on modern Ansible.
  when: concourseci_web_resource_type_defaults | dict2items | length > 0
  notify:
    - restart concourse-web
110 |
--------------------------------------------------------------------------------
/tasks/worker_nix.yml:
--------------------------------------------------------------------------------
---

- name: worker | Concourse worker start script
  template:
    src: "concourse-worker.j2"
    dest: "{{ concourseci_bin_dir }}/concourse-worker"
    # Quoted: unquoted octal literals are read as plain integers by YAML.
    mode: "0755"
  notify:
    - restart concourse-worker

- name: worker | Copy session public key
  copy:
    content: "{{ concourseci_key_session_public }}"
    dest: "{{ concourse_worker_options_combined['CONCOURSE_SESSION_SIGNING_KEY'] }}.pub"
    mode: "0644"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  notify:
    - restart concourse-worker

- name: worker | Copy tsa public key
  copy:
    content: "{{ concourseci_key_tsa_public }}"
    dest: "{{ concourse_worker_options_combined['CONCOURSE_TSA_HOST_KEY'] }}.pub"
    mode: "0644"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  notify:
    - restart concourse-worker

- name: worker | Copy worker public key
  copy:
    content: "{{ concourseci_key_worker_public }}"
    dest: "{{ concourse_worker_options_combined['CONCOURSE_TSA_WORKER_PRIVATE_KEY'] }}.pub"
    mode: "0644"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"

- name: worker | Copy worker private key
  copy:
    content: "{{ concourseci_key_worker_private }}"
    dest: "{{ concourse_worker_options_combined['CONCOURSE_TSA_WORKER_PRIVATE_KEY'] }}"
    mode: "0600"
    owner: "{{ concourseci_user }}"
    group: "{{ concourseci_group }}"
  # Private key material must never appear in logs.
  no_log: true
  notify:
    - restart concourse-worker

- name: Worker | Concourse worker start init script (linux)
  template:
    src: "concourse-worker-init.sh.j2"
    dest: "/etc/init.d/concourse-worker"
    mode: "0755"
  notify:
    - reload systemd
    - restart concourse-worker
  when: "ansible_system == 'Linux'"

- name: Worker | Ensure Concourse worker is running and Starts on boot (linux)
  service:
    name: "concourse-worker"
    state: "started"
    enabled: true
  ignore_errors: "{{ concourse_ignore_errors }}"
  when: "ansible_system == 'Linux'"

- name: worker | Templating concourse worker start launchd plist (macOSx)
  template:
    src: "{{ concourseci_launchd_worker }}.plist.j2"
    dest: "{{ concourseci_launchd_path }}/{{ concourseci_launchd_worker }}.plist"
    mode: "0750"
    owner: "root"
    group: "wheel"
    # plutil syntax-checks the rendered plist before it is installed.
    validate: 'plutil %s'
  notify:
    - restart concourse-worker
  register: launchd_template
  when: "ansible_system == 'Darwin'"

- name: worker | Ensure Concourse worker is running and Starts on boot (macOSx)
  shell: launchctl load -w {{ concourseci_launchd_path }}/{{ concourseci_launchd_worker }}.plist && launchctl start {{ concourseci_launchd_worker }}
  changed_when: false  # since no way to detect if it started or not
  when: "ansible_system == 'Darwin'"
85 |
--------------------------------------------------------------------------------
/templates/com.github.ahelal.concourse.web.plist.j2:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.github.ahelal.concourse.web</string>
    <key>Program</key>
    <string>/opt/concourseci/bin/concourse-web</string>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
--------------------------------------------------------------------------------
/templates/com.github.ahelal.concourse.worker.plist.j2:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>Label</key>
    <string>com.github.ahelal.concourse.worker</string>
    <key>Program</key>
    <string>/opt/concourseci/bin/concourse-worker</string>
    <key>RunAtLoad</key>
    <true/>
</dict>
</plist>
--------------------------------------------------------------------------------
/templates/concourse-web-init.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ### BEGIN INIT INFO
3 | # Provides: concourse-web
4 | # Required-Start: $local_fs $network $remote_fs $syslog
5 | # Required-Stop: $local_fs $remote_fs $syslog
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: concourse web
9 | # Description: This script starts and stops the concourse web service daemon
10 | ### END INIT INFO
11 | # {{ ansible_managed }}
12 |
13 | NAME="ConcourseWeb"
14 | DESC="Concourse-Web"
15 |
16 | RUN_AS_USER="{{ concourseci_user }}"
17 | RUN_AS_GROUP="{{ concourseci_group }}"
18 |
19 | BASE_DIR="{{ concourseci_bin_dir }}"
20 | GREP_NAME="{{ concourseci_bin_dir }}/concourse web"
21 |
22 | RUN_CMD="{{ concourseci_bin_dir }}/concourse-web"
23 |
24 | ATTEMPTS_TO_CHECK_PID=4
25 |
# Echo the PID(s) of the running concourse web process; empty if not running.
PROG_PID() {
    check_prog=$(ps aux | grep -e "${GREP_NAME}" | grep -v grep | awk '{ print $2 }' )
    echo ${check_prog}
}

# Sleep ATTEMPTS_TO_CHECK_PID seconds, printing a dot per second as feedback.
SLEEP_FOR_WHILE(){
    for i in $(seq ${ATTEMPTS_TO_CHECK_PID}); do
        sleep 1
        echo -n "."
    done
}
37 |
# Start the web daemon (no-op if already running); verifies via PID check.
start() {
    PID=$(PROG_PID)
    if [ -n "${PID}" ] ; then
        echo "${NAME} is already running (PID: ${PID})"
    else
        echo -n "Starting $NAME "

        # Start quiet background with uid and gid.
        # BUG FIX: the old '|| echo "[ FAILED ]" || exit 1' never exited,
        # because echo succeeds; group the failure handling instead.
        start-stop-daemon --start --background --name ${NAME} --chdir ${BASE_DIR} --chuid ${RUN_AS_USER} \
            --group ${RUN_AS_GROUP} --exec ${RUN_CMD} || { echo "[ FAILED ]"; exit 1; }

        SLEEP_FOR_WHILE
        if [ -n "$(PROG_PID)" ]; then
            echo "[ OK ]"
        else
            echo "[ FAILED ]"
            exit 1
        fi
    fi
}
58 |
# Stop the web daemon with SIGKILL and verify it actually went away.
stop() {
    PID=$(PROG_PID)
    if [ -n "${PID}" ]; then
        echo -n "Stoping ${NAME} "
        # '|| /bin/true' keeps 'set -e'-style callers happy if the PID raced away.
        kill -9 ${PID} || /bin/true

        SLEEP_FOR_WHILE
        if [ -n "$(PROG_PID)" ]; then
            echo "[ FAILED ]"
            exit 1
        else
            echo "[ OK ]"
        fi
    else
        echo "${NAME} not running."
    fi
}
76 |
# Report whether the web daemon is currently running.
status() {
    PID=$(PROG_PID)
    if [ -z "${PID}" ]; then
        echo "${NAME} is not running"
    else
        echo "${NAME} is running with PID:${PID}"
    fi
}
85 |
# init.d entry point: dispatch on the requested action.
case "$1" in
    start)
        start ;;
    stop)
        stop ;;
    restart)
        stop
        start ;;
    status)
        status ;;
    *)
        echo "Usage: $0 {start|stop|restart|status}"
        exit 1 ;;
esac
exit 0
101 |
--------------------------------------------------------------------------------
/templates/concourse-web.j2:
--------------------------------------------------------------------------------
#!/bin/bash
# {{ ansible_managed }}

# Config
# Export every combined web option as an environment variable for concourse.
{% for option, value in concourse_web_options_combined.items() %}
export {{ option }}='{{ value }}'
{% endfor %}

echo "" >> {{ concourseci_log_web }}
echo "$(date) starting " >> {{ concourseci_log_web }}

## Exec web
# Replace this shell with the concourse web process; all output to the log.
exec {{ concourseci_bin_dir }}/concourse web >> {{ concourseci_log_web }} 2>&1
14 |
--------------------------------------------------------------------------------
/templates/concourse-worker-init.sh.j2:
--------------------------------------------------------------------------------
#!/bin/bash
### BEGIN INIT INFO
# Provides: concourse-worker
# Required-Start: $local_fs $network $remote_fs $syslog
# Required-Stop: $local_fs $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: concourse worker
# Description: This script starts and stops the concourse worker service daemon
### END INIT INFO
# {{ ansible_managed }}

NAME="Worker"
DESC="Concourse-Worker"

RUN_AS_USER="root"
RUN_AS_GROUP="root"

BASE_DIR="{{ concourseci_bin_dir }}"
# Pattern used to locate the running worker process in 'ps' output.
GREP_NAME="{{ concourseci_bin_dir }}/concourse worker"

RUN_CMD="{{ concourseci_bin_dir }}/concourse-worker"

## Retire config
export CONCOURSE_NAME="{{ concourse_worker_options_combined['CONCOURSE_NAME'] | default('$(hostname)') }}"
{% if concourseci_version is version_compare('v3.10.0', '<') %}
export CONCOURSE_TSA_HOST="{{ concourse_worker_options_combined['CONCOURSE_TSA_HOST'] }}"
export CONCOURSE_TSA_PORT="{{ concourse_worker_options_combined['CONCOURSE_TSA_PORT'] }}"
{% else %}
# v3.10.0+ expects host:port combined in CONCOURSE_TSA_HOST.
export CONCOURSE_TSA_HOST="{{ concourse_worker_options_combined['CONCOURSE_TSA_HOST'] }}:{{ concourse_worker_options_combined['CONCOURSE_TSA_PORT'] }}"
{% endif %}
export CONCOURSE_TSA_PUBLIC_KEY="{{ concourse_worker_options_combined['CONCOURSE_TSA_HOST_KEY'] }}.pub"
export CONCOURSE_TSA_WORKER_PRIVATE_KEY="{{ concourse_worker_options_combined['CONCOURSE_TSA_WORKER_PRIVATE_KEY'] }}"

## Helper variables
CONCOURSE_BIN_DIR="{{ concourseci_bin_dir }}"
CONCOURSE_LOG_FILE="{{ concourseci_log_worker }}"
SLEEP=5
#
# Retry/timeout knobs for the retire-worker dance below.
MAX_TRIES="{{ concourseci_worker_retire_tries }}"
PROCESS_EXIT_TIMEOUT="{{ concourseci_worker_process_exit_timeout }}"
temp_file="$(mktemp -d)/retire.log"
43 |
# Echo the PID(s) of the running worker process; empty if not running.
PROG_PID() {
    check_prog=$(ps aux | grep -e "${GREP_NAME}" | grep -v grep | awk '{ print $2 }' )
    echo "${check_prog}"
}

# Sleep ~5 seconds, printing a dot per second as progress feedback.
SLEEP_FOR_WHILE(){
    for i in $(seq 5); do
        sleep 1
        echo -n "."
    done
}

## Log to concourse log file
#
# Append a timestamped message to the worker log file.
log(){
    echo "$(date) ${1}" >> "${CONCOURSE_LOG_FILE}"
}
61 |
# Ask concourse to retire this worker, then wait up to PROCESS_EXIT_TIMEOUT
# seconds for the worker process to exit.
# Returns: 0 on graceful exit, 124 on command error or timeout (caller retries).
retire_worker(){
    log "Starting retire function ..."
    "${CONCOURSE_BIN_DIR}"/concourse retire-worker > "${temp_file}" 2>&1
    rc="$?"
    if [ "${rc}" -eq 0 ]; then
        log "Concourse retire-worker command executed successfully"
        log "Waiting for up to ${PROCESS_EXIT_TIMEOUT} seconds for process exit"
        for i in $(seq ${PROCESS_EXIT_TIMEOUT}); do
            if [ -z "$(PROG_PID)" ]; then
                log "Worker process exited gracefully. Assuming successful retire of ${CONCOURSE_NAME}."
                rm "${temp_file}"
                return 0
            else
                echo -n "."
                log "Waiting for worker process exit..."
                sleep 1
            fi
        done
    else
        log "Got an error while retiring ${CONCOURSE_NAME}. Logged it and trying again."
        cat "${temp_file}" >> "${CONCOURSE_LOG_FILE}"
        rm "${temp_file}"
        return 124
    fi

    # if we reached here timeout
    log "Retire timeout for ${CONCOURSE_NAME} pid $(PROG_PID)"
    cat "${temp_file}" >> "${CONCOURSE_LOG_FILE}"
    rm "${temp_file}"
    return 124
}
93 |
# Retry wrapper around retire_worker for init.d use: up to MAX_TRIES attempts,
# retrying on 124 (timeout) and aborting the script on any other failure.
retire_initd_interface(){
    for i in $(seq ${MAX_TRIES}); do
        log "Attempt #$i / $MAX_TRIES to retire worker."
        # Temporarily disable errexit so a non-zero rc can be inspected.
        set +e
        retire_worker
        rc="$?"
        set -e
        if [ "${rc}" -eq 0 ]; then
            return 0
        elif [ "${rc}" -eq 1 ]; then
            echo "[ FAILED ]"
            echo " Retire failed with an error. Check worker logs"
            exit 1
        elif [ "${rc}" -eq 124 ]; then
            continue
        else
            echo "[ FAILED ]"
            echo " Retire failed with an unexpected error. Check worker logs"
            exit 1
        fi
    done

    echo "[ ERROR ]"
    echo " Retire failed timed out. Check worker logs"
    exit 1
}
120 |
# Start the worker daemon (no-op if already running). A retire pass runs
# first to clean up any stale registration.
start() {
    PID=$(PROG_PID)
    if [ -n "${PID}" ] ; then
        echo "${NAME} is already running (PID: ${PID})"
    else
        echo -n "Starting ${NAME} "
        # Make sure to clean up if needed
        retire_initd_interface

        # Start quiet background with uid and gid.
        # BUG FIX: the old '|| echo "[ FAILED ]" || exit 1' never exited,
        # because echo succeeds; group the failure handling instead.
        start-stop-daemon --start --background --name ${NAME} --chdir "${BASE_DIR}" --chuid ${RUN_AS_USER} \
            --group ${RUN_AS_GROUP} --exec "${RUN_CMD}" || { echo "[ FAILED ]"; exit 1; }
        SLEEP_FOR_WHILE
        if [ -n "$(PROG_PID)" ]; then
            echo "[ OK ]"
        else
            echo "[ FAILED ]"
            exit 1
        fi
    fi
}
142 |
## Stop the service: retire the worker first so builds drain, then
# force-kill whatever is left and verify the process is gone.
stop() {
    if [ -n "$(PROG_PID)" ]; then
        echo -n "Retiring/Stopping ${NAME} "
        # let us just retire the worker first
        retire_initd_interface
        # at this point let us just kill it if it is still alive;
        # ignore kill errors — the process may already have exited
        kill -9 "$(PROG_PID)" || /bin/true

        SLEEP_FOR_WHILE
        # Verify the process really went away.
        if [ -n "$(PROG_PID)" ]; then
            echo "[ FAILED ]"
            exit 1
        else
            echo "[ OK ]"
        fi
    else
        echo "${NAME} not running."
    fi
}
162 |
## Print a human-readable running/stopped report for the service.
status() {
    PID=$(PROG_PID)
    if [ -z "${PID}" ]; then
        echo "${NAME} is not running"
    else
        echo "${NAME} is running with PID:${PID}"
    fi
}
171 |
# Entry point: dispatch on the init.d action argument.
case "$1" in
    start)
        start ;;
    stop)
        stop ;;
    restart)
        # Restart is a plain stop followed by start.
        stop
        start ;;
    status)
        status ;;
    *)
        echo "Usage: $0 {start|stop|restart|status}"
        exit 1 ;;
esac
exit 0
187 |
--------------------------------------------------------------------------------
/templates/concourse-worker.j2:
--------------------------------------------------------------------------------
#!/bin/bash
# {{ ansible_managed }}

# Config
# Render each combined worker option as an exported environment variable.
# For concourse >= v3.10.0 the TSA port must be carried inline in
# CONCOURSE_TSA_HOST (host:port), so CONCOURSE_TSA_PORT is folded into it
# and not exported on its own. For >= v6.2.0 the session-signing and TSA
# host keys are skipped here — presumably web-side only from that version;
# confirm against the role's web template.
{% for option, value in concourse_worker_options_combined.items() %}
{% if (concourseci_version is version_compare('v3.10.0', '>=')) and option == 'CONCOURSE_TSA_HOST' %}
export {{ option }}="{{ value }}:{{ concourse_worker_options_combined['CONCOURSE_TSA_PORT'] }}"
{% elif (concourseci_version is version_compare('v3.10.0', '>=')) and option == 'CONCOURSE_TSA_PORT' %}
{% elif (concourseci_version is version_compare('v6.2.0', '>=')) and option in ['CONCOURSE_SESSION_SIGNING_KEY', 'CONCOURSE_TSA_HOST_KEY'] %}
{% else %}
export {{ option }}='{{ value }}'
{% endif %}
{% endfor %}

# Mark a fresh start in the worker log.
echo "" >> {{ concourseci_log_worker }}
echo "$(date) starting " >> {{ concourseci_log_worker }}

# Exec worker
# Replace this shell with the worker process; all output goes to the log.
exec {{ concourseci_bin_dir }}/concourse worker >> {{ concourseci_log_worker }} 2>&1
20 |
--------------------------------------------------------------------------------
/test/ansible-setup.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Install avm (Ansible Version Manager) and register the three Ansible
# versions used by the test matrix.
set -e

# avm release tag to check out
AVM_VERSION="v1.0.0"

export ANSIBLE_VERSIONS_1="2.6.18"
export INSTALL_TYPE_1="pip"
export ANSIBLE_LABEL_1="vOne"

export ANSIBLE_VERSIONS_2="2.7.12"
export INSTALL_TYPE_2="pip"
export ANSIBLE_LABEL_2="vTwo"

export ANSIBLE_VERSIONS_3="2.8.3"
export INSTALL_TYPE_3="pip"
export ANSIBLE_LABEL_3="vThree"

# What's the default version
export ANSIBLE_DEFAULT_VERSION="vOne"

echo "* Setting up ansible "
echo "* ANSIBLE_LABEL_1=${ANSIBLE_LABEL_1} ANSIBLE_VERSIONS_1=${ANSIBLE_VERSIONS_1}"
echo "* ANSIBLE_LABEL_2=${ANSIBLE_LABEL_2} ANSIBLE_VERSIONS_2=${ANSIBLE_VERSIONS_2}"
echo "* ANSIBLE_LABEL_3=${ANSIBLE_LABEL_3} ANSIBLE_VERSIONS_3=${ANSIBLE_VERSIONS_3}"

## Create a temp dir to download avm
# (fallback form covers BSD mktemp, which requires a -t template)
avm_dir="$(mktemp -d 2>/dev/null || mktemp -d -t 'mytmpdir')"
git clone https://github.com/ahelal/avm.git "${avm_dir}" >/dev/null 2>&1
cd "${avm_dir}"
# Quote the tag to be safe against word splitting/globbing.
git checkout "${AVM_VERSION}"
/bin/sh "${avm_dir}"/setup.sh

exit 0
34 |
--------------------------------------------------------------------------------
/test/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | roles_path=../:./test/helper_roles:./helper_roles:../../
3 | callback_whitelist = changes
4 |
5 | [ssh_connection]
# Improves speed; requires 'requiretty' to be disabled in /etc/sudoers on all managed hosts.
7 | pipelining=True
8 |
9 | ## SSH Args
10 | control_path=%(directory)s/ansible-ssh-%%h-%%p-%%r
11 |
--------------------------------------------------------------------------------
/test/helper_roles/fly/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | pipelines :
4 | - simple_failure.yml
5 | - simple_success.yml
6 |
7 | pipeline_dest : "/opt/concourseci"
--------------------------------------------------------------------------------
/test/helper_roles/fly/files/macos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | resources:
3 | - name: resource-tutorial
4 | type: git
5 | source:
6 | uri: https://github.com/starkandwayne/concourse-tutorial.git
7 |
8 | jobs:
9 | - name: navi
10 | plan:
11 | - get: resource-tutorial
12 | - task: annoy
13 | config:
14 | platform: darwin
15 | run:
16 | path: sh
17 | args:
18 | - -exc
19 | - |
20 | uname -a
21 | echo "xxx" > /tmp/x1.local
22 | hostname
23 |
--------------------------------------------------------------------------------
/test/helper_roles/fly/files/simple_failure.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jobs:
3 | - name: job
4 | public: true
5 | plan:
6 | - task: hello-world failure
7 | config:
8 | platform: linux
9 | image_resource:
10 | type: docker-image
11 | source:
12 | repository: registry.hub.docker.com/library/busybox
13 | run:
14 | path: sh
15 | args:
16 | - -exc
17 | - |
18 | echo "I WILL FAIL"
19 | exit 1
20 |
--------------------------------------------------------------------------------
/test/helper_roles/fly/files/simple_success.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jobs:
3 | - name: job
4 | public: true
5 | plan:
6 | - task: hello-world success
7 | config:
8 | platform: linux
9 | image_resource:
10 | type: docker-image
11 | source:
12 | repository: registry.hub.docker.com/library/busybox
13 | run:
14 | path: sh
15 | args:
16 | - -exc
17 | - |
18 | echo "I WILL SUCCESSED"
19 | exit 0
20 |
--------------------------------------------------------------------------------
/test/helper_roles/fly/tasks/fly.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - pause:
4 | seconds: 20
5 |
6 | - name: fly | copy pipelines
  copy:
    src: "{{ item }}"
    dest: "{{ pipeline_dest }}/{{ item }}"
10 | changed_when: false
11 | with_items: "{{ pipelines }}"
12 |
13 | - name: fly | download fly
14 | get_url:
15 | url: "http://127.0.0.1:8080/api/v1/cli?arch=amd64&platform={{ ansible_system | lower }}"
16 | dest: "/usr/local/bin/fly"
    mode: "0755"
18 |
19 | - name: fly | login with fly
20 | command: /usr/local/bin/fly -t kitchen login -c http://127.0.0.1:8080 -u {{ concourseci_manage_credential_user }} -p {{ concourseci_manage_credential_password }}
21 | become: False
22 | changed_when: false
23 |
24 | - name: fly | upload pipelines
25 | command: /usr/local/bin/fly -t kitchen sp -p {{ item }} -c {{ pipeline_dest }}/{{ item }} -n
26 | changed_when: false
27 | become: False
28 | with_items: "{{ pipelines }}"
29 |
30 | - name: fly | unpause pipelines
31 | command: /usr/local/bin/fly -t kitchen unpause-pipeline -p {{ item }}
32 | changed_when: false
33 | become: False
34 | with_items: "{{ pipelines }}"
35 |
36 | - name: fly | trigger pipelines
37 | command: /usr/local/bin/fly -t kitchen trigger-job -j "{{ item }}/job"
38 | changed_when: false
39 | become: False
40 | with_items: "{{ pipelines }}"
41 |
42 | - name: pause to make sure pipeline ran
43 | pause:
44 | seconds: 10
--------------------------------------------------------------------------------
/test/helper_roles/fly/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - import_tasks: packages.yml
4 |
5 | - import_tasks: fly.yml
--------------------------------------------------------------------------------
/test/helper_roles/fly/tasks/packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: packages | install net-tools
  apt:
    name: net-tools
6 | when: ansible_os_family == 'Debian'
7 |
--------------------------------------------------------------------------------
/test/helper_roles/hosts/tasks/main.yml:
--------------------------------------------------------------------------------
---
2 | # Use hacky method since we cant move /etc/hosts in docker
3 | - name: Ensure hostname entries in /etc/hosts
4 | command: grep "127.0.1.1 {{ inventory_hostname }}" /etc/hosts
5 | register: grep_command
6 | failed_when: false
7 | changed_when: false
8 |
9 | - name: Ensure hostname entries in /etc/hosts
10 | shell: echo "127.0.1.1 {{ inventory_hostname }}" >> /etc/hosts
11 | when: grep_command.rc == 1
12 |
--------------------------------------------------------------------------------
/test/helper_roles/roles_requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: postgresql
4 | src: git+https://github.com/ANXS/postgresql.git
5 | version: v1.9.0
6 |
--------------------------------------------------------------------------------
/test/integration/Gemfile:
--------------------------------------------------------------------------------
source 'https://rubygems.org'

group :development do
  gem 'net-ssh'
  gem 'serverspec'
  # FIX: the constraint must be a separate argument. The previous
  # `gem 'thor' >= '0.19.0'` compared the two strings with String#>=
  # and passed the resulting boolean to `gem`, which is an error.
  gem 'thor', '>= 0.19.0'
end
8 |
--------------------------------------------------------------------------------
/test/integration/helper_spec.rb:
--------------------------------------------------------------------------------
# Shared RSpec/Serverspec configuration for every integration spec.
# Selects the serverspec backend from KITCHEN_VERIFY_CONN: 'ssh'
# (default, driven by the KITCHEN_* env vars test-kitchen exports)
# or 'exec' for running directly on the target host.
require 'rubygems'
require 'serverspec'
require 'pathname'
require 'net/ssh'

RSpec.configure do |config|
  # Use color in STDOUT
  config.color = true
  # Use color not only in STDOUT but also in pagers and files
  config.tty = true

  # By default use ssh
  verify_conn = ENV['KITCHEN_VERIFY_CONN'] || 'ssh'
  if verify_conn == 'ssh'
    set :host, ENV['KITCHEN_HOSTNAME']
    # ssh options at http://net-ssh.github.io/net-ssh/Net/SSH.html#method-c-start
    # NOTE(review): `paranoid:` is deprecated in newer net-ssh in favour of
    # `verify_host_key:` — confirm the pinned net-ssh version before changing.
    set :ssh_options,
        user: ENV['KITCHEN_USERNAME'],
        port: ENV['KITCHEN_PORT'],
        auth_methods: ['publickey'],
        keys: [ENV['KITCHEN_SSH_KEY']],
        keys_only: true,
        paranoid: false,
        use_agent: false,
        verbose: :error
    set :backend, :ssh
    set :request_pty, true
  elsif verify_conn == 'exec'
    puts 'serverspec :backend, :exec'
    set :backend, :exec
  else
    puts "invalid serverspec backend #{verify_conn}"
  end
end
35 |
--------------------------------------------------------------------------------
/test/integration/simple/concourse-vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | postgresql_version: 9.5
4 | postgresql_databases:
5 | - name: "concourse"
6 | owner: "concourseci"
7 | postgresql_users:
8 | - name : "concourseci"
9 | pass : "conpass"
10 | concourse_worker_options :
11 | CONCOURSE_BAGGAGECLAIM_DRIVER : "naive"
12 | CONCOURSE_GARDEN_DESTROY_CONTAINERS_ON_STARTUP : true
13 | CONCOURSE_GARDEN_DNS_PROXY_ENABLE : true
14 | CONCOURSE_GARDEN_DNS_SERVER : 8.8.8.8
15 | concourse_web_options:
16 | CONCOURSE_MAIN_TEAM_LOCAL_USER : "user1,user6"
17 | CONCOURSE_EXTERNAL_URL : "{{ CONCOURSE_EXTERNAL_URL_VAGRANT | default('http://127.0.0.1:8080') }}"
18 | CONCOURSE_POSTGRES_DATABASE : "concourse"
19 | CONCOURSE_POSTGRES_HOST : "127.0.0.1"
20 | CONCOURSE_POSTGRES_PASSWORD : "conpass"
21 | CONCOURSE_POSTGRES_SSLMODE : "disable"
22 | CONCOURSE_POSTGRES_USER : "concourseci"
23 | # ********************* Example Keys (YOU MUST OVERRIDE THEM) *********************
# These keys are used for demo only; you should generate your own and store them
25 | # safely i.e. ansible-vault
26 | # **********************************************************************************
27 | concourseci_key_session_public : ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6tKHmRtRp0a5SAeqbVy3pJSuoWQfmTIk106Md1bGjELPDkj0A8Z4a5rJZrAR7WqrRmHr2dTL9eKroymtIqxgJdu1RO+SM3uZVV5UFfYrBV0rmp5fP2g/+Wom2RB+zCzPT1TjDnKph8xPqj19P/0FY9rKbU8h6EzEp6Z5DjwKZKvxAF8p9r6wJde4nY+oneIuG1qpxYmLpNvdM3G44vgNeMg20jVywjJVwYDNe8ourqPu8rBauLbSiQI8Uxx6dlJSTsVawrKwHQUPEI9B5LPwUzZ9t/d7k2uJnCig6aJwM8dcyr8tqxlfdfmQiHRlZozail8UzIv65MbVngji5sqoB
28 | concourseci_key_session_private : |
29 | -----BEGIN RSA PRIVATE KEY-----
30 | MIIEowIBAAKCAQEAurSh5kbUadGuUgHqm1ct6SUrqFkH5kyJNdOjHdWxoxCzw5I9
31 | APGeGuayWawEe1qq0Zh69nUy/Xiq6MprSKsYCXbtUTvkjN7mVVeVBX2KwVdK5qeX
32 | z9oP/lqJtkQfswsz09U4w5yqYfMT6o9fT/9BWPaym1PIehMxKemeQ48CmSr8QBfK
33 | fa+sCXXuJ2PqJ3iLhtaqcWJi6Tb3TNxuOL4DXjINtI1csIyVcGAzXvKLq6j7vKwW
34 | ri20okCPFMcenZSUk7FWsKysB0FDxCPQeSz8FM2fbf3e5NriZwooOmicDPHXMq/L
35 | asZX3X5kIh0ZWaM2opfFMyL+uTG1Z4I4ubKqAQIDAQABAoIBAFWUZoF/Be5bRmQg
36 | rMD3fPvZJeHMrWpKuroJgEM0qG/uP/ftGDlOhwIdrLKdvpAsRxA7rGE751t37B84
37 | aWStyB7OfIk3wtMveLS1qIETwn5M3PBM8bE8awhTx7vcDgurnt4CZjqDnTW4jfB+
38 | N1obzoBQ1B2Okd4i3e4wP3MIIlDCMoTPPd79DfQ6Hz2vd0eFlQcwb2S66oAGTgxi
39 | oG0X0A+o+/GXGGhcuoRfXCR/oaeMtCTAML8UVNT8qktYr+Lfo4JoQR6VroQMStOm
40 | 7DvS3yJe7ZZDrQBdNDHVAsIG9/QXEWmiKNv3p1gHm216FQeJV6rzSXGjeE22tE9S
41 | JzmBKAECgYEA6CiFBIMECPzEnBooyrh8tehb5m0F6TeSeYwdgu+WuuBDRMh5Kruu
42 | 9ydHE3tYHE1uR2Lng6suoU4Mnzmjv4E6THPTmTlolDQEqv7V24a0e8nWxV/+K7lN
43 | XHrq4BFE5Xa8lkLAHw4tF8Ix6162ooHkaLhhmUWzkGVxAUhL/tbVc/ECgYEAzeEn
44 | cR2NMDsNMR/anJzkjDilhiM5pORtN5O+eBIzpbFUEDZL4LIT7gqzic0kKnMJczr7
45 | 0WYUp2U762yrA4U2BqiGyTO2yhcMM5kuDTG+1VTdw3G6rZ0L80jUugW9131VC3tB
46 | zcinIUs8N2hWsbuaRNhTCmlEzfe5UsikRjHgZxECgYEAze1DMCFWrvInI6BAlrDW
47 | TjTxb489MwVMM+yJMN98f/71LEn20GTyaeC5NxqtqU01iLS+TxjEn+gvYf0qtm/W
48 | WoJTKxK1JOCPU24AHF18MmFy1Fi1h+syJ9oQBPjMeA2+cjp7WBCnBvAGf5Tfw34c
49 | MJd8WwxsnqScfFq4ri+53sECgYBGobw6Xn0V0uyPsfH6UQlH4hdHkcYw//1IV/O8
50 | leIKMnA4r6gQioez3xABctO5jIXtdor2KCNl2qFX/4wcRRNn7WFwncFUS9vvx9m4
51 | xRxHbDo410fIUFzNNmtk9ptO1rzal4rX4sMT9Q/Poog7qbUfcWfr5nmogBiggh15
52 | x5rJQQKBgE4khLJKEpLi8ozo/h02R/H4XT1YGNuadyuIULZDyuXVzcUP8R7Xx43n
53 | ITU3tVCvmKizZmC3LZvVPkfDskhI9Yl3X7weBMUDeXxgDeUJNJZXXuDf1CC//Uo9
54 | N1EQdIhtxo4mgHXjF/8L32SqinAJb5ErNXQQwT5k9G22mZkHZY7Y
55 | -----END RSA PRIVATE KEY-----
56 |
57 | concourseci_key_tsa_public : ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCjddjviqF3BjVnxrledNsKM0wm7bwJwRgnUomLVrwHXjfArEz5yFa2C87IT9CYpIxkZMgmd0Bdtwj3kiNPP0qYpcj/uTqQTE5xLzTiJIUFsgSQwrMt/zd5x44g71qiHF/1KtHdcZq1dW3+5IwBog692HjcytbAxpUEGGpocHs/aoJ5/xn2tx61QOhkr5+PP1Ft7eHu719/pb1czhH8tZwCwNJQs4vzf79Mlgt0ikjJ84o9kOiUGP+Fc0+EjapBg9M2GE6/l86IJzcx/t/uQYCFOdKbg5ukck9NztldaOUeAPkUttPtf2vdjZU+EwSYc3XvhyQlN/QQmZ8tvG3gV9wv
58 | concourseci_key_tsa_private : |
59 | -----BEGIN RSA PRIVATE KEY-----
60 | MIIEogIBAAKCAQEAo3XY74qhdwY1Z8a5XnTbCjNMJu28CcEYJ1KJi1a8B143wKxM
61 | +chWtgvOyE/QmKSMZGTIJndAXbcI95IjTz9KmKXI/7k6kExOcS804iSFBbIEkMKz
62 | Lf83eceOIO9aohxf9SrR3XGatXVt/uSMAaIOvdh43MrWwMaVBBhqaHB7P2qCef8Z
63 | 9rcetUDoZK+fjz9Rbe3h7u9ff6W9XM4R/LWcAsDSULOL83+/TJYLdIpIyfOKPZDo
64 | lBj/hXNPhI2qQYPTNhhOv5fOiCc3Mf7f7kGAhTnSm4ObpHJPTc7ZXWjlHgD5FLbT
65 | 7X9r3Y2VPhMEmHN174ckJTf0EJmfLbxt4FfcLwIDAQABAoIBAFzux2OJIbuV4A8c
66 | QI+fSFlISOdpChtRmPXiSyjZKxXVT0VPsIPijsn5dJsWJbZi9x6s3c5gxkuBoKuA
67 | fmqzxSl8OAaLvOwFNiPLfvmDYc2XJFlZGJ3yGAw4lGnNK243S6cLrT2FNTwtg1gD
68 | gEX9aPwucqi0+duoC1jEuNqf+LJYZykDicw3yHixgas/pKe2yDvsUhyQy2m/g9SW
69 | rpKjppxas7aKQr1GEI4Gz4JY6L78ksdLLFCiXD/pg/DLbyfOoMid8eCUnGbh1rhB
70 | PsKNyk3r/CSWsSlUlrujEqFdc/H8Ej07wVmVduTZddvjE4LcVtFlBzcEZbEofnyx
71 | H8wLv8ECgYEA0F/jBIVcSWLTB00R/Fix7Bo9ICtZ1sXL+hLPm/zVlL/gD+MlAAVB
72 | FimJKqMZa25B1ZUrYWV+Zddtel61ZxTrb86KKqtb0yuIVtPBc2ssVsO9hKL7NJ9i
73 | g6tpR0hOhD46WJxOI9Srjv61f9tP7izlwbKXo6TrdYxM8YdjXlUyMCcCgYEAyNIB
74 | IayYqg+pFoNdqKi3/n7/yGGWvlO0kW9aXkzrwTxT/k3NCHwarGgeSqU+/XVhnAHB
75 | pvsORLAnf++gQNfoxU10nrdhkj6YIdg8OK5rO4n7iNysa4bZi2DrwJt9/mFpNkvY
76 | lD956Lof/J1gPKmcNAwnsxijJE7w3I3rJ5UucLkCgYB5PMEGWV2XqTMlVVc4npZu
77 | y9lyxSZRSuZiSt2WYaYXFQiV1dAqUeRLs8EGGL1qf004qsEBux6uvIgLId2j600M
78 | 0XwcVXVoyTRbaHtu3xV+Kgczi+xi8rVL7MilW9GrKdWixtbEDDIBUftiN8Uqy96m
79 | M3X9FbCVxRrjkKVlNmasEwKBgBCMxg0ZZUd2nO+/CcPxi6BMpRXFfR/YVCQ8Mg1d
80 | d3xoVV+616/gUm5s8joinitTNiUeO/Bf9lAQ2GCBxgoyAPvpozfFUyQzRmRbprLh
81 | JPM2LuWbkhYWee0zopov9lU1f+86lvG4vXpBhItUCO9W5wmfCtKGsEM4wj7a70tG
82 | zxn5AoGARTzJJb6nxhqQCAWTq2GOQ1dL3uJvHihDs8+Brtn686Xqkajaw+1+OX2i
83 | ehm8iE8k8Mdv7BqPMXQxlLb954l/ieTYkmwTnTG5Ot2+bx0Q15TGJqKFCWQZRveV
84 | uPTcE+vQzvMV3lJo0CHTlNMo1JgHOO5UsFZ1cBxO7MZXCzChGE8=
85 | -----END RSA PRIVATE KEY-----
86 |
87 | concourseci_worker_position : "{{ groups[concourseci_worker_group].index(inventory_hostname)| default(0) }}"
88 | concourseci_key_worker_public : "{{ concourseci_worker_keys[concourseci_worker_position | int ].public}}"
89 | concourseci_key_worker_private : "{{ concourseci_worker_keys[concourseci_worker_position | int ].private}}"
90 | concourseci_worker_keys :
91 | - public : ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKW31QIWcCR2Gh8i1fodDPHqviQV5eAW7Zv37Hzs1SKISvYeJ32EQ1mx2UXV8omzJiVojlXIsqkTXIBK6awvXQcRt8HFXwB9LjBfbYOUm+vU6L46HG3p2rBFAynh3NOeXvV1IBMNeuJ/w7v4CNNIkfKfQ34iwpirnX9fwoRV1pIt7c7MnKwZVrq/BwFpGh/GfOKrXLRXUsJAxDA+Mm0q2rvfpcsviINM7V41Lzemany1KVfjMLVe86CKWT0j2WERYejVlhxTXLlz7lHAowyU87dXh4QVHmDgMMSRIWgbMS0/1uAwfpdLMkzBEUhWRgKXDe/NWRk2I+Q77IJa1fnunJ
92 | private : |
93 | -----BEGIN RSA PRIVATE KEY-----
94 | MIIEpQIBAAKCAQEAylt9UCFnAkdhofItX6HQzx6r4kFeXgFu2b9+x87NUiiEr2Hi
95 | d9hENZsdlF1fKJsyYlaI5VyLKpE1yASumsL10HEbfBxV8AfS4wX22DlJvr1Oi+Oh
96 | xt6dqwRQMp4dzTnl71dSATDXrif8O7+AjTSJHyn0N+IsKYq51/X8KEVdaSLe3OzJ
97 | ysGVa6vwcBaRofxnziq1y0V1LCQMQwPjJtKtq736XLL4iDTO1eNS83pmp8tSlX4z
98 | C1XvOgilk9I9lhEWHo1ZYcU1y5c+5RwKMMlPO3V4eEFR5g4DDEkSFoGzEtP9bgMH
99 | 6XSzJMwRFIVkYClw3vzVkZNiPkO+yCWtX57pyQIDAQABAoIBAQCP6rWbEcaDFmVX
100 | mjeu9hTd2YCBb+A/l2FROCJg1LGuJucHHOTGO2d3gJRu+mE9LfONgOHnzgOkCJZp
101 | ZPsRUmslDexwPm7YQZg4oftHGKdcIqMEVqauG5GjGXQ4K8AiP3VK3Z2S/zvFvuZj
102 | T/WLd7u2EE6CmDa0bNdzwpzNv1eJ92DGTm7bz71tGbjexuXuIzJVmUq1UVhj6lle
103 | dklzM9RIp0wAaCrKVifNhEdZ4cy6YG0vBaAVbUZfxO9Qnec9V5Ycor9HZ9bsPhub
104 | 7H3i5j7eGFH6f01bm2o3bSVwsvSosIiG6uXbNw83RGZhsIIFK1bJ2W4CtP86C1fG
105 | +L2GaZtpAoGBAO9Anc8hsLAZEJ9gYm+abTFbTkNv4f/TPQxSngNbPx/OaDsBtHK0
106 | pQ0piG21wx6eKER0Bsb3p44Qav1G/3NVMwYAPWkoujai6OGt0bAjNCBZe5jzoYHO
107 | cN/PTSNuhfri5Hpp6EqF8m3H6gJT/rMVgEfflorXnfj7WvNwVIh50CynAoGBANiF
108 | t5pHWmvIWJs3feLiJm0o0Jp7IlpwS7vn62qfnoqv9Yze/0vNVscczkCzCbUuayf4
109 | TVgtfOe+AHs+N8u38BHrLzcYf/uRAj6fi9rf8Lhxbjv+jFOhPNttGdP5m+GDjlsW
110 | 5D14cNjD/8jKIgecmYSgRTIQmdevfZseQQKhPtQPAoGBAMVVAFQlL3wvUDyD3Oy7
111 | 7C/3ZRfOIhNFAWc2hUmzat8q+WEhyNmLEU9H4FTMxABu5jt/j09wWGyeMgBxHKTd
112 | stXSQNSJWP1TZM0u9nJWttmvtHe1CpLr2MFgU/lTYYJKvbQRwhwlWo0dhG8jJEJF
113 | C6c8TQh7SrpfZua+0Zo3DnKlAoGAPYpL8/Kh1Y6c+IjeI9VJPK9kEvQ6gF/4dpDl
114 | TWnOwvZeIUrkXuQe7PrX+HWqpa9qz3J4cT6EiM1tD5pQe3ttJXql8c/p2FOPwsLQ
115 | GkaaAaJjxXOE6OQkCu3IcII6du9QT72C46HO2R1kHuqsn2M4EwUGhcNIJpB/b846
116 | hgfUdqsCgYEAn3EGdd1DNC+ykrCk2P6nlFXvjxdlxCKNzCPWHjwPGE3t2DRIaEWI
117 | 0XBHuBy2hbiZPIZpAK3DtPONUjjrcv3gPmhraoz5K7saY5vxyEJFYNCu2nKCJUkp
118 | ZNJ69MjK2HDIBIpqFJ7jnp32Dp8wviHXQ5e1PJQxoaXNyubfOs1Cpa0=
119 | -----END RSA PRIVATE KEY-----
120 |
121 | - public : ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDXQFU/VlngUtaW9i6HgkdEfcB6Ak+Ogk/EN96lB6lm6NHvMWL0ggtrxzPcyQ+K6Rri1Vh2zDKenF+ZutqfxfNEmmDUNuHW96djUXEzwLuTYYdoobHNFtV9s2pEix2QFaMxMnvWSIfvKqDvI2Z+zwfzNFKjDweiVsCPw3vAF9vIL6W12zDb3hGN4uJqpz4GCj0K3DR/dxMZVEcE4VQ5ITOusqRKeZTt3QMJI9ZdJF8xg+Bdg/NSDvH7GOcmN5eLEheIx3lWCmhtQvh1iwa+JlDVWQFmxbVqPTzI/8phjOIfEqimg+nBVq157UIfDf77Xj2YyVQSv2inVc4RLZSMQw3p
122 | private : |
123 | -----BEGIN RSA PRIVATE KEY-----
124 | MIIEowIBAAKCAQEA10BVP1ZZ4FLWlvYuh4JHRH3AegJPjoJPxDfepQepZujR7zFi
125 | 9IILa8cz3MkPiuka4tVYdswynpxfmbran8XzRJpg1Dbh1venY1FxM8C7k2GHaKGx
126 | zRbVfbNqRIsdkBWjMTJ71kiH7yqg7yNmfs8H8zRSow8HolbAj8N7wBfbyC+ltdsw
127 | 294RjeLiaqc+Bgo9Ctw0f3cTGVRHBOFUOSEzrrKkSnmU7d0DCSPWXSRfMYPgXYPz
128 | Ug7x+xjnJjeXixIXiMd5VgpobUL4dYsGviZQ1VkBZsW1aj08yP/KYYziHxKopoPp
129 | wVatee1CHw3++149mMlUEr9op1XOES2UjEMN6QIDAQABAoIBAH42vsWwwGqEqEdE
130 | euwCO/+xLNdd24BYcKVBjU9/OpmZEuAKOVfdmQzNdV+UlYSCQr2XE5Q1D8lpL7VY
131 | lzDwRUCItRY6SBpghMn7y0DpVhOJMHjttu/m37AhL8KZP/Bof5QtYee4B9z5Rfxy
132 | 6XqZsrOsjngGLBfIfojNuxZb5wdttX/u7Qp9otnESxifTbn9PfUM5UwhXRncWbsT
133 | MJ0p+aP36aNxwWDKht6bxiBRryvwNbRZX2iu18oxUUWg50uK0M/lo0KK4Svvc5lN
134 | YfBFvum78KckgDX7zVenEOmU9bQfXWgB79oP8IpRP5OyPF2AJjgiKOfR7X+JA7Nq
135 | pfXj48ECgYEA8MjIKz4ILS0ahsaxOIPsc1UyOK6F9v1PrU7ooi8WfLdY4ouPvIl5
136 | BI6zCFL9IdNOlc6Rh+UpfPYndaJz/1cWJyC0diChVdLAR+j6fuEqIKPSiv/Xn+hM
137 | sbsiNn23MoA6C2Jvv1FLez+Shvlj6fF4G1t8MfHwoXZ0yVhuSkJH9o0CgYEA5Np/
138 | k4fA9w/OsbtJu0KtGN0AwhCVmFE/3doE4BVSsmWGznzHcC864S924CHsgznrM3OW
139 | HX7C+PFgbsbtXwqxiaMAaxrh1wBnx28c4wMsNkXCFUds4DkjqDW6IhNH7W9TtuDL
140 | qNoniBH/o18aj0xGF6HJFt6tU7f9iTxJ+tmY280CgYB2HMe0DpXMM1fTzRuZ8XzH
141 | hn9ANrwYUGIJTa/n/tk1DGtZlcRIY9ctWSKRbsQlF5Zw/gd9dfhICCeLGMl18642
142 | O2DKoW8CvoL7w1k9bA5SPIpHDQEku7sDZByARmLbLvNKKltOqf4w0xp5g1RzqbOV
143 | F+dwSJIVYhofunU/kAvk8QKBgGMPcUma6ZwH66BjQXcdVW/9ueZG53oXMV4GkTWu
144 | BS3TZJbczDdzOjlfIkXCaW4kE/shfUknJZ48XVGWKgmJx2+cbwHtkPRP6JwbLJXX
145 | ObwEVg5/7FDiatzU5Mz7K5dLKSFwDLf6NkJgCBffgs+kZHK2RSTxHnWunsBYqG08
146 | 4z3BAoGBAItHnGHbnl7cVHJFHd8teLzS+ki+gv+mKwPwSmeimV6zALAKZvj4RIg8
147 | 4g6kUWry+NNuiaH6fsDA0FWnT3Kyc59/M/EuKNCR7ci1Gnkunc0IUn78aWtNcxX5
148 | RsCKJUM8l63P0jyUufpTbG6nAP8fMdWCdtDBidFLV2JMPYnWb4aP
149 | -----END RSA PRIVATE KEY-----
150 | concourseci_reboot : false
151 | concourse_local_users:
152 | - {user: "user1", pass: "pass1"}
153 | - {user: "user2", pass: "pass2"}
154 | - {user: "user3", pass: "pass3"}
155 | - {user: "user4", pass: "pass4"}
156 | - {user: "user5", pass: "pass5"}
157 | - {user: "user6", pass: "pass6"}
158 | concourseci_manage_teams : true
159 | concourseci_teams :
160 | - name: "x1"
161 | state: "present"
162 | flags:
163 | local-user: user1
164 | - name: "x2"
165 | state: "absent"
166 | flags:
167 | local-user: user2
168 | - name: "x3"
169 | state: "present"
170 | flags:
171 | local-user: user2
172 | - name: "x4"
173 | state: "present"
174 | flags:
175 | local-user: user4
176 | - name: "x5"
177 | state: "absent"
178 | flags:
179 | local-user: user5
180 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/a_user_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# The role must create the service account and group it runs under.
describe user('concourseci') do
  it { should exist }
end

describe group('concourseci') do
  it { should exist }
end
10 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/a_web_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# The web node must accept connections on the ATC port.
describe port(8080) do
  it 'concourse web to be listenting to connection' do
    expect(subject).to be_listening
  end
end

# Concourse web process to be running with concourse user
# (`pgrep -c` prints the match count; exactly one web process is expected)
describe command('pgrep -u concourseci -f concourse\\ web -c') do
  its(:exit_status) { should eq 0 }
  its(:stdout) { should match '1' }
end
14 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/b_worker_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# One worker should have registered itself with the web node.
describe command('/usr/local/bin/fly -t kitchen workers | wc -l') do
  it 'concourse worker should be registered' do
    # NOTE(review): match('1') is a substring/regex match, so any count
    # containing a '1' would pass — verify if an exact count is intended.
    expect(subject.stdout).to match('1')
    expect(subject.exit_status).to eq(0)
  end
end

# Concourse web process to be running with concourse user
describe command('pgrep -u root -f concourse\\ worker -c') do
  its(:exit_status) { should eq 0 }
  its(:stdout) { should match '1' }
end
15 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/c_binary_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# The installed concourse binary must be an executable file owned by root.
describe file('/opt/concourseci/bin/concourse') do
  it 'concourse binary has right permission' do
    expect(subject).to be_file
    expect(subject).to be_executable
    expect(subject).to be_owned_by 'root'
  end
end

# Smoke-test the binary: --help must mention both sub-commands.
describe command('/opt/concourseci/bin/concourse --help') do
  it 'concourse binary execute and print help' do
    expect(subject.stdout).to match('worker')
    expect(subject.stdout).to match('web')
  end
end
17 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/ca_binary_web_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# The generated web control script must be an executable file owned by root.
describe file('/opt/concourseci/bin/concourse-web') do
  it 'concourse-web script has right permission' do
    expect(subject).to be_file
    expect(subject).to be_executable
    expect(subject).to be_owned_by 'root'
  end
end
10 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/cb_binary_worker_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# The generated worker control script must be an executable file owned by root.
describe file('/opt/concourseci/bin/concourse-worker') do
  it 'concourse-worker script has right permission' do
    expect(subject).to be_file
    expect(subject).to be_executable
    expect(subject).to be_owned_by 'root'
  end
end
10 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/d_job_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# End-to-end pipeline checks: the pipelines uploaded by the fly helper
# role must have produced one successful and one failed build.
fly = '/usr/local/bin/fly -t kitchen'
builds = "#{fly} builds"

describe 'simple_success job' do
  describe command("sleep 20 && #{builds} | grep simple_success") do
    it 'simple_success are in the builds' do
      puts '#################################'
      puts subject.stdout.to_s
      puts '#################################'
      expect(subject.stdout).to match('simple_success')
      expect(subject.exit_status).to eq(0)
    end
  end

  # Column 4 of `fly builds` is the build status.
  describe command("sleep 20 && #{builds} | grep simple_success | head -1 | awk '{ print $4 }'") do
    it 'simple_success pipeline should succeed' do
      puts '#################################'
      puts subject.stdout.to_s
      puts '#################################'
      expect(subject.stdout).to match('succeeded')
      expect(subject.exit_status).to eq(0)
    end
  end

  describe command("#{fly} watch -b $(#{builds} | grep simple_success | head -1 | awk '{ print $1 }')") do
    it 'simple_success pipeline log should have "I WILL SUCCESSED"' do
      puts '#################################'
      puts subject.stdout.to_s
      puts '#################################'
      expect(subject.stdout).to match('I WILL SUCCESSED')
      expect(subject.exit_status).to eq(0)
    end
  end
end

describe 'simple_failure job' do
  describe command("#{builds} | grep simple_failure") do
    it 'simple_failure are in the builds' do
      expect(subject.stdout).to match('simple_failure')
      expect(subject.exit_status).to eq(0)
    end
  end

  describe command("#{builds} | grep simple_failure | head -1 | awk '{ print $4 }'") do
    # FIX: description previously said "simple_success" although this
    # example checks the simple_failure build status.
    it 'simple_failure pipeline should be failed' do
      expect(subject.stdout).to match('failed')
      expect(subject.exit_status).to eq(0)
    end
  end

  # `fly watch` exits 1 for a failed build, hence the expected exit status.
  describe command("#{fly} watch -b $(#{builds} | grep simple_failure | head -1 | awk '{ print $1 }')") do
    it 'simple_failure pipelines log should have "I WILL FAIL"' do
      expect(subject.stdout).to match('I WILL FAIL')
      expect(subject.exit_status).to eq(1)
    end
  end
end
60 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/e_manage_spec.rb:
--------------------------------------------------------------------------------
require_relative '../../helper_spec.rb'

# Team management checks: teams declared "present" (x1, x3, x4) must
# exist, and the local users mapped to them must be able to log in.
fly = '/usr/local/bin/fly -t kitchen'

describe 'teams' do
  describe command("#{fly} teams | grep main | grep -v grep") do
    it 'main should exist' do
      expect(subject.stdout).to match('main')
    end
  end

  # FIX: the three examples below previously reused the description
  # 'main should exist'; each now names the team it actually checks.
  describe command("#{fly} teams | grep x1 | grep -v grep") do
    it 'x1 should exist' do
      expect(subject.stdout).to match('x1')
    end
  end

  describe command("#{fly} teams | grep x3 | grep -v grep") do
    it 'x3 should exist' do
      expect(subject.stdout).to match('x3')
    end
  end

  describe command("#{fly} teams | grep x4 | grep -v grep") do
    it 'x4 should exist' do
      expect(subject.stdout).to match('x4')
    end
  end
end

describe 'users login' do
  describe command("#{fly} login -n x1 -u user1 -p pass1") do
    it 'user1 should be part of x1' do
      expect(subject.stdout).to match('target saved')
      expect(subject.exit_status).to eq(0)
    end
  end
  describe command("#{fly} login -n x3 -u user2 -p pass2") do
    it 'user2 should be part of x3' do
      expect(subject.stdout).to match('target saved')
      expect(subject.exit_status).to eq(0)
    end
  end
  describe command("#{fly} login -n x4 -u user4 -p pass4") do
    it 'user4 should be part of x4' do
      expect(subject.stdout).to match('target saved')
      expect(subject.exit_status).to eq(0)
    end
  end
  describe command("#{fly} login -n main -u user1 -p pass1") do
    it 'user1 should be part of main' do
      expect(subject.stdout).to match('target saved')
      expect(subject.exit_status).to eq(0)
    end
  end
end
57 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/ssh_spec.rb:
--------------------------------------------------------------------------------
# FIX: load the shared serverspec configuration like every sibling spec;
# without it this file has no backend configured when run standalone.
require_relative '../../helper_spec.rb'

# Basic sanity: sshd is reachable on the default port.
describe 'SSH port open and listening' do
  describe port(22) do
    it { should be_listening }
  end
end
6 |
--------------------------------------------------------------------------------
/test/integration/simple/serverspec/z_control_scripts_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | PS_WEB = 'ps ax|grep "/opt/concourseci/bin/concourse web"|grep -v grep'.freeze
4 | PS_WRK = 'ps ax|grep "/opt/concourseci/bin/concourse worke"|grep -v grep'.freeze
5 |
6 | ## Worker stop
7 | describe 'concourse worker stopped' do
8 | describe command('sudo service concourse-worker stop') do
9 | it 'initd script should stop worker and return OK' do
10 | expect(subject.stdout).to match('[ OK ]')
11 | expect(subject.exit_status).to eq(0)
12 | end
13 | end
14 |
15 | describe command('sudo service concourse-worker status') do
16 | it 'initd script should return stopped' do
17 | expect(subject.stdout).to match('not running')
18 | expect(subject.exit_status).to eq(0)
19 | end
20 | end
21 |
22 | describe command(PS_WRK.to_s) do
23 | it 'ps should report no process running' do
24 | expect(subject.stdout).to eq('')
25 | end
26 | end
27 | end
28 | ## Web stop
29 | describe 'concourse web stopped' do
30 | describe command('sudo service concourse-web stop') do
31 | it 'initd script should stop web and return OK' do
32 | expect(subject.stdout).to match('[ OK ]')
33 | expect(subject.exit_status).to eq(0)
34 | end
35 | end
36 |
37 | describe command('sudo service concourse-web status') do
38 | it 'initd script should return stopped' do
39 | expect(subject.stdout).to match('not running')
40 | end
41 | end
42 |
43 | describe command(PS_WEB.to_s) do
44 | it 'ps should report no process running' do
45 | expect(subject.stdout).to eq('')
46 | end
47 | end
48 | end
49 | ## Web running
50 | describe 'concourse web running' do
51 | describe command('sudo service concourse-web start') do
52 | it 'initd script should start and return running' do
53 | expect(subject.stdout).to match('\[ OK \]')
54 | expect(subject.exit_status).to eq(0)
55 | end
56 | end
57 |
58 | describe command('sudo service concourse-web status') do
59 | it 'initd script should return running' do
60 | expect(subject.stdout).to match('running with PID:')
61 | expect(subject.exit_status).to eq(0)
62 | end
63 | end
64 |
65 | describe command(PS_WEB.to_s) do
66 | it 'ps should report process running' do
67 | expect(subject.stdout).to match('/opt/concourseci/bin/concourse')
68 | expect(subject.exit_status).to eq(0)
69 | end
70 | end
71 | end
72 | ## worker running
73 | describe 'concourse worker running' do
74 | describe command('sudo service concourse-worker start') do
75 | it 'initd script should start worker and return running' do
76 | expect(subject.stdout).to match('\[ OK \]')
77 | expect(subject.exit_status).to eq(0)
78 | end
79 | end
80 |
81 | describe command('sudo service concourse-worker status') do
82 | it 'initd script should return running' do
83 | expect(subject.stdout).to match('running with PID:')
84 | expect(subject.exit_status).to eq(0)
85 | end
86 | end
87 |
88 | describe command(PS_WRK.to_s) do
89 | it 'ps should report process running' do
90 | expect(subject.stdout).to match('/opt/concourseci/bin/concourse')
91 | expect(subject.exit_status).to eq(0)
92 | end
93 | end
94 | end
95 |
--------------------------------------------------------------------------------
/test/integration/simple/simple.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Check for python2.7
4 | hosts: all
5 | gather_facts: False
6 | become: True
7 | tasks:
8 | - name: install python 2
9 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
10 | changed_when: False
11 |
12 | - name: Install iproute2
13 | hosts: all
14 | gather_facts: False
15 | become: True
16 | tasks:
17 | - name: install iproute 2
18 | apt:
19 | name: iproute2
20 |
21 | - name: Provision Concourse
22 | hosts: all
23 | become: True
24 | roles:
25 | - { name: "hosts", tags: "hosts" }
26 | - { name: "postgresql", tags: "postgresql" }
27 | - { name: "ansible-concourse", tags: "concourse" }
28 | - { name: "fly", tags: "fly" }
29 |
--------------------------------------------------------------------------------
/test/integration/web/serverspec/a_user_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/a_user_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/a_web_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/a_web_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/b_worker_not_running_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe file('/opt/concourseci/bin/concourse-worker') do
4 | it { should_not exist }
5 | end
6 |
7 | # Concourse worker process should not be running (no root-owned worker process)
8 | describe command('pgrep -u root -f concourse\\ worker -c') do
9 | its(:exit_status) { should eq 1 }
10 | its(:stdout) { should match '0' }
11 | end
12 |
--------------------------------------------------------------------------------
/test/integration/web/serverspec/c_binary_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/c_binary_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/ca_binary_web_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/ca_binary_web_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/d_job_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/d_job_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/e_manage_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/e_manage_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/ssh_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/ssh_spec.rb
--------------------------------------------------------------------------------
/test/integration/web/serverspec/z_web_control_scripts_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | PS_WEB = 'ps ax|grep "/opt/concourseci/bin/concourse web"|grep -v grep'.freeze
4 |
5 | ## Web stop
6 | describe 'concourse web stopped' do
7 | describe command('sudo service concourse-web stop') do
8 | it 'initd script should stop web and return OK' do
9 | expect(subject.stdout).to match('\[ OK \]')
10 | expect(subject.exit_status).to eq(0)
11 | end
12 | end
13 |
14 | describe command('sudo service concourse-web status') do
15 | it 'initd script should return stopped' do
16 | expect(subject.stdout).to match('not running')
17 | end
18 | end
19 |
20 | describe command(PS_WEB.to_s) do
21 | it 'ps should report no process running' do
22 | expect(subject.stdout).to eq('')
23 | end
24 | end
25 | end
26 | ## Web running
27 | describe 'concourse web running' do
28 | describe command('sudo service concourse-web start') do
29 | it 'initd script should start and return running' do
30 | expect(subject.stdout).to match('\[ OK \]')
31 | expect(subject.exit_status).to eq(0)
32 | end
33 | end
34 |
35 | describe command('sudo service concourse-web status') do
36 | it 'initd script should return running' do
37 | expect(subject.stdout).to match('running with PID:')
38 | expect(subject.exit_status).to eq(0)
39 | end
40 | end
41 |
42 | describe command(PS_WEB.to_s) do
43 | it 'ps should report process running' do
44 | expect(subject.stdout).to match('/opt/concourseci/bin/concourse')
45 | expect(subject.exit_status).to eq(0)
46 | end
47 | end
48 | end
49 |
--------------------------------------------------------------------------------
/test/integration/web/web.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Check for python3
4 | hosts: concourse-web
5 | gather_facts: False
6 | become: True
7 | tasks:
8 | - name: install python 3
9 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python3-minimal)
10 | changed_when: False
11 |
12 | - name: Install some needed packages
13 | hosts: concourse-web
14 | gather_facts: False
15 | become: True
16 | tasks:
17 | - name: install packages
18 | apt:
19 | name: "{{ packages }}"
20 | vars:
21 | packages:
22 | - iproute2
23 | - gpg
24 | - python3-psycopg2
25 | - python3-passlib
26 | - python3-bcrypt
27 |
28 | - name: Provision Concourse
29 | hosts: concourse-web
30 | become: True
31 | roles:
32 | - { name: "hosts", tags: "hosts" }
33 | - { name: "postgresql", tags: "postgresql" }
34 | - { name: "ansible-concourse", tags: "concourse" }
35 | - { name: "fly", tags: "fly" }
36 |
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/a_user_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/a_user_spec.rb
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/b_worker_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/b_worker_spec.rb
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/c_binary_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/c_binary_spec.rb
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/cb_binary_worker_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/cb_binary_worker_spec.rb
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/d-web_not_running_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | describe file('/opt/concourseci/bin/concourse-web') do
4 | it { should_not exist }
5 | end
6 |
7 |
8 | # Concourse web process should not be running for the concourseci user
9 | describe command('pgrep -u concourseci -f concourse\\ web -c') do
10 | its(:exit_status) { should eq 1 }
11 | its(:stdout) { should match '0' }
12 | end
13 |
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/ssh_spec.rb:
--------------------------------------------------------------------------------
1 | ../../simple/serverspec/ssh_spec.rb
--------------------------------------------------------------------------------
/test/integration/worker/serverspec/z_worker_control_scripts_spec.rb:
--------------------------------------------------------------------------------
1 | require_relative '../../helper_spec.rb'
2 |
3 | PS_WRK = 'ps ax|grep "/opt/concourseci/bin/concourse worker"|grep -v grep'.freeze
4 |
5 | ## Worker stop
6 | describe 'concourse worker stopped' do
7 | describe command('sudo service concourse-worker stop') do
8 | it 'initd script should stop worker and return OK' do
9 | expect(subject.stdout).to match('\[ OK \]')
10 | expect(subject.exit_status).to eq(0)
11 | end
12 | end
13 |
14 | describe command('sudo service concourse-worker status') do
15 | it 'initd script should return stopped' do
16 | expect(subject.stdout).to match('not running')
17 | expect(subject.exit_status).to eq(0)
18 | end
19 | end
20 |
21 | describe command(PS_WRK.to_s) do
22 | it 'ps should report no process running' do
23 | expect(subject.stdout).to eq('')
24 | end
25 | end
26 | end
27 |
28 | ## worker running
29 | describe 'concourse worker running' do
30 | describe command('sudo service concourse-worker start') do
31 | it 'initd script should start worker and return running' do
32 | expect(subject.stdout).to match('\[ OK \]')
33 | expect(subject.exit_status).to eq(0)
34 | end
35 | end
36 |
37 | describe command('sudo service concourse-worker status') do
38 | it 'initd script should return running' do
39 | expect(subject.stdout).to match('running with PID:')
40 | expect(subject.exit_status).to eq(0)
41 | end
42 | end
43 |
44 | describe command(PS_WRK.to_s) do
45 | it 'ps should report process running' do
46 | expect(subject.stdout).to match('/opt/concourseci/bin/concourse')
47 | expect(subject.exit_status).to eq(0)
48 | end
49 | end
50 | end
51 |
--------------------------------------------------------------------------------
/test/integration/worker/worker.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Check for python 3
4 | hosts: concourse-worker
5 | gather_facts: False
6 | become: True
7 | tasks:
8 | - name: install python 3
9 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python3-minimal)
10 | changed_when: False
11 |
12 | - name: Install some needed packages
13 | hosts: concourse-worker
14 | gather_facts: False
15 | become: True
16 | tasks:
17 | - name: install packages
18 | apt:
19 | name: "{{ packages }}"
20 | vars:
21 | packages:
22 | - iproute2
23 | - gpg
24 | - python3-psycopg2
25 | - python3-passlib
26 | - python3-bcrypt
27 |
28 | - name: Provision Concourse
29 | hosts: concourse-worker
30 | become: True
31 | roles:
32 | - { name: "hosts", tags: "hosts" }
33 | - { name: "ansible-concourse", tags: "concourse" }
34 |
--------------------------------------------------------------------------------
/test/setup_roles.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | MY_PATH="$(dirname "${0}")"
4 | DIR="$(cd "${MY_PATH}" && pwd)" # absolutized and normalized
5 | ROLES_DIR=$(cd "${DIR}/helper_roles" && pwd)
6 |
7 | ansible-galaxy install -r "${ROLES_DIR}/roles_requirements.yml" --force -p "${ROLES_DIR}"
8 |
--------------------------------------------------------------------------------
/test/test-cluster.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
4 | # Create
5 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen create web-ubuntu1804
6 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen create worker-ubuntu1804
7 |
8 | # Converge
9 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen converge web-ubuntu1804 -l debug
10 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen converge worker-ubuntu1804 -l debug
11 | # run again
12 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen converge web-ubuntu1804
13 |
14 | # pause a little
15 | sleep 10
16 |
17 | # Verify
18 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen verify worker
19 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen verify web
20 |
21 | # destroy
22 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen destroy web
23 | KITCHEN_YAML=.kitchen-cluster.yml bundle exec kitchen destroy worker
24 |
--------------------------------------------------------------------------------
/test/test-pipeline.yml:
--------------------------------------------------------------------------------
1 | ---
2 | resources:
3 | - name: resource-tutorial
4 | type: git
5 | source:
6 | uri: https://github.com/starkandwayne/concourse-tutorial.git
7 |
8 | jobs:
9 | - name: job-hello-world
10 | public: true
11 | plan:
12 | - get: resource-tutorial
13 | - task: hello-world
14 | file: resource-tutorial/01_task_hello_world/task_hello_world.yml
--------------------------------------------------------------------------------
/test/uploaded.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # Upload pipeline
4 | fly -t docker sp -p test -c tests/test-pipeline.yml -n
5 | # unpause pipeline
6 | fly -t docker unpause-pipeline -p test
7 | # Trigger job
8 | fly -t docker trigger-job -j test/job-hello-world
9 | COUNTER=0
10 | pending() { /Users/ahelal/bin//fly -t docker builds | grep test/job-hello-world | grep pending; }
11 | pending
12 | pending_rc=$?
13 | while [ "${pending_rc}" = "0" ]; do
14 | sleep 2
15 | pending
16 | pending_rc=$?
17 | echo The counter is $COUNTER
18 | COUNTER=$((COUNTER+1))
19 | done
20 |
21 |
22 | # Check that the job succeeded
23 | fly -t docker builds | grep test/job-hello-world | grep succeeded
--------------------------------------------------------------------------------
/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | concourseci_ansible_system : "{{ ansible_system | lower }}"
4 | concourseci_binary_file : "concourse_{{ concourseci_ansible_system }}_amd64_{{ concourseci_version }}"
5 |
6 | concourse_fly : "{{ concourseci_bin_dir }}/fly -t manage"
7 |
8 | ## prior to version 4.x
9 | # https://github.com/concourse/concourse/releases/download/v4.2.3/concourse_linux_amd64
10 | version_4_filename : "concourse_{{ concourseci_ansible_system }}_amd64"
11 | ## version 5.x
12 | # https://github.com/concourse/concourse/releases/download/v5.1.0/concourse-5.1.0-linux-amd64.tgz
13 | version_5_filename : "concourse-{{ concourseci_version | replace('v', '') }}-{{ concourseci_ansible_system }}-amd64.tgz"
14 |
15 | ## release candidate
16 | concourseci_download_rc_url : "https://github.com/concourse/bin/releases/download/{{ concourseci_version }}/concourse_{{ concourseci_ansible_system }}_amd64"
17 | ## final release
18 | concourseci_download_fr_url : "https://github.com/concourse/concourse/releases/download/{{ concourseci_version }}"
19 |
20 | # concourse-5.1.0-linux-amd64.tgz #
21 | # concourse_linux_amd64 # concourse_{{ concourseci_ansible_system }}_amd64
22 |
23 | concourseci_worker_position : "{{ groups[concourseci_worker_group].index(inventory_hostname)| default(0) }}"
24 |
25 | ## MacOSx stuff
26 | concourseci_launchd_path : "/Library/LaunchDaemons"
27 | concourseci_launchd_web : "com.github.ahelal.concourse.web"
28 | concourseci_launchd_worker : "com.github.ahelal.concourse.worker"
29 |
--------------------------------------------------------------------------------
/web_arguments.txt:
--------------------------------------------------------------------------------
1 | Usage:
2 | concourse [OPTIONS] web [web-OPTIONS]
3 |
4 | Application Options:
5 | -v, --version Print the version of Concourse and exit [$CONCOURSE_VERSION]
6 |
7 | Help Options:
8 | -h, --help Show this help message
9 |
10 | [web command options]
11 | --log-level=[debug|info|error|fatal] Minimum level of logs to see. (default: info) [$CONCOURSE_LOG_LEVEL]
12 | --bind-ip= IP address on which to listen for web traffic. (default: 0.0.0.0) [$CONCOURSE_BIND_IP]
13 | --bind-port= Port on which to listen for HTTP traffic. (default: 8080) [$CONCOURSE_BIND_PORT]
14 | --tls-bind-port= Port on which to listen for HTTPS traffic. [$CONCOURSE_TLS_BIND_PORT]
15 | --tls-cert= File containing an SSL certificate. [$CONCOURSE_TLS_CERT]
16 | --tls-key= File containing an RSA private key, used to encrypt HTTPS traffic. [$CONCOURSE_TLS_KEY]
17 | --external-url= URL used to reach any ATC from the outside world. [$CONCOURSE_EXTERNAL_URL]
18 | --peer-url= URL used to reach this ATC from other ATCs in the cluster. [$CONCOURSE_PEER_URL]
19 | --encryption-key= A 16 or 32 length key used to encrypt sensitive information before storing it in the database. [$CONCOURSE_ENCRYPTION_KEY]
20 | --old-encryption-key= Encryption key previously used for encrypting sensitive information. If provided without a new key, data is encrypted. If provided with a new key, data is
21 | re-encrypted. [$CONCOURSE_OLD_ENCRYPTION_KEY]
22 | --debug-bind-ip= IP address on which to listen for the pprof debugger endpoints. (default: 127.0.0.1) [$CONCOURSE_DEBUG_BIND_IP]
23 | --debug-bind-port= Port on which to listen for the pprof debugger endpoints. (default: 8079) [$CONCOURSE_DEBUG_BIND_PORT]
24 | --intercept-idle-timeout= Length of time for a intercepted session to be idle before terminating. (default: 0m) [$CONCOURSE_INTERCEPT_IDLE_TIMEOUT]
25 | --global-resource-check-timeout= Time limit on checking for new versions of resources. (default: 1h) [$CONCOURSE_GLOBAL_RESOURCE_CHECK_TIMEOUT]
26 | --resource-checking-interval= Interval on which to check for new versions of resources. (default: 1m) [$CONCOURSE_RESOURCE_CHECKING_INTERVAL]
27 | --resource-type-checking-interval= Interval on which to check for new versions of resource types. (default: 1m) [$CONCOURSE_RESOURCE_TYPE_CHECKING_INTERVAL]
28 | --container-placement-strategy=[volume-locality|random] Method by which a worker is selected during container placement. (default: volume-locality) [$CONCOURSE_CONTAINER_PLACEMENT_STRATEGY]
29 | --baggageclaim-response-header-timeout= How long to wait for Baggageclaim to send the response header. (default: 1m) [$CONCOURSE_BAGGAGECLAIM_RESPONSE_HEADER_TIMEOUT]
30 | --cli-artifacts-dir= Directory containing downloadable CLI binaries. [$CONCOURSE_CLI_ARTIFACTS_DIR]
31 | --log-db-queries Log database queries. [$CONCOURSE_LOG_DB_QUERIES]
32 | --build-tracker-interval= Interval on which to run build tracking. (default: 10s) [$CONCOURSE_BUILD_TRACKER_INTERVAL]
33 | --default-build-logs-to-retain= Default build logs to retain, 0 means all [$CONCOURSE_DEFAULT_BUILD_LOGS_TO_RETAIN]
34 | --max-build-logs-to-retain= Maximum build logs to retain, 0 means not specified. Will override values configured in jobs [$CONCOURSE_MAX_BUILD_LOGS_TO_RETAIN]
35 | --default-task-cpu-limit= Default max number of cpu shares per task, 0 means unlimited [$CONCOURSE_DEFAULT_TASK_CPU_LIMIT]
36 | --default-task-memory-limit= Default maximum memory per task, 0 means unlimited [$CONCOURSE_DEFAULT_TASK_MEMORY_LIMIT]
37 |
38 | PostgreSQL Configuration:
39 | --postgres-host= The host to connect to. (default: 127.0.0.1) [$CONCOURSE_POSTGRES_HOST]
40 | --postgres-port= The port to connect to. (default: 5432) [$CONCOURSE_POSTGRES_PORT]
41 | --postgres-socket= Path to a UNIX domain socket to connect to. [$CONCOURSE_POSTGRES_SOCKET]
42 | --postgres-user= The user to sign in as. [$CONCOURSE_POSTGRES_USER]
43 | --postgres-password= The user's password. [$CONCOURSE_POSTGRES_PASSWORD]
44 | --postgres-sslmode=[disable|require|verify-ca|verify-full] Whether or not to use SSL. (default: disable) [$CONCOURSE_POSTGRES_SSLMODE]
45 | --postgres-ca-cert= CA cert file location, to verify when connecting with SSL. [$CONCOURSE_POSTGRES_CA_CERT]
46 | --postgres-client-cert= Client cert file location. [$CONCOURSE_POSTGRES_CLIENT_CERT]
47 | --postgres-client-key= Client key file location. [$CONCOURSE_POSTGRES_CLIENT_KEY]
48 | --postgres-connect-timeout= Dialing timeout. (0 means wait indefinitely) (default: 5m) [$CONCOURSE_POSTGRES_CONNECT_TIMEOUT]
49 | --postgres-database= The name of the database to use. (default: atc) [$CONCOURSE_POSTGRES_DATABASE]
50 |
51 | CredHub Credential Management:
52 | --credhub-url= CredHub server address used to access secrets. [$CONCOURSE_CREDHUB_URL]
53 | --credhub-path-prefix= Path under which to namespace credential lookup. (default: /concourse) [$CONCOURSE_CREDHUB_PATH_PREFIX]
54 | --credhub-ca-cert= Paths to PEM-encoded CA cert files to use to verify the CredHub server SSL cert. [$CONCOURSE_CREDHUB_CA_CERT]
55 | --credhub-client-cert= Path to the client certificate for mutual TLS authorization. [$CONCOURSE_CREDHUB_CLIENT_CERT]
56 | --credhub-client-key= Path to the client private key for mutual TLS authorization. [$CONCOURSE_CREDHUB_CLIENT_KEY]
57 | --credhub-insecure-skip-verify Enable insecure SSL verification. [$CONCOURSE_CREDHUB_INSECURE_SKIP_VERIFY]
58 | --credhub-client-id= Client ID for CredHub authorization. [$CONCOURSE_CREDHUB_CLIENT_ID]
59 | --credhub-client-secret= Client secret for CredHub authorization. [$CONCOURSE_CREDHUB_CLIENT_SECRET]
60 |
61 | Kubernetes Credential Management:
62 | --kubernetes-in-cluster Enables the in-cluster client. [$CONCOURSE_KUBERNETES_IN_CLUSTER]
63 | --kubernetes-config-path= Path to Kubernetes config when running ATC outside Kubernetes. [$CONCOURSE_KUBERNETES_CONFIG_PATH]
64 | --kubernetes-namespace-prefix= Prefix to use for Kubernetes namespaces under which secrets will be looked up. (default: concourse-) [$CONCOURSE_KUBERNETES_NAMESPACE_PREFIX]
65 |
66 | AWS SecretsManager Credential Management:
67 | --aws-secretsmanager-access-key= AWS Access key ID [$CONCOURSE_AWS_SECRETSMANAGER_ACCESS_KEY]
68 | --aws-secretsmanager-secret-key= AWS Secret Access Key [$CONCOURSE_AWS_SECRETSMANAGER_SECRET_KEY]
69 | --aws-secretsmanager-session-token= AWS Session Token [$CONCOURSE_AWS_SECRETSMANAGER_SESSION_TOKEN]
70 | --aws-secretsmanager-region= AWS region to send requests to [$AWS_REGION]
71 | --aws-secretsmanager-pipeline-secret-template= AWS Secrets Manager secret identifier template used for pipeline specific parameter (default: /concourse/{{.Team}}/{{.Pipeline}}/{{.Secret}})
72 | [$CONCOURSE_AWS_SECRETSMANAGER_PIPELINE_SECRET_TEMPLATE]
73 | --aws-secretsmanager-team-secret-template= AWS Secrets Manager secret identifier template used for team specific parameter (default: /concourse/{{.Team}}/{{.Secret}})
74 | [$CONCOURSE_AWS_SECRETSMANAGER_TEAM_SECRET_TEMPLATE]
75 |
76 | AWS SSM Credential Management:
77 | --aws-ssm-access-key= AWS Access key ID [$CONCOURSE_AWS_SSM_ACCESS_KEY]
78 | --aws-ssm-secret-key= AWS Secret Access Key [$CONCOURSE_AWS_SSM_SECRET_KEY]
79 | --aws-ssm-session-token= AWS Session Token [$CONCOURSE_AWS_SSM_SESSION_TOKEN]
80 | --aws-ssm-region= AWS region to send requests to [$AWS_REGION]
81 | --aws-ssm-pipeline-secret-template= AWS SSM parameter name template used for pipeline specific parameter (default: /concourse/{{.Team}}/{{.Pipeline}}/{{.Secret}})
82 | [$CONCOURSE_AWS_SSM_PIPELINE_SECRET_TEMPLATE]
83 | --aws-ssm-team-secret-template= AWS SSM parameter name template used for team specific parameter (default: /concourse/{{.Team}}/{{.Secret}}) [$CONCOURSE_AWS_SSM_TEAM_SECRET_TEMPLATE]
84 |
85 | Vault Credential Management:
86 | --vault-url= Vault server address used to access secrets. [$CONCOURSE_VAULT_URL]
87 | --vault-path-prefix= Path under which to namespace credential lookup. (default: /concourse) [$CONCOURSE_VAULT_PATH_PREFIX]
88 | --vault-cache Cache returned secrets for their lease duration in memory [$CONCOURSE_VAULT_CACHE]
89 | --vault-max-lease= If the cache is enabled, and this is set, override secrets lease duration with a maximum value [$CONCOURSE_VAULT_MAX_LEASE]
90 | --vault-ca-cert= Path to a PEM-encoded CA cert file to use to verify the vault server SSL cert. [$CONCOURSE_VAULT_CA_CERT]
91 | --vault-ca-path= Path to a directory of PEM-encoded CA cert files to verify the vault server SSL cert. [$CONCOURSE_VAULT_CA_PATH]
92 | --vault-client-cert= Path to the client certificate for Vault authorization. [$CONCOURSE_VAULT_CLIENT_CERT]
93 | --vault-client-key= Path to the client private key for Vault authorization. [$CONCOURSE_VAULT_CLIENT_KEY]
94 | --vault-server-name= If set, is used to set the SNI host when connecting via TLS. [$CONCOURSE_VAULT_SERVER_NAME]
95 | --vault-insecure-skip-verify Enable insecure SSL verification. [$CONCOURSE_VAULT_INSECURE_SKIP_VERIFY]
96 | --vault-client-token= Client token for accessing secrets within the Vault server. [$CONCOURSE_VAULT_CLIENT_TOKEN]
97 | --vault-auth-backend= Auth backend to use for logging in to Vault. [$CONCOURSE_VAULT_AUTH_BACKEND]
98 | --vault-auth-backend-max-ttl= Time after which to force a re-login. If not set, the token will just be continuously renewed. [$CONCOURSE_VAULT_AUTH_BACKEND_MAX_TTL]
99 | --vault-retry-max= The maximum time between retries when logging in or re-authing a secret. (default: 5m) [$CONCOURSE_VAULT_RETRY_MAX]
100 | --vault-retry-initial= The initial time between retries when logging in or re-authing a secret. (default: 1s) [$CONCOURSE_VAULT_RETRY_INITIAL]
101 | --vault-auth-param=NAME=VALUE Paramter to pass when logging in via the backend. Can be specified multiple times. [$CONCOURSE_VAULT_AUTH_PARAM]
102 |
103 | Developer Options:
104 | -n, --noop Don't actually do any automatic scheduling or checking. [$CONCOURSE_NOOP]
105 |
106 | Static Worker (optional):
107 | --worker-garden-url= A Garden API endpoint to register as a worker. [$CONCOURSE_WORKER_GARDEN_URL]
108 | --worker-baggageclaim-url= A Baggageclaim API endpoint to register with the worker. [$CONCOURSE_WORKER_BAGGAGECLAIM_URL]
109 | --worker-resource=TYPE:IMAGE A resource type to advertise for the worker. Can be specified multiple times. [$CONCOURSE_WORKER_RESOURCE]
110 |
111 | Metrics & Diagnostics:
112 | --metrics-host-name= Host string to attach to emitted metrics. [$CONCOURSE_METRICS_HOST_NAME]
113 | --metrics-attribute=NAME:VALUE A key-value attribute to attach to emitted metrics. Can be specified multiple times. [$CONCOURSE_METRICS_ATTRIBUTE]
114 |
115 | Metric Emitter (Datadog):
116 | --datadog-agent-host= Datadog agent host to expose dogstatsd metrics [$CONCOURSE_DATADOG_AGENT_HOST]
117 | --datadog-agent-port= Datadog agent port to expose dogstatsd metrics [$CONCOURSE_DATADOG_AGENT_PORT]
118 | --datadog-prefix= Prefix for all metrics to easily find them in Datadog [$CONCOURSE_DATADOG_PREFIX]
119 |
120 | Metric Emitter (InfluxDB):
121 | --influxdb-url= InfluxDB server address to emit points to. [$CONCOURSE_INFLUXDB_URL]
122 | --influxdb-database= InfluxDB database to write points to. [$CONCOURSE_INFLUXDB_DATABASE]
123 | --influxdb-username= InfluxDB server username. [$CONCOURSE_INFLUXDB_USERNAME]
124 | --influxdb-password= InfluxDB server password. [$CONCOURSE_INFLUXDB_PASSWORD]
125 | --influxdb-insecure-skip-verify Skip SSL verification when emitting to InfluxDB. [$CONCOURSE_INFLUXDB_INSECURE_SKIP_VERIFY]
126 |
127 | Metric Emitter (Lager):
128 | --emit-to-logs Emit metrics to logs. [$CONCOURSE_EMIT_TO_LOGS]
129 |
130 | Metric Emitter (NewRelic):
131 | --newrelic-account-id= New Relic Account ID [$CONCOURSE_NEWRELIC_ACCOUNT_ID]
132 | --newrelic-api-key= New Relic Insights API Key [$CONCOURSE_NEWRELIC_API_KEY]
133 | --newrelic-service-prefix= An optional prefix for emitted New Relic events [$CONCOURSE_NEWRELIC_SERVICE_PREFIX]
134 |
135 | Metric Emitter (Prometheus):
136 | --prometheus-bind-ip= IP to listen on to expose Prometheus metrics. [$CONCOURSE_PROMETHEUS_BIND_IP]
137 | --prometheus-bind-port= Port to listen on to expose Prometheus metrics. [$CONCOURSE_PROMETHEUS_BIND_PORT]
138 |
139 | Metric Emitter (Riemann):
140 | --riemann-host= Riemann server address to emit metrics to. [$CONCOURSE_RIEMANN_HOST]
141 | --riemann-port= Port of the Riemann server to emit metrics to. (default: 5555) [$CONCOURSE_RIEMANN_PORT]
142 | --riemann-service-prefix= An optional prefix for emitted Riemann services [$CONCOURSE_RIEMANN_SERVICE_PREFIX]
143 | --riemann-tag=TAG Tag to attach to emitted metrics. Can be specified multiple times. [$CONCOURSE_RIEMANN_TAG]
144 |
145 | Web Server:
146 | --x-frame-options= The value to set for X-Frame-Options. If omitted, the header is not set. [$CONCOURSE_X_FRAME_OPTIONS]
147 |
148 | Garbage Collection:
149 | --gc-interval= Interval on which to perform garbage collection. (default: 30s) [$CONCOURSE_GC_INTERVAL]
150 | --gc-one-off-grace-period= Grace period before reaping one-off task containers (default: 5m) [$CONCOURSE_GC_ONE_OFF_GRACE_PERIOD]
151 |
152 | Syslog Drainer Configuration:
153 | --syslog-hostname= Client hostname with which the build logs will be sent to the syslog server. (default: atc-syslog-drainer) [$CONCOURSE_SYSLOG_HOSTNAME]
154 | --syslog-address= Remote syslog server address with port (Example: 0.0.0.0:514). [$CONCOURSE_SYSLOG_ADDRESS]
155 | --syslog-transport= Transport protocol for syslog messages (Currently supporting tcp, udp & tls). [$CONCOURSE_SYSLOG_TRANSPORT]
156 | --syslog-drain-interval= Interval over which checking is done for new build logs to send to syslog server (duration measurement units are s/m/h; eg. 30s/30m/1h) (default: 30s)
157 | [$CONCOURSE_SYSLOG_DRAIN_INTERVAL]
158 | --syslog-ca-cert= Paths to PEM-encoded CA cert files to use to verify the Syslog server SSL cert. [$CONCOURSE_SYSLOG_CA_CERT]
159 |
160 | Authentication:
161 | --cookie-secure Force sending secure flag on http cookies [$CONCOURSE_COOKIE_SECURE]
162 | --auth-duration= Length of time for which tokens are valid. Afterwards, users will have to log back in. (default: 24h) [$CONCOURSE_AUTH_DURATION]
163 | --session-signing-key= File containing an RSA private key, used to sign auth tokens. [$CONCOURSE_SESSION_SIGNING_KEY]
164 | --add-local-user=USERNAME:PASSWORD List of username:password combinations for all your local users. The password can be bcrypted - if so, it must have a minimum cost of 10. [$CONCOURSE_ADD_LOCAL_USER]
165 |
166 | Authentication (Main Team):
167 | --main-team-local-user=USERNAME List of whitelisted local concourse users. These are the users you've added at atc startup with the --add-local-user flag. [$CONCOURSE_MAIN_TEAM_LOCAL_USER]
168 | --main-team-allow-all-users Setting this flag will whitelist all logged in users in the system. ALL OF THEM. If, for example, you've configured GitHub, any user with a GitHub account will have
169 | access to your team. [$CONCOURSE_MAIN_TEAM_ALLOW_ALL_USERS]
170 |
171 | Authentication (Main Team) (CloudFoundry):
172 | --main-team-cf-user=USERNAME List of whitelisted CloudFoundry users. [$CONCOURSE_MAIN_TEAM_CF_USER]
173 | --main-team-cf-org=ORG_NAME List of whitelisted CloudFoundry orgs [$CONCOURSE_MAIN_TEAM_CF_ORG]
174 | --main-team-cf-space=ORG_NAME:SPACE_NAME List of whitelisted CloudFoundry spaces [$CONCOURSE_MAIN_TEAM_CF_SPACE]
175 | --main-team-cf-space-guid=SPACE_GUID (Deprecated) List of whitelisted CloudFoundry space guids [$CONCOURSE_MAIN_TEAM_CF_SPACE_GUID]
176 |
177 | Authentication (Main Team) (GitHub):
178 | --main-team-github-user=USERNAME List of whitelisted GitHub users [$CONCOURSE_MAIN_TEAM_GITHUB_USER]
179 | --main-team-github-org=ORG_NAME List of whitelisted GitHub orgs [$CONCOURSE_MAIN_TEAM_GITHUB_ORG]
180 | --main-team-github-team=ORG_NAME:TEAM_NAME List of whitelisted GitHub teams [$CONCOURSE_MAIN_TEAM_GITHUB_TEAM]
181 |
182 | Authentication (Main Team) (GitLab):
183 | --main-team-gitlab-user=USERNAME List of whitelisted GitLab users [$CONCOURSE_MAIN_TEAM_GITLAB_USER]
184 | --main-team-gitlab-group=GROUP_NAME List of whitelisted GitLab groups [$CONCOURSE_MAIN_TEAM_GITLAB_GROUP]
185 |
186 | Authentication (Main Team) (LDAP):
187 | --main-team-ldap-user=USERNAME List of whitelisted LDAP users [$CONCOURSE_MAIN_TEAM_LDAP_USER]
188 | --main-team-ldap-group=GROUP_NAME List of whitelisted LDAP groups [$CONCOURSE_MAIN_TEAM_LDAP_GROUP]
189 |
190 | Authentication (Main Team) (OAuth2):
191 | --main-team-oauth-user=USERNAME List of whitelisted OAuth2 users [$CONCOURSE_MAIN_TEAM_OAUTH_USER]
192 | --main-team-oauth-group=GROUP_NAME List of whitelisted OAuth2 groups [$CONCOURSE_MAIN_TEAM_OAUTH_GROUP]
193 |
194 | Authentication (Main Team) (OIDC):
195 | --main-team-oidc-user=USERNAME List of whitelisted OIDC users [$CONCOURSE_MAIN_TEAM_OIDC_USER]
196 | --main-team-oidc-group=GROUP_NAME List of whitelisted OIDC groups [$CONCOURSE_MAIN_TEAM_OIDC_GROUP]
197 |
198 | Authentication (CloudFoundry):
199 | --cf-client-id= (Required) Client id [$CONCOURSE_CF_CLIENT_ID]
200 | --cf-client-secret= (Required) Client secret [$CONCOURSE_CF_CLIENT_SECRET]
201 | --cf-api-url= (Required) The base API URL of your CF deployment. It will use this information to discover information about the authentication provider. [$CONCOURSE_CF_API_URL]
202 | --cf-ca-cert= CA Certificate [$CONCOURSE_CF_CA_CERT]
203 | --cf-skip-ssl-validation Skip SSL validation [$CONCOURSE_CF_SKIP_SSL_VALIDATION]
204 |
205 | Authentication (GitHub):
206 | --github-client-id= (Required) Client id [$CONCOURSE_GITHUB_CLIENT_ID]
207 | --github-client-secret= (Required) Client secret [$CONCOURSE_GITHUB_CLIENT_SECRET]
208 | --github-host= Hostname of GitHub Enterprise deployment (No scheme, No trailing slash) [$CONCOURSE_GITHUB_HOST]
209 | --github-ca-cert= CA certificate of GitHub Enterprise deployment [$CONCOURSE_GITHUB_CA_CERT]
210 |
211 | Authentication (GitLab):
212 | --gitlab-client-id= (Required) Client id [$CONCOURSE_GITLAB_CLIENT_ID]
213 | --gitlab-client-secret= (Required) Client secret [$CONCOURSE_GITLAB_CLIENT_SECRET]
214 | --gitlab-host= Hostname of Gitlab Enterprise deployment (Include scheme, No trailing slash) [$CONCOURSE_GITLAB_HOST]
215 |
216 | Authentication (LDAP):
217 | --ldap-display-name= The auth provider name displayed to users on the login page [$CONCOURSE_LDAP_DISPLAY_NAME]
218 | --ldap-host= (Required) The host and optional port of the LDAP server. If port isn't supplied, it will be guessed based on the TLS configuration. 389 or 636.
219 | [$CONCOURSE_LDAP_HOST]
220 | --ldap-bind-dn= (Required) Bind DN for searching LDAP users and groups. Typically this is a read-only user. [$CONCOURSE_LDAP_BIND_DN]
221 | --ldap-bind-pw= (Required) Bind Password for the user specified by 'bind-dn' [$CONCOURSE_LDAP_BIND_PW]
222 | --ldap-insecure-no-ssl Required if LDAP host does not use TLS. [$CONCOURSE_LDAP_INSECURE_NO_SSL]
223 | --ldap-insecure-skip-verify Skip certificate verification [$CONCOURSE_LDAP_INSECURE_SKIP_VERIFY]
224 | --ldap-start-tls Start on insecure port, then negotiate TLS [$CONCOURSE_LDAP_START_TLS]
225 | --ldap-ca-cert= CA certificate [$CONCOURSE_LDAP_CA_CERT]
226 | --ldap-user-search-base-dn= BaseDN to start the search from. For example 'cn=users,dc=example,dc=com' [$CONCOURSE_LDAP_USER_SEARCH_BASE_DN]
227 | --ldap-user-search-filter= Optional filter to apply when searching the directory. For example '(objectClass=person)' [$CONCOURSE_LDAP_USER_SEARCH_FILTER]
228 | --ldap-user-search-username= Attribute to match against the inputted username. This will be translated and combined with the other filter as '(=)'.
229 | [$CONCOURSE_LDAP_USER_SEARCH_USERNAME]
230 | --ldap-user-search-scope= Can either be: 'sub' - search the whole sub tree or 'one' - only search one level. Defaults to 'sub'. [$CONCOURSE_LDAP_USER_SEARCH_SCOPE]
231 | --ldap-user-search-id-attr= A mapping of attributes on the user entry to claims. Defaults to 'uid'. [$CONCOURSE_LDAP_USER_SEARCH_ID_ATTR]
232 | --ldap-user-search-email-attr= A mapping of attributes on the user entry to claims. Defaults to 'mail'. [$CONCOURSE_LDAP_USER_SEARCH_EMAIL_ATTR]
233 | --ldap-user-search-name-attr= A mapping of attributes on the user entry to claims. [$CONCOURSE_LDAP_USER_SEARCH_NAME_ATTR]
234 | --ldap-group-search-base-dn= BaseDN to start the search from. For example 'cn=groups,dc=example,dc=com' [$CONCOURSE_LDAP_GROUP_SEARCH_BASE_DN]
235 | --ldap-group-search-filter= Optional filter to apply when searching the directory. For example '(objectClass=posixGroup)' [$CONCOURSE_LDAP_GROUP_SEARCH_FILTER]
236 | --ldap-group-search-scope= Can either be: 'sub' - search the whole sub tree or 'one' - only search one level. Defaults to 'sub'. [$CONCOURSE_LDAP_GROUP_SEARCH_SCOPE]
237 | --ldap-group-search-user-attr= Adds an additional requirement to the filter that an attribute in the group match the user's attribute value. The exact filter being added is: (=) [$CONCOURSE_LDAP_GROUP_SEARCH_USER_ATTR]
239 | --ldap-group-search-group-attr= Adds an additional requirement to the filter that an attribute in the group match the user's attribute value. The exact filter being added is: (=) [$CONCOURSE_LDAP_GROUP_SEARCH_GROUP_ATTR]
241 | --ldap-group-search-name-attr= The attribute of the group that represents its name. [$CONCOURSE_LDAP_GROUP_SEARCH_NAME_ATTR]
242 |
243 | Authentication (OAuth2):
244 | --oauth-display-name= The auth provider name displayed to users on the login page [$CONCOURSE_OAUTH_DISPLAY_NAME]
245 | --oauth-client-id= (Required) Client id [$CONCOURSE_OAUTH_CLIENT_ID]
246 | --oauth-client-secret= (Required) Client secret [$CONCOURSE_OAUTH_CLIENT_SECRET]
247 | --oauth-auth-url= (Required) Authorization URL [$CONCOURSE_OAUTH_AUTH_URL]
248 | --oauth-token-url= (Required) Token URL [$CONCOURSE_OAUTH_TOKEN_URL]
249 | --oauth-userinfo-url= UserInfo URL [$CONCOURSE_OAUTH_USERINFO_URL]
250 | --oauth-scope= Any additional scopes that need to be requested during authorization [$CONCOURSE_OAUTH_SCOPE]
251 | --oauth-groups-key= The groups key indicates which claim to use to map external groups to Concourse teams. [$CONCOURSE_OAUTH_GROUPS_KEY]
252 | --oauth-ca-cert= CA Certificate [$CONCOURSE_OAUTH_CA_CERT]
253 | --oauth-skip-ssl-validation Skip SSL validation [$CONCOURSE_OAUTH_SKIP_SSL_VALIDATION]
254 |
255 | Authentication (OIDC):
256 | --oidc-display-name= The auth provider name displayed to users on the login page [$CONCOURSE_OIDC_DISPLAY_NAME]
257 | --oidc-issuer= (Required) An OIDC issuer URL that will be used to discover provider configuration using the .well-known/openid-configuration [$CONCOURSE_OIDC_ISSUER]
258 | --oidc-client-id= (Required) Client id [$CONCOURSE_OIDC_CLIENT_ID]
259 | --oidc-client-secret= (Required) Client secret [$CONCOURSE_OIDC_CLIENT_SECRET]
260 | --oidc-scope= Any additional scopes that need to be requested during authorization [$CONCOURSE_OIDC_SCOPE]
261 | --oidc-groups-key= The groups key indicates which claim to use to map external groups to Concourse teams. [$CONCOURSE_OIDC_GROUPS_KEY]
262 | --oidc-hosted-domains= List of whitelisted domains when using Google, only users from a listed domain will be allowed to log in [$CONCOURSE_OIDC_HOSTED_DOMAINS]
263 | --oidc-ca-cert= CA Certificate [$CONCOURSE_OIDC_CA_CERT]
264 | --oidc-skip-ssl-validation Skip SSL validation [$CONCOURSE_OIDC_SKIP_SSL_VALIDATION]
265 |
266 | TSA Configuration:
267 | --tsa-log-level=[debug|info|error|fatal] Minimum level of logs to see. (default: info) [$CONCOURSE_TSA_LOG_LEVEL]
268 | --tsa-bind-ip= IP address on which to listen for SSH. (default: 0.0.0.0) [$CONCOURSE_TSA_BIND_IP]
269 | --tsa-bind-port= Port on which to listen for SSH. (default: 2222) [$CONCOURSE_TSA_BIND_PORT]
270 | --tsa-bind-debug-port= Port on which to listen for TSA pprof server. (default: 8089) [$CONCOURSE_TSA_BIND_DEBUG_PORT]
271 | --tsa-peer-ip= IP address of this TSA, reachable by the ATCs. Used for forwarded worker addresses. [$CONCOURSE_TSA_PEER_IP]
272 | --tsa-host-key= Path to private key to use for the SSH server. [$CONCOURSE_TSA_HOST_KEY]
273 | --tsa-authorized-keys= Path to file containing keys to authorize, in SSH authorized_keys format (one public key per line). [$CONCOURSE_TSA_AUTHORIZED_KEYS]
274 | --tsa-team-authorized-keys=NAME=PATH Path to file containing keys to authorize, in SSH authorized_keys format (one public key per line). [$CONCOURSE_TSA_TEAM_AUTHORIZED_KEYS]
275 | --tsa-atc-url= ATC API endpoints to which workers will be registered. [$CONCOURSE_TSA_ATC_URL]
276 |       --tsa-session-signing-key=                                     Path to private key to use when signing tokens in requests to the ATC during registration. [$CONCOURSE_TSA_SESSION_SIGNING_KEY]
277 | --tsa-heartbeat-interval= interval on which to heartbeat workers to the ATC (default: 30s) [$CONCOURSE_TSA_HEARTBEAT_INTERVAL]
--------------------------------------------------------------------------------
/worker_arguments.txt:
--------------------------------------------------------------------------------
1 | Usage:
2 | concourse [OPTIONS] worker [worker-OPTIONS]
3 |
4 | Application Options:
5 | -v, --version Print the version of Concourse and exit [$CONCOURSE_VERSION]
6 |
7 | Help Options:
8 | -h, --help Show this help message
9 |
10 | [worker command options]
11 | --name= The name to set for the worker during registration. If not specified, the hostname will be used. [$CONCOURSE_NAME]
12 | --tag= A tag to set during registration. Can be specified multiple times. [$CONCOURSE_TAG]
13 | --team= The name of the team that this worker will be assigned to. [$CONCOURSE_TEAM]
14 | --http-proxy= HTTP proxy endpoint to use for containers. [$http_proxy]
15 | --https-proxy= HTTPS proxy endpoint to use for containers. [$https_proxy]
16 | --no-proxy= Blacklist of addresses to skip the proxy when reaching. [$no_proxy]
17 | --ephemeral If set, the worker will be immediately removed upon stalling. [$CONCOURSE_EPHEMERAL]
18 | --bind-debug-port= Port on which to listen for beacon pprof server. (default: 9099) [$CONCOURSE_BIND_DEBUG_PORT]
19 | --certs-dir= Directory to use when creating the resource certificates volume. [$CONCOURSE_CERTS_DIR]
20 | --work-dir= Directory in which to place container data. [$CONCOURSE_WORK_DIR]
21 | --bind-ip= IP address on which to listen for the Garden server. (default: 127.0.0.1) [$CONCOURSE_BIND_IP]
22 | --bind-port= Port on which to listen for the Garden server. (default: 7777) [$CONCOURSE_BIND_PORT]
23 | --peer-ip= IP used to reach this worker from the ATC nodes. [$CONCOURSE_PEER_IP]
24 | --log-level=[debug|info|error|fatal] Minimum level of logs to see. (default: info) [$CONCOURSE_LOG_LEVEL]
25 |
26 | TSA Configuration:
27 | --tsa-host= TSA host to forward the worker through. Can be specified multiple times. (default: 127.0.0.1:2222) [$CONCOURSE_TSA_HOST]
28 | --tsa-public-key= File containing a public key to expect from the TSA. [$CONCOURSE_TSA_PUBLIC_KEY]
29 | --tsa-worker-private-key= File containing the private key to use when authenticating to the TSA. [$CONCOURSE_TSA_WORKER_PRIVATE_KEY]
30 |
31 | Garden Configuration:
32 | --garden-log-level=[debug|info|error|fatal] Minimum level of logs to see. (default: info) [$CONCOURSE_GARDEN_LOG_LEVEL]
33 | --garden-time-format=[unix-epoch|rfc3339] format of log timestamps. (default: unix-epoch) [$CONCOURSE_GARDEN_TIME_FORMAT]
34 |
35 | Server Configuration:
36 | --garden-bind-ip= Bind with TCP on the given IP. [$CONCOURSE_GARDEN_BIND_IP]
37 | --garden-bind-port= Bind with TCP on the given port. (default: 7777) [$CONCOURSE_GARDEN_BIND_PORT]
38 | --garden-bind-socket= Bind with Unix on the given socket path. (default: /tmp/garden.sock) [$CONCOURSE_GARDEN_BIND_SOCKET]
39 | --garden-debug-bind-ip= Bind the debug server on the given IP. [$CONCOURSE_GARDEN_DEBUG_BIND_IP]
40 | --garden-debug-bind-port= Bind the debug server to the given port. (default: 17013) [$CONCOURSE_GARDEN_DEBUG_BIND_PORT]
41 | --garden-skip-setup Skip the preparation part of the host that requires root privileges [$CONCOURSE_GARDEN_SKIP_SETUP]
42 |
43 | Container Lifecycle:
44 | --garden-depot= Directory in which to store container data. (default: /var/run/gdn/depot) [$CONCOURSE_GARDEN_DEPOT]
45 | --garden-properties-path= Path in which to store properties. [$CONCOURSE_GARDEN_PROPERTIES_PATH]
46 | --garden-console-sockets-path= Path in which to store temporary sockets [$CONCOURSE_GARDEN_CONSOLE_SOCKETS_PATH]
47 |       --garden-cleanup-process-dirs-on-wait                Clean up process dirs on first invocation of wait [$CONCOURSE_GARDEN_CLEANUP_PROCESS_DIRS_ON_WAIT]
48 | --garden-disable-privileged-containers Disable creation of privileged containers [$CONCOURSE_GARDEN_DISABLE_PRIVILEGED_CONTAINERS]
49 | --garden-uid-map-start= The lowest numerical subordinate user ID the user is allowed to map (default: 1) [$CONCOURSE_GARDEN_UID_MAP_START]
50 | --garden-uid-map-length= The number of numerical subordinate user IDs the user is allowed to map [$CONCOURSE_GARDEN_UID_MAP_LENGTH]
51 | --garden-gid-map-start= The lowest numerical subordinate group ID the user is allowed to map (default: 1) [$CONCOURSE_GARDEN_GID_MAP_START]
52 | --garden-gid-map-length= The number of numerical subordinate group IDs the user is allowed to map [$CONCOURSE_GARDEN_GID_MAP_LENGTH]
53 | --garden-default-rootfs= Default rootfs to use when not specified on container creation. [$CONCOURSE_GARDEN_DEFAULT_ROOTFS]
54 | --garden-default-grace-time= Default time after which idle containers should expire. [$CONCOURSE_GARDEN_DEFAULT_GRACE_TIME]
55 | --garden-destroy-containers-on-startup Clean up all the existing containers on startup. [$CONCOURSE_GARDEN_DESTROY_CONTAINERS_ON_STARTUP]
56 | --garden-apparmor= Apparmor profile to use for unprivileged container processes [$CONCOURSE_GARDEN_APPARMOR]
57 |
58 | Binary Tools:
59 | --garden-assets-dir= Directory in which to extract packaged assets (default: /var/gdn/assets) [$CONCOURSE_GARDEN_ASSETS_DIR]
60 | --garden-dadoo-bin= Path to the 'dadoo' binary. [$CONCOURSE_GARDEN_DADOO_BIN]
61 | --garden-nstar-bin= Path to the 'nstar' binary. [$CONCOURSE_GARDEN_NSTAR_BIN]
62 | --garden-tar-bin= Path to the 'tar' binary. [$CONCOURSE_GARDEN_TAR_BIN]
63 | --garden-iptables-bin= path to the iptables binary (default: /sbin/iptables) [$CONCOURSE_GARDEN_IPTABLES_BIN]
64 | --garden-iptables-restore-bin= path to the iptables-restore binary (default: /sbin/iptables-restore) [$CONCOURSE_GARDEN_IPTABLES_RESTORE_BIN]
65 | --garden-init-bin= Path execute as pid 1 inside each container. [$CONCOURSE_GARDEN_INIT_BIN]
66 |
67 | Runtime:
68 | --garden-runtime-plugin= Path to the runtime plugin binary. (default: runc) [$CONCOURSE_GARDEN_RUNTIME_PLUGIN]
69 | --garden-runtime-plugin-extra-arg= Extra argument to pass to the runtime plugin. Can be specified multiple times. [$CONCOURSE_GARDEN_RUNTIME_PLUGIN_EXTRA_ARG]
70 |
71 | Image Graph:
72 | --garden-graph= Directory on which to store imported rootfs graph data. [$CONCOURSE_GARDEN_GRAPH]
73 | --garden-graph-cleanup-threshold-in-megabytes= Disk usage of the graph dir at which cleanup should trigger, or -1 to disable graph cleanup. (default: -1) [$CONCOURSE_GARDEN_GRAPH_CLEANUP_THRESHOLD_IN_MEGABYTES]
74 | --garden-persistent-image= Image that should never be garbage collected. Can be specified multiple times. [$CONCOURSE_GARDEN_PERSISTENT_IMAGE]
75 |
76 | Image:
77 | --garden-image-plugin= Path to image plugin binary. [$CONCOURSE_GARDEN_IMAGE_PLUGIN]
78 | --garden-image-plugin-extra-arg= Extra argument to pass to the image plugin to create unprivileged images. Can be specified multiple times. [$CONCOURSE_GARDEN_IMAGE_PLUGIN_EXTRA_ARG]
79 | --garden-privileged-image-plugin= Path to privileged image plugin binary. [$CONCOURSE_GARDEN_PRIVILEGED_IMAGE_PLUGIN]
80 | --garden-privileged-image-plugin-extra-arg= Extra argument to pass to the image plugin to create privileged images. Can be specified multiple times. [$CONCOURSE_GARDEN_PRIVILEGED_IMAGE_PLUGIN_EXTRA_ARG]
81 |
82 | Docker Image Fetching:
83 | --garden-docker-registry= Docker registry API endpoint. (default: registry-1.docker.io) [$CONCOURSE_GARDEN_DOCKER_REGISTRY]
84 | --garden-insecure-docker-registry= Docker registry to allow connecting to even if not secure. Can be specified multiple times. [$CONCOURSE_GARDEN_INSECURE_DOCKER_REGISTRY]
85 |
86 | Container Networking:
87 | --garden-network-pool= Network range to use for dynamically allocated container subnets. (default: 10.254.0.0/22) [$CONCOURSE_GARDEN_NETWORK_POOL]
88 | --garden-allow-host-access Allow network access to the host machine. [$CONCOURSE_GARDEN_ALLOW_HOST_ACCESS]
89 | --garden-deny-network= Network ranges to which traffic from containers will be denied. Can be specified multiple times. [$CONCOURSE_GARDEN_DENY_NETWORK]
90 | --garden-dns-server= DNS server IP address to use instead of automatically determined servers. Can be specified multiple times. [$CONCOURSE_GARDEN_DNS_SERVER]
91 | --garden-additional-dns-server= DNS server IP address to append to the automatically determined servers. Can be specified multiple times. [$CONCOURSE_GARDEN_ADDITIONAL_DNS_SERVER]
92 | --garden-additional-host-entry= Per line hosts entries. Can be specified multiple times and will be appended verbatim in order to /etc/hosts [$CONCOURSE_GARDEN_ADDITIONAL_HOST_ENTRY]
93 | --garden-external-ip= IP address to use to reach container's mapped ports. Autodetected if not specified. [$CONCOURSE_GARDEN_EXTERNAL_IP]
94 | --garden-port-pool-start= Start of the ephemeral port range used for mapped container ports. (default: 61001) [$CONCOURSE_GARDEN_PORT_POOL_START]
95 | --garden-port-pool-size= Size of the port pool used for mapped container ports. (default: 4534) [$CONCOURSE_GARDEN_PORT_POOL_SIZE]
96 | --garden-port-pool-properties-path= Path in which to store port pool properties. [$CONCOURSE_GARDEN_PORT_POOL_PROPERTIES_PATH]
97 | --garden-mtu= MTU size for container network interfaces. Defaults to the MTU of the interface used for outbound access by the host. Max allowed value is 1500. [$CONCOURSE_GARDEN_MTU]
98 | --garden-network-plugin= Path to network plugin binary. [$CONCOURSE_GARDEN_NETWORK_PLUGIN]
99 | --garden-network-plugin-extra-arg= Extra argument to pass to the network plugin. Can be specified multiple times. [$CONCOURSE_GARDEN_NETWORK_PLUGIN_EXTRA_ARG]
100 |
101 | Limits:
102 | --garden-cpu-quota-per-share= Maximum number of microseconds each cpu share assigned to a container allows per quota period (default: 0) [$CONCOURSE_GARDEN_CPU_QUOTA_PER_SHARE]
103 | --garden-tcp-memory-limit= Set hard limit for the tcp buf memory, value in bytes (default: 0) [$CONCOURSE_GARDEN_TCP_MEMORY_LIMIT]
104 | --garden-default-container-blockio-weight= Default block IO weight assigned to a container (default: 0) [$CONCOURSE_GARDEN_DEFAULT_CONTAINER_BLOCKIO_WEIGHT]
105 | --garden-max-containers= Maximum number of containers that can be created. (default: 0) [$CONCOURSE_GARDEN_MAX_CONTAINERS]
106 | --garden-disable-swap-limit Disable swap memory limit [$CONCOURSE_GARDEN_DISABLE_SWAP_LIMIT]
107 |
108 | Metrics:
109 | --garden-metrics-emission-interval= Interval on which to emit metrics. (default: 1m) [$CONCOURSE_GARDEN_METRICS_EMISSION_INTERVAL]
110 | --garden-dropsonde-origin= Origin identifier for Dropsonde-emitted metrics. (default: garden-linux) [$CONCOURSE_GARDEN_DROPSONDE_ORIGIN]
111 | --garden-dropsonde-destination= Destination for Dropsonde-emitted metrics. (default: 127.0.0.1:3457) [$CONCOURSE_GARDEN_DROPSONDE_DESTINATION]
112 |
113 | Containerd:
114 | --garden-containerd-socket= Path to a containerd socket. [$CONCOURSE_GARDEN_CONTAINERD_SOCKET]
115 | --garden-use-containerd-for-processes Use containerd to run processes in containers. [$CONCOURSE_GARDEN_USE_CONTAINERD_FOR_PROCESSES]
116 |
117 | DNS Proxy Configuration:
118 | --garden-dns-proxy-enable Enable proxy DNS server. [$CONCOURSE_GARDEN_DNS_PROXY_ENABLE]
119 |
120 | Baggageclaim Configuration:
121 | --baggageclaim-log-level=[debug|info|error|fatal] Minimum level of logs to see. (default: info) [$CONCOURSE_BAGGAGECLAIM_LOG_LEVEL]
122 | --baggageclaim-bind-ip= IP address on which to listen for API traffic. (default: 127.0.0.1) [$CONCOURSE_BAGGAGECLAIM_BIND_IP]
123 | --baggageclaim-bind-port= Port on which to listen for API traffic. (default: 7788) [$CONCOURSE_BAGGAGECLAIM_BIND_PORT]
124 | --baggageclaim-bind-debug-port= Port on which to listen for baggageclaim pprof server. (default: 8099) [$CONCOURSE_BAGGAGECLAIM_BIND_DEBUG_PORT]
125 | --baggageclaim-volumes= Directory in which to place volume data. [$CONCOURSE_BAGGAGECLAIM_VOLUMES]
126 | --baggageclaim-driver=[detect|naive|btrfs|overlay] Driver to use for managing volumes. (default: detect) [$CONCOURSE_BAGGAGECLAIM_DRIVER]
127 | --baggageclaim-btrfs-bin= Path to btrfs binary (default: btrfs) [$CONCOURSE_BAGGAGECLAIM_BTRFS_BIN]
128 | --baggageclaim-mkfs-bin= Path to mkfs.btrfs binary (default: mkfs.btrfs) [$CONCOURSE_BAGGAGECLAIM_MKFS_BIN]
129 | --baggageclaim-overlays-dir= Path to directory in which to store overlay data [$CONCOURSE_BAGGAGECLAIM_OVERLAYS_DIR]
130 | --baggageclaim-reap-interval= Interval on which to reap expired volumes. (default: 10s) [$CONCOURSE_BAGGAGECLAIM_REAP_INTERVAL]
--------------------------------------------------------------------------------