├── .config └── ansible-lint.yml ├── .github └── workflows │ ├── ansible-lint.yml │ └── molecule.yml ├── .gitignore ├── .yamllint ├── CHANGELOG.md ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── LICENSE ├── README.md ├── defaults └── main.yml ├── examples ├── README_VAGRANT.md ├── Vagrantfile ├── bin │ └── preinstall ├── site.yml └── vagrant_hosts ├── files └── README.md ├── handlers └── main.yml ├── meta └── main.yml ├── molecule ├── _shared │ ├── Dockerfile.j2 │ ├── base.yml │ ├── converge.yml │ ├── prepare.yml │ └── verify.yml ├── almalinux-8 │ ├── molecule.yml │ └── verify.yml ├── almalinux-9 │ ├── molecule.yml │ └── verify.yml ├── centos-7 │ ├── molecule.yml │ └── verify.yml ├── centos-8-stream │ ├── molecule.yml │ └── verify.yml ├── centos-9-stream │ ├── molecule.yml │ └── verify.yml ├── debian-10 │ ├── molecule.yml │ └── verify.yml ├── debian-11 │ ├── molecule.yml │ └── verify.yml ├── debian-12 │ ├── molecule.yml │ └── verify.yml ├── default │ └── .gitkeep ├── fedora-38 │ ├── molecule.yml │ └── verify.yml ├── fedora-39 │ ├── molecule.yml │ └── verify.yml ├── oraclelinux-7 │ ├── molecule.yml │ └── verify.yml ├── oraclelinux-8 │ ├── molecule.yml │ └── verify.yml ├── oraclelinux-9 │ ├── molecule.yml │ └── verify.yml ├── ubuntu-20.04 │ ├── molecule.yml │ └── verify.yml ├── ubuntu-22.04 │ ├── molecule.yml │ └── verify.yml └── ubuntu-23.04 │ ├── molecule.yml │ └── verify.yml ├── requirements.yml ├── tasks ├── asserts.yml ├── cni.yml ├── docker.yml ├── get_gossip_key.yml ├── host_volume.yml ├── install.yml ├── install_podman.yml ├── main.yml ├── selinux.yml ├── tls.yml └── user_group.yml ├── templates ├── base.hcl.j2 ├── client.hcl.j2 ├── custom.json.j2 ├── nomad_debian.init.j2 ├── nomad_systemd.service.j2 ├── nomad_sysvinit.j2 └── server.hcl.j2 ├── tests ├── inventory └── test.yml ├── vars ├── Archlinux.yml ├── Debian.yml ├── Flatcar.yml ├── RedHat.yml ├── VMware Photon OS.yml └── main.yml └── version.txt /.config/ansible-lint.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # .ansible-lint 3 | exclude_paths: 4 | - .cache/ # implicit unless exclude_paths is defined in config 5 | - .yamllint 6 | - molecule/ 7 | - tests/ 8 | - .github/ 9 | 10 | # install collection dependencies 11 | offline: false 12 | -------------------------------------------------------------------------------- /.github/workflows/ansible-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ansible-lint 3 | on: 4 | pull_request: 5 | branches: ["main", "master", "stable", "release/v*"] 6 | jobs: 7 | build: 8 | name: Ansible Lint 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | 13 | - name: Run ansible-lint 14 | uses: ansible/ansible-lint@v6.22.1 15 | -------------------------------------------------------------------------------- /.github/workflows/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: molecule 3 | on: 4 | pull_request: 5 | branches: ["main", "master", "stable", "release/v*"] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | strategy: 11 | fail-fast: false 12 | max-parallel: 4 13 | matrix: 14 | scenario: 15 | - almalinux-8 16 | - almalinux-9 17 | - centos-7 18 | - centos-8-stream 19 | - centos-9-stream 20 | - debian-10 21 | - debian-11 22 | - debian-12 23 | - fedora-38 24 | - fedora-39 25 | # - oraclelinux-7 26 | - oraclelinux-8 27 | - oraclelinux-9 28 | - ubuntu-20.04 29 | - ubuntu-22.04 30 | - ubuntu-23.04 31 | 32 | steps: 33 | - uses: actions/checkout@v2 34 | with: 35 | path: "${{ github.repository }}" 36 | 37 | - name: Molecule 38 | uses: gofrolist/molecule-action@v2 39 | with: 40 | molecule_working_dir: "${{ github.repository }}" 41 | molecule_options: --base-config molecule/_shared/base.yml 42 | molecule_args: --scenario-name ${{ matrix.scenario }} 43 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vagrant 3 | .tm_properties 4 | *.retry 5 | examples/hashistack 6 | examples/hosts 7 | files/*.zip 8 | files/nomad 9 | nomad_*_SHA256SUMS 10 | *.swp 11 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | rules: 5 | braces: 6 | max-spaces-inside: 1 7 | level: error 8 | brackets: 9 | max-spaces-inside: 1 10 | level: error 11 | line-length: disable 12 | truthy: 13 | allowed-values: ["true", "false"] 14 | check-keys: false 15 | comments: 16 | min-spaces-from-content: 1 # same as ansible-lint 17 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## NEXT 2 | 3 | - Add host_volume for nomad client (thanks @ducminhle) 4 | - Update CONTRIBUTORS 5 | - Update documentation 6 | 7 | ## 1.9.5 8 | 9 | - Nomad v0.10.3 10 | - Fix Nomad Debian initscript issue (thanks @raposalorx) 11 | - Add Amazon Linux support to check (thanks @blade2005) 12 | - Update Contributors 13 | - Update documentation 14 | 15 | ## 1.9.4 16 | 17 | - Correct dmsetup issue (thanks @cimermanGregor) 18 | - Update CONTRIBUTORS 19 | 20 | ## v1.9.3 21 | 22 | - Nomad v0.10.2 23 | - Remove root user from docker group (thanks @jadams) 24 | - Use JSON for custom configuration/fix template (thanks @heri16) 25 | 26 | ## v1.9.2 27 | 28 | - Nomad v0.10.1 29 | - Production hardening of systemd unit (thanks @bdossantos) 30 | - Update documentation 31 | 32 | ## v1.9.1 33 | 34 | - Nomad v0.10.0 35 | - add nomad_consul_tags option (thanks @camskkz) 36 | - Update documentation 37 | 38 | ## v1.9.0 39 | 40 | - Nomad v0.9.6 41 | - Fedora support (thanks 
@rbjorklin) 42 | - Update documentation 43 | 44 | ## v1.8.9 45 | 46 | - Nomad v0.9.5 47 | - Update documentation 48 | 49 | ## v1.8.8 50 | 51 | - Nomad v0.9.4 52 | - Update documentation 53 | 54 | ## v1.8.7 55 | 56 | - Nomad v0.9.2 57 | - Use SELinux module in tasks/selinux 58 | - Remove invalid nomad_log_dir configuration (#39) 59 | - Update documentation 60 | 61 | ## v1.8.6 62 | 63 | - Correct args for install task 64 | 65 | ## v1.8.5 66 | 67 | - Fix: install: set explicit bash shell to use pipefail 68 | - Update documentation 69 | 70 | ## v1.8.4 71 | 72 | - Nomad v0.9.1 73 | - Add Arch Linux support (thanks @bilke) 74 | - chroot_env config for exec and Java drivers (thanks @pxsloot) 75 | - Update shell task with pipefail 76 | 77 | ## v1.8.3 78 | 79 | - Clean up task items for best practices 80 | - Update documentation 81 | 82 | ## v1.8.2 83 | 84 | - Nomad v0.9.0 85 | - Add consul token support (thanks @bewiwi) 86 | - Update documentation 87 | 88 | ## v1.8.1 89 | 90 | - Nomad v0.8.7 91 | - Improve tasks and templates (thanks @blaet) 92 | - Correct init script (thanks @shoreflyer) 93 | - nomad_verify_server_hostname/nomad_verify_https_client (thanks @jsecchiero) 94 | - Update CONTRIBUTORS 95 | 96 | ## v1.8.0 97 | 98 | - Nomad v0.8.6 99 | - Consistent boolean usage 100 | - Improve Consul bootstrapping (thanks @RodolpheFouquet) 101 | - Systemd daemon reload (thanks @mrvovanness) 102 | - Advertise ports (thanks @Tsuki) 103 | - Update CONTRIBUTORS 104 | 105 | ## v1.7.9 106 | 107 | - Nomad v0.8.4 108 | - Explicit owner and mode for config files (thanks @groggemans) 109 | - Update Jinja2 tests (thanks @ccf) 110 | - Update documentation 111 | 112 | ## v1.7.8 113 | 114 | - Nomad v0.8.3 115 | - Add Vault and ACL support (thanks @groggemans) 116 | - Handle installing different arch at the same time (thanks @lanefu) 117 | - Update documentation 118 | 119 | ## v1.7.7 120 | 121 | - Nomad version 0.7.0 122 | - Explicit owner and mode for config files (thanks @groggemans) 123 | - 
Add initial TLS (thanks @jsecchiero) 124 | - Restart service when modified (thanks @jsecchiero) 125 | - Enable debian > 6.3 (thanks @jsecchiero) 126 | - Update documentation (thanks @jsecchiero) 127 | 128 | ## v1.7.6 129 | 130 | - Nomad 0.6.3 131 | - Finish cluster_nodes -> nomad_instances renaming 132 | - Update CONTRIBUTORS 133 | - Typo fixes (thanks @kjagiello) 134 | 135 | ## v1.7.5 136 | 137 | - Nomad v0.6.2 138 | - Re-instate nomad_use_consul functionality (thanks @awheeler) 139 | 140 | ## v1.7.4 141 | 142 | - Proper client/server template rendering (thanks @awheeler) 143 | 144 | ## v1.7.3 145 | 146 | - Nomad version 0.6.0 147 | 148 | ## v1.7.2 149 | 150 | - Conditionally include options and meta to avoid error when empty 151 | - Rename `nomad_cluster_nodes` label to `nomad_instances` 152 | 153 | ## v1.7.1 154 | 155 | - Clean up docker tasks 156 | - Fix debian init and client only config (thanks @groggemans) 157 | 158 | ## v1.7.0 159 | 160 | - Update README (thanks @groggemans) 161 | - Add meta parameters to client template (thanks @groggemans) 162 | - Add options parameters to client template (thanks @groggemans) 163 | - Update and fix CONTRIBUTORS file (thanks @groggemans) 164 | - Small syntax fixes and init script updates (thanks @groggemans) 165 | - Update and extend config templates (thanks @groggemans) 166 | - Main tasks cleanup (thanks @groggemans) 167 | - Initial reordering of role defaults (thanks @groggemans) 168 | - Move asserts and checks to there own file (thanks @groggemans) 169 | - CHANGELOG++ 170 | 171 | ## v1.6.5 172 | 173 | - Add custom configuration option (thanks @awheeler) 174 | - Fixed systemd service file when nomad_custom_config used (thanks @awheeler) 175 | - Update documentation 176 | - Update task meta 177 | - Update role meta 178 | - Update CONTRIBUTORS 179 | 180 | ## v1.6.4 181 | 182 | - Fix log portion of start line in init script - fixes #13 183 | - Fix bad nomad_docker_enable variable refs 184 | - Update CONTRIBUTING 185 | 186 | 
## v1.6.3 187 | 188 | - Remove bootstrap task 189 | 190 | ## v1.6.2 191 | 192 | - Move bootstrap into server config 193 | - Remove bootstrap node role 194 | - Use node role in startup scripts 195 | - Update startup scripts 196 | - Update install playbook 197 | 198 | ## v1.6.1 199 | 200 | - Fix install task issue 201 | 202 | ## v1.6.0 203 | 204 | - Use all directory variables in all templates 205 | - Addresses #8 206 | - Addresses #9 207 | - New variables: 208 | - `nomad_lockfile` 209 | - `nomad_run_dir` 210 | - Updated init script templates 211 | - Updated systemd unit template 212 | - Convert to local action plays 213 | 214 | ## v1.5.7 215 | 216 | - Nomad version 0.5.6 217 | - Update documentation 218 | 219 | ## v1.5.6 220 | 221 | - Add iface env var 222 | 223 | ## v1.5.5 224 | 225 | - Fix cluster_nodes references 226 | 227 | ## v1.5.4 228 | 229 | - Make nomad user account dynamic and also a system account 230 | 231 | ## v1.5.3 232 | 233 | - Nomad version 0.5.5 234 | - Updated documentation 235 | 236 | ## v1.5.2 237 | 238 | - Initial ARM support (thanks @lanefu) 239 | 240 | ## v1.5.1 241 | 242 | - Enable the service when starting 243 | - Prefer compact YAML in tasks 244 | - Task cleanup 245 | 246 | ## v1.5.0 247 | 248 | - Version fix 249 | 250 | ## v1.4.6 251 | 252 | - Better conditionals for init scripts fixes #5 253 | - Change to compact YAML 254 | - Misc task updates 255 | 256 | ## v1.4.5 257 | 258 | - Switch init scripts to send SIGTERM to address #2 259 | - Add leave_on_terminate and set to True by default 260 | 261 | ## v1.4.4 262 | 263 | - Nomad 0.5.4 264 | - Fixed typo in install tasks fixes #6 (thanks @asemt) 265 | - Added nomad_group_name and use nomad_iface (thanks @dggreenbaum) 266 | - Updated documentation 267 | 268 | ## v1.4.3 269 | 270 | - Nomad 0.5.4 271 | 272 | ## v1.4.2 273 | 274 | - Checks for existing packages and summary files 275 | - Nomad 0.5.2 276 | 277 | ## v1.4.1 278 | 279 | - Nomad 0.5.1 280 | - Add NOMAD_VERSION environment variable 281 
| - Fix typo in default variables 282 | 283 | ## v1.4.0 284 | 285 | - Nomad 0.5.0 286 | - Automatic SHA determination 287 | - Streamline and split out install, Docker, and SELinux tasks 288 | - Remove deprecated task files 289 | - Establish OS vars 290 | - Update documentation 291 | 292 | ## v1.3.3 293 | 294 | - Correct var 295 | - More SELinux config 296 | 297 | ## v1.3.2 298 | 299 | - Disable SELinux when Docker is used 300 | 301 | ## v1.3.1 302 | 303 | - Update/validate CentOS 7 box 304 | - Update documentation 305 | - Update failure cases for CentOS 306 | 307 | ## v1.3.0 308 | 309 | - Remove Docker majority of bits except for supporting packages, etc. 310 | - Docker will only be installed for Vagrant based clusters via the 311 | Vagrant provisioner when environment variable `NOMAD_DOCKER_ENABLE="true"` 312 | is set 313 | - Update start scripts to be smarter about node role 314 | - Add cgroups packages on Debian/Ubuntu 315 | - Run Nomad as root for now 316 | 317 | ## v1.2.1 318 | 319 | - Renamed vars to be more in line with Nomad terminology 320 | - Switched to merged config style with base, bootstrap, server, client 321 | 322 | ## v1.2.0 323 | 324 | - Dropping native Docker subsystem support in favor of external role 325 | - Attempting to run nomad as nomad user 326 | 327 | ## v1.1.9 328 | 329 | - Remove deprecated tasks 330 | - Add conditionals to tasks 331 | 332 | ## v1.1.8 333 | 334 | - Update doc meta 335 | 336 | ## v1.1.7 337 | 338 | - Update default variables 339 | - Update supported versions 340 | - Update documentation — now w/ more header meta (h/t @dochang) 341 | 342 | ## v1.1.6 343 | 344 | - Update OS packages 345 | - Update signing key tasks 346 | 347 | ## v1.1.5 348 | 349 | - Keyserver quoting 350 | 351 | ## v1.1.4 352 | 353 | - Update supported versions 354 | - Fix up unarchive task quoting 355 | 356 | ## v1.1.3 357 | 358 | - Fix package name vars 359 | 360 | ## v1.1.2 361 | 362 | - Tests run best when they exist! 
363 | 364 | ## v1.1.1 365 | 366 | - Switch to galaxy_tags 367 | - Enable CI 368 | 369 | ## v1.1.0 370 | 371 | - Prepare role for Galaxy 372 | - Add optional Docker installation support 373 | - Update initial configuration 374 | - Update documentation 375 | - Update start scripts 376 | 377 | ## v1.0.2 378 | 379 | - Enable and start nomad service 380 | - Fixup initial configuration paths 381 | - Update documentation 382 | 383 | ## v1.0.1 384 | 385 | - Remove unused variables 386 | - Update documentation 387 | 388 | ## v1.0.0 389 | 390 | - Installs Nomad on each node 391 | - Installs example configuration for server and client 392 | - Installs example init, systemd, and upstart scripts 393 | - Correct versions 394 | - Update Galaxy meta 395 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please first discuss the change you wish 4 | to make via issue, email, or any other method with the owners of this repository before making a change. 5 | 6 | Do note that this project has a code of conduct; please be sure to follow it 7 | in all of your project interactions. 8 | 9 | ## Pull Request Process 10 | 11 | 1. Ensure any install or build artifacts are removed before the end of 12 | the layer when doing a build 13 | 2. Update the README.md or README_VAGRANT.md with details of changes to the 14 | interface, this includes new environment variables, exposed ports, useful 15 | file locations and container parameters 16 | 3. Increase the version numbers in any examples files and the README.md 17 | to the new version that this Pull Request would represent. The versioning scheme we use is (mostly) [SemVer](http://semver.org/) 18 | 4. 
You may merge the Pull Request in once you have the sign-off of two other 19 | project contributors, or if you do not have permission to do that, you can 20 | request the second reviewer to merge it for you 21 | 22 | ## Code of Conduct 23 | 24 | ### Our Pledge 25 | 26 | In the interest of fostering an open and welcoming environment, we as 27 | contributors and maintainers pledge to making participation in our project 28 | and our community a harassment-free experience for everyone, regardless of age, 29 | body size, disability, ethnicity, gender identity and expression, level of 30 | experience, nationality, personal appearance, race, religion, or sexual 31 | identity and orientation. 32 | 33 | ### Our Standards 34 | 35 | Examples of behavior that contributes to creating a positive environment 36 | include: 37 | 38 | * Showing empathy towards other community members 39 | * Using welcoming and inclusive language 40 | * Being respectful of differing viewpoints and experiences 41 | * Gracefully accepting constructive criticism 42 | * Focusing on what is best for the community 43 | 44 | Examples of unacceptable behavior by participants include: 45 | 46 | * Use of sexualized language or imagery and unwelcome sexual attention 47 | or advances 48 | * Insulting/derogatory comments, and personal or political attacks 49 | * Public or private harassment 50 | * Publishing others' private information, such as a physical or electronic 51 | address, without explicit permission 52 | * Other conduct which could reasonably be considered inappropriate in a 53 | professional setting 54 | 55 | ### Our Responsibilities 56 | 57 | Project maintainers are responsible for clarifying the standards of acceptable 58 | behavior and are expected to take appropriate and fair corrective action in 59 | response to any instances of unacceptable behavior. 
60 | 61 | Project maintainers have the right and responsibility to remove, edit, or 62 | reject comments, commits, code, wiki edits, issues, and other contributions 63 | that are not aligned to this Code of Conduct, or to ban temporarily or 64 | permanently any contributor for other behaviors that they deem inappropriate, 65 | threatening, offensive, or harmful. 66 | 67 | ### Scope 68 | 69 | This Code of Conduct applies both within project spaces and in public spaces 70 | when an individual is representing the project or its community. Examples of 71 | representing a project or community include using an official project e-mail 72 | address, posting via an official social media account, or acting as an 73 | appointed representative at an online or offline event. Representation of a 74 | project may be further defined and clarified by project maintainers. 75 | 76 | ### Enforcement 77 | 78 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 79 | reported by contacting the ansible-community project leadership on Github 80 | 81 | All complaints will be reviewed and investigated and will result in a response 82 | that is deemed necessary and appropriate to the circumstances. The project 83 | team is obligated to maintain confidentiality with regard to the reporter of 84 | an incident. Further details of specific enforcement policies may be posted 85 | separately. 86 | 87 | Project maintainers who do not follow or enforce the Code of Conduct in good 88 | faith may face temporary or permanent repercussions as determined by other 89 | members of the project's leadership. 
90 | 91 | ### Attribution 92 | 93 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 94 | 95 | [homepage]: http://contributor-covenant.org 96 | [version]: http://contributor-covenant.org/version/1/4/ 97 | -------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # Contributors 2 | 3 | Thank you to all these fine folk for helping with ansible-nomad! 4 | 5 | - [@dggreenbaum](https://github.com/dggreenbaum) 6 | - [@asemt](https://github.com/asemt) 7 | - [@lanefu](https://github.com/lanefu) 8 | - [@awheeler](https://github.com/awheeler) 9 | - [@groggemans](https://github.com/groggemans) 10 | - [@kjagiello](https://github.com/kjagiello) 11 | - [@jsecchiero](https://github.com/jsecchiero) 12 | - [@burkostya](https://github.com/burkostya) 13 | - [@ccf](https://github.com/ccf) 14 | - [@RodolpheFouquet](https://github.com/RodolpheFouquet) 15 | - [@mrvovanness](https://github.com/mrvovanness) 16 | - [@Tsuki](https://github.com/Tsuki) 17 | - [@blaet](https://github.com/blaet) 18 | - [@shoreflyer](https://github.com/shoreflyer) 19 | - [@bewiwi](https://github.com/bewiwi) 20 | - [@bilke](https://github.com/bilke) 21 | - [@pxsloot](https://github.com/pxsloot) 22 | - [@rbjorklin](https://github.com/rbjorklin) 23 | - [@camskkz](https://github.com/camskkz) 24 | - [@bdossantos](https://github.com/bdossantos) 25 | - [@jadams](https://github.com/jadams) 26 | - [@heri16](https://github.com/heri16) 27 | - [@cimermanGregor](https://github.com/cimermanGregor) 28 | - [@raposalorx](https://github.com/raposalorx) 29 | - [@blade2005](https://github.com/blade2005) 30 | - [@ducminhle](https://github.com/ducminhle) 31 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | Copyright (c) 2016, Brian Shumate 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible-Nomad 2 | 3 | ---- 4 | 5 | This role was previously maintained by Brian Shumate and is now curated by [@ansible-community/hashicorp-tools](https://github.com/ansible-community). 
6 | 7 | ---- 8 | 9 | 10 | 11 | This Ansible role performs basic [Nomad](https://nomadproject.io/) 12 | installation, including filesystem structure, and example configuration. 13 | 14 | It will also bootstrap a minimal cluster of 3 server nodes, and can do this 15 | in a development environment based on Vagrant and VirtualBox. See 16 | [README_VAGRANT.md](https://github.com/ansible-community/ansible-nomad/blob/master/examples/README_VAGRANT.md) for more details about the Vagrant setup. 17 | 18 | ## Requirements 19 | 20 | This role requires an Arch Linux, Debian, RHEL, or Ubuntu distribution; the role is tested 21 | with the following specific software versions: 22 | 23 | * Ansible: 2.7.10 24 | * nomad: 0.12.1 25 | * Arch Linux 26 | * CentOS: 7 27 | * Debian: 8 28 | * RHEL: 7 29 | * Ubuntu: >= 20.04 30 | * unzip for [unarchive module](https://docs.ansible.com/ansible/latest/modules/unarchive_module.html#notes) 31 | 32 | ## Role Variables 33 | 34 | The role defines most of its variables in `defaults/main.yml`: 35 | 36 | ### `nomad_debug` 37 | - Nomad debug mode 38 | - Default value: **no** 39 | 40 | ### `nomad_skip_ensure_all_hosts` 41 | - Allow running the role even if not all instances are connected 42 | - Default value: **no** 43 | 44 | ### `nomad_allow_purge_config` 45 | - Allow purging obsolete configuration files. 
For example, remove server configuration if instance is no longer a server 46 | - Default value: **no** 47 | 48 | ### `nomad_version` 49 | 50 | - Nomad version to install 51 | - Default value: **1.1.1** 52 | 53 | ### `nomad_architecture_map` 54 | 55 | - This variable does not need to be changed in most cases 56 | - Default value: Dictionary translating ansible_architecture to HashiCorp 57 | architecture naming convention 58 | 59 | ### `nomad_architecture` 60 | 61 | - Host architecture 62 | - Default value: determined by `{{ nomad_architecture_map[ansible_architecture] }}` 63 | 64 | ### `nomad_pkg` 65 | 66 | - Nomad package filename 67 | - Default value: `nomad_{{ nomad_version }}_linux_{{ nomad_architecture }}.zip` 68 | 69 | ### `nomad_zip_url` 70 | 71 | - Nomad download URL 72 | - Default value: `https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_{{ nomad_architecture }}.zip` 73 | 74 | ### `nomad_checksum_file_url` 75 | 76 | - Nomad checksum file URL 77 | - Default value: `https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version}}_SHA256SUMS` 78 | 79 | ### `nomad_bin_dir` 80 | 81 | - Nomad binary installation path 82 | - Default value: `/usr/local/bin` 83 | 84 | ### `nomad_config_dir` 85 | 86 | - Nomad configuration file path 87 | - Default value: `/etc/nomad.d` 88 | 89 | ### `nomad_data_dir` 90 | 91 | - Nomad data path 92 | - Default value: `/var/nomad` 93 | 94 | ### `nomad_lockfile` 95 | 96 | - Nomad lockfile path 97 | - Default value: `/var/lock/subsys/nomad` 98 | 99 | ### `nomad_run_dir` 100 | 101 | - Nomad run path 102 | - Default value: `/var/run/nomad` 103 | 104 | ### `nomad_manage_user` 105 | 106 | - Manage Nomad user? 107 | - Default value: **yes** 108 | 109 | ### `nomad_user` 110 | 111 | - Nomad OS username 112 | - Default value: **root** 113 | 114 | ### `nomad_manage_group` 115 | 116 | - Manage Nomad group? 
117 | - Default value: **no** 118 | 119 | ### `nomad_group` 120 | 121 | - Nomad OS group 122 | - Default value: **bin** 123 | 124 | ### `nomad_region` 125 | 126 | - Default region 127 | - Default value: **global** 128 | 129 | ### `nomad_datacenter` 130 | 131 | - Nomad datacenter label 132 | - Default value: **dc1** 133 | 134 | ### `nomad_log_level` 135 | 136 | - Logging level 137 | - Default value: **INFO** 138 | 139 | ### `nomad_syslog_enable` 140 | 141 | - Log to syslog 142 | - Default value: **true** 143 | 144 | ### `nomad_iface` 145 | 146 | - Nomad network interface 147 | - Default value: `{{ ansible_default_ipv4.interface }}` 148 | 149 | ### `nomad_node_name` 150 | 151 | - Nomad node name 152 | - Default value: `{{ inventory_hostname_short }}` 153 | 154 | ### `nomad_node_role` 155 | 156 | - Nomad node role 157 | - options: *client*, *server*, *both* 158 | - Default value: **client** 159 | 160 | ### `nomad_leave_on_terminate` 161 | 162 | - Send leave on termination 163 | - Default value: **yes** 164 | 165 | ### `nomad_leave_on_interrupt` 166 | 167 | - Send leave on interrupt 168 | - Default value: **no** 169 | 170 | ### `nomad_disable_update_check` 171 | 172 | - Disable update check 173 | - Default value: **no** 174 | 175 | ### `nomad_retry_max` 176 | 177 | - Max retry join attempts 178 | - Default value: **0** 179 | 180 | ### `nomad_retry_join` 181 | 182 | - Enable retry join? 183 | - Default value: **no** 184 | 185 | ### `nomad_retry_interval` 186 | 187 | - Retry join interval 188 | - Default value: **30s** 189 | 190 | ### `nomad_rejoin_after_leave` 191 | 192 | - Rejoin after leave? 
193 | - Default value: **no** 194 | 195 | ### `nomad_enabled_schedulers` 196 | 197 | - List of enabled schedulers 198 | - Default value: **service, batch, system** 199 | 200 | ### `nomad_num_schedulers` 201 | 202 | - Number of schedulers 203 | - Default value: `{{ ansible_processor_vcpus }}` 204 | 205 | ### `nomad_node_gc_threshold` 206 | 207 | - Node garbage collection threshold 208 | - Default value: **24h** 209 | 210 | ### `nomad_job_gc_threshold` 211 | 212 | - Job garbage collection threshold 213 | - Default value: **4h** 214 | 215 | ### `nomad_eval_gc_threshold` 216 | 217 | - Eval garbage collection threshold 218 | - Default value: **1h** 219 | 220 | ### `nomad_deployment_gc_threshold` 221 | 222 | - Deployment garbage collection threshold 223 | - Default value: **1h** 224 | 225 | ### `nomad_encrypt_enable` 226 | 227 | - Enable Gossip Encryption even if `nomad_encrypt` is not set 228 | - Default value: false 229 | 230 | ### `nomad_encrypt` 231 | 232 | - Set the encryption key; should be the same across a cluster. If not present and `nomad_encrypt_enable` is true, the key will be generated & retrieved from the bootstrapped server. 233 | - Default value: **""** 234 | 235 | ### `nomad_raft_multiplier` 236 | 237 | - Specifies the raft multiplier to use 238 | - Default value: **1** 239 | 240 | ### `nomad_raft_protocol` 241 | 242 | - Specifies the version of the raft protocol, which is used by Nomad servers for communication 243 | - Default value: **2** 244 | 245 | ### `nomad_authoritative_region` 246 | 247 | - Specifies the authoritative region, which provides a single source of truth for global configurations such as ACL Policies and global ACL tokens. 248 | - Default value: **""** 249 | 250 | ### `nomad_node_class` 251 | 252 | - Nomad node class 253 | - Default value: **""** 254 | 255 | ### `nomad_node_pool` 256 | 257 | - Used for restricting which client nodes are eligible to receive which workloads. 258 | By default, tasks are opted-out of non-default node pools. 
This means job authors don’t have to repeatedly add the same constraints to every job just to avoid certain nodes. 259 | - Default value: **""** 260 | 261 | ### `nomad_no_host_uuid` 262 | 263 | - Force the UUID generated by the client to be randomly generated 264 | - Default value: **no** 265 | 266 | ### `nomad_max_kill_timeout` 267 | 268 | - Max kill timeout 269 | - Default value: **30s** 270 | 271 | ### `nomad_network_interface` 272 | 273 | - Nomad scheduler will choose from the IPs of this interface for allocating tasks 274 | - Default value: none 275 | 276 | ### `nomad_network_speed` 277 | 278 | - Override network link speed (0 = no override) 279 | - Default value: **0** 280 | 281 | ### `nomad_cpu_total_compute` 282 | 283 | - Override cpu compute (0 = no override) 284 | - Default value: **0** 285 | 286 | ### `nomad_gc_interval` 287 | 288 | - Client garbage collection interval 289 | - Default value: **1m** 290 | 291 | ### `nomad_gc_max_allocs` 292 | 293 | - Maximum number of allocations which a client will track before triggering a garbage collection 294 | - Default value: **50** 295 | 296 | ### `nomad_gc_disk_usage_threshold` 297 | 298 | - Disk usage threshold percentage for garbage collection 299 | - Default value: **80** 300 | 301 | ### `nomad_gc_inode_usage_threshold` 302 | 303 | - Inode usage threshold percentage for garbage collection 304 | - Default value: **70** 305 | 306 | ### `nomad_gc_parallel_destroys` 307 | 308 | - Garbage collection max parallel destroys 309 | - Default value: **2** 310 | 311 | ### `nomad_reserved` 312 | 313 | - Reserved client resources 314 | - Default value: `cpu: {{ nomad_reserved_cpu }}, memory: {{ nomad_reserved_memory }}, disk: {{ nomad_reserved_disk }}, ports: {{ nomad_reserved_ports }}` 315 | 316 | ### `nomad_reserved_cpu` 317 | 318 | - Reserved client CPU 319 | - Default value: **0** 320 | 321 | ### `nomad_reserved_memory` 322 | 323 | - Reserved client memory 324 | - Default value: **0** 325 | 326 | ### `nomad_reserved_disk` 
327 | 328 | - Reserved client disk 329 | - Default value: **0** 330 | 331 | ### `nomad_reserved_ports` 332 | 333 | - Reserved client ports 334 | - Default value: **22** 335 | 336 | ### `nomad_host_volumes` 337 | 338 | - List host_volume is used to make volumes available to jobs (Stateful Workloads). By default, a directory is created. Specify the `state` parameter to change it. 339 | - Default value: **[]** 340 | - Example: 341 | 342 | ```yaml 343 | nomad_host_volumes: 344 | - name: data 345 | path: /var/data 346 | owner: root 347 | group: bin 348 | mode: 0755 349 | read_only: false 350 | - name: config 351 | path: /etc/conf 352 | owner: root 353 | group: bin 354 | mode: 0644 355 | read_only: false 356 | - name: docker socket 357 | path: /run/docker.sock 358 | read_only: true 359 | state: file 360 | ``` 361 | 362 | ### `nomad_host_networks` 363 | 364 | - List host_network is used to make different networks available to jobs instead of selecting a default interface. This is very useful especially in case of multiple nics. 
365 | - Default value: **[]** 366 | - Example: 367 | 368 | ```yaml 369 | nomad_host_networks: 370 | - name: public 371 | cidr: 100.101.102.103/24 372 | reserved_ports: 22,80 373 | - name: private 374 | interface: eth0 375 | reserved_ports: 443 376 | ``` 377 | 378 | ### `nomad_options` 379 | 380 | - Driver options 381 | - Key value dict 382 | - Default value: **{}** 383 | 384 | ### `nomad_chroot_env` 385 | 386 | - chroot environment definition for the Exec and Java drivers 387 | - Key value dict 388 | - Default value: false 389 | 390 | ### `nomad_meta` 391 | 392 | - Meta data 393 | - Key value dict 394 | - Default value: **{}** 395 | 396 | ### `nomad_bind_address` 397 | 398 | - Bind interface address 399 | - Default value: `{{ hostvars[inventory_hostname]['ansible_'+ nomad_iface ]['ipv4']['address'] }}` 400 | 401 | ### `nomad_advertise_address` 402 | 403 | - Network interface address to advertise to other nodes 404 | - Default value: `{{ hostvars[inventory_hostname]['ansible_'+ nomad_iface ]['ipv4']['address'] }}` 405 | 406 | ### `nomad_ports` 407 | 408 | - Ports used by Nomad 409 | - Default value: `http: {{ nomad_ports_http }}, rpc: {{ nomad_ports_rpc }}, serf: {{ nomad_ports_serf }}` 410 | 411 | ### `nomad_ports_http` 412 | 413 | - Http port 414 | - Default value: **4646** 415 | 416 | ### `nomad_ports_rpc` 417 | 418 | - RPC port 419 | - Default value: **4647** 420 | 421 | ### `nomad_ports_serf` 422 | 423 | - Serf port 424 | - Default value: **4648** 425 | 426 | ### `nomad_podman_enable` 427 | 428 | - Installs the podman plugin 429 | - Default value: **false** 430 | 431 | ### `nomad_cni_enable` 432 | 433 | - Installs the cni plugins 434 | - Default value: **false** 435 | 436 | ### `nomad_docker_enable` 437 | 438 | - Install Docker subsystem on nodes? 439 | - Default value: **false** 440 | 441 | ### `nomad_template_config` 442 | - Allow you configure client's [template config](https://developer.hashicorp.com/nomad/docs/configuration/client#template-parameters). 
443 | - Default: {} 444 | 445 | Example: 446 | 447 | ```yaml 448 | nomad_template_config: 449 | vault_retry: 450 | attempts: 12 451 | backoff: "750ms" 452 | max_backoff: "2m" 453 | wait: 454 | min: "10s" 455 | max: "4m" 456 | ``` 457 | 458 | ### `nomad_plugins` 459 | - Allow you configure nomad plugins. 460 | - Default: {} 461 | 462 | Example: 463 | 464 | ```yaml 465 | nomad_plugins: 466 | nomad-driver-podman: 467 | config: 468 | volumes: 469 | enabled: true 470 | selinuxlabel: z 471 | recover_stopped: true 472 | ``` 473 | 474 | ### `nomad_group_name` 475 | 476 | - Ansible group that contains all cluster nodes 477 | - Default value: **nomad_instances** 478 | 479 | ### `nomad_servers` 480 | 481 | It's typically not necessary to manually alter this list. 482 | 483 | - List of server nodes 484 | - Default value: List of all nodes in `nomad_group_name` with 485 | `nomad_node_role` set to *server* or *both* 486 | 487 | ### `nomad_gather_server_facts` 488 | 489 | This feature makes it possible to gather the `nomad_bind_address` and 490 | `nomad_advertise_address` from servers that are currently not targeted by the 491 | playbook. 492 | 493 | To make this possible the `delegate_facts` option is used. This option is broken 494 | in many Ansible versions, so this feature might not always work. 495 | 496 | - Gather facts from servers that are not currently targeted 497 | - Default value: 'no' 498 | 499 | ### `nomad_use_consul` 500 | 501 | - Bootstrap nomad via native consul zero-configuration support 502 | assumes consul default ports etc. 503 | - Default value: **False** 504 | 505 | ### `nomad_consul_address` 506 | 507 | - The address of your consul API, use it in combination with nomad_use_consul=True. If you want to use https, use `nomad_consul_ssl`. Do NOT append https. 508 | - Default value: **localhost:8500** 509 | 510 | ### `nomad_consul_ssl` 511 | 512 | - If `true` then uses https. 
513 | - Default value: **false** 514 | 515 | ### `nomad_consul_ca_file` 516 | 517 | - Public key of consul CA, use in combination with `nomad_consul_cert_file` and `nomad_consul_key_file`. 518 | - Default value: "" 519 | 520 | ### `nomad_consul_grpc_ca_file` 521 | 522 | - Public key of consul CA to validate the gRPC TLS, use in combination with `nomad_consul_cert_file` and `nomad_consul_key_file`. 523 | - Default value: **nomad_consul_ca_file** 524 | 525 | ### `nomad_consul_cert_file` 526 | 527 | - The public key which can be used to access consul. 528 | - Default value: "" 529 | 530 | ### `nomad_consul_key_file` 531 | 532 | - The private key counterpart of `nomad_consul_cert_file`. 533 | - Default value: "" 534 | 535 | ### `nomad_consul_servers_service_name` 536 | 537 | - The name of the consul service for your nomad servers 538 | - Default value: **nomad-servers** 539 | 540 | ### `nomad_consul_clients_service_name` 541 | 542 | - The name of the consul service for your nomad clients 543 | - Default value: **nomad-clients** 544 | 545 | ### `nomad_consul_token` 546 | 547 | - Token to use for consul interaction 548 | - Default value: **""** 549 | 550 | ### `nomad_bootstrap_expect` 551 | 552 | - Specifies the number of server nodes to wait for before bootstrapping. 
- Default value: `{{ nomad_servers | count or 3 }}`
628 | - Default value: **""** 629 | 630 | ### `nomad_vault_namespace` 631 | 632 | - Vault namespace used by nomad 633 | - Default value: **""** 634 | 635 | ### `nomad_docker_enable` 636 | 637 | - Enable docker 638 | - Default value: **no** 639 | 640 | ### `nomad_docker_dmsetup` 641 | 642 | - Run dmsetup on ubuntu (only if docker is enabled) 643 | - Default value: **yes** 644 | 645 | ### `nomad_tls_enable` 646 | 647 | - Enable TLS 648 | - Default value: false 649 | 650 | ### `nomad_tls_copy_keys`: false 651 | 652 | - Whether to copy certs from local machine (controller). 653 | - Default value: false 654 | 655 | ### `nomad_tls_files_remote_src` 656 | 657 | - Whether to copy certs from remote machine itself. 658 | - Default value: false 659 | 660 | ### `nomad_tls_dir` 661 | 662 | - The remote dir where the certs are stored. 663 | - Default value: `/etc/nomad/ssl` 664 | 665 | ### `nomad_ca_file` 666 | 667 | - Use a ca for tls connection, nomad_cert_file and nomad_key_file are needed 668 | - Default value: ca.cert 669 | 670 | ### `nomad_cert_file` 671 | 672 | - Use a certificate for tls connection, nomad_ca_file and nomad_key_file are needed 673 | - Default value: server.crt 674 | 675 | ### `nomad_key_file` 676 | 677 | - Use a key for tls connection, nomad_cert_file and nomad_key_file are needed 678 | - Default value: server.key 679 | 680 | ### `nomad_rpc_upgrade_mode` 681 | 682 | - Use a certificate for tls connection, nomad_ca_file and nomad_key_file are needed, used only when the cluster is being upgraded to TLS, and removed after the migration is complete. This allows the agent to accept both TLS and plaintext traffic. 683 | - Default value: **false** 684 | 685 | ### `nomad_verify_server_hostname` 686 | 687 | - Use a key for tls connection, nomad_cert_file and nomad_key_file are needed. Specifies if outgoing TLS connections should verify the server's hostname. 
688 | - Default value: **true** 689 | 690 | ### `nomad_verify_https_client` 691 | 692 | - Use a key for tls connection, nomad_cert_file and nomad_key_file are needed. Specifies agents should require client certificates for all incoming HTTPS requests. The client certificates must be signed by the same CA as Nomad. 693 | - Default value: **true** 694 | 695 | ### `nomad_telemetry` 696 | 697 | - Specifies whether to enable Nomad's telemetry configuration. 698 | - Default value: **false** 699 | 700 | ### `nomad_telemetry_disable_hostname` 701 | 702 | - Specifies if gauge values should be prefixed with the local hostname. 703 | - Default value: "false" 704 | 705 | ### `nomad_telemetry_collection_interval` 706 | 707 | - Specifies the time interval at which the Nomad agent collects telemetry data. 708 | - Default value: "1s" 709 | 710 | ### `nomad_telemetry_use_node_name` 711 | 712 | - Specifies if gauge values should be prefixed with the name of the node, instead of the hostname. If set it will override disable_hostname value. 713 | - Default value: "false" 714 | 715 | ### `nomad_telemetry_publish_allocation_metrics` 716 | 717 | - Specifies if Nomad should publish runtime metrics of allocations. 718 | - Default value: "false" 719 | 720 | ### `nomad_telemetry_publish_node_metrics` 721 | 722 | - Specifies if Nomad should publish runtime metrics of nodes. 723 | - Default value: "false" 724 | 725 | ### `nomad_telemetry_backwards_compatible_metrics` 726 | 727 | - Specifies if Nomad should publish metrics that are backwards compatible with versions below 0.7, as post version 0.7, Nomad emits tagged metrics. All new metrics will only be added to tagged metrics. Note that this option is used to transition monitoring to tagged metrics and will eventually be deprecated. 
728 | - Default value: "false" 729 | 730 | ### `nomad_telemetry_disable_tagged_metrics` 731 | 732 | - Specifies if Nomad should not emit tagged metrics and only emit metrics compatible with versions below Nomad 0.7. Note that this option is used to transition monitoring to tagged metrics and will eventually be deprecated. 733 | - Default value: "false" 734 | 735 | ### `nomad_telemetry_filter_default` 736 | 737 | - This controls whether to allow metrics that have not been specified by the filter. Defaults to true, which will allow all metrics when no filters are provided. When set to false with no filters, no metrics will be sent. 738 | - Default value: "true" 739 | 740 | ### `nomad_telemetry_prefix_filter` 741 | 742 | - This is a list of filter rules to apply for allowing/blocking metrics by prefix. A leading "+" will enable any metrics with the given prefix, and a leading "-" will block them. If there is overlap between two rules, the more specific rule will take precedence. Blocking will take priority if the same prefix is listed multiple times. 743 | - Default value: [] 744 | 745 | ### `nomad_telemetry_disable_dispatched_job_summary_metrics` 746 | 747 | - Specifies if Nomad should ignore jobs dispatched from a parameterized job when publishing job summary statistics. Since each job has a small memory overhead for tracking summary statistics, it is sometimes desired to trade these statistics for more memory when dispatching high volumes of jobs. 748 | - Default value: "false" 749 | 750 | ### `nomad_telemetry_statsite_address` 751 | 752 | - Specifies the address of a statsite server to forward metrics data to. 753 | - Default value: "" 754 | 755 | ### `nomad_telemetry_statsd_address` 756 | 757 | - Specifies the address of a statsd server to forward metrics to. 758 | - Default value: "" 759 | 760 | ### `nomad_telemetry_datadog_address` 761 | 762 | - Specifies the address of a DataDog statsd server to forward metrics to. 
763 | - Default value: "" 764 | 765 | ### `nomad_telemetry_datadog_tags` 766 | 767 | - Specifies a list of global tags that will be added to all telemetry packets sent to DogStatsD. It is a list of strings, where each string looks like "my_tag_name:my_tag_value". 768 | - Default value: [] 769 | 770 | ### `nomad_telemetry_prometheus_metrics` 771 | 772 | - Specifies whether the agent should make Prometheus formatted metrics available at /v1/metrics?format=prometheus. 773 | - Default value: "false" 774 | 775 | ### `nomad_telemetry_circonus_api_token` 776 | 777 | - Specifies a valid Circonus API Token used to create/manage check. If provided, metric management is enabled. 778 | - Default value: "" 779 | 780 | ### `nomad_telemetry_circonus_api_app` 781 | 782 | - Specifies a valid app name associated with the API token. 783 | - Default value: "nomad" 784 | 785 | ### `nomad_telemetry_circonus_api_url` 786 | 787 | - Specifies the base URL to use for contacting the Circonus API. 788 | - Default value: "https://api.circonus.com/v2" 789 | 790 | ### `nomad_telemetry_circonus_submission_interval` 791 | 792 | - Specifies the interval at which metrics are submitted to Circonus. 793 | - Default value: "10s" 794 | 795 | ### `nomad_telemetry_circonus_submission_url` 796 | 797 | - Specifies the check.config.submission_url field, of a Check API object, from a previously created HTTPTRAP check. 798 | - Default value: "" 799 | 800 | ### `nomad_telemetry_circonus_check_id` 801 | 802 | - Specifies the Check ID (not check bundle) from a previously created HTTPTRAP check. The numeric portion of the check._cid field in the Check API object. 803 | - Default value: "" 804 | 805 | ### `nomad_telemetry_circonus_check_force_metric_activation` 806 | 807 | - Specifies if force activation of metrics which already exist and are not currently active. If check management is enabled, the default behavior is to add new metrics as they are encountered. 
If the metric already exists in the check, it will not be activated. This setting overrides that behavior. 808 | - Default value: "false" 809 | 810 | ### `nomad_telemetry_circonus_check_instance_id` 811 | 812 | - Serves to uniquely identify the metrics coming from this instance. It can be used to maintain metric continuity with transient or ephemeral instances as they move around within an infrastructure. By default, this is set to hostname:application name (e.g. "host123:nomad"). 813 | - Default value: "" 814 | 815 | ### `nomad_telemetry_circonus_check_search_tag` 816 | 817 | - Specifies a special tag which, when coupled with the instance id, helps to narrow down the search results when neither a Submission URL or Check ID is provided. By default, this is set to service:app (e.g. "service:nomad"). 818 | - Default value: "" 819 | 820 | ### `nomad_telemetry_circonus_check_display_name` 821 | 822 | - Specifies a name to give a check when it is created. This name is displayed in the Circonus UI Checks list. 823 | - Default value: "" 824 | 825 | ### `nomad_telemetry_circonus_check_tags` 826 | 827 | - Comma separated list of additional tags to add to a check when it is created. 828 | - Default value: "" 829 | 830 | ### `nomad_telemetry_circonus_broker_id` 831 | 832 | - Specifies the ID of a specific Circonus Broker to use when creating a new check. The numeric portion of broker._cid field in a Broker API object. If metric management is enabled and neither a Submission URL nor Check ID is provided, an attempt will be made to search for an existing check using Instance ID and Search Tag. If one is not found, a new HTTPTRAP check will be created. By default, this is a random Enterprise Broker is selected, or, the default Circonus Public Broker. 833 | - Default value: "" 834 | 835 | ### `nomad_telemetry_circonus_broker_select_tag` 836 | 837 | - Specifies a special tag which will be used to select a Circonus Broker when a Broker ID is not provided. 
The best use of this is as a hint
"Staging Cluster"). 884 | - Default value: "Staging Cluster" 885 | 886 | ### `nomad_ui_label_background_color` 887 | 888 | - Specifies the background color of the label on the UI (e.g. "yellow"). 889 | - Default value: "yellow" 890 | 891 | ### `nomad_ui_label_text_color` 892 | 893 | - Specifies the color of the label on the UI (e.g. "#000000"). 894 | - Default value: "#000000" 895 | 896 | ### `nomad_artifact` 897 | 898 | - Specifies environment variables for artifact (e.g. "GITLAB_READONLY_TOKEN"). 899 | - Default value: "" 900 | 901 | e.g 902 | 903 | ```yaml 904 | nomad_artifact: 905 | { 906 | set_environment_variables: "GITLAB_READONLY_TOKEN,GITLAB_KEYCLOAK_THEMES_READONLY_TOKEN", 907 | } 908 | ``` 909 | 910 | #### Custom Configuration Section 911 | 912 | As Nomad loads the configuration from files and directories in lexical order, 913 | typically merging on top of previously parsed configuration files, you may set 914 | custom configurations via `nomad_config_custom`, which will be expanded into a file named `custom.json` within your `nomad_config_dir` which will 915 | be loaded after all other configuration by default. 916 | 917 | An example usage for enabling `vault`: 918 | 919 | ```yaml 920 | vars: 921 | nomad_config_custom: 922 | vault: 923 | enabled : true 924 | ca_path : "/etc/certs/ca" 925 | cert_file : "/var/certs/vault.crt" 926 | key_file : "/var/certs/vault.key" 927 | address : "https://vault.service.consul:8200" 928 | create_from_role : "nomad-cluster" 929 | ``` 930 | 931 | ## Dependencies 932 | 933 | Ansible requires GNU tar and this role performs some local use of the 934 | unarchive module, so ensure that your system has `gtar`/`unzip` installed. 935 | Jinja2 templates use ipaddr filter that need `netaddr` python library. 
936 | 937 | ## Example Playbook 938 | 939 | Basic nomad installation is possible using the included `site.yml` playbook: 940 | 941 | ``` 942 | ansible-playbook -i site.yml 943 | ``` 944 | 945 | You can also simply pass variables in using the `--extra-vars` option to the 946 | `ansible-playbook` command: 947 | 948 | ``` 949 | ansible-playbook -i hosts site.yml --extra-vars "nomad_datacenter=maui" 950 | ``` 951 | 952 | ### Vagrant and VirtualBox 953 | 954 | See `examples/README_VAGRANT.md` for details on quick Vagrant deployments 955 | under VirtualBox for testing, etc. 956 | 957 | ## License 958 | 959 | BSD 960 | 961 | ## Author Information 962 | 963 | [Brian Shumate](http://brianshumate.com) 964 | 965 | ## Contributors 966 | 967 | Special thanks to the folks listed in [CONTRIBUTORS.md](https://github.com/ansible-community/ansible-nomad/blob/master/CONTRIBUTORS.md) for their 968 | contributions to this project. 969 | 970 | Contributions are welcome, provided that you can agree to the terms outlined 971 | in [CONTRIBUTING.md](https://github.com/ansible-community/ansible-nomad/blob/master/CONTRIBUTING.md) 972 | -------------------------------------------------------------------------------- /defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: main.yml - Main default variables for nomad 3 | 4 | # assert supported os/versions 5 | os_supported_matrix: 6 | AlmaLinux: 7 | min_version: "" 8 | Archlinux: 9 | min_version: "" 10 | Flatcar: 11 | min_version: "" 12 | # RHEL-based 13 | RedHat: 14 | min_version: "7" 15 | CentOS: 16 | min_version: "7" 17 | Fedora: 18 | min_version: "" 19 | Amazon: 20 | min_version: "" 21 | # Debian based 22 | OracleLinux: 23 | min_version: "8" 24 | Debian: 25 | min_version: "10" 26 | Ubuntu: 27 | min_version: "20.04" 28 | VMware Photon OS: 29 | min_version: "4" 30 | 31 | ## Core 32 | nomad_debug: false 33 | 34 | ## Asserts 35 | nomad_skip_ensure_all_hosts: "{{ lookup('env', 
'NOMAD_SKIP_ENSURE_ALL_HOSTS') | default('false', true) }}" 36 | 37 | ## Config Purge 38 | nomad_allow_purge_config: "{{ lookup('env', 'NOMAD_ALLOW_PURGE_CONFIG') | default('false', true) }}" 39 | 40 | ### Package 41 | nomad_version: "{{ lookup('env', 'NOMAD_VERSION') | default('1.1.1', true) }}" 42 | nomad_architecture_map: 43 | amd64: amd64 44 | x86_64: amd64 45 | armv7l: arm 46 | aarch64: arm64 47 | 32-bit: "386" 48 | 64-bit: amd64 49 | nomad_architecture: "{{ nomad_architecture_map[ansible_architecture] }}" 50 | nomad_pkg: nomad_{{ nomad_version }}_linux_{{ nomad_architecture }}.zip 51 | nomad_zip_url: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_linux_{{ nomad_architecture }}.zip 52 | nomad_checksum_file_url: https://releases.hashicorp.com/nomad/{{ nomad_version }}/nomad_{{ nomad_version }}_SHA256SUMS 53 | nomad_podman_enable: false 54 | nomad_podman_version: "{{ lookup('env', 'NOMAD_PODMAN_VERSION') | default('0.3.0', true) }}" 55 | nomad_podman_pkg: nomad-driver-podman_{{ nomad_podman_version }}_linux_{{ nomad_architecture }}.zip 56 | nomad_podman_url: https://releases.hashicorp.com/nomad-driver-podman/{{ nomad_podman_version }} 57 | nomad_podman_zip_url: "{{ nomad_podman_url }}/{{ nomad_podman_pkg }}" 58 | nomad_podman_checksum_file_url: "{{ nomad_podman_url }}/nomad-driver-podman_{{ nomad_podman_version }}_SHA256SUMS" 59 | 60 | ### Paths 61 | nomad_bin_dir: /usr/local/bin 62 | nomad_config_dir: /etc/nomad.d 63 | nomad_data_dir: /var/nomad 64 | nomad_plugin_dir: "{{ nomad_data_dir }}/plugins" 65 | nomad_lockfile: /var/lock/subsys/nomad 66 | nomad_run_dir: /var/run/nomad 67 | 68 | ### Initialization and startup script templates 69 | nomad_systemd_template: nomad_systemd.service.j2 70 | nomad_systemd_unit_path: /lib/systemd/system 71 | 72 | ### System user and group 73 | nomad_manage_user: true 74 | nomad_user: root 75 | nomad_manage_group: false 76 | nomad_group: bin 77 | 78 | ### Nomad settings 79 | nomad_datacenter: 
dc1 80 | nomad_region: global 81 | nomad_log_level: INFO 82 | nomad_syslog_enable: true 83 | nomad_iface: "{{ lookup('env', 'NOMAD_IFACE') | default(ansible_default_ipv4.interface, true) }}" 84 | nomad_node_name: "{{ inventory_hostname_short }}" 85 | nomad_node_role: "{{ lookup('env', 'NOMAD_NODE_ROLE') | default('client', true) }}" 86 | nomad_leave_on_terminate: true 87 | nomad_leave_on_interrupt: false 88 | nomad_disable_update_check: false 89 | 90 | #### Server settings 91 | nomad_retry_max: 0 92 | nomad_retry_join: false 93 | nomad_retry_interval: 30s 94 | nomad_rejoin_after_leave: false 95 | nomad_enabled_schedulers: 96 | - service 97 | - batch 98 | - system 99 | nomad_num_schedulers: "{{ ansible_processor_vcpus }}" 100 | nomad_node_gc_threshold: 24h 101 | nomad_job_gc_threshold: 4h 102 | nomad_eval_gc_threshold: 1h 103 | nomad_deployment_gc_threshold: 1h 104 | nomad_encrypt_enable: "{{ lookup('env', 'NOMAD_ENCRYPT_ENABLE') | default('false', true) }}" 105 | nomad_raft_protocol: 2 106 | nomad_raft_multiplier: 1 107 | 108 | #### Client settings 109 | nomad_node_class: "" 110 | nomad_no_host_uuid: false 111 | nomad_max_kill_timeout: 30s 112 | nomad_network_speed: 0 113 | nomad_cpu_total_compute: 0 114 | nomad_gc_interval: 1m 115 | nomad_gc_max_allocs: 50 116 | nomad_gc_disk_usage_threshold: 80 117 | nomad_gc_inode_usage_threshold: 70 118 | nomad_gc_parallel_destroys: 2 119 | nomad_reserved: 120 | cpu: "{{ nomad_reserved_cpu | default('0', true) }}" 121 | memory: "{{ nomad_reserved_memory | default('0', true) }}" 122 | disk: "{{ nomad_reserved_disk | default('0', true) }}" 123 | ports: "{{ nomad_reserved_ports | default('22', true) }}" 124 | nomad_host_volumes: [] 125 | nomad_host_networks: [] 126 | nomad_options: {} 127 | nomad_meta: {} 128 | nomad_bootstrap_expect: "{{ nomad_servers | count or 3 }}" 129 | nomad_chroot_env: false 130 | nomad_plugins: {} 131 | nomad_template_config: {} 132 | ### Addresses 133 | nomad_bind_address: "{{ 
hostvars[inventory_hostname]['ansible_' + nomad_iface]['ipv4']['address'] }}" 134 | nomad_advertise_address: "{{ hostvars[inventory_hostname]['ansible_' + nomad_iface]['ipv4']['address'] }}" 135 | 136 | ### Ports 137 | nomad_ports: 138 | http: "{{ nomad_ports_http | default('4646', true) }}" 139 | rpc: "{{ nomad_ports_rpc | default('4647', true) }}" 140 | serf: "{{ nomad_ports_serf | default('4648', true) }}" 141 | 142 | ### Servers 143 | nomad_group_name: nomad_instances 144 | nomad_servers: "{% if nomad_use_consul == false %}{% set _nomad_servers = [] %}{% for host in groups[nomad_group_name] %}{% set _nomad_node_role = hostvars[host]['nomad_node_role'] 145 | | default('client', true) %}{% if (_nomad_node_role == 'server' or _nomad_node_role == 'both') %}{% if _nomad_servers.append(host) %}{% endif %}{% endif %}{% endfor 146 | %}{{ _nomad_servers }}{% else %}[]{% endif %}" 147 | nomad_gather_server_facts: false 148 | 149 | ### Consul 150 | nomad_use_consul: false 151 | nomad_consul_address: localhost:8500 152 | nomad_consul_ssl: false 153 | nomad_consul_ca_file: "" 154 | nomad_consul_grpc_ca_file: "{{ nomad_consul_ca_file }}" 155 | nomad_consul_cert_file: "" 156 | nomad_consul_key_file: "" 157 | nomad_consul_token: "" 158 | nomad_consul_servers_service_name: nomad-servers 159 | nomad_consul_clients_service_name: nomad-clients 160 | nomad_consul_tags: [] 161 | ### ACLs 162 | nomad_acl_enabled: "{{ lookup('env', 'NOMAD_ACL_ENABLED') | default('no', true) }}" 163 | nomad_acl_token_ttl: 30s 164 | nomad_acl_policy_ttl: 30s 165 | nomad_acl_replication_token: "" 166 | 167 | ### Vault 168 | nomad_vault_enabled: "{{ lookup('env', 'NOMAD_VAULT_ENABLED') | default('no', true) }}" 169 | nomad_vault_address: "{{ vault_address | default('0.0.0.0', true) }}" 170 | nomad_vault_allow_unauthenticated: true 171 | nomad_vault_create_from_role: "" 172 | nomad_vault_task_token_ttl: "" 173 | nomad_vault_ca_file: "" 174 | nomad_vault_ca_path: "" 175 | nomad_vault_cert_file: "" 176 | 
nomad_vault_key_file: "" 177 | nomad_vault_tls_server_name: "" 178 | nomad_vault_tls_skip_verify: false 179 | nomad_vault_token: "" 180 | nomad_vault_namespace: "" 181 | 182 | ### Docker 183 | nomad_docker_enable: "{{ lookup('env', 'NOMAD_DOCKER_ENABLE') | default('false', true) }}" 184 | nomad_docker_dmsetup: true 185 | 186 | ### TlS 187 | nomad_tls_enable: false 188 | nomad_tls_copy_keys: false 189 | nomad_tls_files_remote_src: false 190 | nomad_tls_dir: "{{ lookup('env', 'NOMAD_TLS_DIR') | default('/etc/nomad/ssl', true) }}" 191 | nomad_ca_file: "{{ lookup('env', 'NOMAD_CA_FILE') | default('ca.crt', true) }}" 192 | nomad_cert_file: "{{ lookup('env', 'NOMAD_CERT_FILE') | default('server.crt', true) }}" 193 | nomad_key_file: "{{ lookup('env', 'NOMAD_KEY_FILE') | default('server.key', true) }}" 194 | nomad_rpc_upgrade_mode: false 195 | nomad_verify_server_hostname: true 196 | nomad_verify_https_client: true 197 | 198 | ### Autopilot 199 | nomad_autopilot_cleanup_dead_servers: true 200 | nomad_autopilot_last_contact_threshold: 200ms 201 | nomad_autopilot_max_trailing_logs: 250 202 | nomad_autopilot_server_stabilization_time: 10s 203 | 204 | ### UI 205 | nomad_ui: false 206 | nomad_ui_label_text: "Staging Cluster" 207 | nomad_ui_label_background_color: "yellow" 208 | nomad_ui_label_text_color: "#000000" 209 | 210 | ### Artifact 211 | nomad_artifact: {} 212 | 213 | ### CNI 214 | nomad_cni_enable: false 215 | nomad_cni_dir: /opt/cni/bin 216 | nomad_cni_version: "{{ lookup('env', 'NOMAD_CNI_VERSION') | default('0.9.1', true) }}" 217 | nomad_cni_pkg: cni-plugins-linux-{{ nomad_architecture }}-v{{ nomad_cni_version }}.tgz 218 | nomad_cni_url: https://github.com/containernetworking/plugins/releases/download/v{{ nomad_cni_version }} 219 | nomad_cni_zip_url: "{{ nomad_cni_url }}/{{ nomad_cni_pkg }}" 220 | nomad_cni_checksum_file_url: "{{ nomad_cni_zip_url }}.sha256" 221 | -------------------------------------------------------------------------------- 
/examples/README_VAGRANT.md: -------------------------------------------------------------------------------- 1 | # Nomad with Ansible 2 | 3 | This project provides documentation and a collection of scripts to help you 4 | automate the deployment of [Nomad](https://nomadproject.io) using 5 | [Ansible](http://www.ansibleworks.com/). These are the instructions for 6 | deploying a development cluster on Vagrant and VirtualBox. 7 | 8 | The documentation and scripts are merely a starting point designed to both 9 | help familiarize you with the processes and quickly bootstrap an environment 10 | for development. You may wish to expand on them and customize 11 | them with additional features specific to your needs later. 12 | 13 | ## Vagrant Development Cluster 14 | 15 | In some situations deploying a small cluster on your local development 16 | machine can be handy. This document describes such a scenario using the 17 | following technologies: 18 | 19 | * [Nomad](https://nomadproject.io) 20 | * [VirtualBox](https://www.virtualbox.org/) 21 | * [Vagrant](http://www.vagrantup.com/) with Ansible provisioner and 22 | supporting plugin 23 | * [Ansible](http://www.ansibleworks.com/) 24 | 25 | Each of the virtual machines for this guide are configured with 26 | 1.5GB RAM, 2 CPU cores, and 2 network interfaces. The first interface uses 27 | NAT and has connection via the host to the outside world. The second 28 | interface is a private network and is used for nomad intra-cluster 29 | communication in addition to access from the host machine. 30 | 31 | The Vagrant configuration file (`Vagrantfile`) is responsible for 32 | configuring the virtual machines and a baseline OS installation. 33 | 34 | The Ansible playbooks then further refine OS configuration, perform nomad 35 | software download and installation, and the initialization of nodes 36 | into a ready to use cluster. 
37 | 38 | ## Designed for Ansible Galaxy 39 | 40 | This role is designed to be installed via the `ansible-galaxy` command 41 | instead of being directly run from the git repository. 42 | 43 | You should install it like this: 44 | 45 | ``` 46 | ansible-galaxy install brianshumate.nomad 47 | ``` 48 | 49 | You'll want to make sure you have write access to `/etc/ansible/roles/` since 50 | that is where the role will be installed by default, or define your own 51 | Ansible role path by creating a `$HOME/.ansible.cfg` or even `./ansible.cfg` 52 | file with these contents: 53 | 54 | ``` 55 | [defaults] 56 | roles_path = PATH_TO_ROLES 57 | ``` 58 | 59 | Change `PATH_TO_ROLES` to a directory that you have write access to. 60 | 61 | ## Quick Start 62 | 63 | Begin from the top level directory of this project and use the following 64 | steps to get up and running: 65 | 66 | 1. Install [VirtualBox](https://www.virtualbox.org/wiki/Downloads), [Vagrant](http://downloads.vagrantup.com/), [vagrant-hosts](https://github.com/adrienthebo/vagrant-hosts), and [Ansible](http://www.ansibleworks.com/docs/intro_installation.html#latest-releases-via-pip). 67 | 2. Edit `/etc/hosts` or use the included `bin/preinstall` script to add 68 | the following entries to your development system's `/etc/hosts` file: 69 | * 10.1.42.70 nomad1.local nomad1 70 | * 10.1.42.71 nomad2.local nomad2 71 | * 10.1.42.72 nomad3.local nomad3 72 | 3. cd `$PATH_TO_ROLES/brianshumate.nomad/examples` 73 | 4. `vagrant up` 74 | 75 | By default, this project will install Debian 8 based cluster nodes. If you 76 | prefer, it can also install CentOS 7 based nodes by changing the command 77 | in step 4 to the following: 78 | 79 | ``` 80 | BOX_NAME="centos/7" vagrant up 81 | ``` 82 | 83 | ## Notes 84 | 85 | 1.
This project functions with the following software versions: 86 | * Nomad version 0.12.1 87 | * Ansible version 2.8.0 88 | * VirtualBox version 5.2.30 89 | * Vagrant version 2.2.4 90 | * Vagrant Hosts version 2.8.3 91 | 2. This project uses Debian 8 (Jessie) by default, but you can choose other OS 92 | with the *BOX_NAME* environment variable 93 | 3. The `bin/preinstall` shell script performs the following actions for you: 94 | * Adds each node's host information to the host machine's `/etc/hosts` 95 | * Optionally installs the Vagrant hosts plugin 96 | 4. If you see an error like *vm: The '' provisioner could not be found.* 97 | make sure you have vagrant-hosts plugin installed 98 | 99 | ## References 100 | 101 | 1. https://www.nomadproject.io/ 102 | 2. http://www.ansible.com/ 103 | 3. http://www.vagrantup.com/ 104 | 4. https://www.virtualbox.org/ 105 | 5. https://github.com/adrienthebo/vagrant-hosts 106 | -------------------------------------------------------------------------------- /examples/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile for bootstrapping a development nomad cluster with 5 | # VirtualBox provider and Ansible provisioner 6 | 7 | ANSIBLE_PLAYBOOK = ENV['ANSIBLE_PLAYBOOK'] || "site.yml" 8 | VAGRANTFILE_API_VERSION = "2" 9 | BOX_MEM = ENV['BOX_MEM'] || "1536" 10 | BOX_NAME = ENV['BOX_NAME'] || "debian/jessie64" 11 | CLUSTER_HOSTS = ENV['CLUSTER_HOSTS'] || "vagrant_hosts" 12 | 13 | Vagrant.require_version ">= 1.5.0" 14 | 15 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 16 | 17 | # Configure 3 nomad nodes 18 | config.vm.define :nomad1 do |nomad1_config| 19 | nomad1_config.vm.box = BOX_NAME 20 | nomad1_config.vm.network :private_network, ip: "10.1.42.70" 21 | nomad1_config.vm.hostname = "nomad1.local" 22 | nomad1_config.ssh.forward_agent = true 23 | nomad1_config.vm.provider "virtualbox" do |v| 24 | v.name = "nomad-node1" 
25 | v.customize ["modifyvm", :id, "--memory", BOX_MEM] 26 | v.customize ["modifyvm", :id, "--ioapic", "on"] 27 | v.customize ["modifyvm", :id, "--cpus", "2"] 28 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 29 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 30 | end 31 | if ENV['NOMAD_DOCKER_ENABLE'] == "true" 32 | nomad1_config.vm.provision "docker" 33 | end 34 | nomad1_config.vm.provision :hosts do |provisioner| 35 | provisioner.add_host '10.1.42.70', ['nomad1.local'] 36 | provisioner.add_host '10.1.42.71', ['nomad2.local'] 37 | provisioner.add_host '10.1.42.72', ['nomad3.local'] 38 | end 39 | end 40 | config.vm.define :nomad2 do |nomad2_config| 41 | nomad2_config.vm.box = BOX_NAME 42 | nomad2_config.vm.network :private_network, ip: "10.1.42.71" 43 | nomad2_config.vm.hostname = "nomad2.local" 44 | nomad2_config.ssh.forward_agent = true 45 | nomad2_config.vm.provider "virtualbox" do |v| 46 | v.name = "nomad-node2" 47 | v.customize ["modifyvm", :id, "--memory", BOX_MEM] 48 | v.customize ["modifyvm", :id, "--ioapic", "on"] 49 | v.customize ["modifyvm", :id, "--cpus", "2"] 50 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 51 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 52 | end 53 | if ENV['NOMAD_DOCKER_ENABLE'] == "true" 54 | nomad2_config.vm.provision "docker" 55 | end 56 | nomad2_config.vm.provision :hosts do |provisioner| 57 | provisioner.add_host '10.1.42.70', ['nomad1.local'] 58 | provisioner.add_host '10.1.42.71', ['nomad2.local'] 59 | provisioner.add_host '10.1.42.72', ['nomad3.local'] 60 | end 61 | end 62 | config.vm.define :nomad3 do |nomad3_config| 63 | nomad3_config.vm.box = BOX_NAME 64 | nomad3_config.vm.network :private_network, ip: "10.1.42.72" 65 | nomad3_config.vm.hostname = "nomad3.local" 66 | nomad3_config.ssh.forward_agent = true 67 | nomad3_config.vm.provider "virtualbox" do |v| 68 | v.name = "nomad-node3" 69 | v.customize ["modifyvm", :id, "--memory", BOX_MEM] 70 | v.customize ["modifyvm", 
:id, "--ioapic", "on"] 71 | v.customize ["modifyvm", :id, "--cpus", "2"] 72 | v.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] 73 | v.customize ["modifyvm", :id, "--natdnsproxy1", "on"] 74 | end 75 | if ENV['NOMAD_DOCKER_ENABLE'] == "true" 76 | nomad3_config.vm.provision "docker" 77 | end 78 | nomad3_config.vm.provision :hosts do |provisioner| 79 | provisioner.add_host '10.1.42.70', ['nomad1.local'] 80 | provisioner.add_host '10.1.42.71', ['nomad2.local'] 81 | provisioner.add_host '10.1.42.72', ['nomad3.local'] 82 | end 83 | nomad3_config.vm.provision :ansible do |ansible| 84 | ansible.inventory_path = CLUSTER_HOSTS 85 | # Extra Ansible variables can be defined here 86 | ansible.extra_vars = { 87 | nomad_magic: 9001, 88 | } 89 | ansible.playbook = ANSIBLE_PLAYBOOK 90 | ansible.limit = "all" 91 | end 92 | end 93 | end 94 | -------------------------------------------------------------------------------- /examples/bin/preinstall: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # File: examples/bin/preinstall - convenience script to add nomad 4 | # VM node host information to /etc/hosts for Vagrant 5 | 6 | nomad1="10\.1\.42\.70" 7 | 8 | # Log stuff 9 | function logmsg { 10 | msgtype="$1" 11 | msgtxt="$2" 12 | case "${msgtype}" in 13 | greeting) 14 | printf "🌞 ${txtylw}${msgtxt}\n" 15 | ;; 16 | info) 17 | printf "💬 ${txtwht}${msgtxt}\n" 18 | ;; 19 | success) 20 | printf "✅ ${txtgrn}${msgtxt}\n" 21 | ;; 22 | notice) 23 | printf "🚩 ${txtylw}${msgtxt}\n" 24 | ;; 25 | alert) 26 | printf "⛔️ ${txtred}${msgtxt}\n" >&2 27 | ;; 28 | *) 29 | printf "⁉️ ${txtwht}${msgtxt}\n" >&2 30 | ;; 31 | esac 32 | } 33 | 34 | # Check if sudo will need password 35 | function sudocheck { 36 | logmsg info "Enter your user account password for sudo if prompted" 37 | sudo true 38 | } 39 | 40 | # Add hosts entries if necessary 41 | function add_hosts { 42 | if grep nomad1 /etc/hosts > /dev/null 2>&1; then 43 | logmsg success 
"nomad VM node information present in /etc/hosts" 44 | else 45 | sudocheck 46 | sudo sh -c "echo '# Nomad Vagrant virtual machine hosts 47 | 10.1.42.70 nomad1.local nomad1 48 | 10.1.42.71 nomad2.local nomad2 49 | 10.1.42.72 nomad3.local nomad3 50 | ' >> /etc/hosts" 51 | logmsg success "Nomad node host information added to /etc/hosts" 52 | fi 53 | } 54 | 55 | # Install Vagrant Hosts plugin if necessary 56 | function vagrant_hosts_plugin { 57 | if vagrant plugin list | grep vagrant-hosts > /dev/null 2>&1; then 58 | logmsg success "Vagrant Hosts plugin is installed" 59 | else 60 | vagrant plugin install vagrant-hosts > /dev/null 2>&1 61 | logmsg success "Installed Vagrant Hosts plugin" 62 | fi 63 | } 64 | 65 | add_hosts 66 | vagrant_hosts_plugin 67 | -------------------------------------------------------------------------------- /examples/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: site.yml - Example nomad site playbook 3 | 4 | - name: Installing Nomad 5 | hosts: nomad_instances 6 | become: true 7 | become_user: root 8 | 9 | tasks: 10 | - name: Nomad role 11 | ansible.builtin.include_role: 12 | name: ansible-community.nomad 13 | 14 | - name: Start nomad 15 | ansible.builtin.service: 16 | name: nomad 17 | state: started 18 | enabled: true 19 | -------------------------------------------------------------------------------- /examples/vagrant_hosts: -------------------------------------------------------------------------------- 1 | # File: vagrant_hosts 2 | # nomad cluster node hosts configuration for Vagrant 3 | # 4 | # NB: Replace the hosts below with your preferred node hostnames and continue 5 | # the 'nodeN' pattern for additional nodes past 'nomad3' 6 | # Do not modify the labels (text appearing between []), however 7 | 8 | [nomad_instances] 9 | nomad1.local nomad_node_role=server ansible_ssh_user=vagrant ansible_ssh_private_key_file=./.vagrant/machines/nomad1/virtualbox/private_key 10 | 
nomad2.local nomad_node_role=server ansible_ssh_user=vagrant ansible_ssh_private_key_file=./.vagrant/machines/nomad2/virtualbox/private_key 11 | nomad3.local nomad_node_role=client ansible_ssh_user=vagrant ansible_ssh_private_key_file=./.vagrant/machines/nomad3/virtualbox/private_key 12 | -------------------------------------------------------------------------------- /files/README.md: -------------------------------------------------------------------------------- 1 | # Files 2 | 3 | This directory is for files, including ephemeral ones which are 4 | downloaded and copied to the inventory hosts during plays. 5 | -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: main.yml handlers file for nomad 3 | 4 | - name: Restart nomad 5 | ansible.builtin.service: 6 | name: nomad 7 | state: restarted 8 | 9 | - name: Reload systemd daemon 10 | ansible.builtin.systemd: 11 | daemon_reload: true 12 | 13 | - name: Enable nomad at startup (systemd) 14 | ansible.builtin.systemd: 15 | name: nomad 16 | enabled: true 17 | -------------------------------------------------------------------------------- /meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: Brian Shumate 4 | description: Nomad cluster role 5 | company: Brian Shumate 6 | license: BSD 7 | min_ansible_version: "2.9" 8 | role_name: nomad 9 | namespace: community 10 | platforms: 11 | - name: ArchLinux 12 | versions: 13 | - all 14 | - name: EL 15 | versions: 16 | - "6" 17 | - "7" 18 | - name: Ubuntu 19 | versions: 20 | - vivid 21 | - xenial 22 | - name: Debian 23 | versions: 24 | - jessie 25 | - name: Windows 26 | versions: 27 | - 2012R2 28 | 29 | galaxy_tags: 30 | - clustering 31 | - monitoring 32 | - networking 33 | - scheduling 34 | - system 35 | 36 | dependencies: [] 37 | 
-------------------------------------------------------------------------------- /molecule/_shared/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | RUN if [ $(command -v apt-get) ]; then \ 18 | if grep -q "Debian GNU/Linux 10" /etc/os-release; then \ 19 | apt-get update && apt-get install -y systemd python sudo bash ca-certificates iproute2 python-apt-common && apt-get clean; \ 20 | elif grep -q "Debian GNU/Linux 11" /etc/os-release; then \ 21 | apt-get update && apt-get install -y systemd python sudo bash ca-certificates iproute2 python-apt-common && apt-get clean; \ 22 | else \ 23 | apt-get update && apt-get install -y systemd python3 sudo bash ca-certificates iproute2 python3-apt && apt-get clean; \ 24 | fi; \ 25 | elif [ $(command -v dnf) ]; then \ 26 | dnf makecache && dnf --assumeyes install systemd python3 sudo bash iproute && dnf clean all; \ 27 | elif [ $(command -v yum) ]; then \ 28 | yum makecache fast && yum install -y systemd python sudo yum-plugin-ovl bash iproute && sed -i 's/plugins=0/plugins=1/g' /etc/yum.conf && yum clean all; \ 29 | fi 30 | -------------------------------------------------------------------------------- /molecule/_shared/base.yml: -------------------------------------------------------------------------------- 1 | --- 2 | scenario: 3 | test_sequence: 4 | - dependency 5 | - syntax 6 | - create 7 | - prepare 8 | - converge 9 | - verify 10 | - destroy 11 | dependency: 12 | name: galaxy 13 | driver: 14 | name: docker 15 | provisioner: 16 | name: ansible 17 | config_options: 18 | defaults: 19 | deprecation_warnings: false 20 |
callback_whitelist: timer,profile_tasks 21 | fact_caching: jsonfile 22 | fact_caching_connection: ./cache 23 | forks: 100 24 | connection: 25 | pipelining: true 26 | playbooks: 27 | prepare: ../_shared/prepare.yml 28 | converge: ../_shared/converge.yml 29 | inventory: 30 | group_vars: 31 | nomad_instances: 32 | nomad_node_role: both 33 | verifier: 34 | name: ansible 35 | -------------------------------------------------------------------------------- /molecule/_shared/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | roles: 5 | - role: ansible-nomad 6 | 7 | vars: 8 | # TODO: Probably we need to install syslog-ng/rsyslog first 9 | nomad_syslog_enable: False 10 | -------------------------------------------------------------------------------- /molecule/_shared/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: localhost 4 | connection: local 5 | 6 | tasks: 7 | - name: Install OS packages 8 | package: 9 | name: unzip 10 | become: true 11 | 12 | - name: Install netaddr dependency on controlling host 13 | pip: 14 | name: netaddr 15 | become: false 16 | -------------------------------------------------------------------------------- /molecule/_shared/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | tasks: 5 | 6 | - name: Verify that /etc/nomad.d/base.hcl exists 7 | ansible.builtin.file: 8 | path: /etc/nomad.d/base.hcl 9 | state: file 10 | register: result_nomad_base_hcl 11 | 12 | - name: Validate that /etc/nomad.d/base.hcl exists 13 | ansible.builtin.assert: 14 | that: 15 | - result_nomad_base_hcl.state == 'file' 16 | - result_nomad_base_hcl.owner == 'root' 17 | - result_nomad_base_hcl.group == 'root' 18 | - result_nomad_base_hcl.mode == '0644' 19 | 20 | - name: Verify that /etc/nomad.d/client.hcl exists 21 | 
ansible.builtin.file: 22 | path: /etc/nomad.d/client.hcl 23 | state: file 24 | register: result_nomad_client_hcl 25 | 26 | - name: Validate that /etc/nomad.d/client.hcl exists 27 | ansible.builtin.assert: 28 | that: 29 | - result_nomad_client_hcl.state == 'file' 30 | - result_nomad_client_hcl.owner == 'root' 31 | - result_nomad_client_hcl.group == 'root' 32 | - result_nomad_client_hcl.mode == '0644' 33 | 34 | - name: Verify that /etc/nomad.d/server.hcl exists 35 | ansible.builtin.file: 36 | path: /etc/nomad.d/server.hcl 37 | state: file 38 | register: result_nomad_server_hcl 39 | 40 | - name: Validate that /etc/nomad.d/server.hcl exists 41 | ansible.builtin.assert: 42 | that: 43 | - result_nomad_server_hcl.state == 'file' 44 | - result_nomad_server_hcl.owner == 'root' 45 | - result_nomad_server_hcl.group == 'root' 46 | - result_nomad_server_hcl.mode == '0644' 47 | 48 | - name: Verify that service nomad is running 49 | ansible.builtin.service: 50 | name: nomad 51 | state: started 52 | register: result_nomad_service 53 | 54 | - name: Validate that service nomad is running 55 | ansible.builtin.assert: 56 | that: 57 | - result_nomad_service.state == 'started' 58 | - result_nomad_service.changed == false 59 | - result_nomad_service.name == 'nomad' 60 | -------------------------------------------------------------------------------- /molecule/almalinux-8/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: almalinux-8 4 | groups: 5 | - nomad_instances 6 | image: almalinux:8 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/almalinux-8/verify.yml: -------------------------------------------------------------------------------- 1 |
../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/almalinux-9/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: almalinux-9 4 | groups: 5 | - nomad_instances 6 | image: almalinux:9 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/almalinux-9/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/centos-7/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: centos-7 4 | groups: 5 | - nomad_instances 6 | image: dokken/centos-7 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/centos-7/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/centos-8-stream/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: centos-stream-8 4 | groups: 5 | - nomad_instances 6 | image: dokken/centos-stream-8 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: 
true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/centos-8-stream/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/centos-9-stream/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: centos-stream-9 4 | groups: 5 | - nomad_instances 6 | image: dokken/centos-stream-9 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/centos-9-stream/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/debian-10/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: debian-10 4 | groups: 5 | - nomad_instances 6 | image: dokken/debian-10 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /bin/systemd 15 | -------------------------------------------------------------------------------- /molecule/debian-10/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/debian-11/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - 
name: debian-11 4 | groups: 5 | - nomad_instances 6 | image: dokken/debian-11 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /bin/systemd 15 | -------------------------------------------------------------------------------- /molecule/debian-11/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/debian-12/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: debian-12 4 | groups: 5 | - nomad_instances 6 | image: dokken/debian-12 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /bin/systemd 15 | -------------------------------------------------------------------------------- /molecule/debian-12/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/default/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansible-community/ansible-nomad/a0f567ef40ed8f7fefd37061054417e54bc822d2/molecule/default/.gitkeep -------------------------------------------------------------------------------- /molecule/fedora-38/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: fedora-38 4 | groups: 5 | - nomad_instances 6 | image: dokken/fedora-38 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - 
/sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/fedora-38/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/fedora-39/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: fedora-39 4 | groups: 5 | - nomad_instances 6 | image: dokken/fedora-39 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/fedora-39/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/oraclelinux-7/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: oraclelinux-7 4 | groups: 5 | - nomad_instances 6 | image: dokken/oraclelinux-7 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/oraclelinux-7/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/oraclelinux-8/molecule.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: oraclelinux-8 4 | groups: 5 | - nomad_instances 6 | image: dokken/oraclelinux-8 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/oraclelinux-8/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/oraclelinux-9/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: oraclelinux-9 4 | groups: 5 | - nomad_instances 6 | image: dokken/oraclelinux-9 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /usr/lib/systemd/systemd 15 | -------------------------------------------------------------------------------- /molecule/oraclelinux-9/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/ubuntu-20.04/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: ubuntu-20.04 4 | groups: 5 | - nomad_instances 6 | image: dokken/ubuntu-20.04 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /bin/systemd 15 | --------------------------------------------------------------------------------
/molecule/ubuntu-20.04/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/ubuntu-22.04/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: ubuntu-22.04 4 | groups: 5 | - nomad_instances 6 | image: dokken/ubuntu-22.04 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /bin/systemd 15 | -------------------------------------------------------------------------------- /molecule/ubuntu-22.04/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /molecule/ubuntu-23.04/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | platforms: 3 | - name: ubuntu-23.04 4 | groups: 5 | - nomad_instances 6 | image: dokken/ubuntu-23.04 7 | dockerfile: ../_shared/Dockerfile.j2 8 | capabilities: 9 | - SYS_ADMIN 10 | cgroupns_mode: host 11 | volumes: 12 | - /sys/fs/cgroup:/sys/fs/cgroup:rw 13 | privileged: true 14 | command: /bin/systemd 15 | -------------------------------------------------------------------------------- /molecule/ubuntu-23.04/verify.yml: -------------------------------------------------------------------------------- 1 | ../_shared/verify.yml -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.utils 4 | version: 2.9.0 5 | - name: ansible.posix 6 | version: 1.4.0 7 | 
-------------------------------------------------------------------------------- /tasks/asserts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: asserts.yml - Asserts for this playbook 3 | 4 | - name: Ping hosts 5 | ansible.builtin.ping: 6 | 7 | - name: Ensure all hosts are reachable 8 | run_once: true 9 | ansible.builtin.assert: 10 | that: 11 | - ansible_play_hosts == ansible_play_hosts_all 12 | when: not nomad_skip_ensure_all_hosts | bool 13 | 14 | - name: Os_supported_matrix | check distribution 15 | ansible.builtin.assert: 16 | quiet: true 17 | fail_msg: "{{ ansible_distribution }} is not supported for this role" 18 | that: 19 | - ansible_distribution in os_supported_matrix 20 | 21 | - name: Get os version to compare with 22 | ansible.builtin.set_fact: 23 | version_to_compare: "{{ item.value.min_version }}" 24 | loop: "{{ lookup('dict', os_supported_matrix) }}" 25 | no_log: true 26 | when: 27 | - ansible_distribution in item.key 28 | 29 | - name: Os_supported_matrix | check distribution version 30 | ansible.builtin.assert: 31 | quiet: true 32 | fail_msg: "{{ ansible_distribution_version }} is not supported for this role" 33 | that: 34 | - ansible_distribution_version is version(version_to_compare, '>=') 35 | when: 36 | - version_to_compare is defined 37 | - version_to_compare | length > 0 38 | 39 | - name: Check nomad_group_name is included in groups 40 | ansible.builtin.fail: 41 | msg: nomad_group_name must be included in groups. 
42 | when: nomad_group_name not in groups 43 | -------------------------------------------------------------------------------- /tasks/cni.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: cni_plugin.yml - package installation tasks for Nomad CNI Plugin 3 | 4 | - name: Create cni directory 5 | ansible.builtin.file: 6 | dest: "{{ nomad_cni_dir }}" 7 | state: directory 8 | owner: "{{ nomad_user }}" 9 | group: "{{ nomad_group }}" 10 | mode: "0750" 11 | 12 | - name: Check CNI package checksum file 13 | ansible.builtin.stat: 14 | path: "{{ role_path }}/files/nomad_cni_{{ nomad_cni_version }}_SHA256SUMS" 15 | become: false 16 | run_once: true 17 | tags: installation 18 | register: nomad_cni_checksum 19 | delegate_to: 127.0.0.1 20 | 21 | - name: Get Nomad CNI package checksum file 22 | ansible.builtin.get_url: 23 | url: "{{ nomad_cni_checksum_file_url }}" 24 | dest: "{{ role_path }}/files/nomad_cni_{{ nomad_cni_version }}_SHA256SUMS" 25 | mode: "0640" 26 | become: false 27 | run_once: true 28 | tags: installation 29 | when: not nomad_cni_checksum.stat.exists 30 | delegate_to: 127.0.0.1 31 | 32 | - name: Get Nomad CNI package checksum # noqa no-changed-when 33 | ansible.builtin.shell: | 34 | set -o pipefail 35 | grep "{{ nomad_cni_pkg }}" "{{ role_path }}/files/nomad_cni_{{ nomad_cni_version }}_SHA256SUMS" | awk '{print $1}' 36 | args: 37 | executable: /bin/bash 38 | become: false 39 | register: nomad_cni_sha256 40 | tags: installation 41 | delegate_to: 127.0.0.1 42 | 43 | - name: Check Nomad CNI package file 44 | ansible.builtin.stat: 45 | path: "{{ role_path }}/files/{{ nomad_cni_pkg }}" 46 | become: false 47 | register: nomad_cni_package 48 | delegate_to: 127.0.0.1 49 | 50 | - name: Download Nomad CNI 51 | ansible.builtin.get_url: 52 | url: "{{ nomad_cni_zip_url }}" 53 | dest: "{{ role_path }}/files/{{ nomad_cni_pkg }}" 54 | mode: "0640" 55 | checksum: sha256:{{ nomad_cni_sha256.stdout }} 56 | timeout: "42" 57 | 
become: false 58 | tags: installation 59 | delegate_to: 127.0.0.1 60 | when: not nomad_cni_package.stat.exists 61 | 62 | - name: Create Temporary Directory for Extraction 63 | ansible.builtin.tempfile: 64 | state: directory 65 | prefix: ansible-nomad. 66 | become: false 67 | register: install_temp 68 | tags: installation 69 | delegate_to: 127.0.0.1 70 | 71 | - name: Unarchive Nomad CNI 72 | ansible.builtin.unarchive: 73 | src: "{{ role_path }}/files/{{ nomad_cni_pkg }}" 74 | dest: "{{ install_temp.path }}/" 75 | creates: "{{ install_temp.path }}/bridge" 76 | become: false 77 | tags: installation 78 | delegate_to: 127.0.0.1 79 | 80 | - name: Install Nomad CNI 81 | ansible.builtin.copy: 82 | src: "{{ item }}" 83 | dest: "{{ nomad_cni_dir }}" 84 | owner: "{{ nomad_user }}" 85 | group: "{{ nomad_group }}" 86 | mode: "0755" 87 | with_fileglob: 88 | - "{{ install_temp.path }}/*" 89 | tags: installation 90 | notify: Restart nomad 91 | 92 | - name: Cleanup 93 | ansible.builtin.file: 94 | path: "{{ install_temp.path }}" 95 | state: absent 96 | become: false 97 | tags: installation 98 | delegate_to: 127.0.0.1 99 | -------------------------------------------------------------------------------- /tasks/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add Nomad user to docker group 3 | ansible.builtin.user: 4 | name: "{{ nomad_user }}" 5 | groups: docker 6 | append: true 7 | when: 8 | - nomad_user != 'root' 9 | -------------------------------------------------------------------------------- /tasks/get_gossip_key.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure encryption 3 | no_log: true 4 | block: 5 | - name: Check for gossip encryption key 6 | when: 7 | - nomad_encrypt is not defined 8 | block: 9 | - name: Check for server configuration 10 | ansible.builtin.stat: 11 | path: "{{ nomad_config_dir }}/server.hcl" 12 | register: server_config_state 13 
| ignore_errors: true 14 | 15 | - name: Get encryption key 16 | when: 17 | - server_config_state.stat.exists | bool 18 | no_log: true 19 | block: 20 | - name: Check for gossip encryption key on previously boostrapped server # noqa no-changed-when 21 | ansible.builtin.shell: grep encrypt {{ nomad_config_dir }}/server.hcl | awk '{print $3}' | sed -e 's/^"//' -e 's/"$//' 22 | register: nomad_raw_key_result 23 | ignore_errors: true 24 | 25 | - name: Save gossip encryption key from existing configuration 26 | ansible.builtin.set_fact: 27 | nomad_encrypt: "{{ nomad_raw_key_result.stdout }}" 28 | 29 | # Key provided by extra vars or the above block 30 | - name: Write gossip encryption key locally for use with new servers 31 | ansible.builtin.copy: 32 | content: "{{ nomad_encrypt }}" 33 | dest: /tmp/nomad_raw.key 34 | mode: "0600" 35 | become: false 36 | vars: 37 | ansible_become: false 38 | no_log: true 39 | delegate_to: localhost 40 | changed_when: false 41 | when: nomad_encrypt is defined 42 | 43 | - name: Generate new key if none was found 44 | when: 45 | - lookup('first_found', dict(files=['/tmp/nomad_raw.key'], skip=true)) | ternary(false, true) 46 | - not server_config_state.stat.exists | bool 47 | no_log: true 48 | run_once: true 49 | block: 50 | - name: Generate gossip encryption key # noqa no-changed-when 51 | ansible.builtin.command: 52 | cmd: nomad operator keygen 53 | when: nomad_version is version('1.4.0', '<') 54 | register: nomad_keygen 55 | - name: Generate gossip encryption key # noqa no-changed-when 56 | ansible.builtin.command: 57 | cmd: nomad operator gossip keyring generate 58 | when: nomad_version is version('1.4.0', '>=') 59 | register: nomad_keygen 60 | 61 | - name: Write key locally to share with other nodes 62 | ansible.builtin.copy: 63 | content: "{{ nomad_keygen.stdout }}" 64 | dest: /tmp/nomad_raw.key 65 | mode: "0600" 66 | become: false 67 | vars: 68 | ansible_become: false 69 | delegate_to: localhost 70 | 71 | - name: Read gossip encryption 
key for servers that require it 72 | ansible.builtin.set_fact: 73 | nomad_encrypt: "{{ lookup('file', '/tmp/nomad_raw.key') }}" 74 | no_log: true 75 | when: 76 | - nomad_encrypt is not defined 77 | 78 | - name: Delete gossip encryption key file 79 | ansible.builtin.file: 80 | path: /tmp/nomad_raw.key 81 | state: absent 82 | run_once: true 83 | delegate_to: localhost 84 | changed_when: false 85 | become: false 86 | -------------------------------------------------------------------------------- /tasks/host_volume.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create volume {{ item['name'] }} 3 | ansible.builtin.file: 4 | path: "{{ item['path'] }}" 5 | owner: "{{ item['owner'] | default(nomad_user) }}" 6 | group: "{{ item['group'] | default(nomad_group) }}" 7 | state: "{{ item['state'] | default('directory') }}" 8 | mode: "{{ item['mode'] | default('0755') }}" 9 | with_items: "{{ nomad_host_volumes }}" 10 | -------------------------------------------------------------------------------- /tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: install.yml - package installation tasks for Nomad # noqa 106 3 | 4 | - name: Install OS packages 5 | ansible.builtin.package: 6 | name: "{{ item }}" 7 | state: present 8 | with_items: "{{ nomad_os_packages }}" 9 | tags: installation 10 | when: not ansible_facts['os_family'] == "VMware Photon OS" 11 | 12 | - name: Install OS packages # noqa no-changed-when 13 | ansible.builtin.command: tdnf install {{ item }} 14 | with_items: "{{ nomad_os_packages }}" 15 | tags: installation 16 | when: ansible_facts['os_family'] == "VMware Photon OS" 17 | 18 | - name: Check Nomad package checksum file 19 | ansible.builtin.stat: 20 | path: "{{ role_path }}/files/nomad_{{ nomad_version }}_SHA256SUMS" 21 | become: false 22 | run_once: true 23 | tags: installation 24 | register: nomad_checksum 25 | delegate_to: 127.0.0.1 26 | 27 
| - name: Get Nomad package checksum file 28 | ansible.builtin.get_url: 29 | url: "{{ nomad_checksum_file_url }}" 30 | dest: "{{ role_path }}/files/nomad_{{ nomad_version }}_SHA256SUMS" 31 | mode: "0644" 32 | become: false 33 | run_once: true 34 | tags: installation 35 | when: not nomad_checksum.stat.exists 36 | delegate_to: 127.0.0.1 37 | 38 | - name: Get Nomad package checksum # noqa no-changed-when 39 | ansible.builtin.shell: | 40 | set -o pipefail 41 | grep "{{ nomad_pkg }}" "{{ role_path }}/files/nomad_{{ nomad_version }}_SHA256SUMS" | awk '{print $1}' 42 | args: 43 | executable: /bin/bash 44 | become: false 45 | register: nomad_sha256 46 | tags: installation 47 | delegate_to: 127.0.0.1 48 | 49 | - name: Check Nomad package file 50 | ansible.builtin.stat: 51 | path: "{{ role_path }}/files/{{ nomad_pkg }}" 52 | become: false 53 | register: nomad_package 54 | delegate_to: 127.0.0.1 55 | 56 | - name: Download Nomad 57 | ansible.builtin.get_url: 58 | url: "{{ nomad_zip_url }}" 59 | dest: "{{ role_path }}/files/{{ nomad_pkg }}" 60 | checksum: sha256:{{ nomad_sha256.stdout }} 61 | timeout: "42" 62 | mode: "0644" 63 | become: false 64 | tags: installation 65 | delegate_to: 127.0.0.1 66 | when: not nomad_package.stat.exists 67 | 68 | - name: Create Temporary Directory for Extraction 69 | ansible.builtin.tempfile: 70 | state: directory 71 | prefix: ansible-nomad. 
72 | become: false 73 | register: install_temp 74 | tags: installation 75 | delegate_to: 127.0.0.1 76 | 77 | - name: Unarchive Nomad 78 | ansible.builtin.unarchive: 79 | src: "{{ role_path }}/files/{{ nomad_pkg }}" 80 | dest: "{{ install_temp.path }}/" 81 | creates: "{{ install_temp.path }}/nomad" 82 | become: false 83 | tags: installation 84 | delegate_to: 127.0.0.1 85 | 86 | - name: Install Nomad 87 | ansible.builtin.copy: 88 | src: "{{ install_temp.path }}/nomad" 89 | dest: "{{ nomad_bin_dir }}" 90 | owner: "{{ nomad_user }}" 91 | group: "{{ nomad_group }}" 92 | mode: "0755" 93 | tags: installation 94 | 95 | - name: Cleanup 96 | ansible.builtin.file: 97 | path: "{{ install_temp.path }}" 98 | state: absent 99 | become: false 100 | tags: installation 101 | delegate_to: 127.0.0.1 102 | -------------------------------------------------------------------------------- /tasks/install_podman.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: install.yml - package installation tasks for Nomad Podman 3 | 4 | - name: Check Nomad Podman package checksum file 5 | ansible.builtin.stat: 6 | path: "{{ role_path }}/files/nomad_podman_{{ nomad_podman_version }}_SHA256SUMS" 7 | become: false 8 | run_once: true 9 | tags: installation 10 | register: nomad_podman_checksum 11 | delegate_to: 127.0.0.1 12 | 13 | - name: Get Nomad Podman package checksum file 14 | ansible.builtin.get_url: 15 | url: "{{ nomad_podman_checksum_file_url }}" 16 | dest: "{{ role_path }}/files/nomad_podman_{{ nomad_podman_version }}_SHA256SUMS" 17 | mode: "0640" 18 | become: false 19 | run_once: true 20 | tags: installation 21 | when: not nomad_podman_checksum.stat.exists 22 | delegate_to: 127.0.0.1 23 | 24 | - name: Get Nomad Podman package checksum # noqa no-changed-when 25 | ansible.builtin.shell: | 26 | set -o pipefail 27 | grep "{{ nomad_podman_pkg }}" "{{ role_path }}/files/nomad_podman_{{ nomad_podman_version }}_SHA256SUMS" | awk '{print $1}' 28 | 
args: 29 | executable: /bin/bash 30 | become: false 31 | register: nomad_podman_sha256 32 | tags: installation 33 | delegate_to: 127.0.0.1 34 | 35 | - name: Check Nomad Podman package file 36 | ansible.builtin.stat: 37 | path: "{{ role_path }}/files/{{ nomad_podman_pkg }}" 38 | become: false 39 | register: nomad_podman_package 40 | delegate_to: 127.0.0.1 41 | 42 | - name: Download Nomad Podman 43 | ansible.builtin.get_url: 44 | url: "{{ nomad_podman_zip_url }}" 45 | dest: "{{ role_path }}/files/{{ nomad_podman_pkg }}" 46 | mode: "0640" 47 | checksum: sha256:{{ nomad_podman_sha256.stdout }} 48 | timeout: "42" 49 | become: false 50 | tags: installation 51 | delegate_to: 127.0.0.1 52 | when: not nomad_podman_package.stat.exists 53 | 54 | - name: Create Temporary Directory for Extraction 55 | ansible.builtin.tempfile: 56 | state: directory 57 | prefix: ansible-nomad. 58 | become: false 59 | register: install_temp 60 | tags: installation 61 | delegate_to: 127.0.0.1 62 | 63 | - name: Unarchive Nomad Podman 64 | ansible.builtin.unarchive: 65 | src: "{{ role_path }}/files/{{ nomad_podman_pkg }}" 66 | dest: "{{ install_temp.path }}/" 67 | creates: "{{ install_temp.path }}/nomad-driver-podman" 68 | become: false 69 | tags: installation 70 | delegate_to: 127.0.0.1 71 | 72 | - name: Install Nomad Podman 73 | ansible.builtin.copy: 74 | src: "{{ install_temp.path }}/nomad-driver-podman" 75 | dest: "{{ nomad_plugin_dir }}" 76 | owner: "{{ nomad_user }}" 77 | group: "{{ nomad_group }}" 78 | mode: "0755" 79 | tags: installation 80 | notify: Restart nomad 81 | 82 | - name: Cleanup 83 | ansible.builtin.file: 84 | path: "{{ install_temp.path }}" 85 | state: absent 86 | become: false 87 | tags: installation 88 | delegate_to: 127.0.0.1 89 | -------------------------------------------------------------------------------- /tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: main.yml - Main tasks for Nomad 3 | 4 | - name: 
Include checks/asserts 5 | ansible.builtin.include_tasks: 6 | file: asserts.yml 7 | 8 | - name: Include OS variables 9 | ansible.builtin.include_vars: "{{ ansible_os_family }}.yml" 10 | 11 | # Gathers facts (bind address) from servers not currently targeted. 12 | # 'delegate_facts' is currently rather buggy in Ansible so this might not 13 | # always work. Hence 'nomad_gather_server_facts' defaults to 'no'. 14 | - name: Gather facts from other servers # noqa ignore-errors 15 | ansible.builtin.setup: 16 | delegate_to: "{{ item }}" 17 | delegate_facts: true 18 | with_items: "{{ nomad_servers | difference(ansible_play_hosts) }}" 19 | ignore_errors: true 20 | when: nomad_gather_server_facts | bool 21 | 22 | - name: Expose bind_address, advertise_address and node_role as facts 23 | ansible.builtin.set_fact: 24 | nomad_bind_address: "{{ nomad_bind_address }}" 25 | nomad_advertise_address: "{{ nomad_advertise_address }}" 26 | nomad_node_role: "{{ nomad_node_role }}" 27 | 28 | - name: Include user and group settings 29 | ansible.builtin.include_tasks: 30 | file: user_group.yml 31 | 32 | - name: Include Docker tasks 33 | ansible.builtin.include_tasks: 34 | file: docker.yml 35 | when: nomad_docker_enable | bool 36 | 37 | - name: Install OS packages 38 | ansible.builtin.include_tasks: 39 | file: install.yml 40 | 41 | - name: Disable SELinux (RHEL) 42 | ansible.builtin.include_tasks: 43 | file: selinux.yml 44 | when: ansible_os_family == "RedHat" 45 | 46 | - name: Create directories 47 | ansible.builtin.file: 48 | dest: "{{ item }}" 49 | state: directory 50 | owner: "{{ nomad_user }}" 51 | group: "{{ nomad_group }}" 52 | mode: "0755" 53 | with_items: 54 | - "{{ nomad_data_dir }}" 55 | - "{{ nomad_plugin_dir }}" 56 | 57 | - name: Install Podman plugin 58 | ansible.builtin.include_tasks: 59 | file: install_podman.yml 60 | when: nomad_podman_enable | bool 61 | 62 | - name: Install CNI plugin 63 | ansible.builtin.include_tasks: 64 | file: cni.yml 65 | when: nomad_cni_enable | bool 
66 | 67 | - name: Create config directory 68 | ansible.builtin.file: 69 | dest: "{{ nomad_config_dir }}" 70 | state: directory 71 | owner: root 72 | group: root 73 | mode: "0755" 74 | 75 | - name: Base configuration 76 | ansible.builtin.template: 77 | src: base.hcl.j2 78 | dest: "{{ nomad_config_dir }}/base.hcl" 79 | owner: root 80 | group: root 81 | mode: "0644" 82 | notify: 83 | - Restart nomad 84 | 85 | - name: Get Gossip Key 86 | ansible.builtin.include_tasks: 87 | file: get_gossip_key.yml 88 | when: 89 | - _nomad_node_server | bool 90 | - nomad_encrypt_enable | bool 91 | - nomad_encrypt is not defined 92 | 93 | - name: Create TLS configuration 94 | ansible.builtin.include_tasks: 95 | file: tls.yml 96 | when: nomad_tls_enable | bool 97 | 98 | - name: Server configuration 99 | ansible.builtin.template: 100 | src: server.hcl.j2 101 | dest: "{{ nomad_config_dir }}/server.hcl" 102 | owner: root 103 | group: root 104 | mode: "0644" 105 | when: 106 | - _nomad_node_server | bool 107 | notify: 108 | - Restart nomad 109 | 110 | - name: Remove Server configuration 111 | ansible.builtin.file: 112 | dest: "{{ nomad_config_dir }}/server.hcl" 113 | state: absent 114 | when: 115 | - nomad_allow_purge_config | bool 116 | - not _nomad_node_server | bool 117 | notify: 118 | - Restart nomad 119 | 120 | - name: Client configuration 121 | ansible.builtin.template: 122 | src: client.hcl.j2 123 | dest: "{{ nomad_config_dir }}/client.hcl" 124 | owner: root 125 | group: root 126 | mode: "0644" 127 | when: 128 | - _nomad_node_client | bool 129 | notify: 130 | - Restart nomad 131 | 132 | - name: Remove Client configuration 133 | ansible.builtin.file: 134 | dest: "{{ nomad_config_dir }}/client.hcl" 135 | state: absent 136 | when: 137 | - nomad_allow_purge_config | bool 138 | - not _nomad_node_client | bool 139 | notify: 140 | - Restart nomad 141 | 142 | - name: Custom configuration 143 | ansible.builtin.template: 144 | src: custom.json.j2 145 | dest: "{{ nomad_config_dir }}/custom.json" 
146 | owner: root 147 | group: root 148 | mode: "0644" 149 | when: 150 | - nomad_config_custom is defined 151 | notify: 152 | - Restart nomad 153 | 154 | - name: Remove custome configuration 155 | ansible.builtin.file: 156 | dest: "{{ nomad_config_dir }}/custom.json" 157 | state: absent 158 | when: 159 | - nomad_allow_purge_config | bool 160 | - nomad_config_custom is not defined 161 | notify: 162 | - Restart nomad 163 | 164 | - name: Host volume 165 | ansible.builtin.include_tasks: 166 | file: host_volume.yml 167 | 168 | - name: SYSV init script 169 | ansible.builtin.template: 170 | src: nomad_sysvinit.j2 171 | dest: /etc/init.d/nomad 172 | owner: root 173 | group: root 174 | mode: "0755" 175 | when: not ansible_service_mgr == "systemd" and not ansible_os_family == "Debian" 176 | 177 | - name: Debian init script 178 | ansible.builtin.template: 179 | src: nomad_debian.init.j2 180 | dest: /etc/init.d/nomad 181 | owner: root 182 | group: root 183 | mode: "0755" 184 | when: not ansible_service_mgr == "systemd" and ansible_os_family == "Debian" 185 | 186 | - name: Extract systemd version 187 | ansible.builtin.shell: 188 | cmd: set -o pipefail && systemctl --version systemd | head -n 1 | cut -d ' ' -f2 189 | args: 190 | executable: /bin/bash 191 | changed_when: false 192 | check_mode: false 193 | register: systemd_version 194 | when: 195 | - ansible_service_mgr == "systemd" 196 | - not ansible_os_family == "FreeBSD" 197 | - not ansible_os_family == "Solaris" 198 | tags: skip_ansible_lint 199 | 200 | - name: Create systemd unit 201 | ansible.builtin.template: 202 | src: "{{ nomad_systemd_template }}" 203 | dest: "{{ nomad_systemd_unit_path }}/nomad.service" 204 | owner: root 205 | group: root 206 | mode: "0644" 207 | notify: 208 | - Reload systemd daemon 209 | - Enable nomad at startup (systemd) 210 | when: ansible_service_mgr == "systemd" 211 | 212 | - name: Start Nomad 213 | ansible.builtin.service: 214 | name: nomad 215 | enabled: true 216 | state: started 217 | when: 
not ansible_service_mgr == "systemd" 218 | -------------------------------------------------------------------------------- /tasks/selinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: selinux.yml - SELinux tasks for Nomad 3 | 4 | # SELinux disrupts LXC (shrug) 5 | 6 | - name: Disable SELinux for Docker Driver 7 | ansible.posix.selinux: 8 | state: disabled 9 | when: nomad_docker_enable == "true" 10 | -------------------------------------------------------------------------------- /tasks/tls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: tls.yml - TLS tasks for Nomad 3 | 4 | - name: Create CA, certificate and private key 5 | when: nomad_tls_copy_keys | bool 6 | block: 7 | - name: Create SSL directory 8 | ansible.builtin.file: 9 | dest: "{{ nomad_tls_dir }}" 10 | state: directory 11 | owner: "{{ nomad_user }}" 12 | group: "{{ nomad_group }}" 13 | mode: "0755" 14 | 15 | - name: Copy CA certificate 16 | ansible.builtin.copy: 17 | remote_src: "{{ nomad_tls_files_remote_src }}" 18 | src: "{{ nomad_ca_file }}" 19 | dest: "{{ nomad_tls_dir }}/{{ nomad_ca_file | basename }}" 20 | owner: "{{ nomad_user }}" 21 | group: "{{ nomad_group }}" 22 | mode: "0644" 23 | notify: Restart nomad 24 | 25 | - name: Copy certificate 26 | ansible.builtin.copy: 27 | remote_src: "{{ nomad_tls_files_remote_src }}" 28 | src: "{{ nomad_cert_file }}" 29 | dest: "{{ nomad_tls_dir }}/{{ nomad_cert_file | basename }}" 30 | owner: "{{ nomad_user }}" 31 | group: "{{ nomad_group }}" 32 | mode: "0644" 33 | notify: Restart nomad 34 | 35 | - name: Copy key 36 | ansible.builtin.copy: 37 | remote_src: "{{ nomad_tls_files_remote_src }}" 38 | src: "{{ nomad_key_file }}" 39 | dest: "{{ nomad_tls_dir }}/{{ nomad_key_file | basename }}" 40 | owner: "{{ nomad_user }}" 41 | group: "{{ nomad_group }}" 42 | mode: "0600" 43 | notify: Restart nomad 44 | 
-------------------------------------------------------------------------------- /tasks/user_group.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # File: user_group.yml - User and group settings 3 | 4 | - name: Add Nomad group 5 | ansible.builtin.group: 6 | name: "{{ nomad_group }}" 7 | state: present 8 | when: 9 | - nomad_manage_group | bool 10 | 11 | - name: Add Nomad user 12 | ansible.builtin.user: 13 | name: "{{ nomad_user }}" 14 | comment: Nomad user 15 | group: "{{ nomad_group }}" 16 | system: true 17 | when: 18 | - nomad_manage_user | bool 19 | -------------------------------------------------------------------------------- /templates/base.hcl.j2: -------------------------------------------------------------------------------- 1 | name = "{{ nomad_node_name }}" 2 | region = "{{ nomad_region }}" 3 | datacenter = "{{ nomad_datacenter }}" 4 | 5 | enable_debug = {{ nomad_debug | bool | lower }} 6 | disable_update_check = {{ nomad_disable_update_check | bool | lower }} 7 | 8 | 9 | bind_addr = "{{ nomad_bind_address }}" 10 | advertise { 11 | http = "{{ nomad_advertise_address }}:{{ nomad_ports.http }}" 12 | rpc = "{{ nomad_advertise_address }}:{{ nomad_ports.rpc }}" 13 | serf = "{{ nomad_advertise_address }}:{{ nomad_ports.serf }}" 14 | } 15 | ports { 16 | http = {{ nomad_ports['http'] }} 17 | rpc = {{ nomad_ports['rpc'] }} 18 | serf = {{ nomad_ports['serf'] }} 19 | } 20 | 21 | {% if nomad_use_consul | bool == True %} 22 | consul { 23 | # The address to the Consul agent. 24 | address = "{{ nomad_consul_address }}" 25 | ssl = {{ nomad_consul_ssl | bool | lower }} 26 | ca_file = "{{ nomad_consul_ca_file }}" 27 | grpc_ca_file = "{{ nomad_consul_grpc_ca_file }}" 28 | cert_file = "{{ nomad_consul_cert_file }}" 29 | key_file = "{{ nomad_consul_key_file }}" 30 | token = "{{ nomad_consul_token }}" 31 | # The service name to register the server and client with Consul. 
32 | server_service_name = "{{ nomad_consul_servers_service_name }}" 33 | client_service_name = "{{ nomad_consul_clients_service_name }}" 34 | tags = {{ nomad_consul_tags | to_json }} 35 | 36 | # Enables automatically registering the services. 37 | auto_advertise = true 38 | 39 | # Enabling the server and client to bootstrap using Consul. 40 | server_auto_join = true 41 | client_auto_join = true 42 | } 43 | {% endif %} 44 | 45 | data_dir = "{{ nomad_data_dir }}" 46 | 47 | log_level = "{{ nomad_log_level }}" 48 | enable_syslog = {{ nomad_syslog_enable | bool | lower }} 49 | 50 | leave_on_terminate = {{ nomad_leave_on_terminate | bool | lower }} 51 | leave_on_interrupt = {{ nomad_leave_on_interrupt | bool | lower }} 52 | 53 | {% if nomad_tls_enable | bool %} 54 | tls { 55 | http = true 56 | rpc = true 57 | ca_file = "{{ nomad_tls_dir }}/{{ nomad_ca_file | basename }}" 58 | cert_file = "{{ nomad_tls_dir }}/{{ nomad_cert_file | basename }}" 59 | key_file = "{{ nomad_tls_dir }}/{{ nomad_key_file | basename }}" 60 | rpc_upgrade_mode = {{ nomad_rpc_upgrade_mode | bool | lower }} 61 | verify_server_hostname = "{{ nomad_verify_server_hostname | bool | lower }}" 62 | verify_https_client = "{{ nomad_verify_https_client | bool | lower }}" 63 | } 64 | {% endif %} 65 | 66 | acl { 67 | enabled = {{ nomad_acl_enabled | bool | lower }} 68 | token_ttl = "{{ nomad_acl_token_ttl }}" 69 | policy_ttl = "{{ nomad_acl_policy_ttl }}" 70 | replication_token = "{{ nomad_acl_replication_token }}" 71 | } 72 | 73 | vault { 74 | enabled = {{ nomad_vault_enabled | bool | lower }} 75 | address = "{{ nomad_vault_address }}" 76 | allow_unauthenticated = {{ nomad_vault_allow_unauthenticated | bool | lower }} 77 | create_from_role = "{{ nomad_vault_create_from_role }}" 78 | task_token_ttl = "{{ nomad_vault_task_token_ttl }}" 79 | ca_file = "{{ nomad_vault_ca_file }}" 80 | ca_path = "{{ nomad_vault_ca_path }}" 81 | cert_file = "{{ nomad_vault_cert_file }}" 82 | key_file = "{{ nomad_vault_key_file }}" 
83 | tls_server_name = "{{ nomad_vault_tls_server_name }}" 84 | tls_skip_verify = {{ nomad_vault_tls_skip_verify | bool | lower }} 85 | {%if nomad_node_role != 'client' %} 86 | token = "{{ nomad_vault_token }}" 87 | {% endif %} 88 | namespace = "{{ nomad_vault_namespace }}" 89 | } 90 | 91 | {% if nomad_telemetry | default(False) | bool == True %} 92 | telemetry { 93 | disable_hostname = "{{ nomad_telemetry_disable_hostname | default(false) | bool | lower }}" 94 | collection_interval = "{{ nomad_telemetry_collection_interval | default("1s") }}" 95 | use_node_name = "{{ nomad_telemetry_use_node_name | default(false) | bool | lower }}" 96 | publish_allocation_metrics = "{{ nomad_telemetry_publish_allocation_metrics | default(false) | bool | lower }}" 97 | publish_node_metrics = "{{ nomad_telemetry_publish_node_metrics | default(false) | bool | lower }}" 98 | {% if nomad_version is version('1.0.0', '<') %} 99 | backwards_compatible_metrics = "{{ nomad_telemetry_backwards_compatible_metrics | default(false) | bool | lower }}" 100 | disable_tagged_metrics = "{{ nomad_telemetry_disable_tagged_metrics | default(false) | bool | lower }}" 101 | {% endif %} 102 | filter_default = "{{ nomad_telemetry_filter_default | default("true") }}" 103 | prefix_filter = {{ nomad_telemetry_prefix_filter | default([]) }} 104 | disable_dispatched_job_summary_metrics = "{{ nomad_telemetry_disable_dispatched_job_summary_metrics | default(false) | bool | lower }}" 105 | statsite_address = "{{ nomad_telemetry_statsite_address | default("") }}" 106 | statsd_address = "{{ nomad_telemetry_statsd_address | default("") }}" 107 | datadog_address = "{{ nomad_telemetry_datadog_address | default("") }}" 108 | datadog_tags = {{ nomad_telemetry_datadog_tags | default([]) }} 109 | prometheus_metrics = "{{ nomad_telemetry_prometheus_metrics | default(false) | bool | lower }}" 110 | circonus_api_token = "{{ nomad_telemetry_circonus_api_token | default("") }}" 111 | circonus_api_app = "{{ 
nomad_telemetry_circonus_api_app | default("nomad") }}" 112 | circonus_api_url = "{{ nomad_telemetry_circonus_api_url | default("https://api.circonus.com/v2") }}" 113 | circonus_submission_interval = "{{ circonus_submission_interval | default("10s") }}" 114 | circonus_submission_url = "{{ circonus_submission_url | default("") }}" 115 | circonus_check_id = "{{ circonus_check_id | default("") }}" 116 | circonus_check_force_metric_activation = "{{ circonus_check_force_metric_activation | default(false) | bool | lower }}" 117 | circonus_check_instance_id = "{{ circonus_check_instance_id | default("") }}" 118 | circonus_check_search_tag = "{{ circonus_check_search_tag | default("") }}" 119 | circonus_check_display_name = "{{ circonus_check_display_name | default("") }}" 120 | circonus_check_tags = "{{ circonus_check_tags | default("") }}" 121 | circonus_broker_id = "{{ circonus_broker_id | default("") }}" 122 | circonus_broker_select_tag = "{{ circonus_broker_select_tag | default("") }}" 123 | } 124 | {% endif %} 125 | 126 | {% if nomad_autopilot | default(False) | bool == True %} 127 | autopilot { 128 | cleanup_dead_servers = {{ nomad_autopilot_cleanup_dead_servers | bool | lower }} 129 | last_contact_threshold = "{{ nomad_autopilot_last_contact_threshold }}" 130 | max_trailing_logs = {{ nomad_autopilot_max_trailing_logs }} 131 | server_stabilization_time = "{{ nomad_autopilot_server_stabilization_time }}" 132 | } 133 | {% endif %} 134 | 135 | {% if nomad_ui | default(False) | bool == True %} 136 | ui { 137 | enabled = true 138 | label { 139 | text = "{{ nomad_ui_label_text }}" 140 | background_color = "{{ nomad_ui_label_background_color }}" 141 | text_color = "{{ nomad_ui_label_text_color }}" 142 | } 143 | } 144 | {% endif %} 145 | -------------------------------------------------------------------------------- /templates/client.hcl.j2: -------------------------------------------------------------------------------- 1 | client { 2 | enabled = {{ _nomad_node_client | 
bool | lower }}

    node_class = "{{ nomad_node_class }}"
    no_host_uuid = {{ nomad_no_host_uuid | bool | lower }}

{% if nomad_node_pool is defined and nomad_node_pool | length %}
    node_pool = "{{ nomad_node_pool }}"
{% endif %}

{# Without Consul the client needs an explicit server list for RPC. #}
{% if not nomad_use_consul | bool %}
    servers = [
{%- set comma = joiner(",") -%}
{%- for server in nomad_servers -%}
    {{ comma() }}"{{ hostvars[server]['nomad_advertise_address'] | ansible.utils.ipwrap }}:{{ nomad_ports.rpc }}"
{%- endfor -%} ]
{% endif %}

    max_kill_timeout = "{{ nomad_max_kill_timeout }}"

{% if nomad_network_interface is defined -%}
    network_interface = "{{ nomad_network_interface }}"
{% endif -%}
    network_speed = {{ nomad_network_speed }}
    cpu_total_compute = {{ nomad_cpu_total_compute }}

    gc_interval = "{{ nomad_gc_interval }}"
    gc_max_allocs = "{{ nomad_gc_max_allocs }}"
    gc_disk_usage_threshold = {{ nomad_gc_disk_usage_threshold }}
    gc_inode_usage_threshold = {{ nomad_gc_inode_usage_threshold }}
    gc_parallel_destroys = {{ nomad_gc_parallel_destroys }}

    reserved {
        cpu = {{ nomad_reserved['cpu'] }}
        memory = {{ nomad_reserved['memory'] }}
        disk = {{ nomad_reserved['disk'] }}
    }

{% for nomad_host_volume in nomad_host_volumes %}
    host_volume "{{ nomad_host_volume['name'] }}" {
        path = "{{ nomad_host_volume['path'] }}"
        read_only = {{ nomad_host_volume['read_only'] | bool | lower }}
    }
{% endfor %}

{% for nomad_host_network in nomad_host_networks %}
    host_network "{{ nomad_host_network['name'] }}" {
{% if 'cidr' in nomad_host_network %}
{# cidr is guarded by the membership test above, so the former no-op `| default` filter was dropped. #}
        cidr = "{{ nomad_host_network['cidr'] }}"
{% else %}
        interface = "{{ nomad_host_network['interface'] }}"
{% endif %}
        reserved_ports = "{{ nomad_host_network['reserved_ports'] }}"
    }
{% endfor %}

{% if nomad_chroot_env != False -%}
    chroot_env = {
{% for key, value in nomad_chroot_env.items() %}
        "{{ key }}" = "{{ value }}"
{% endfor -%}
    }
{% endif %}

{% if nomad_options -%}
    options = {
{% for key, value in nomad_options.items() %}
        "{{ key }}" = "{{ value }}"
{% endfor -%}
    }
{% endif %}

{% if nomad_meta -%}
    meta = {
{% for key, value in nomad_meta.items() %}
        "{{ key }}" = "{{ value }}"
{% endfor -%}
    }
{% endif %}

{# Recursively renders a nested mapping as HCL: mappings become blocks,
   strings are double-quoted, everything else is stringified and lowercased. #}
{% macro template_config(config, count=1, width=4) %}
{% set next_count = count + 1 %}
{% for key, value in config.items() %}
{% if value is mapping %}
{{ key | indent(count*width, first=True) }} {
{{ template_config(value, count=next_count, width=width) }}
{{ '}' | indent(count*width, first=True) }}
{% else %}
{{ key | indent(count*width, first=True) }} = {% if value is string %}"{{ value }}"{% else %}{{ value | string | lower }}{% endif %}

{% endif %}
{% endfor %}
{% endmacro %}

{% if nomad_template_config | length > 0 %}
template {
    {{ template_config(nomad_template_config) | replace('\'', '\"') }}
}
{% endif %}

{% if nomad_artifact -%}
    artifact {
{% for key, value in nomad_artifact.items() %}
        "{{ key }}" = "{{ value }}"
{% endfor -%}
    }
{% endif %}
}

{# Same recursive renderer as template_config, kept separate for the
   top-level plugin stanzas below. #}
{% macro plugin_config(config, count=1, width=4) %}
{% set next_count = count + 1 %}
{% for key, value in config.items() %}
{% if value is mapping %}
{{ key | indent(count*width, first=True) }} {
{{ plugin_config(value, count=next_count, width=width) }}
{{ '}' | indent(count*width, first=True) }}
{% else %}
{{ key | indent(count*width, first=True) }} = {% if value is string %}"{{ value }}"{% else %}{{ value | string | lower }}{% endif %}

{% endif %}
{% endfor %}
{% endmacro %}

{% for key, value in
nomad_plugins.items() %}
plugin "{{ key }}" {
    {{ plugin_config(value) | replace('\'', '\"') }}
}
{% endfor %}
--------------------------------------------------------------------------------
/templates/custom.json.j2:
--------------------------------------------------------------------------------
{# nomad_config_custom variables are free-style, passed through a hash -#}
{% if nomad_config_custom -%}
{{ nomad_config_custom | to_nice_json }}
{% else %}
{}
{% endif %}
--------------------------------------------------------------------------------
/templates/nomad_debian.init.j2:
--------------------------------------------------------------------------------
#!/bin/sh
### BEGIN INIT INFO
# Provides:          nomad
# Required-Start:    $local_fs $remote_fs
# Required-Stop:     $local_fs $remote_fs
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: distributed scheduler
# Description:       distributed, highly available, datacenter-aware scheduler
### END INIT INFO
# shellcheck disable=SC2015
# shellcheck disable=SC1090
# shellcheck disable=SC1091
# shellcheck disable=SC2034

PATH="{{ nomad_bin_dir }}:/usr/sbin:/usr/bin:/sbin:/bin"
DESC="Nomad, a distributed, highly available, datacenter-aware scheduler"
NAME="nomad"
DAEMON="{{ nomad_bin_dir }}/${NAME}"
PIDFILE="{{ nomad_run_dir }}/${NAME}.pid"
DAEMON_ARGS="agent -config {{ nomad_config_dir }} 2>&1"
USER="{{ nomad_user }}"
SCRIPTNAME="/etc/init.d/${NAME}"

# Bail out silently if the binary is not installed.
[ -x "${DAEMON}" ] || exit 0

[ -r /etc/default/"${NAME}" ] && . /etc/default/"${NAME}"

[ -f /etc/default/rcS ] && . /etc/default/rcS

. /lib/lsb/init-functions

# Ensure the runtime directory exists and is owned by the service user.
mkrundir() {
    [ ! -d "{{ nomad_run_dir }}" ] && mkdir -p "{{ nomad_run_dir }}"
    chown "${USER}" "{{ nomad_run_dir }}"
}

# Returns 0 on success, 1 if already running, 2 on failure to start.
do_start() {
    mkrundir
    start-stop-daemon --start --quiet --pidfile "${PIDFILE}" --exec "${DAEMON}" \
        --chuid "${USER}" --background --make-pidfile --test > /dev/null \
        || return 1
    start-stop-daemon --start --quiet --pidfile "${PIDFILE}" --exec "${DAEMON}" \
        --chuid "${USER}" --background --make-pidfile -- \
        ${DAEMON_ARGS} \
        || return 2

    # Wait up to 30s for the process to exist and the agent to answer `nomad info`.
    for i in $(seq 1 30); do
        if ! start-stop-daemon --quiet --stop --test --pidfile "${PIDFILE}" \
            --exec "${DAEMON}" --user "${USER}"; then
            sleep 1
            continue
        fi
        if "${DAEMON}" info >/dev/null; then
            return 0
        fi
    done
    return 2
}

# Returns 0/1 on a clean stop, 2 when the daemon refused to die.
do_stop() {
    start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \
        --pidfile "${PIDFILE}" \
        --name "${NAME}"
    RETVAL="$?"
    [ "${RETVAL}" = 2 ] && return 2
    start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 \
        --exec "${DAEMON}"
    [ "$?" = 2 ] && return 2
    rm -f "${PIDFILE}"
    return "${RETVAL}"
}

# SIGHUP makes the Nomad agent reload its configuration.
do_reload() {
    start-stop-daemon --stop --signal 1 --quiet --pidfile "${PIDFILE}" \
        --name "${NAME}"
    return 0
}

case "$1" in
    start)
        [ "${VERBOSE}" != no ] && log_daemon_msg "Starting ${DESC}" "${NAME}"
        do_start
        case "$?" in
            0|1) [ "${VERBOSE}" != no ] && log_end_msg 0 ;;
            2) [ "${VERBOSE}" != no ] && log_end_msg 1 ;;
        esac
        ;;
    stop)
        [ "${VERBOSE}" != no ] && log_daemon_msg "Stopping ${DESC}" "${NAME}"
        do_stop
        case "$?" in
            0|1) [ "${VERBOSE}" != no ] && log_end_msg 0 ;;
            2) [ "${VERBOSE}" != no ] && log_end_msg 1 ;;
        esac
        ;;
    restart|force-reload)
        log_daemon_msg "Restarting ${DESC}" "${NAME}"
        do_stop
        case "$?" in
            0|1)
                do_start
                case "$?" in
                    0) log_end_msg 0 ;;
                    1) log_end_msg 1 ;;
                    *) log_end_msg 1 ;;
                esac
                ;;
            *)
                # Stop failed
                log_end_msg 1
                ;;
        esac
        ;;
    *)
        echo "Usage: ${SCRIPTNAME} {start|stop|restart|force-reload}" >&2
        exit 3
        ;;
esac

:
--------------------------------------------------------------------------------
/templates/nomad_systemd.service.j2:
--------------------------------------------------------------------------------
# systemd unit for the Nomad agent.
# (The LSB "### BEGIN INIT INFO" header previously copied here from the SysV
# script has been removed: it only applies to init.d scripts.)

[Unit]
Description=nomad agent
Documentation=https://nomadproject.io/docs/
After=network-online.target
Wants=network-online.target
StartLimitBurst=3
StartLimitIntervalSec=10

[Service]
User={{ nomad_user }}
Group={{ nomad_group }}
ExecStart={{ nomad_bin_dir }}/nomad agent -config={{ nomad_config_dir }}

ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
KillSignal=SIGINT
LimitNOFILE=infinity
LimitNPROC=infinity
Restart=always
RestartSec=120
{% if systemd_version.stdout is version('226', '>=') %}
TasksMax=infinity
{% endif %}

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/templates/nomad_sysvinit.j2:
--------------------------------------------------------------------------------
#!/bin/bash
#
# chkconfig: 2345 95 95
# description: distributed, highly available, datacenter-aware scheduler
# processname: nomad
# pidfile: {{ nomad_run_dir }}/pidfile
# shellcheck disable=SC2015
# shellcheck
disable=SC1091

. /etc/init.d/functions

nomad="{{ nomad_bin_dir }}/nomad"
CONFIG_PATH="{{ nomad_config_dir }}"
PID_FILE="{{ nomad_run_dir }}/nomad.pid"

[ -e /etc/sysconfig/nomad ] && . /etc/sysconfig/nomad


GOMAXPROCS=$(nproc)
export GOMAXPROCS

# Ensure the runtime directory exists and is owned by the service user.
mkrundir() {
    [ ! -d "{{ nomad_run_dir }}" ] && mkdir -p "{{ nomad_run_dir }}"
    chown "{{ nomad_user }}" "{{ nomad_run_dir }}"
}

KILLPROC_OPT="-p ${PID_FILE}"
# Recreate the pid file from the running process; if the ownership fix fails,
# drop the -p option so killproc falls back to matching by name.
# NOTE(review): removing the pid file on a chown failure mirrors the original
# behaviour -- confirm it is intentional.
mkpidfile() {
    mkrundir
    [ ! -f "$PID_FILE" ] && pidofproc "$nomad" > "$PID_FILE"
    if ! chown "{{ nomad_user }}" "{{ nomad_run_dir }}"; then
        rm "$PID_FILE"
        KILLPROC_OPT=""
    fi
}

start() {
    echo -n "Starting nomad: "
    mkrundir
    [ -f "$PID_FILE" ] && rm "$PID_FILE"
    daemon --user="{{ nomad_user }}" \
        --pidfile="$PID_FILE" \
        "$nomad" agent -config "${CONFIG_PATH}" \&
    retcode=$?
    touch "{{ nomad_lockfile }}"
    return $retcode
}

stop() {
    echo -n "Shutting down nomad: "
    # Client-only agents (agent-info reports "server = false") leave the
    # cluster gracefully before the process is killed.
    if ("${nomad}" agent-info -address=http://{{ nomad_advertise_address }}:{{ nomad_ports.http }} 2>/dev/null | grep -q 'server = false' 2>/dev/null) ; then
        "$nomad" leave
    fi

    mkpidfile
    killproc $KILLPROC_OPT $nomad -SIGTERM

    retcode=$?
    rm -f "{{ nomad_lockfile }}" "$PID_FILE"
    return $retcode
}

case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        "$nomad" status -address=http://{{ nomad_advertise_address }}:{{ nomad_ports.http }}
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    reload)
        mkpidfile
        killproc $KILLPROC_OPT $nomad -HUP
        ;;
    condrestart)
        # Bug fix: `restart` was invoked here but it is only a case label,
        # not a function -- run the stop/start sequence explicitly.
        [ -f "{{ nomad_lockfile }}" ] && { stop; sleep 3; start; } || :
        ;;
    *)
        echo "Usage: nomad {start|stop|status|reload|restart|condrestart}"
        exit 1
        ;;
esac
exit $?

--------------------------------------------------------------------------------
/templates/server.hcl.j2:
--------------------------------------------------------------------------------
server {
    enabled = {{ _nomad_node_server | bool | lower }}

{% if _nomad_node_server | bool -%}
    bootstrap_expect = {{ nomad_bootstrap_expect }}
{%- endif %}

{% if nomad_authoritative_region is defined %}
    authoritative_region = "{{ nomad_authoritative_region }}"
{% endif %}

{# Without Consul the servers need an explicit join list.
   `not ... | bool` is robust against string values ("false"), unlike the
   former identity comparison `== False`. #}
{% if not nomad_use_consul | bool %}
{% if nomad_retry_join | bool -%}
    retry_join = [
{%- set comma = joiner(",") -%}
{% for server in nomad_servers -%}
    {{ comma() }}"{{ hostvars[server]['nomad_advertise_address'] | ansible.utils.ipwrap }}"
{%- endfor -%} ]
    retry_max = {{ nomad_retry_max }}
    retry_interval = "{{ nomad_retry_interval }}"
{% else -%}
    start_join = [
{%- set comma = joiner(",") -%}
{% for server in nomad_servers -%}
    {{ comma() }}"{{ hostvars[server]['nomad_advertise_address'] | ansible.utils.ipwrap }}"
{%- endfor -%} ]
{%- endif %}
{% endif %}

    rejoin_after_leave = {{ nomad_rejoin_after_leave | bool | lower }}

    enabled_schedulers = [
{%- set comma = joiner(",") -%}
{% for scheduler in nomad_enabled_schedulers -%}
    {{ comma() }}"{{ scheduler }}"
{%- endfor -%} ]
    num_schedulers = {{ nomad_num_schedulers }}

    node_gc_threshold = "{{ nomad_node_gc_threshold }}"
    eval_gc_threshold = "{{ nomad_eval_gc_threshold }}"
    job_gc_threshold = "{{ nomad_job_gc_threshold }}"
    deployment_gc_threshold = "{{ nomad_deployment_gc_threshold }}"

    encrypt = "{{ nomad_encrypt | default('') }}"

    raft_multiplier = {{ nomad_raft_multiplier }}
    raft_protocol = {{ nomad_raft_protocol }}
}
--------------------------------------------------------------------------------
/tests/inventory:
--------------------------------------------------------------------------------
localhost nomad_node_role=bootstrap
--------------------------------------------------------------------------------
/tests/test.yml:
--------------------------------------------------------------------------------
---
# Minimal smoke-test play for the role.
- name: Apply ansible-nomad to localhost
  hosts: localhost
  remote_user: root
  become: true
  become_user: root
  roles:
    - ansible-nomad
--------------------------------------------------------------------------------
/vars/Archlinux.yml:
--------------------------------------------------------------------------------
---
# File: Archlinux.yml - Archlinux variables for Nomad

nomad_os_packages:
  - unzip

nomad_syslog_enable: false
--------------------------------------------------------------------------------
/vars/Debian.yml:
--------------------------------------------------------------------------------
---
# File: vars/Debian.yml - Debian OS variables for Nomad

nomad_os_packages:
  - curl
  - git
  # libcgroup was bumped to v2 in Ubuntu 22.10 and Debian 12.
  - "{% if (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('22.10', '<')) or (ansible_distribution == 'Debian' and ansible_distribution_version is version('12', '<')) %}libcgroup1{% else %}libcgroup2{% endif %}"
  - unzip
  # cgroup-bin was renamed to cgroup-tools in Ubuntu 19 / Debian 11.
  - "{% if (ansible_distribution == 'Ubuntu' and ansible_distribution_version is version('19', '<')) or (ansible_distribution == 'Debian' and ansible_distribution_version is version('11', '<')) %}cgroup-bin{% else %}cgroup-tools{% endif %}"
--------------------------------------------------------------------------------
/vars/Flatcar.yml:
--------------------------------------------------------------------------------
---
# File: Flatcar.yml - Flatcar variables for Nomad

nomad_os_packages: []
nomad_systemd_unit_path: /etc/systemd/system
--------------------------------------------------------------------------------
/vars/RedHat.yml:
--------------------------------------------------------------------------------
---
# File: vars/RedHat.yml - Red Hat OS variables for Nomad

nomad_os_packages:
  # NOTE(review): only AlmaLinux >= 9 is mapped to curl-minimal here; other
  # EL9 derivatives may need the same treatment -- confirm before extending.
  - "{% if (ansible_distribution == 'AlmaLinux' and ansible_distribution_version is version('9', '>=')) %}curl-minimal{% else %}curl{% endif %}"
  - git
  # libselinux-python was renamed to python3-libselinux on newer releases.
  - "{% if (ansible_distribution == 'Fedora' and ansible_distribution_version is version('28', '<')) or (ansible_distribution == 'CentOS' and ansible_distribution_version is version('8', '<')) or (ansible_distribution == 'Amazon' and ansible_distribution_version is version('3', '<')) or (ansible_distribution == 'OracleLinux' and ansible_distribution_version is version('8', '<')) %}libselinux-python{% else %}python3-libselinux{% endif %}"
  - unzip
--------------------------------------------------------------------------------
/vars/VMware Photon OS.yml:
--------------------------------------------------------------------------------
---
# File: VMware Photon OS.yml - Photon OS variables for Nomad

nomad_os_packages:
  - unzip
--------------------------------------------------------------------------------
/vars/main.yml:
--------------------------------------------------------------------------------
---
# Pure internal helper variables

# True when this host runs the Nomad client ('client' or 'both').
_nomad_node_client: "{{ (nomad_node_role == 'client') or (nomad_node_role == 'both') }}"
# True when this host runs the Nomad server ('server' or 'both').
_nomad_node_server: "{{ (nomad_node_role == 'server') or (nomad_node_role == 'both') }}"
--------------------------------------------------------------------------------
/version.txt:
--------------------------------------------------------------------------------
v1.9.6
--------------------------------------------------------------------------------