├── .ansible-lint ├── .editorconfig ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── tads-header.png ├── .gitignore ├── .travis.yml ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.example.md ├── README.md ├── ansible ├── .yamllint ├── all.yml ├── deploy.yml ├── group_vars │ ├── .gitignore │ ├── all.yml │ ├── dev.yml │ ├── dev_overrides.sample.yml │ ├── localhost.yml │ ├── localhost_bindmounts.sample.yml │ ├── localhost_overrides.sample.yml │ ├── production.yml │ ├── production_encrypted.yml │ ├── test.yml │ ├── vagrant.yml │ └── vagrant_overrides.sample.yml ├── install-dependencies.yml ├── inventories │ ├── localhost │ └── production.sample-baremetal ├── library │ └── docker_info_facts ├── molecule │ └── default │ │ ├── Dockerfile.j2 │ │ ├── molecule.yml │ │ ├── playbook.yml │ │ └── verify.yml ├── provision-00-common.yml ├── provision-01-docker.yml ├── provision-02-docker-swarm.yml ├── provision-03-dev.yml ├── provision.yml ├── requirements.yml ├── roles │ └── docker-stack │ │ ├── .yamllint │ │ ├── defaults │ │ └── main.yml │ │ ├── molecule │ │ └── default │ │ │ ├── Dockerfile.j2 │ │ │ ├── molecule.yml │ │ │ ├── playbook.yml │ │ │ ├── prepare.yml │ │ │ ├── stacks │ │ │ ├── simple_stack │ │ │ │ └── simple_stack.yml.j2 │ │ │ ├── stack_with_resources │ │ │ │ ├── resources │ │ │ │ │ └── test.txt │ │ │ │ └── stack_with_resources.yml.j2 │ │ │ └── stack_with_secrets │ │ │ │ └── stack_with_secrets.yml.j2 │ │ │ └── verify.yml │ │ └── tasks │ │ └── main.yml ├── stacks │ ├── example_app │ │ ├── example_app.yml.j2 │ │ └── resources │ │ │ └── example.txt │ └── traefik │ │ └── traefik.yml.j2 └── vault_keys │ └── .gitignore ├── scripts ├── commands │ ├── ansible-playbook.sh │ ├── ansible-vault.sh │ ├── ansible.sh │ ├── install-dependencies.sh │ ├── terraform.sh │ └── vagrant.sh ├── includes │ ├── ansible.sh │ ├── common.sh │ ├── localhost_ansible.sh │ ├── remote_ansible.sh │ └── vagrant_ansible.sh └── tests │ ├── Dockerfile │ ├── entrypoint.sh │ ├── launcher.sh │ ├── shunit2.sh │ ├── tests.sh │ ├── utils.sh │ └── watch.sh ├── tads ├── terraform ├── environments │ └── production │ │ ├── .gitignore │ │ ├── main.tf │ │ └── outputs.tf └── modules │ └── aws_tads │ ├── data.tf │ ├── elb.tf │ ├── nodes.tf │ ├── outputs.tf │ ├── variables.tf │ └── vpc.tf └── vagrant ├── .gitignore ├── Vagrantfile └── vagrant.sample.yml /.ansible-lint: -------------------------------------------------------------------------------- 1 | skip_list: 2 | - 306 # because of geerlingguy.docker role 3 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # EditorConfig is awesome: https://EditorConfig.org 2 | # For Visual Studio Code, please install this extension: https://github.com/editorconfig/editorconfig-vscode 3 | 4 | root = true 5 | 6 | [*] 7 | indent_style = space 8 | indent_size = 4 9 | end_of_line = lf 10 | charset = utf-8 11 | trim_trailing_whitespace = true 12 | insert_final_newline = true 13 | 14 | [Makefile] 15 | indent_style = tab 16 | 17 | [*.json] 18 | indent_size = 2 19 | 20 | [*.tf] 21 | indent_size = 2 22 | 23 | [*.{yml,yaml,yml.j2,yaml.j2}] 24 | indent_size = 2 25 | 26 | [*.js] 27 | indent_size = 2 28 | 29 | [*.{diff,md}] 30 | trim_trailing_whitespace = false 31 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: 
-------------------------------------------------------------------------------- 1 | @Thomvaill 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: Thomvaill 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior: 15 | 1. Clone the project from master 16 | 2. Change fileA, fileB, fileC 17 | 3. Run command X 18 | 19 | **Expected behavior** 20 | A clear and concise description of what you expected to happen. 21 | 22 | **Environment (please complete the following information):** 23 | - OS/distro: 24 | - Ansible version: 25 | - Terraform version: 26 | - Cloud provider: 27 | - Vagrant version: 28 | 29 | **Additional context** 30 | Add any other context about the problem here. 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: Thomvaill 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Description 4 | 5 | 6 | ## Motivation and Context 7 | 8 | 9 | 10 | 11 | ## How Has This Been Tested? 12 | 13 | 14 | 15 | 16 | 17 | ## Types of changes 18 | 19 | - [ ] Bug fix (non-breaking change which fixes an issue) 20 | - [ ] New feature (non-breaking change which adds functionality) 21 | - [ ] Breaking change (fix or feature that would cause existing functionality to change) 22 | 23 | 24 | ## Checklist: 25 | 26 | 27 | - [ ] My code follows the code style of this project. 28 | - [ ] My change requires a change to the documentation. 29 | - [ ] I have updated the documentation accordingly. 30 | - [ ] I've read the [CONTRIBUTION](https://github.com/Thomvaill/tads-boilerplate/blob/master/CONTRIBUTING.md) guide 31 | - [ ] I have added tests to cover my changes. 32 | - [ ] All new and existing tests passed. 
33 | -------------------------------------------------------------------------------- /.github/tads-header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/thomvaill/tads-boilerplate/fe72d9c3b3e034b70d669fcca30776ac9e4a2bc3/.github/tads-header.png -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | *.retry 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: shell 2 | services: 3 | - docker 4 | before_install: 5 | - sudo pip uninstall --yes pyopenssl && sudo pip install -U pyopenssl # fix for molecule 6 | - ./tads install-dependencies --no-password --force --dev 7 | install: 8 | - ./tads terraform production init 9 | jobs: 10 | include: 11 | - stage: test 12 | name: Lint 13 | script: make lint 14 | - stage: test 15 | name: Test scripts 16 | script: make test-scripts 17 | - stage: test 18 | name: Test Ansible roles 19 | script: make test-ansible-roles 20 | - stage: test 21 | name: Ansible End-to-End test 22 | script: make test-ansible-e2e 23 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to make participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 
39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies within all project spaces, and it also applies when 49 | an individual is representing the project or its community in public spaces. 50 | Examples of representing a project or community include using an official 51 | project e-mail address, posting via an official social media account, or acting 52 | as an appointed representative at an online or offline event. Representation of 53 | a project may be further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at [@thomvaill](https://twitter.com/thomvaill) in PM. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to T.A.D.S. 2 | 3 | :+1::tada: First of all, thanks for taking the time to contribute! :tada::+1: 4 | 5 | All your contributions are very welcome, whether it's: 6 | 7 | - Reporting a bug 8 | - Discussing the current state of the code 9 | - Submitting a fix 10 | - Proposing new features 11 | - Becoming a maintainer 12 | 13 | Thank you so much! :clap: 14 | 15 | ## Development 16 | 17 | ```bash 18 | ./tads install-dependencies --dev 19 | make lint 20 | make test 21 | ``` 22 | 23 | ## Pull Requests 24 | 25 | Pull requests are the best way to propose changes to the codebase (we use [Github Flow](https://guides.github.com/introduction/flow/index.html)). We actively welcome your pull requests. 26 | 27 | 1. Fork the repo and create your branch from `master` 28 | 2. If you've added code that should be tested, add tests 29 | 3. Update the documentation 30 | 4. Ensure the test suite passes (`make test`) 31 | 5. Make sure your code lints (`make lint`) 32 | 6. Issue that pull request! 33 | 34 | ## Issues and Feature Requests 35 | 36 | We use GitHub issues to track bugs and feature requests. Please ensure your description is clear and has sufficient instructions to be able to reproduce the issue. 37 | 38 | ## Coding Style 39 | 40 | - Referer to `.editorconfig`. 
We encourrage you to use a [compatible IDE or plugin](https://editorconfig.org/#download) 41 | - Run `make lint` to conform to our lint rules 42 | 43 | ## License 44 | 45 | By contributing to T.A.D.S., you agree that your contributions will be licensed under its MIT License. 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Thomas Vaillant 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #:## Help 2 | # This will output the help for each task 3 | # thanks to https://marmelab.com/blog/2016/02/29/auto-documented-makefile.html 4 | .PHONY: help 5 | 6 | help: ## This help 7 | @awk 'BEGIN { \ 8 | print "T.A.D.S. 
Makefile";\ 9 | \ 10 | FS = ":.*?## "} \ 11 | /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2} \ 12 | /^#:## / {printf "\n\033[35m%s\033[0m\n", $$2} ' \ 13 | $(MAKEFILE_LIST) 14 | 15 | .DEFAULT_GOAL := help 16 | 17 | #:## Lint tasks 18 | lint: lint-scripts lint-terraform lint-ansible ## Execute all lint tasks 19 | 20 | lint-scripts: ## Perform a shellcheck linting on all scripts 21 | shellcheck tads scripts/**/*.sh 22 | 23 | lint-terraform: ## Perform a "terraform validate" linting 24 | ./tads terraform production validate 25 | 26 | lint-ansible: ## Perform an ansible-lint linting 27 | ansible-lint ansible/*.yml 28 | 29 | #:## Test tasks 30 | test: test-scripts test-ansible-roles test-ansible-e2e ## Execute all test tasks 31 | 32 | test-scripts: ## Run scripts integration tests 33 | ./scripts/tests/launcher.sh 34 | 35 | test-scripts-watch: ## Run scripts integration tests in watch mode 36 | ./scripts/tests/watch.sh 37 | 38 | test-ansible-roles: ## Test each Ansible role 39 | for d in ansible/roles/*; do (cd $${d} && molecule test); done 40 | 41 | test-ansible-e2e: ## End-to-End Ansible test 42 | cd ansible && molecule test 43 | -------------------------------------------------------------------------------- /README.example.md: -------------------------------------------------------------------------------- 1 | # YourCompany infrastructure repository 2 | 3 | This repository implements [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code), and more globally the DevOps mindset. 4 | It includes all the configuration and all the scripts needed to deploy YourCompany stacks either locally or remotely. 5 | This repository should be considered as a single source of truth. 6 | You should also use this repository to set up your development environment. 7 | 8 | This project heavily uses Ansible. If you are not familiar with it, you should read the [Ansible Quickstart guide](https://docs.ansible.com/ansible/latest/user_guide/quickstart.html) before getting started. 9 | 10 | This project was bootstrapped with [T.A.D.S. boilerplate](https://github.com/Thomvaill/tads-boilerplate). 11 | 12 | ## Installation 13 | 14 | ```bash 15 | git clone 16 | cd 17 | ./tads install-dependencies 18 | ``` 19 | 20 | ... this will install project dependencies: Ansible, Vagrant, Virtualbox, and Terraform. 21 | 22 | ## Development environment 23 | 24 | ### Commands 25 | 26 | The `./tads` executable is a companion CLI which is a wrapper around Ansible, Vagrant and Terraform commands. 27 | 28 | ```bash 29 | ./tads ansible-playbook localhost provision 30 | ``` 31 | 32 | ... this will configure your local machine to be able to run YourCompany stacks: it will install Docker and set up a Swarm cluster with one node: your localhost. 33 | 34 | ```bash 35 | ./tads ansible-playbook localhost deploy 36 | ``` 37 | 38 | ... this will deploy all YourCompany's stacks. To deploy only specific stacks, use `--tags` option. Example: `./tads ansible-playbook localhost deploy --tags stack-traefik,stack-XXX`. 39 | 40 | You should run this command every time you change the configuration of your stacks. 41 | 42 | Your application is now accessible on https://yourcompany.localhost/ 43 | 44 | ### Bind mounts 45 | 46 | To be able to develop locally, you should bind mount your code into your containers. 
To do so: 47 | 48 | - Copy `ansible/group_vars/localhost_bindmounts.sample.yml` to `ansible/group_vars/localhost_bindmounts.yml` 49 | - Specify the correct paths in it 50 | - Run `./tads ansible-playbook localhost deploy` to apply your changes 51 | 52 | You can also override some Ansible variables locally by doing the same with the `ansible/group_vars/localhost_overrides.sample.yml` file (useful for credentials and application environment variables). 53 | 54 | ## Test in a production-like environment with Vagrant 55 | 56 | This will deploy your stacks on a 3-node production-like environment, locally with Vagrant. To do so, it will create 3 virtual machines. 57 | 58 | 1. Copy `vagrant/vagrant.sample.yml` to `vagrant/vagrant.yml` and adjust its settings 59 | 2. Run `./tads vagrant up` 60 | 3. Run `./tads ansible-playbook vagrant all` 61 | 62 | Now you will be able to test your stacks deployed on Vagrant at https://yourcompany.test/ 63 | 64 | **Tips:** 65 | 66 | - To destroy your cluster: `./tads vagrant destroy` 67 | - To SSH into the first node: `./tads vagrant ssh vagrant-1` 68 | 69 | ## Deploy to production 70 | 71 | 1. Make sure you have a working SSH key pair 72 | 2. Make sure you have the correct ansible-vault key in `ansible/vault_keys/production` 73 | 3. To create the environment with Terraform: `./tads terraform production apply` 74 | 4. To provision: `./tads ansible-playbook production provision` 75 | 5. To deploy: `./tads ansible-playbook production deploy` 76 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # T.A.D.S. boilerplate 2 | 3 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) 4 | 5 |
6 | 7 | Thanks for stopping by! Unfortunately, I can no longer maintain this project due to time constraints and the overall shift away from Docker Swarm in favor of Kubernetes. 8 | I’m keeping the repo archived because I still believe Docker Swarm offered a great DevOps experience: letting developers and Ops share the same tool seamlessly from development to production, something that’s a bit harder to replicate with Kubernetes. While there won’t be any further updates, please feel free to explore and fork as you like! 9 | 10 |
11 | 12 |
The power of Ansible and Terraform + the simplicity of Swarm = DevOps on :fire::fire::fire:
13 |
14 | 15 | ![T.A.D.S. logo](.github/tads-header.png) 16 | 17 | - [:tada: What is it?](#tada-what-is-it) 18 | - [:dart: Who is it for?](#dart-who-is-it-for) 19 | - [:muscle: Philosophy](#muscle-philosophy) 20 | - [:lock: Knowledge prerequisites](#lock-knowledge-prerequisites) 21 | - [:lock: Technical prerequisites](#lock-technical-prerequisites) 22 | - [:rocket: Quick start](#rocket-quick-start) 23 | - [1. Make this repository yours](#1-make-this-repository-yours) 24 | - [2. Install the required dependencies](#2-install-the-required-dependencies) 25 | - [3. Provision your local machine and deploy the example stack](#3-provision-your-local-machine-and-deploy-the-example-stack) 26 | - [4. Write your own Docker Swarm Compose files](#4-write-your-own-docker-swarm-compose-files) 27 | - [5. Test on a Vagrant cluster](#5-test-on-a-vagrant-cluster) 28 | - [6. Edit and encrypt your production environment variables](#6-edit-and-encrypt-your-production-environment-variables) 29 | - [7.a. Create, provision and deploy your production environment with Terraform](#7a-create-provision-and-deploy-your-production-environment-with-terraform) 30 | - [7.b. Provision and deploy your production environment to an existing infrastructure](#7b-provision-and-deploy-your-production-environment-to-an-existing-infrastructure) 31 | - [8. Add other remote environments](#8-add-other-remote-environments) 32 | - [9. Make your team members autonomous](#9-make-your-team-members-autonomous) 33 | - [:question: FAQ](#question-faq) 34 | - [Contributing](#contributing) 35 | - [Acknowledgments](#acknowledgments) 36 | - [License](#license) 37 | 38 | ## :tada: What is it? 39 | 40 | A boilerplate to create a full Infrastructure as Code (IaC) repository, from provisioning to deployment with: 41 | 42 | - **Terraform** to create your cloud infrastructure 43 | - **Vagrant** to reproduce a production-like environment locally 44 | - **Ansible** to provision Virtual Machines and set up the **Docker Swarm** cluster 45 | - **Ansible** again to deploy your stacks 46 | 47 | It handles different environments: 48 | 49 | - `localhost`: a single node Docker Swarm cluster on your machine, useful for development ([demo](https://asciinema.org/a/282625)) 50 | - `vagrant`: a 3 nodes production-like cluster deployed with Vagrant on your machine, useful for testing ([demo](https://asciinema.org/a/282636)) 51 | - `production`: your production environment! It can be created by Terraform or you can use an existing bare metal/VMs infrastructure ([demo](https://asciinema.org/a/282640)) 52 | - other remote production-like environments of your choice: staging, QA... 53 | 54 | On top of that, it features: 55 | 56 | - A companion CLI (`./tads`), which is a wrapper around Terraform, Ansible and Vagrant commands. For example: `ansible-playbook -i inventories/production -D --vault-id production@vault_keys/production deploy.yml` becomes `./tads ansible-playbook production deploy`. More convenient, don't you think? 
:smirk: 57 | - Docker Swarm Compose files templated with Jinja2, so you can define your services once, while being able to customize them in each environment, from the same file 58 | - An example which implements [dockerswarm.rocks' recommended good practices](https://dockerswarm.rocks/): traefik reverse proxy with HTTPS (even locally), and more coming soon 59 | - A smart `/etc/hosts` management to access your local and Vagrant applications with `.localhost` and `.test` https URIs 60 | - AES-256 encryption of your production credentials with ansible-vault 61 | 62 | With T.A.D.S., you will be able to onboard a new developer on your project in less than 3 minutes, with just 3 commands! Even if you have a complex microservices architecture. Forget about your outdated wikis or installation procedures, they are no longer needed! See the [example user README](README.example.md) to get a preview of what your new procedures could look like. 63 | 64 | [![Example of a fresh development environment setup with T.A.D.S. in 02:30!](https://asciinema.org/a/282625.svg)](https://asciinema.org/a/282625) 65 |
Example of a fresh development environment setup with T.A.D.S. in 02:30!
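In practice, those three onboarding commands boil down to something like this (a sketch only — the repository URL is your own, and `localhost all` simply chains the provision and deploy playbooks described in the Quick start below):

```bash
git clone <your_infrastructure_repository>   # the repo you created from this boilerplate
cd <your_infrastructure_repository>
./tads install-dependencies                  # installs Ansible, Vagrant, Virtualbox and Terraform
./tads ansible-playbook localhost all        # provision + deploy the stacks on your machine
```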
66 | 67 | ## :dart: Who is it for? 68 | 69 | If you recognize yourself into some of these statements, this project is definitely for you: 70 | 71 | - I am the only one who understands how the production environment works 72 | - I still have to execute SSH commands in production and this makes me sad because I cannot rollback or be reviewed :( 73 | - Setting up a new development environment for a new team member takes an entire day, and a lot of resources 74 | - My team suffers from "Microservices Hell": we have to install many services before being able to dev 75 | - Developers use docker-compose on their machine, while we use something else in production 76 | - I want to do Infrastructure as Code (IaC) 77 | - I want to promote the DevOps mindset in my team 78 | - I don't need/want Kubernetes features and complexity 79 | - I don't want to be vendor locked by a service like AWS ECS 80 | - I start a new project and I want to bootstrap it quickly with good practices presets 81 | 82 | On the contrary, this project might not be for you if: 83 | 84 | - You have a large cluster (more than 100 machines) 85 | - You need Kubernetes features like autoscaling 86 | 87 | ... but don't be sad, I am thinking of creating a similar project for K8s ;) Tell me if you want to help! 88 | 89 | ## :muscle: Philosophy 90 | 91 | - Every environment infrastructure, including dev, is versioned into one repository 92 | - Same development environment installation procedure for everyone 93 | - No SSH, no manual actions, everything must be code 94 | - Every change to infrastructure must be code reviewed to: 95 | - Avoid mistakes 96 | - Make other (including non-DevOps) team members able to learn 97 | - Everyone, not only DevOps team members, is able to: 98 | - Create their development environment in a minute with just one command 99 | - Reproduce a production-like environment locally 100 | - Understand the whole infrastructure 101 | - Propose modifications to the infrastructure, while being able to test them locally 102 | - This project is a boilerplate, not a framework: modify it to fulfill your needs! 103 | - The companion CLI is written in Bash so it is easy to understand what a command does, and it is easy to modify command behaviors or to add new ones 104 | 105 | ## :lock: Knowledge prerequisites 106 | 107 | Before going further, I assume that you already have the knowledge **and practice** with Docker Swarm mode, Ansible, Terraform, and Infrastructure as Code in general. 108 | If it is not the case, I urge you to study and practice **before**. 
You can use this material as a starter: 109 | 110 | - [Getting started with swarm mode](https://docs.docker.com/engine/swarm/swarm-tutorial/) and [Docker Swarm Rocks](https://dockerswarm.rocks/) 111 | - [Ansible Quickstart guide](https://docs.ansible.com/ansible/latest/user_guide/quickstart.html) 112 | - [Terraform getting started](https://learn.hashicorp.com/terraform/getting-started/install.html) 113 | 114 | ## :lock: Technical prerequisites 115 | 116 | - **Local machine**: 117 | - Ubuntu >= 18.04 or similar (PRs are welcome to update this list) 118 | - Ansible >= 2.8 119 | - Vagrant >= 2.0 *(optional)* 120 | - Virtualbox >= 5.2 *(optional)* 121 | - Terraform >= 0.12 *(optional)* 122 | - **Remote environments**: 123 | - A cloud provider account (tested on AWS and Digital Ocean so far) 124 | - ***OR*** 125 | - An existing bare metal / VMs infrastructure, with Linux-based OSes (tested on Ubuntu server 18.04 and Debian 9 so far) 126 | 127 | Have a look at [Install the required dependencies](#2-install-the-required-dependencies) for installation procedures. 128 | 129 | **OS X:** It should not be that hard to make the project run on OS X. PRs are welcome! I am also thinking of creating a dockerized version of the project to improve compatibility. 130 | 131 | ## :rocket: Quick start 132 | 133 | ### 1. Make this repository yours 134 | 135 | Clone this repo, create your own and push the code to it. 136 | 137 | ```bash 138 | git clone --single-branch https://github.com/Thomvaill/tads-boilerplate.git 139 | cd <your_project_directory> 140 | git remote set-url origin <your_git_repository_url> 141 | git push 142 | ``` 143 | 144 | ### 2. Install the required dependencies 145 | 146 | This will install Ansible, Vagrant, Virtualbox and Terraform on your local machine: 147 | 148 | ```bash 149 | ./tads install-dependencies 150 | ``` 151 | 152 | You can also manually install the dependencies if you prefer. 153 | 154 | ### 3. Provision your local machine and deploy the example stack 155 | 156 | 1. Copy `ansible/group_vars/localhost_overrides.sample.yml` to `ansible/group_vars/localhost_overrides.yml` 157 | 2. Add the `ansible_user` variable with your user name 158 | 159 | ```raw 160 | ansible_user: <your_user_name> 161 | ``` 162 | 163 | 3. Provision and Deploy 164 | 165 | ```bash 166 | ./tads ansible-playbook localhost provision 167 | ./tads ansible-playbook localhost deploy 168 | ``` 169 | 170 | The first `./tads` command will: 171 | 172 | - Install Docker on your local machine 173 | - Set up a Swarm cluster with one manager node 174 | - Hardcode `yourcompany.localhost` into your `/etc/hosts` file 175 | 176 | And the second one will deploy the `traefik` and `example_app` stacks. 177 | If everything went well, you are now able to access it at this URL: https://yourcompany.localhost/ 178 | 179 | ### 4. Write your own Docker Swarm Compose files 180 | 181 | Now that the example stack is running on your machine, you can deploy your own services. 182 | 183 | First, you probably need to change the `domains` dict in `ansible/group_vars/all.yml`. 184 | This file contains the default values of all Ansible variables. These values can be overridden later in other group_vars files. 185 | You are free to add your variables in it. 186 | 187 | Then, you can write your own Docker Swarm Compose files, following this naming convention: `ansible/stacks/<stack_name>/<stack_name>.yml.j2` 188 | These files are [Jinja2 templates](https://docs.ansible.com/ansible/latest/user_guide/playbooks_templating.html). 189 | You are highly encouraged to use Ansible variables in them, so your template file can be used across all your environments.
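For illustration, a stack template might mix Compose syntax with Ansible variables like this (a hypothetical sketch — the stack, image and replica logic are placeholders; `app_env` and the `apps` overlay network come from `ansible/group_vars/all.yml` and `ansible/deploy.yml`):

```yaml
# ansible/stacks/my_app/my_app.yml.j2 (hypothetical sketch)
version: "3.7"

services:
  web:
    image: myorg/my_app:1.2.3        # always pin a specific image tag
    environment:
      APP_ENV: "{{ app_env }}"       # Ansible variable, resolved per environment
    networks:
      - apps
    deploy:
      # fewer replicas outside production, thanks to a Jinja2 expression
      replicas: {{ 2 if app_env == 'production' else 1 }}

networks:
  apps:
    external: true                   # shared overlay network created in ansible/deploy.yml
```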
190 | Have a look at `ansible/stacks/example_app/example_app.yml.j2` to see a good example. 191 | 192 | Finally, do not forget to add your new stacks to `ansible/deploy.yml`. 193 | 194 | To help you with the `ansible/group_vars` directory, here is a representation of Ansible groups: 195 | 196 | ```raw 197 | all 198 | ├── dev 199 | | ├── localhost 200 | | └── vagrant 201 | ├── production 202 | ├── staging 203 | └── any_other_remote_environment... 204 | ``` 205 | 206 | Each group has its `_overrides` counterpart, which enables you to override some variables locally in a `xxx_overrides.yml` file, 207 | which is not versionned. 208 | Have a look at `.sample.yml` files to see some examples. 209 | 210 | While developing, perform some `./tads ansible-playbook localhost deploy` to test your deployment. 211 | Do not forget to run `./tads ansible-playbook localhost provision` again if you have changed domain names. 212 | 213 | **Tips:** 214 | 215 | - You can use `./tads ansible-playbook localhost all` to provision and deploy in a single command 216 | - You can use tags to go quicker. Example: `./tads ansible-playbook localhost all --tags dev,stack-traefik` 217 | - Always reference your Docker images with tags! It is a bad practice to rely on the `:latest` tag because you don't control what will be pushed to production. With specific tags, you will have idempotent deployments and you will be able to perform rollbacks 218 | 219 | ### 5. Test on a Vagrant cluster 220 | 221 | Now that you are happy with your localhost environment, you should test the provisioning and the deployment 222 | on an environment which looks more like a production environment. 223 | For instance, on localhost, you can have just one node! And maybe you forgot some dependencies that are already installed on your computer. 224 | With Vagrant, you will be able to test your stacks on a fresh 3 nodes Swarm cluster. 225 | 226 | 1. Copy `vagrant/vagrant.sample.yml` to `vagrant/vagrant.yml` and adjust its settings 227 | 2. Run `./tads vagrant up` 228 | 3. Run `./tads ansible-playbook vagrant all` 229 | 230 | Now, you will be able to test your stacks deployed on Vagrant. If you have kept the example app, you can test it on https://yourcompany.test/ 231 | 232 | **Tips:** 233 | 234 | - To destroy your cluster: `./tads vagrant destroy` 235 | - To SSH into the first node: `./tads vagrant ssh vagrant-1` 236 | 237 | ### 6. Edit and encrypt your production environment variables 238 | 239 | Before going further, you should edit your production group_vars files: 240 | 241 | - `ansible/group_vars/production.yml` 242 | - `ansible/group_vars/production_encrypted.yml` 243 | 244 | When you are done, **do not commit `production_encrypted.yml`**! You have to encrypt it first: 245 | 246 | - `./tads ansible-vault production init-key` 247 | - `./tads ansible-vault production encrypt ansible/group_vars/production_encrypted.yml` 248 | 249 | The first command has generated a random key in `ansible/vault_keys/production`. 250 | You must not commit this file. You should keep it in a safe place, and share it with your authorized team members securely. 251 | If you lose it, you won't be able to decrypt your files anymore. The second one has encrypted your file 252 | with AES-256. You can now commit it. 253 | 254 | You can still edit this file by running `./tads ansible-vault production edit ansible/group_vars/production_encrypted.yml`. Always check that you do not commit an unencrypted version of this file by mistake. 255 | 256 | ### 7.a. 
Create, provision and deploy your production environment with Terraform 257 | 258 | Now that everything is fine locally, it is time to create and deploy your production environment! 259 | 260 | The `terraform/environments/production` directory is an AWS example. PRs are welcome for other providers! 261 | To make it work, you should: 262 | 263 | - Have a working SSH key pair 264 | - Have a registered domain name managed by Route53 265 | - [Install AWS CLI](https://docs.aws.amazon.com/fr_fr/cli/latest/userguide/cli-chap-install.html): `pip3 install awscli --upgrade --user` 266 | - Configure your credentials: `aws configure` 267 | 268 | Terraform will use the default profile's credentials. 269 | 270 | Then, you can run `./tads terraform production init` and `./tads terraform production apply`. 271 | This example will create: 272 | 273 | - A custom VPC 274 | - 3 subnets in separate Availability Zones, for high availability 275 | - 3 manager nodes and 1 worker node (with spread placement groups, for high availability) 276 | - 1 classic ELB to distribute TCP traffic on ports 80 and 443 to the manager nodes (traefik is responsible for SSL termination) 277 | 278 | The CLI will also create the corresponding Ansible inventory for you in `ansible/inventories/production` from Terraform outputs. You should commit it. 279 | You should also commit the Terraform state file, or better: [use a remote state](https://www.terraform.io/docs/state/remote.html). 280 | 281 | Then, you have to [create an alias in Route53 to the ELB](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-to-elb-load-balancer.html). 282 | 283 | Finally, you can run `./tads ansible-playbook production all` and your website will be available! 284 | 285 | **Disclaimer:** 286 | 287 | - This is an example; you should not use it as is in production! 288 | - Although the resources created by the example are eligible for the free tier, charges may still occur depending on your situation 289 | - Use `./tads terraform production destroy` with caution :-) 290 | 291 | ### 7.b. Provision and deploy your production environment to an existing infrastructure 292 | 293 | If you don't want to use a cloud provider, you can use classic Virtual Machines. 294 | For a production environment, you should have at least 3 manager nodes, so 3 VMs. 295 | They should be fresh installs. Ubuntu server 18.04 or Debian 9 is fine. 296 | 297 | 1. Make sure you can SSH into the VMs with your key pair 298 | 2. Copy `ansible/inventories/production.sample-baremetal` to `ansible/inventories/production` 299 | 3. Edit it 300 | 4. Run `./tads ansible-playbook production all` and your website will be available! 301 | 302 | ### 8. Add other remote environments 303 | 304 | You can add other remote environments that behave just like production. 305 | 306 | For Terraform, you just have to duplicate `terraform/environments/production` to the directory of your choice, e.g. `staging`. 307 | After editing it, you can run `./tads terraform staging apply`; it will create the `ansible/inventories/staging` inventory file. 308 | 309 | For an existing bare metal infrastructure, you just have to create the `ansible/inventories/staging` inventory file.
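As a sketch (host names, ports and addresses are placeholders), such an inventory can mirror `ansible/inventories/production.sample-baremetal` with the group names adapted:

```raw
# ansible/inventories/staging (hypothetical example)
staging-1 ansible_user=root ansible_host=<hostname_or_ip> ansible_port=<ssh_port> dockerswarm_advertise_addr=<private_ip>
staging-2 ansible_user=root ansible_host=<hostname_or_ip> ansible_port=<ssh_port> dockerswarm_advertise_addr=<private_ip>
staging-3 ansible_user=root ansible_host=<hostname_or_ip> ansible_port=<ssh_port> dockerswarm_advertise_addr=<private_ip>

[staging]
staging-[1:3]

[docker:children]
staging

[staging_encrypted:children]
staging

[dockerswarm_manager]
staging-[1:3]

[dockerswarm_worker]

[docker:vars]
dockerswarm_iface=eth0
```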
310 | 311 | Then, in Ansible, you have to create these files: 312 | 313 | - `ansible/group_vars/staging_encrypted.yml` 314 | - `ansible/group_vars/staging.yml` 315 | 316 | Then, create the ansible-vault key and encrypt the file: 317 | 318 | - `./tads ansible-vault staging init-key` 319 | - `./tads ansible-vault staging encrypt ansible/group_vars/staging_encrypted.yml` 320 | 321 | Finally, provision and deploy! `./tads ansible-playbook staging all` 322 | 323 | ### 9. Make your team members autonomous 324 | 325 | This is one of this project's goals: DevOps is not a job, it is a mindset! 326 | Now that you have a beautiful IaC repository, it is time to onboard your team members. 327 | 328 | 1. Replace this `README.md` with `README.example.md` and customize it, so your team can use the project easily 329 | 2. Make your developers use this project to configure their machines and develop 330 | 3. Let your developers update/create stacks on their own, and show them how to test their changes locally 331 | 4. Enjoy :-) 332 | 333 | ## :question: FAQ 334 | 335 | ### Where is the companion CLI documentation? 336 | 337 | There is no documentation for the CLI since you will probably modify it or add new commands! 338 | To get some help, just run `./tads`. Do not hesitate to have a look at the source in the `scripts` directory. This CLI is just a wrapper around Terraform, Ansible and Vagrant commands. 339 | 340 | ### What if I don't want to deploy all the stacks locally? 341 | 342 | Use Ansible tags! For example, if you just want to deploy the `traefik` and `myapp` stacks: `./tads ansible-playbook localhost deploy --tags stack-traefik,stack-myapp`. 343 | 344 | ### How to do Continuous Delivery / Deployment? 345 | 346 | I have not taken the time to develop this feature properly yet. But basically, what you can do is: 347 | 348 | - Always reference your Docker images with specific tags 349 | - Manage those tags in a separate `tags.yml` file 350 | - Write a script that can update a tag, perform a `./tads ansible-playbook production deploy`, and in case of success commit and push the file 351 | - Make your CI/CD tool use this script in your deployment pipeline 352 | 353 | ### How to manage persistent storage? 354 | 355 | This topic is beyond the scope of this project and depends a lot on your infrastructure / cloud provider. 356 | I advise you to have a look at [REX-Ray](https://github.com/rexray/rexray). 357 | 358 | Implementing this plugin in the boilerplate might become a future feature. 359 | 360 | ## Contributing 361 | 362 | Pull Requests are more than welcome! Please read [CONTRIBUTING.md](CONTRIBUTING.md) for more details. 363 | 364 | Development: 365 | 366 | ```bash 367 | ./tads install-dependencies --dev 368 | make lint 369 | make test 370 | ``` 371 | 372 | ## Acknowledgments 373 | 374 | - John Patterson for his [ansible-swarm-playbook](https://github.com/nextrevision/ansible-swarm-playbook) and his [article](https://thisendout.com/2016/09/13/deploying-docker-swarm-with-ansible/) 375 | - Jeff Geerling for his [pip](https://github.com/geerlingguy/ansible-role-pip) and [docker](https://github.com/geerlingguy/ansible-role-docker) Ansible roles 376 | - The [forMetris](https://github.com/forMetris) team for being the first users/testers of the project, and especially Axel Marbois for his contribution 377 | 378 | ## License 379 | 380 | This project is licensed under the MIT license, Copyright (c) 2019 Thomas Vaillant. For more information, see the [LICENSE](LICENSE) file.
381 | -------------------------------------------------------------------------------- /ansible/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /ansible/all.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: provision.yml 2 | - import_playbook: deploy.yml 3 | -------------------------------------------------------------------------------- /ansible/deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # deploy playbook 4 | ## 5 | # In this file, you should: 6 | # - Create global overlay networks which are shared between stacks (eg. the traefik one) 7 | # - Define your stacks with the "docker-stack" role 8 | # 9 | # You can use tags, so you can deploy only one stack 10 | # Example: ./tads ansible-playbook production deploy --tags stack-traefik 11 | ## 12 | 13 | - hosts: dockerswarm_manager[0] # executed on the first Swarm manager 14 | pre_tasks: 15 | - name: Create Docker traefik/apps network 16 | docker_network: 17 | name: apps 18 | driver: overlay 19 | 20 | roles: 21 | # TRAEFIK 22 | - role: docker-stack 23 | tags: 24 | - stack-traefik 25 | vars: 26 | docker_stack_name: traefik 27 | 28 | # EXAMPLE_APP 29 | - role: docker-stack 30 | tags: 31 | - stack-example_app 32 | vars: 33 | docker_stack_name: example_app 34 | docker_stack_secrets: 35 | - name: example_app_password-v1 36 | data: "{{ credentials.example_app.password }}" 37 | -------------------------------------------------------------------------------- /ansible/group_vars/.gitignore: -------------------------------------------------------------------------------- 1 | localhost_bindmounts.yml 2 | *_overrides.yml 3 | -------------------------------------------------------------------------------- /ansible/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # Variables default values 4 | ## 5 | # We recommend to use production values here 6 | # These variables can be overridden later in other group_vars files 7 | # 8 | # Here is the Ansible groups structure: 9 | # all 10 | # ├── dev 11 | # | ├── localhost 12 | # | └── vagrant 13 | # ├── production 14 | # ├── staging 15 | # └── any_other_remote_environment... 
16 | # 17 | # Each group has its "_overrides" counterpart, which enables you to override 18 | # some variables locally in a xxx_overrides.yml file, which is not versionned 19 | # Have a look at .sample.yml files to see some examples 20 | ## 21 | 22 | 23 | # Environment 24 | # Useful variable to perform Jinja2 conditions in your stack templates 25 | # or to pass as an environment variable to your Docker services 26 | app_env: production 27 | 28 | # Allowed SSH keys 29 | # It is better to define them for each environment rather than globally 30 | # You should leave it empty here 31 | docker_authorized_ssh_keys: [] 32 | 33 | # Enable traefik ACME feature 34 | # If enabled, traefik will use Let's Encrypt to get an SSL certificate automatically 35 | # Should be disabled only on local environments (localhost or vagrant) 36 | letsencrypt: True 37 | 38 | # Domains 39 | # You should list all domain names that you use in the "domains" dict 40 | # This dict will be used by Ansible to hardcode them in /etc/hosts when you use local environments (localhost or vagrant) 41 | # That is why it is also recommended to use a "domains_tld" variable, so you don't have to repeat yourself in other group_vars files 42 | domains_tld: com 43 | domains: 44 | main: "yourcompany.{{ domains_tld }}" 45 | main_www: "www.yourcompany.{{ domains_tld }}" 46 | 47 | # Bind mounts 48 | # Must only be used in localhost environment 49 | # Useful to mount code into your containers, to develop 50 | dev_bindmounts: 51 | 52 | # geerlingguy.docker role configuration 53 | docker_edition: ce 54 | -------------------------------------------------------------------------------- /ansible/group_vars/dev.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # local environments variables 4 | ## 5 | # This group applies to local environments (localhost and vagrant) 6 | # 7 | # You may override some variables from all.yml here... 8 | ## 9 | 10 | # We can't use ACME in local environments 11 | # Traefik's self signed certificate will be used 12 | letsencrypt: False 13 | 14 | # Credentials 15 | # You should set default development credentials here 16 | credentials: 17 | example_app: 18 | username: dev 19 | password: 123456789 20 | -------------------------------------------------------------------------------- /ansible/group_vars/dev_overrides.sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # local environments overrides 4 | ## 5 | # This file should be copied to "dev_overrides.yml" and be customized for this specific machine 6 | # It must not be versionned 7 | # 8 | # This group applies to local environments (localhost and vagrant) 9 | # 10 | # You may override some variables from all.yml here... 
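# As an illustration only, a dev_overrides.yml could contain something like this (hypothetical values;
# any variable from all.yml or dev.yml can be overridden):
#
# credentials:
#   example_app:
#     username: me
#     password: my_local_dev_password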
11 | ## 12 | -------------------------------------------------------------------------------- /ansible/group_vars/localhost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # localhost environment variables 4 | ## 5 | 6 | # We use the .localhost reserved TLD 7 | # @see https://tools.ietf.org/id/draft-chapin-rfc2606bis-00.html#legacy 8 | domains_tld: localhost 9 | -------------------------------------------------------------------------------- /ansible/group_vars/localhost_bindmounts.sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # localhost environment bind mounts 4 | ## 5 | # This file should be copied to "localhost_bindmounts.yml" and be customized for this specific machine 6 | # It must not be versionned 7 | # 8 | # Bind mounts are useful to mount some code into your Docker containers, to develop 9 | ## 10 | 11 | dev_bindmounts: 12 | example_app: ~/git/example_app/src 13 | -------------------------------------------------------------------------------- /ansible/group_vars/localhost_overrides.sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # localhost environment overrides 4 | ## 5 | # This file should be copied to "localhost_overrides.yml" and be customized for this specific machine 6 | # It must not be versionned 7 | # 8 | # You may override some variables from all.yml here... 9 | ## 10 | -------------------------------------------------------------------------------- /ansible/group_vars/production.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # production environment variables 4 | ## 5 | # You may override some variables from all.yml here... 6 | ## 7 | 8 | # docker_authorized_ssh_keys: 9 | # - "ssh-rsa XXX userA@host" 10 | # - "ssh-rsa YYY userB@host" 11 | -------------------------------------------------------------------------------- /ansible/group_vars/production_encrypted.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # production environment ENCRYPTED variables 4 | ## 5 | # This file must be encrypted before being commited! 6 | # 7 | # To do so: 8 | # 1. Run ./tads ansible-vault init-key production 9 | # 2. Save the generated key (in ansible/vault_keys/production) in a secure place for you and your authorized team members 10 | # 3. 
Run ./tads ansible-vault encrypt ansible/group_vars/production_encrypted.yml 11 | # 12 | # Nobody will be able to see or use the contents of this file unless he has the key (AES-256 encrypted) 13 | # 14 | # This file should be typically used to store credentials 15 | ## 16 | 17 | # Credentials 18 | credentials: 19 | example_app: 20 | username: prod 21 | password: my_ultra_secured_production_pa$$w0rd 22 | -------------------------------------------------------------------------------- /ansible/group_vars/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # test (molecule) variables 4 | ## 5 | 6 | letsencrypt: False 7 | 8 | credentials: 9 | example_app: 10 | username: test 11 | password: 123456789 12 | -------------------------------------------------------------------------------- /ansible/group_vars/vagrant.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # vagrant environment variables 4 | ## 5 | 6 | # We use the .test reserved TLD 7 | # @see https://tools.ietf.org/id/draft-chapin-rfc2606bis-00.html#legacy 8 | domains_tld: test 9 | -------------------------------------------------------------------------------- /ansible/group_vars/vagrant_overrides.sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # vagrant environment overrides 4 | ## 5 | # This file should be copied to "vagrant_overrides.yml" and be customized for this specific machine 6 | # It must not be versionned 7 | # 8 | # You may override some variables from all.yml here... 9 | ## 10 | -------------------------------------------------------------------------------- /ansible/install-dependencies.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # install-dependencies playbook 4 | ## 5 | # Used by ./tads install-dependencies 6 | ## 7 | 8 | - hosts: localhost 9 | tasks: 10 | - name: Install Vagrant, Virtualbox and other dependencies (linux) 11 | become: yes 12 | package: 13 | name: "{{ item }}" 14 | state: present 15 | with_items: 16 | - virtualbox 17 | - vagrant 18 | - jq 19 | when: ansible_facts['os_family'] != "Darwin" 20 | 21 | - name: Install Vagrant & Virtualbox (mac) 22 | become: no 23 | homebrew_cask: 24 | name: "{{ item }}" 25 | state: present 26 | with_items: 27 | - virtualbox 28 | - vagrant 29 | when: ansible_facts['os_family'] == "Darwin" 30 | 31 | - name: Other dependencies (mac) 32 | become: no 33 | homebrew: 34 | name: "{{ item }}" 35 | state: present 36 | with_items: 37 | - coreutils # for GNU ls 38 | - jq 39 | when: ansible_facts['os_family'] == "Darwin" 40 | 41 | - name: Install Terraform (linux) 42 | shell: > 43 | wget -O /tmp/terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" 44 | && unzip -d ~/.local/bin /tmp/terraform.zip 45 | && rm -f /tmp/terraform.zip 46 | environment: 47 | TERRAFORM_VERSION: 0.12.12 48 | args: 49 | creates: ~/.local/bin/terraform 50 | warn: False 51 | when: ansible_facts['os_family'] != "Darwin" 52 | 53 | - name: Install Terraform Homebrew Tap (Mac) 54 | homebrew_tap: 55 | name: hashicorp/tap 56 | state: present 57 | when: ansible_facts['os_family'] == "Darwin" 58 | 59 | - name: Install Terraform (Mac) 60 | homebrew: 61 | name: hashicorp/tap/terraform 62 | state: present 63 | when: ansible_facts['os_family'] == "Darwin" 64 | 65 | - hosts: localhost 66 | tags: 67 | - dev_dependencies 68 
| tasks: 69 | - name: Install ansible-lint and molecule 70 | become: True 71 | pip: 72 | name: 73 | - ansible-lint 74 | - docker 75 | - molecule==2.22 76 | - molecule[docker] 77 | 78 | - name: Install shellcheck 79 | become: True 80 | shell: > 81 | (wget -qO- "https://storage.googleapis.com/shellcheck/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar -xJv --directory /tmp) 82 | && cp "/tmp/shellcheck-${SHELLCHECK_VERSION}/shellcheck" /usr/bin/ 83 | && rm -rf /tmp/shellcheck-${SHELLCHECK_VERSION} 84 | environment: 85 | SHELLCHECK_VERSION: v0.6.0 86 | args: 87 | creates: /usr/bin/shellcheck 88 | warn: False 89 | -------------------------------------------------------------------------------- /ansible/inventories/localhost: -------------------------------------------------------------------------------- 1 | ### 2 | # localhost environment inventory file 3 | ## 4 | # This file should not be edited 5 | # Please use these files instead: 6 | # - ansible/group_vars/localhost.yml 7 | # - ansible/group_vars/localhost_overrides.yml 8 | # - ansible/group_vars/localhost_bindmounts.yml 9 | ## 10 | 11 | 127.0.0.1 12 | 13 | [localhost] 14 | 127.0.0.1 15 | 16 | [localhost:vars] 17 | dockerswarm_iface=lo 18 | swarm_dev_ip=127.0.0.1 19 | 20 | [localhost_bindmounts] 21 | 127.0.0.1 22 | 23 | [localhost_overrides] 24 | 127.0.0.1 25 | 26 | [dev] 27 | 127.0.0.1 28 | 29 | [dev_overrides] 30 | 127.0.0.1 31 | 32 | [docker] 33 | 127.0.0.1 34 | 35 | [dockerswarm_manager] 36 | 127.0.0.1 37 | 38 | [dockerswarm_worker] 39 | 40 | -------------------------------------------------------------------------------- /ansible/inventories/production.sample-baremetal: -------------------------------------------------------------------------------- 1 | ### 2 | # production environment inventory file EXAMPLE 3 | ## 4 | # This example should be used if you don't use Terraform, 5 | # ie. you have an existing bare metal/VM infrastructure. 6 | # 7 | # This example shows a 3 nodes cluster: 3 managers, 0 workers 8 | # 9 | # Usage: 10 | # - copy this file to "production" 11 | # - replace and 12 | # - ./tads ansible-playbook production provision 13 | # - ./tads ansible-playbook production deploy 14 | ## 15 | 16 | node-1 ansible_user=root ansible_host= ansible_port= dockerswarm_advertise_addr= 17 | node-2 ansible_user=root ansible_host= ansible_port= dockerswarm_advertise_addr= 18 | node-3 ansible_user=root ansible_host= ansible_port= dockerswarm_advertise_addr= 19 | 20 | [production] 21 | node-[1:3] 22 | 23 | [docker:children] 24 | production 25 | 26 | [production_encrypted:children] 27 | production 28 | 29 | [dockerswarm_manager] 30 | node-[1:3] 31 | 32 | [dockerswarm_worker] 33 | # if you add more nodes, you can define them as workers here 34 | 35 | [docker:vars] 36 | dockerswarm_iface=eth0 37 | -------------------------------------------------------------------------------- /ansible/library/docker_info_facts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | ## 3 | # This Ansible Python module is used by provision-02-docker-swarm.yml 4 | # to set up the Swarm cluster 5 | ## 6 | # 7 | # Source: https://github.com/nextrevision/ansible-swarm-playbook/blob/master/library/docker_info_facts 8 | # Copyright 2016, This End Out, LLC. 9 | # 10 | # Licensed under the Apache License, Version 2.0 (the "License"); 11 | # you may not use this file except in compliance with the License. 
12 | # You may obtain a copy of the License at 13 | # 14 | # http://www.apache.org/licenses/LICENSE-2.0 15 | # 16 | # Unless required by applicable law or agreed to in writing, software 17 | # distributed under the License is distributed on an "AS IS" BASIS, 18 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 19 | # See the License for the specific language governing permissions and 20 | # limitations under the License. 21 | ## 22 | 23 | DOCUMENTATION = """ 24 | --- 25 | module: docker_info_facts 26 | short_description: 27 | - A module for injecting Docker info as facts. 28 | description: 29 | - A module for injecting Docker info as facts. 30 | author: nextrevision 31 | """ 32 | 33 | EXAMPLES = """ 34 | - name: load docker info facts 35 | docker_info_facts: 36 | """ 37 | 38 | docker_lib_missing=False 39 | 40 | try: 41 | from docker import Client 42 | except: 43 | try: 44 | from docker import APIClient as Client 45 | except: 46 | docker_lib_missing=True 47 | 48 | 49 | def _get_docker_info(): 50 | try: 51 | return Client().info(), False 52 | except Exception as e: 53 | return {}, e.message 54 | 55 | 56 | def main(): 57 | module = AnsibleModule( 58 | argument_spec=dict(), 59 | supports_check_mode=False 60 | ) 61 | 62 | if docker_lib_missing: 63 | msg = "Could not load docker python library; please install docker-py or docker library" 64 | module.fail_json(msg=msg) 65 | 66 | info, err = _get_docker_info() 67 | 68 | if err: 69 | module.fail_json(msg=err) 70 | 71 | module.exit_json( 72 | changed=True, 73 | ansible_facts={'docker_info': info}) 74 | 75 | 76 | from ansible.module_utils.basic import * 77 | if __name__ == '__main__': 78 | main() 79 | -------------------------------------------------------------------------------- /ansible/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | # Dependencies 18 | RUN runDeps=" \ 19 | python sudo bash ca-certificates iproute2 \ 20 | " \ 21 | && apt-get update && apt-get install -y --no-install-recommends $runDeps && rm -rf /var/lib/apt/lists/* 22 | 23 | # Docker does not support AUFS over AUFS (Docker-in-Docker) 24 | RUN mkdir -p /etc/docker \ 25 | && echo '{"storage-driver": "vfs"}' > /etc/docker/daemon.json 26 | -------------------------------------------------------------------------------- /ansible/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | options: 5 | role-file: ./requirements.yml 6 | driver: 7 | name: docker 8 | lint: 9 | name: yamllint 10 | platforms: 11 | - name: instance 12 | image: ubuntu:18.04 13 | privileged: True 14 | volumes: 15 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 16 | groups: 17 | - docker 18 | - dockerswarm_manager 19 | - test 20 | provisioner: 21 | name: ansible 22 | inventory: 23 | group_vars: 24 | docker: 25 | dockerswarm_iface: lo 26 | swarm_dev_ip: 127.0.0.1 27 | lint: 28 | name: ansible-lint 29 | verifier: 30 | name: ansible 31 | lint: 32 | name: ansible-lint 33 | -------------------------------------------------------------------------------- 
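Note: the molecule scenario configured above (ansible/molecule/default) can be run locally once the dev dependencies are installed (install-dependencies.yml pins molecule 2.22). A minimal sketch, assuming the commands are run from the ansible/ directory with Docker available locally; the exact invocation used by CI may differ:

    cd ansible
    molecule test        # full test sequence: lint, create, converge (playbook.yml), verify, destroy
    molecule converge    # only create the instance and apply all.yml, useful while iterating
    molecule verify      # re-run verify.yml against the running instance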
/ansible/molecule/default/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ../../all.yml 3 | -------------------------------------------------------------------------------- /ansible/molecule/default/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | tasks: 5 | - name: Check that traefik is deployed 6 | command: "docker service inspect traefik_traefik" 7 | changed_when: False 8 | 9 | - name: Check that example_app is deployed 10 | command: "docker service inspect example_app_helloworld" 11 | changed_when: False 12 | 13 | # Unfortunately this test does not work on Travis :-( 14 | # > "stderr_lines": ["* Trying 127.0.0.1...", "* TCP_NODELAY set", "* Connected to 127.0.0.1 (127.0.0.1) port 443 (#0)", "* ALPN, offering h2", "* ALPN, offering http/1.1", "* successfully set certificate verify locations:", "* CAfile: /etc/ssl/certs/ca-certificates.crt", " CApath: /etc/ssl/certs", "} [5 bytes data]", "* TLSv1.3 (OUT), TLS handshake, Client hello (1):", "} [512 bytes data]", "* OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to 127.0.0.1:443 ", "* Closing connection 0"] 15 | # > it may be because of the Docker-in-Docker-in-Docker... 16 | # > so we test traefik in HTTP instead, and container execution 17 | # 18 | # - name: Check that example_app is working 19 | # command: 'curl -k -I --fail --silent -H "Host: yourcompany.com" https://127.0.0.1/' 20 | # changed_when: False 21 | # args: 22 | # warn: False 23 | 24 | # This one neither :-( 25 | # > rc #56 26 | # 27 | # - name: Check that traefik is working 28 | # command: 'curl -I --fail --silent -H "Host: yourcompany.com" http://127.0.0.1/' 29 | # changed_when: False 30 | # args: 31 | # warn: False 32 | 33 | - name: Check that 1 traefik container is running 34 | shell: | 35 | set -o pipefail 36 | {% raw %}docker container ps --filter "label=com.docker.swarm.service.name=traefik_traefik" --format "{{.ID}}" | wc -l{% endraw %} # noqa 206 37 | register: nb_traefik_containers 38 | changed_when: False 39 | failed_when: nb_traefik_containers.stdout != "1" 40 | args: 41 | warn: False 42 | executable: /bin/bash 43 | 44 | - name: Check that 2 example_app containers are running 45 | shell: | 46 | set -o pipefail 47 | {% raw %}docker container ps --filter "label=com.docker.swarm.service.name=example_app_helloworld" --format "{{.ID}}" | wc -l{% endraw %} # noqa 206 48 | register: nb_example_app_containers 49 | changed_when: False 50 | failed_when: nb_example_app_containers.stdout != "2" 51 | args: 52 | warn: False 53 | executable: /bin/bash 54 | -------------------------------------------------------------------------------- /ansible/provision-00-common.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # provision-00-common playbook 4 | ## 5 | # Perform basic OS configuration 6 | ## 7 | 8 | # Ignore SSH host key checking on first run 9 | # Useful when VMs have just been created with Terraform or Vagrant and host keys 10 | # are not yet in your ~/.ssh/known_hosts file 11 | # 12 | # Vagrant and Remote environments 13 | - hosts: docker:!localhost:!test 14 | gather_facts: no 15 | tasks: 16 | - name: Check known_hosts for each hostname 17 | command: ssh-keygen -F {{ hostvars[inventory_hostname]['ansible_host'] }} 18 | delegate_to: localhost 19 | register: has_entry_in_known_hosts_file 20 | changed_when: false 21 | ignore_errors: yes 22 | 23 | - 
name: Ignore host key on first run 24 | when: has_entry_in_known_hosts_file.rc == 1 25 | set_fact: 26 | ansible_ssh_common_args: '-o StrictHostKeyChecking=no' 27 | 28 | 29 | # Basic OS config 30 | # 31 | # All environments except localhost 32 | - hosts: docker:!localhost:!test 33 | tasks: 34 | - name: Set timezone to Etc/UTC 35 | become: True 36 | timezone: 37 | name: Etc/UTC 38 | 39 | - name: Add SSH authorized_keys 40 | become: True 41 | authorized_key: 42 | user: "{{ ansible_user }}" 43 | key: "{{ item }}" 44 | with_items: "{{ docker_authorized_ssh_keys }}" 45 | 46 | - name: Install Pip 47 | become: True 48 | apt: 49 | name: python3-pip 50 | update_cache: true 51 | 52 | # ADD YOUR OWN TASKS HERE 53 | -------------------------------------------------------------------------------- /ansible/provision-01-docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # provision-01-docker playbook 4 | ## 5 | # Install Docker, Python, pip and pip packages required by Ansible 6 | ## 7 | 8 | - hosts: docker 9 | become: True 10 | roles: 11 | - role: geerlingguy.pip 12 | pip_executable: pip3 13 | vars: 14 | pip_install_packages: 15 | - name: jsondiff # needed by Ansible docker_stack module 16 | - name: pyyaml # needed by Ansible docker_stack module 17 | - name: docker 18 | # - name: docker-py # We do not use docker-py because it does not handle "docker config" correctly 19 | 20 | - role: geerlingguy.docker 21 | vars: 22 | docker_package_state: present 23 | docker_install_compose: False 24 | 25 | post_tasks: 26 | - name: Ensure current user is added to the docker group 27 | user: 28 | name: "{{ ansible_user }}" 29 | groups: docker 30 | append: yes 31 | register: user_status 32 | when: "'test' not in group_names" 33 | 34 | - hosts: docker:!localhost 35 | tasks: 36 | - name: Reload current user groups 37 | meta: reset_connection # reset connection so the new docker group is taken into account. Not available for localhost :( 38 | -------------------------------------------------------------------------------- /ansible/provision-02-docker-swarm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # provision-02-docker-swarm playbook 4 | ## 5 | # Set up the Swarm cluster and put it into the desired state 6 | # 7 | # Playbook inspired by https://github.com/nextrevision/ansible-swarm-playbook 8 | # Big thanks to John Patterson for his work! 
9 | ## 10 | 11 | # use dockerswarm_advertise_addr instead of iface when ethX has multiple IP addresses 12 | - hosts: docker 13 | tasks: 14 | - name: define dockerswarm_advertise_addr_string (defined) 15 | set_fact: 16 | dockerswarm_advertise_addr_string: "{{ dockerswarm_advertise_addr }}" 17 | when: dockerswarm_advertise_addr is defined 18 | - name: define dockerswarm_advertise_addr_string (not defined) 19 | set_fact: 20 | dockerswarm_advertise_addr_string: "{{ dockerswarm_iface | default('eth0') }}" 21 | when: dockerswarm_advertise_addr is not defined 22 | 23 | # determine the status of each manager node and break them 24 | # into two groups: 25 | # - dockerswarm_manager_operational (swarm is running and active) 26 | # - dockerswarm_manager_bootstrap (host needs to be joined to the cluster) 27 | - hosts: docker:&dockerswarm_manager 28 | tasks: 29 | - name: load docker info as facts 30 | docker_info_facts: 31 | changed_when: False 32 | 33 | - name: create dockerswarm_manager_operational group 34 | add_host: 35 | hostname: "{{ item }}" 36 | groups: dockerswarm_manager_operational 37 | with_items: "{{ ansible_play_hosts | default(play_hosts) }}" 38 | when: hostvars[item]['docker_info']['Swarm']['LocalNodeState'] == 'active' 39 | run_once: true 40 | changed_when: False 41 | 42 | - name: create dockerswarm_manager_bootstrap group 43 | add_host: 44 | hostname: "{{ item }}" 45 | groups: dockerswarm_manager_bootstrap 46 | with_items: "{{ ansible_play_hosts | default(play_hosts) }}" 47 | when: hostvars[item]['docker_info']['Swarm']['LocalNodeState'] != 'active' 48 | run_once: true 49 | changed_when: False 50 | 51 | # determine the status of each worker node and break them 52 | # into two groups: 53 | # - dockerswarm_worker_operational (host is joined to the swarm cluster) 54 | # - dockerswarm_worker_bootstrap (host needs to be joined to the cluster) 55 | - hosts: docker:&dockerswarm_worker 56 | tasks: 57 | - name: load docker info as facts 58 | docker_info_facts: 59 | changed_when: False 60 | 61 | - name: create dockerswarm_worker_operational group 62 | add_host: 63 | hostname: "{{ item }}" 64 | groups: dockerswarm_worker_operational 65 | with_items: "{{ ansible_play_hosts | default(play_hosts) }}" 66 | when: hostvars[item]['docker_info']['Swarm']['LocalNodeState'] == 'active' 67 | run_once: true 68 | changed_when: False 69 | 70 | - name: create dockerswarm_worker_bootstrap group 71 | add_host: 72 | hostname: "{{ item }}" 73 | groups: dockerswarm_worker_bootstrap 74 | with_items: "{{ ansible_play_hosts | default(play_hosts) }}" 75 | when: hostvars[item]['docker_info']['Swarm']['LocalNodeState'] != 'active' 76 | run_once: true 77 | changed_when: False 78 | 79 | # when the dockerswarm_manager_operational group is empty, meaning there 80 | # are no hosts running swarm, we need to initialize one of the hosts 81 | # then add it to the dockerswarm_manager_operational group 82 | - hosts: dockerswarm_manager_bootstrap[0] 83 | tasks: 84 | - name: initialize swarm cluster 85 | command: > 86 | docker swarm init 87 | --advertise-addr={{ dockerswarm_advertise_addr_string | default('eth0') }}:2377 88 | when: "'dockerswarm_manager_operational' not in groups" 89 | register: bootstrap_first_node 90 | 91 | - name: add initialized host to dockerswarm_manager_operational group # noqa 503 92 | add_host: 93 | hostname: "{{ item }}" 94 | groups: dockerswarm_manager_operational 95 | with_items: "{{ ansible_play_hosts | default(play_hosts) }}" 96 | when: bootstrap_first_node.changed 97 | 98 | # retrieve the swarm tokens and 
populate a list of ips listening on 99 | # the swarm port 2377 100 | - hosts: dockerswarm_manager_operational[0] 101 | vars: 102 | iface: "{{ dockerswarm_iface | default('eth0') }}" 103 | tasks: 104 | - name: retrieve swarm manager token 105 | command: docker swarm join-token -q manager 106 | register: dockerswarm_manager_token 107 | changed_when: False 108 | 109 | - name: retrieve swarm worker token 110 | command: docker swarm join-token -q worker 111 | register: dockerswarm_worker_token 112 | changed_when: False 113 | 114 | - name: populate list of manager ips from dockerswarm_advertise_addr 115 | add_host: 116 | hostname: "{{ dockerswarm_advertise_addr }}" 117 | groups: dockerswarm_manager_ips 118 | when: dockerswarm_advertise_addr is defined 119 | changed_when: False 120 | 121 | - name: populate list of manager ips from iface 122 | add_host: 123 | hostname: "{{ hostvars[item]['ansible_' + iface]['ipv4']['address'] }}" 124 | groups: dockerswarm_manager_ips 125 | when: dockerswarm_advertise_addr is not defined 126 | with_items: "{{ ansible_play_hosts | default(play_hosts) }}" 127 | changed_when: False 128 | 129 | # join the hosts not yet initialized to the swarm cluster 130 | - hosts: dockerswarm_manager_bootstrap:!dockerswarm_manager_operational 131 | vars: 132 | token: "{{ hostvars[groups['dockerswarm_manager_operational'][0]]['dockerswarm_manager_token']['stdout'] }}" 133 | tasks: 134 | - name: join manager nodes to cluster # noqa 301 135 | command: > 136 | docker swarm join 137 | --advertise-addr={{ dockerswarm_advertise_addr_string | default('eth0') }}:2377 138 | --token={{ token }} 139 | {{ groups['dockerswarm_manager_ips'][0] }}:2377 140 | # join the remaining workers to the swarm cluster 141 | - hosts: dockerswarm_worker_bootstrap 142 | vars: 143 | token: "{{ hostvars[groups['dockerswarm_manager_operational'][0]]['dockerswarm_worker_token']['stdout'] }}" 144 | tasks: 145 | - name: join worker nodes to cluster # noqa 301 146 | command: > 147 | docker swarm join 148 | --advertise-addr={{ dockerswarm_advertise_addr_string | default('eth0') }} 149 | --token={{ token }} 150 | {{ groups['dockerswarm_manager_ips'][0] }}:2377 151 | -------------------------------------------------------------------------------- /ansible/provision-03-dev.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # provision-03-dev playbook 4 | ## 5 | # Update /etc/hosts of local machine for localhost and vagrant environments 6 | ## 7 | 8 | - hosts: vagrant 9 | tasks: 10 | - name: "Update /etc/hosts on VMs" 11 | become: True 12 | lineinfile: 13 | path: /etc/hosts 14 | line: "{{ swarm_dev_ip }} {{ item.value }}" 15 | with_dict: "{{ domains }}" 16 | 17 | - hosts: dev 18 | tags: 19 | - etc_hosts 20 | tasks: 21 | - name: "Update /etc/hosts on host" 22 | become: True 23 | lineinfile: 24 | path: /etc/hosts 25 | line: "{{ swarm_dev_ip }} {{ item.value }}" 26 | with_dict: "{{ domains }}" 27 | delegate_to: localhost 28 | run_once: True 29 | 30 | # ADD YOUR OWN DEV TASKS HERE 31 | -------------------------------------------------------------------------------- /ansible/provision.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### 3 | # provision playbook 4 | ## 5 | 6 | - import_playbook: provision-00-common.yml 7 | tags: [common] 8 | 9 | - import_playbook: provision-01-docker.yml 10 | tags: [docker] 11 | 12 | - import_playbook: provision-02-docker-swarm.yml 13 | tags: [docker-swarm] 14 | 15 | - import_playbook: 
provision-03-dev.yml 16 | tags: [dev] 17 | 18 | # ADD YOUR CUSTOM PROVISIONING PLAYBOOKS HERE 19 | -------------------------------------------------------------------------------- /ansible/requirements.yml: -------------------------------------------------------------------------------- 1 | ### 2 | # ansible-galaxy requirements 3 | ## 4 | # Automatically installed before each T.A.D.S. Ansible command 5 | ## 6 | 7 | - src: geerlingguy.pip 8 | version: 2.1.0 9 | 10 | - src: geerlingguy.docker 11 | version: 3.1.2 12 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | # Based on ansible-lint config 3 | extends: default 4 | 5 | rules: 6 | braces: 7 | max-spaces-inside: 1 8 | level: error 9 | brackets: 10 | max-spaces-inside: 1 11 | level: error 12 | colons: 13 | max-spaces-after: -1 14 | level: error 15 | commas: 16 | max-spaces-after: -1 17 | level: error 18 | comments: disable 19 | comments-indentation: disable 20 | document-start: disable 21 | empty-lines: 22 | max: 3 23 | level: error 24 | hyphens: 25 | level: error 26 | indentation: disable 27 | key-duplicates: enable 28 | line-length: disable 29 | new-line-at-end-of-file: disable 30 | new-lines: 31 | type: unix 32 | trailing-spaces: disable 33 | truthy: disable 34 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## 3 | # docker-stack role defaults 4 | ## 5 | 6 | docker_stack_directory_local_path: "stacks/{{ docker_stack_name }}" 7 | docker_stack_yaml_local_path: "{{ docker_stack_directory_local_path }}/{{ docker_stack_name }}.yml.j2" 8 | docker_stack_resources_directory_name: "resources" 9 | docker_stack_resources_local_path: "{{ docker_stack_directory_local_path }}/{{ docker_stack_resources_directory_name }}" 10 | docker_stack_secrets: {} 11 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/Dockerfile.j2: -------------------------------------------------------------------------------- 1 | # Molecule managed 2 | 3 | {% if item.registry is defined %} 4 | FROM {{ item.registry.url }}/{{ item.image }} 5 | {% else %} 6 | FROM {{ item.image }} 7 | {% endif %} 8 | 9 | {% if item.env is defined %} 10 | {% for var, value in item.env.items() %} 11 | {% if value %} 12 | ENV {{ var }} {{ value }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endif %} 16 | 17 | # Dependencies 18 | RUN runDeps=" \ 19 | python sudo bash ca-certificates iproute2 \ 20 | python-pip \ 21 | apt-transport-https curl software-properties-common gnupg-agent \ 22 | " \ 23 | && apt-get update && apt-get install -y --no-install-recommends $runDeps && rm -rf /var/lib/apt/lists/* 24 | 25 | # Docker 26 | RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ 27 | && add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" \ 28 | && apt-get update && apt-get install -y --no-install-recommends docker-ce && rm -rf /var/lib/apt/lists/* 29 | 30 | # Docker does not support AUFS over AUFS (Docker-in-Docker) 31 | RUN mkdir -p /etc/docker \ 32 | && echo '{"storage-driver": "vfs"}' > /etc/docker/daemon.json 33 | 34 | # Ansible docker_stack module dependencies 35 | RUN pip install --upgrade setuptools \ 36 | && pip 
install jsondiff pyyaml docker 37 | 38 | CMD /usr/bin/dockerd 39 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: docker 6 | lint: 7 | name: yamllint 8 | platforms: 9 | - name: instance 10 | image: ubuntu:18.04 11 | command: /usr/bin/dockerd 12 | privileged: True 13 | volumes: 14 | - /sys/fs/cgroup:/sys/fs/cgroup:ro 15 | provisioner: 16 | name: ansible 17 | lint: 18 | name: ansible-lint 19 | verifier: 20 | name: ansible 21 | lint: 22 | name: ansible-lint 23 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | roles: 5 | - role: docker-stack 6 | vars: 7 | docker_stack_name: simple_stack 8 | 9 | - role: docker-stack 10 | vars: 11 | docker_stack_name: stack_with_resources 12 | 13 | - role: docker-stack 14 | vars: 15 | docker_stack_name: stack_with_secrets 16 | docker_stack_secrets: 17 | - name: secret1 18 | data: "test" 19 | - name: secret2 20 | data: "test" 21 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare 3 | hosts: all 4 | tasks: 5 | - name: Test if Swarm is initialized 6 | command: docker node ls 7 | ignore_errors: True 8 | register: swarm_status 9 | 10 | - name: Initialize Swarm cluster 11 | command: > 12 | docker swarm init 13 | --advertise-addr=127.0.0.1:2377 14 | when: swarm_status.rc != 0 15 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/stacks/simple_stack/simple_stack.yml.j2: -------------------------------------------------------------------------------- 1 | version: "3.6" 2 | services: 3 | helloworld: 4 | image: nginxdemos/hello:0.2 5 | deploy: 6 | mode: replicated 7 | replicas: 1 8 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/stacks/stack_with_resources/resources/test.txt: -------------------------------------------------------------------------------- 1 | HelloWorld! 
2 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/stacks/stack_with_resources/stack_with_resources.yml.j2: -------------------------------------------------------------------------------- 1 | version: "3.6" 2 | 3 | configs: 4 | example_app_config: 5 | file: ./resources/test.txt 6 | 7 | services: 8 | helloworld: 9 | image: nginxdemos/hello:0.2 10 | configs: 11 | - example_app_config 12 | deploy: 13 | mode: replicated 14 | replicas: 1 15 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/stacks/stack_with_secrets/stack_with_secrets.yml.j2: -------------------------------------------------------------------------------- 1 | version: "3.6" 2 | secrets: 3 | secret1: 4 | external: true 5 | secret2: 6 | external: true 7 | 8 | services: 9 | helloworld: 10 | image: nginxdemos/hello:0.2 11 | secrets: 12 | - secret1 13 | - secret2 14 | deploy: 15 | mode: replicated 16 | replicas: 1 17 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/molecule/default/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | tasks: 5 | - name: Check that simple_stack is deployed 6 | command: "docker service inspect simple_stack_helloworld" 7 | changed_when: False 8 | 9 | - name: Check that stack_with_secrets is deployed 10 | command: "docker service inspect stack_with_secrets_helloworld" 11 | changed_when: False 12 | 13 | - name: Check that secrets 1 and 2 exist 14 | command: "docker secret inspect {{ item }}" 15 | with_items: 16 | - secret1 17 | - secret2 18 | changed_when: False 19 | 20 | - name: Check that stack_with_resources is deployed 21 | command: "docker service inspect stack_with_resources_helloworld" 22 | changed_when: False 23 | 24 | - name: Check that example_app_config exists 25 | command: "docker config inspect stack_with_resources_example_app_config" 26 | changed_when: False 27 | -------------------------------------------------------------------------------- /ansible/roles/docker-stack/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ## 3 | # docker-stack role tasks 4 | ## 5 | 6 | - name: Create secrets 7 | docker_secret: 8 | name: "{{ item.name }}" 9 | data: "{{ item.data }}" 10 | state: present 11 | with_items: "{{ docker_stack_secrets }}" 12 | no_log: True 13 | 14 | - name: Create stack configuration directory 15 | file: 16 | path: "~/.tads/stacks/{{ docker_stack_name }}" 17 | state: directory 18 | recurse: yes 19 | 20 | - name: Copy stack YAML 21 | template: 22 | src: "{{ docker_stack_yaml_local_path }}" 23 | dest: "~/.tads/stacks/{{ docker_stack_name }}/{{ docker_stack_name }}.yml" 24 | 25 | - name: Test stack resources existence 26 | stat: 27 | path: "{{ docker_stack_resources_local_path }}" 28 | delegate_to: localhost 29 | register: stat_result 30 | 31 | - name: Copy stack resources 32 | copy: 33 | src: "{{ docker_stack_resources_local_path }}" 34 | dest: "~/.tads/stacks/{{ docker_stack_name }}" 35 | when: stat_result.stat.exists 36 | 37 | - name: Deploy stack 38 | docker_stack: 39 | state: present 40 | name: "{{ docker_stack_name }}" 41 | compose: 42 | - "~/.tads/stacks/{{ docker_stack_name }}/{{ docker_stack_name }}.yml" 43 | register: stack_deploy 44 | 45 | - debug: 46 | var: stack_deploy.stack_spec_diff # noqa 503 47 | when: 
stack_deploy.changed 48 | -------------------------------------------------------------------------------- /ansible/stacks/example_app/example_app.yml.j2: -------------------------------------------------------------------------------- 1 | ## 2 | # example_app stack 3 | ## 4 | # This file is a Jinja2 template 5 | # Which means you can use variables defined in group_vars here 6 | # @see https://docs.ansible.com/ansible/latest/user_guide/playbooks_templating.html 7 | ## 8 | version: "3.6" 9 | 10 | networks: 11 | apps: 12 | driver: overlay 13 | external: true # this network is created by Ansible, because it is shared by multiple stacks 14 | 15 | secrets: 16 | example_app_password-v1: 17 | external: true # this secret is created by Ansible 18 | # tip: if you update secret content, you should increment the version in its name 19 | # because Ansible cannot update a secret 20 | 21 | configs: 22 | example_app_config: 23 | file: ./resources/example.txt # all the files in the resources/ directory are copied and available here 24 | 25 | services: 26 | # HELLOWORLD 27 | helloworld: 28 | image: nginxdemos/hello:0.2 # always use tags! 29 | environment: 30 | EXAMPLE_USERNAME: {{ credentials.example_app.username }} # it's easy to set environment vars from group_vars 31 | EXAMPLE_PASSWORD_FILE: /run/secrets/example_app_password-v1 32 | networks: 33 | - apps 34 | secrets: 35 | - example_app_password-v1 36 | configs: 37 | - example_app_config 38 | {% if dev_bindmounts.example_app is defined %} 39 | volumes: 40 | - {{ dev_bindmounts.example_app }}:/usr/share/nginx/html # dev bind mounts can be set in ansible/group_vars/localhost_bindmounts.yml 41 | {% endif %} 42 | deploy: 43 | mode: replicated 44 | replicas: 2 45 | labels: 46 | - traefik.enable=true 47 | - traefik.port=80 48 | - traefik.frontend.rule=Host:{{ domains.main }},{{ domains.main_www }} 49 | - traefik.docker.network=apps 50 | - traefik.frontend.entryPoints=http,https 51 | - traefik.backend.loadbalancer.swarm=true 52 | - traefik.frontend.redirect.regex=^https?://{{ domains.main_www }}/(.*) 53 | - traefik.frontend.redirect.replacement=https://{{ domains.main }}/$${1} 54 | resources: 55 | limits: 56 | cpus: '0.20' 57 | memory: 30M 58 | -------------------------------------------------------------------------------- /ansible/stacks/example_app/resources/example.txt: -------------------------------------------------------------------------------- 1 | All the files of this directory will be copied next to the Docker stack YAML file 2 | So they are available 3 | -------------------------------------------------------------------------------- /ansible/stacks/traefik/traefik.yml.j2: -------------------------------------------------------------------------------- 1 | ## 2 | # traefik stack 3 | ## 4 | # This file is a Jinja2 template 5 | # Which means you can use variables defined in group_vars here 6 | # @see https://docs.ansible.com/ansible/latest/user_guide/playbooks_templating.html 7 | ## 8 | version: "3.6" 9 | 10 | networks: 11 | traefik: 12 | driver: overlay 13 | apps: 14 | driver: overlay 15 | external: true # this network is created by Ansible, because it is shared by multiple stacks 16 | 17 | services: 18 | # TRAEFIK 19 | # Reverse proxy deployed on each node (global) 20 | traefik: 21 | image: traefik:v1.7.4 22 | command: 23 | - --api 24 | - --docker 25 | - --docker.swarmMode 26 | - --docker.domain={{ domains.main }} # variable from group_vars 27 | - --docker.watch 28 | - --docker.exposedByDefault=false 29 | - --logLevel=ERROR 30 | - 
--api.dashboard=false 31 | - --api.entryPoint=traefik 32 | - --entrypoints=Name:http Address::80 Redirect.EntryPoint:https 33 | - --entrypoints=Name:https Address::443 TLS 34 | - --accessLog 35 | 36 | {% if letsencrypt %} # you can also use conditional statements 37 | - --acme 38 | - --acme.acmeLogging 39 | - --acme.storage=traefik/acme/account 40 | - --acme.entryPoint=https 41 | - --acme.email=admin@{{ domains.main }} 42 | - --acme.httpchallenge.entrypoint=http # you should use a DNS challenge for wildcard certs (https://docs.traefik.io/https/acme/) 43 | - --acme.domains={{ domains.main }},{{ domains.main_www }} 44 | {% endif %} 45 | 46 | - --defaultentrypoints=http,https 47 | - --consul 48 | - --consul.prefix=traefik 49 | - --consul.endpoint=traefik_consul:8500 50 | volumes: 51 | - /var/run/docker.sock:/var/run/docker.sock 52 | - /dev/null:/traefik.toml 53 | dns: 8.8.8.8 54 | networks: 55 | - traefik 56 | - apps 57 | ports: 58 | - target: 80 59 | published: 80 60 | mode: host 61 | - target: 443 62 | published: 443 63 | mode: host 64 | deploy: 65 | mode: global 66 | placement: 67 | constraints: 68 | - node.role == manager 69 | resources: 70 | limits: 71 | cpus: '0.20' 72 | memory: 30M 73 | reservations: 74 | cpus: '0.10' 75 | memory: 20M 76 | 77 | # CONSUL 78 | # Used by Traefik to store its state 79 | traefik_consul: 80 | image: progrium/consul 81 | command: -server -bootstrap -ui-dir /ui 82 | networks: 83 | - traefik 84 | deploy: 85 | mode: replicated 86 | replicas: 1 87 | resources: 88 | limits: 89 | cpus: '0.20' 90 | memory: 30M 91 | -------------------------------------------------------------------------------- /ansible/vault_keys/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | !.gitignore 3 | -------------------------------------------------------------------------------- /scripts/commands/ansible-playbook.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. ansible-playbook command 3 | # 4 | # Usage: ./tads ansible-playbook ENVIRONMENT PLAYBOOK [ANSIBLE OPTIONS] 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." && pwd)" 12 | 13 | # shellcheck source=scripts/includes/common.sh 14 | source "${SELF_PATH}/../includes/common.sh" 15 | 16 | # shellcheck source=scripts/includes/ansible.sh 17 | source "${SELF_PATH}/../includes/ansible.sh" 18 | 19 | usage() { 20 | local cmd="./tads" 21 | 22 | local environments 23 | environments="$(get_ansible_remote_environments | awk '{ print " " $1 }')" 24 | 25 | cat <<- EOF 26 | 27 | Usage: ${cmd} ansible-playbook ENVIRONMENT PLAYBOOK [ANSIBLE OPTIONS] 28 | 29 | Use Ansible to execute a playbook on your VMs. 
30 | 31 | ANSIBLE OPTIONS: 32 | -C, --check Don't make any changes; instead, 33 | try to predict some of the changes that may occur 34 | -l SUBSET, --limit=SUBSET 35 | Further limit selected hosts to an additional pattern 36 | --list-hosts Outputs a list of matching hosts; does not execute 37 | --list-tags List all available tags 38 | --list-tasks List all tasks that would be executed 39 | --skip-tags=SKIP_TAGS 40 | Only run plays and tasks whose tags do not match these values 41 | --step One-step-at-a-time: confirm each task before running 42 | --syntax-check Perform a syntax check on the playbook, but do not execute it 43 | -t TAGS, --tags=TAGS 44 | Only run plays and tasks tagged with these values 45 | 46 | To list other Ansible options, run: ansible-playbook --help 47 | 48 | PLAYBOOKS: 49 | provision Provision your VMs: configure hosts, install Docker, set up Docker Swarm 50 | deploy Deploy your applicative stacks on the Swarm 51 | all Provision and deploy in a single command 52 | 53 | ENVIRONMENTS: 54 | localhost Your local machine, for development 55 | vagrant A local Docker Swarm cluster, made of Vagrant VMs, for testing 56 | ${environments} 57 | 58 | EOF 59 | exit 1 60 | } 61 | 62 | install_ansible_roles () { 63 | ansible-galaxy role install -r "${ROOT_PATH}/ansible/requirements.yml" 64 | } 65 | 66 | main () { 67 | local environment="${1:-}" 68 | local playbook="${2:-}" 69 | 70 | if [[ -z "${playbook}" ]]; then 71 | usage 72 | fi 73 | 74 | case "${environment}" in 75 | localhost) 76 | shift 2 77 | check_ansible 78 | install_ansible_roles 79 | # shellcheck source=scripts/includes/localhost_ansible.sh 80 | source "${SELF_PATH}/../includes/localhost_ansible.sh" 81 | if [[ "${playbook}" == "provision" || "${playbook}" == "all" ]]; then 82 | echo "You will be asked by Ansible to enter your account password to perform SUDO operations..." 83 | localhost_ansible_playbook "${ROOT_PATH}/ansible/${playbook}.yml" --ask-become-pass "$@" 84 | else 85 | localhost_ansible_playbook "${ROOT_PATH}/ansible/${playbook}.yml" "$@" 86 | fi 87 | ;; 88 | vagrant) 89 | shift 2 90 | check_ansible 91 | install_ansible_roles 92 | # shellcheck source=scripts/includes/vagrant_ansible.sh 93 | source "${SELF_PATH}/../includes/vagrant_ansible.sh" 94 | if [[ "${playbook}" == "provision" || "${playbook}" == "all" ]]; then 95 | echo "You will be asked by Ansible to enter your account password to perform SUDO operations..." 96 | vagrant_ansible_playbook "${ROOT_PATH}/ansible/${playbook}.yml" --ask-become-pass "$@" 97 | else 98 | vagrant_ansible_playbook "${ROOT_PATH}/ansible/${playbook}.yml" "$@" 99 | fi 100 | ;; 101 | "") 102 | usage 103 | ;; 104 | *) 105 | shift 2 106 | check_ansible 107 | install_ansible_roles 108 | # shellcheck source=scripts/includes/remote_ansible.sh 109 | source "${SELF_PATH}/../includes/remote_ansible.sh" 110 | remote_ansible_playbook "${environment}" "${ROOT_PATH}/ansible/${playbook}.yml" "$@" 111 | ;; 112 | esac 113 | } 114 | 115 | main "$@" 116 | -------------------------------------------------------------------------------- /scripts/commands/ansible-vault.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. ansible-vault command 3 | # 4 | # Usage: ./tads ansible-vault ENVIRONMENT COMMAND 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." 
&& pwd)" 12 | 13 | # shellcheck source=scripts/includes/common.sh 14 | source "${SELF_PATH}/../includes/common.sh" 15 | 16 | # shellcheck source=scripts/includes/ansible.sh 17 | source "${SELF_PATH}/../includes/ansible.sh" 18 | 19 | usage() { 20 | local cmd="./tads" 21 | 22 | local environments 23 | environments="$(get_ansible_remote_environments | awk '{ print " " $1 }')" 24 | 25 | cat <<- EOF 26 | 27 | Usage: ${cmd} ansible-vault ENVIRONMENT COMMAND 28 | 29 | Use ansible-vault to encrypt your sensitive files, e.g. ansible/group_vars/production_encrypted.yml 30 | 31 | COMMANDS: 32 | init-key Generate a random key into ansible/vault_keys/ENVIRONMENT; 33 | should be executed only at the beginning of the project 34 | create Create a new encrypted file 35 | encrypt Encrypt the given file 36 | decrypt Decrypt the given file 37 | edit Edit the given file in place 38 | view View the given file 39 | ... ... 40 | 41 | To list other ansible-vault commands and options, run: ansible-vault --help 42 | 43 | ENVIRONMENTS: 44 | ${environments} 45 | 46 | EOF 47 | exit 1 48 | } 49 | 50 | init_ansible_vault_key () { 51 | local environment="$1" 52 | local key_path="${ROOT_PATH}/ansible/vault_keys/${environment}" 53 | 54 | if [[ -f "${key_path}" ]]; then 55 | echo_red "${key_path} already exists. Abort." 56 | exit 1 57 | fi 58 | 59 | local key 60 | key="$(LC_ALL=C tr -dc 'A-Za-z0-9!"#$%&'\''()*+,-./:;<=>?@[\]^_`{|}~' < /dev/urandom | head -c 64 || true)" 61 | echo "${key}" > "${key_path}" 62 | 63 | echo "A new key has been generated in ${key_path}" 64 | echo " - keep this key secret" 65 | echo " - do not commit it" 66 | echo " - share it securely with your other authorized team members" 67 | echo " - do not lose it, or you will not be able to decrypt your files!" 68 | } 69 | 70 | ansible_vault_cmd() { 71 | local environment="$1" 72 | shift 73 | local key_path="${ROOT_PATH}/ansible/vault_keys/${environment}" 74 | 75 | if [[ ! -f "${key_path}" ]]; then 76 | echo_red "Vault key not found for ENVIRONMENT: ${environment}" 77 | echo_red "If it's a new project, run this command to create it: ./tads ansible-vault ${environment} init-key" 78 | echo_red "Otherwise, please create ${key_path} and paste the key in it" 79 | exit 1 80 | fi 81 | 82 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 83 | ansible-vault "$@" --vault-id "${environment}@${key_path}" 84 | set +x 85 | } 86 | 87 | main () { 88 | if [[ "$#" -lt 2 ]]; then 89 | usage 90 | fi 91 | 92 | check_ansible 93 | 94 | local environment="$1" 95 | local command="$2" 96 | 97 | case "${command}" in 98 | "init-key") 99 | init_ansible_vault_key "${environment}" 100 | ;; 101 | *) 102 | ansible_vault_cmd "$@" 103 | ;; 104 | esac 105 | } 106 | 107 | main "$@" 108 | -------------------------------------------------------------------------------- /scripts/commands/ansible.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. ansible command 3 | # 4 | # Usage: ./tads ansible ENVIRONMENT COMMAND 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." 
&& pwd)" 12 | 13 | # shellcheck source=scripts/includes/common.sh 14 | source "${SELF_PATH}/../includes/common.sh" 15 | 16 | # shellcheck source=scripts/includes/ansible.sh 17 | source "${SELF_PATH}/../includes/ansible.sh" 18 | 19 | usage() { 20 | local cmd="./tads" 21 | 22 | local environments 23 | environments="$(get_ansible_remote_environments | awk '{ print " " $1 }')" 24 | 25 | cat <<- EOF 26 | 27 | Usage: ${cmd} ansible ENVIRONMENT 28 | 29 | Execute a custom Ansible module on your VMs 30 | 31 | Examples: 32 | ${cmd} ansible vagrant docker --become -m apt -a "update_cache=yes upgrade=safe" 33 | ${cmd} ansible vagrant docker -m shell -a "echo 'test' > /tmp/test" 34 | 35 | To get some help, run: ansible --help 36 | 37 | ENVIRONMENTS: 38 | localhost Your local machine, for development 39 | vagrant A local Docker Swarm cluster, made of Vagrant VMs, for testing 40 | ${environments} 41 | 42 | EOF 43 | exit 1 44 | } 45 | 46 | main () { 47 | local environment="${1:-}" 48 | 49 | case "${environment}" in 50 | localhost) 51 | shift 52 | check_ansible 53 | # shellcheck source=scripts/includes/localhost_ansible.sh 54 | source "${SELF_PATH}/../includes/localhost_ansible.sh" 55 | localhost_ansible "$@" 56 | ;; 57 | vagrant) 58 | shift 59 | check_ansible 60 | # shellcheck source=scripts/includes/vagrant_ansible.sh 61 | source "${SELF_PATH}/../includes/vagrant_ansible.sh" 62 | vagrant_ansible "$@" 63 | ;; 64 | "") 65 | usage 66 | ;; 67 | *) 68 | shift 69 | check_ansible 70 | # shellcheck source=scripts/includes/remote_ansible.sh 71 | source "${SELF_PATH}/../includes/remote_ansible.sh" 72 | remote_ansible "${environment}" "$@" 73 | ;; 74 | esac 75 | } 76 | 77 | main "$@" 78 | -------------------------------------------------------------------------------- /scripts/commands/install-dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. install-dependencies command 3 | # 4 | # Usage: ./tads install-dependencies 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." && pwd)" 12 | 13 | # shellcheck source=scripts/includes/common.sh 14 | source "${SELF_PATH}/../includes/common.sh" 15 | 16 | # shellcheck source=scripts/includes/localhost_ansible.sh 17 | source "${SELF_PATH}/../includes/localhost_ansible.sh" 18 | 19 | main () { 20 | local no_password=false 21 | local force=false 22 | local dev_dependencies=false 23 | 24 | local options 25 | options=$(getopt --longoptions no-password,force,dev, --options "" -- "$@") 26 | 27 | eval set -- "$options" 28 | while true; do 29 | echo "$1" 30 | case "$1" in 31 | --no-password) 32 | no_password=true 33 | ;; 34 | --force) 35 | force=true 36 | ;; 37 | --dev) 38 | dev_dependencies=true 39 | ;; 40 | --) 41 | shift 42 | break 43 | ;; 44 | esac 45 | shift 46 | done 47 | 48 | echo "This script will install the following dependencies on your local machine using apt-get:" 49 | echo " - Ansible" 50 | echo " - Vagrant and Virtualbox" 51 | echo " - Terraform" 52 | echo "" 53 | 54 | if [[ ! "${force}" == true ]]; then 55 | local response 56 | read -r -p "Are you sure? [y/N] " response 57 | if [[ ! "${response}" =~ ^([yY][eE][sS]|[yY])$ ]]; then 58 | echo "Aborted." 59 | exit 60 | fi 61 | fi 62 | 63 | echo "Installing Ansible..." 64 | if ! 
command -v ansible > /dev/null; then 65 | if command -v apt-get > /dev/null; then 66 | echo "Your SUDO password may be asked" 67 | 68 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 69 | sudo apt-get update \ 70 | && sudo apt-get --yes install software-properties-common \ 71 | && sudo apt-add-repository --yes --update ppa:ansible/ansible \ 72 | && sudo apt-get --yes install ansible 73 | set +x 74 | else 75 | echo "Unable to work out how to install Ansible. Either install it first, manually," 76 | echo "or update the ${SELF_PATH}/${SELF_NAME} script" 77 | echo "to support installing automatically on your OS." 78 | fi 79 | else 80 | echo "Ansible is already installed. Skipping" 81 | fi 82 | 83 | echo "Installing Vagrant, Virtualbox and Terraform..." 84 | 85 | local playbook_skip_tags="dev_dependencies" 86 | [[ "${dev_dependencies}" == true ]] && playbook_skip_tags="" 87 | 88 | if [[ "${no_password}" == true ]]; then 89 | localhost_ansible_playbook "${ROOT_PATH}/ansible/install-dependencies.yml" --skip-tags "${playbook_skip_tags}" 90 | else 91 | echo "Your SUDO password will be asked" 92 | localhost_ansible_playbook "${ROOT_PATH}/ansible/install-dependencies.yml" --skip-tags "${playbook_skip_tags}" --ask-become-pass 93 | fi 94 | 95 | echo "Finished!" 96 | } 97 | 98 | main "$@" 99 | -------------------------------------------------------------------------------- /scripts/commands/terraform.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. terraform command 3 | # 4 | # Usage: ./tads terraform ENVIRONMENT COMMAND 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." && pwd)" 12 | 13 | readonly TADS_MIN_TERRAFORM_VERSION="0.12" 14 | 15 | # shellcheck source=scripts/includes/common.sh 16 | source "${SELF_PATH}/../includes/common.sh" 17 | 18 | usage() { 19 | local cmd="./tads" 20 | 21 | local environments 22 | # shellcheck disable=SC2012 23 | environments="$(ls -1 "${ROOT_PATH}/terraform/environments" | awk '{ print " " $1 }')" 24 | 25 | cat <<- EOF 26 | 27 | Usage: ${cmd} terraform ENVIRONMENT COMMAND 28 | 29 | Use Terraform to create VMs on your cloud provider 30 | 31 | COMMANDS: 32 | init Init Terraform environment 33 | apply Apply Terraform changes 34 | gen-ansible-inventory Force the corresponding Ansible inventory generation; 35 | this is done automatically after each "apply" command 36 | ... ... 37 | 38 | To list other Terraform commands and options, run: terraform -help 39 | 40 | ENVIRONMENTS: 41 | ${environments} 42 | 43 | 44 | EOF 45 | exit 1 46 | } 47 | 48 | terraform_cmd() { 49 | if ! command -v terraform > /dev/null; then 50 | echo_red "Terraform must be installed on your local machine. Please referer to README.md to see how." 51 | exit 1 52 | fi 53 | 54 | local current_terraform_version 55 | current_terraform_version="$(terraform --version | head -n1 | cut -d " " -f2 | cut -c2- || true)" 56 | 57 | if ! is_version_gte "${current_terraform_version}" "${TADS_MIN_TERRAFORM_VERSION}"; then 58 | echo_red "Your Terraform version (${current_terraform_version}) is not supported by T.A.D.S." 59 | echo_red "Please upgrade it to at least version ${TADS_MIN_TERRAFORM_VERSION}" 60 | exit 1 61 | fi 62 | 63 | local environment="$1" 64 | shift 65 | if [[ ! 
-d "${ROOT_PATH}/terraform/environments/${environment}" ]]; then 66 | echo_red "Terraform ENVIRONMENT does not exist: ${environment}" 67 | exit 1 68 | fi 69 | 70 | local command="$1" 71 | 72 | if [[ ! "${command}" == "gen-ansible-inventory" ]]; then 73 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 74 | (cd "${ROOT_PATH}/terraform/environments/${environment}"; terraform "$@") 75 | set +x 76 | fi 77 | 78 | if [[ "${command}" == "apply" || "${command}" == "gen-ansible-inventory" ]]; then 79 | gen_ansible_inventory_from_terraform "${environment}" 80 | fi 81 | } 82 | 83 | gen_ansible_inventory_from_terraform () { 84 | local environment="$1" 85 | local inventory_path="${ROOT_PATH}/ansible/inventories/${environment}" 86 | 87 | echo "Generating Ansible inventory from Terraform outputs..." 88 | 89 | local ssh_user 90 | local manager_ips 91 | local worker_ips 92 | ssh_user="$(terraform_cmd "${environment}" output ssh_user)" 93 | manager_ips="$(terraform_cmd "${environment}" output -json manager_ips | jq -r '.[]')" 94 | worker_ips="$(terraform_cmd "${environment}" output -json worker_ips 2>/dev/null | jq -r '.[]')" 95 | 96 | echo "# Inventory file for ${environment} environment" > "${inventory_path}" 97 | { 98 | echo "# Automatically generated by ./tads terraform" 99 | echo "" 100 | echo "# Manager nodes" 101 | } >> "${inventory_path}" 102 | 103 | local manager_index=1 104 | for manager_ip in ${manager_ips}; do 105 | echo "manager-${manager_index} ansible_user=${ssh_user} ansible_host=${manager_ip}" >> "${inventory_path}" 106 | manager_index=$((manager_index+1)) 107 | done 108 | 109 | { 110 | echo "" 111 | echo "# Worker nodes" 112 | } >> "${inventory_path}" 113 | 114 | local worker_index=1 115 | for worker_ip in ${worker_ips}; do 116 | echo "worker-${worker_index} ansible_user=${ssh_user} ansible_host=${worker_ip}" >> "${inventory_path}" 117 | worker_index=$((worker_index+1)) 118 | done 119 | 120 | local manager_nodes="manager-[1:$((manager_index-1))]" 121 | local worker_nodes 122 | worker_nodes=$([[ -n ${worker_ips} ]] && echo "worker-[1:$((worker_index-1))]") 123 | 124 | cat <<EOT >> "${inventory_path}" 125 | 126 | [${environment}] 127 | ${manager_nodes} 128 | ${worker_nodes} 129 | 130 | [docker:children] 131 | ${environment} 132 | 133 | [${environment}_encrypted:children] 134 | ${environment} 135 | 136 | [dockerswarm_manager] 137 | ${manager_nodes} 138 | 139 | [dockerswarm_worker] 140 | ${worker_nodes} 141 | 142 | [docker:vars] 143 | dockerswarm_iface=eth0 144 | EOT 145 | 146 | echo "Inventory file generated: ${inventory_path}" 147 | } 148 | 149 | main () { 150 | if [[ "$#" -lt 2 ]]; then 151 | usage 152 | fi 153 | 154 | terraform_cmd "$@" 155 | } 156 | 157 | main "$@" 158 | -------------------------------------------------------------------------------- /scripts/commands/vagrant.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. vagrant command 3 | # 4 | # Usage: ./tads vagrant COMMAND 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." 
&& pwd)" 12 | 13 | readonly TADS_MIN_VAGRANT_VERSION="2.0" 14 | 15 | # shellcheck source=scripts/includes/common.sh 16 | source "${SELF_PATH}/../includes/common.sh" 17 | 18 | usage() { 19 | local cmd="./tads" 20 | 21 | cat <<- EOF 22 | 23 | Usage: ${cmd} vagrant COMMAND 24 | 25 | Use Vagrant to create VMs locally for testing purposes 26 | 27 | COMMANDS: 28 | up Create the VMs 29 | destroy Destroy the VMs 30 | ssh SSH into a VM 31 | ... ... 32 | 33 | To list other Vagrant commands and options, run: vagrant --help 34 | 35 | 36 | EOF 37 | exit 1 38 | } 39 | 40 | vagrant_cmd() { 41 | if ! command -v vagrant > /dev/null; then 42 | echo_red "Vagrant must be installed on your local machine. Please refer to README.md to see how." 43 | exit 1 44 | fi 45 | 46 | local current_vagrant_version 47 | current_vagrant_version="$(vagrant --version | head -n1 | cut -d " " -f2 || true)" 48 | 49 | if ! is_version_gte "${current_vagrant_version}" "${TADS_MIN_VAGRANT_VERSION}"; then 50 | echo_red "Your Vagrant version (${current_vagrant_version}) is not supported by T.A.D.S." 51 | echo_red "Please upgrade it to at least version ${TADS_MIN_VAGRANT_VERSION}" 52 | exit 1 53 | fi 54 | 55 | if ! command -v vboxmanage > /dev/null; then 56 | echo_red "VirtualBox must be installed on your local machine. Please refer to README.md to see how." 57 | exit 1 58 | fi 59 | 60 | if [[ ! -f "${ROOT_PATH}/vagrant/vagrant.yml" ]]; then 61 | echo_red "Please copy vagrant/vagrant.sample.yml to vagrant/vagrant.yml and edit it first!" 62 | exit 1 63 | fi 64 | 65 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 66 | (cd "${ROOT_PATH}/vagrant"; vagrant "$@") 67 | set +x 68 | } 69 | 70 | main () { 71 | if [[ "$#" -lt 1 ]]; then 72 | usage 73 | fi 74 | 75 | vagrant_cmd "$@" 76 | } 77 | 78 | main "$@" 79 | -------------------------------------------------------------------------------- /scripts/includes/ansible.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | readonly TADS_MIN_ANSIBLE_VERSION="2.8" 4 | 5 | # Global Ansible config 6 | export ANSIBLE_DEPRECATION_WARNINGS="False" 7 | 8 | get_ansible_remote_environments () { 9 | # Test for Mac GNU ls command (installed in `ansible/install-dependencies.yml`) 10 | if [ -f /usr/local/opt/coreutils/libexec/gnubin/ls ] ; then 11 | /usr/local/opt/coreutils/libexec/gnubin/ls -1 -I "localhost" -I "*.sample*" "${ROOT_PATH}/ansible/inventories" 12 | else 13 | ls -1 -I "localhost" -I "*.sample*" "${ROOT_PATH}/ansible/inventories" 14 | fi 15 | } 16 | 17 | check_ansible () { 18 | if ! command -v ansible > /dev/null; then 19 | echo_red "Ansible must be installed on your local machine. Please refer to README.md to see how." 20 | exit 1 21 | fi 22 | 23 | local current_ansible_version 24 | current_ansible_version="$(ansible --version | head -n1 | cut -d " " -f3 | cut -c1-4)" 25 | 26 | if ! is_version_gte "${current_ansible_version}" "${TADS_MIN_ANSIBLE_VERSION}"; then 27 | echo_red "Your Ansible version (${current_ansible_version}) is not supported by T.A.D.S." 
28 | echo_red "Please upgrade it to at least version ${TADS_MIN_ANSIBLE_VERSION}" 29 | exit 1 30 | fi 31 | } 32 | -------------------------------------------------------------------------------- /scripts/includes/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo_red () { 4 | local red 5 | local reset 6 | red=$(tput setaf 1) 7 | reset=$(tput sgr0) 8 | echo -e "${red}" "$@" "${reset}" 9 | } 10 | 11 | is_version_gte () { 12 | local current_version="$1" 13 | local required_version="$2" 14 | 15 | [[ ! ${current_version} =~ ^[0-9\.]*$ ]] && exit 1 16 | [[ ! ${required_version} =~ ^[0-9\.]*$ ]] && exit 1 17 | 18 | [[ "$(printf '%s\n' "$required_version" "$current_version" | sort -V | head -n1)" == "$required_version" ]] 19 | } 20 | -------------------------------------------------------------------------------- /scripts/includes/localhost_ansible.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | localhost_ansible_playbook () { 4 | local localhost_inventory_path="${ROOT_PATH}/ansible/inventories/localhost" 5 | 6 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 7 | ansible-playbook -i "${localhost_inventory_path}" -D -c local "$@" 8 | set +x 9 | } 10 | 11 | localhost_ansible () { 12 | local localhost_inventory_path="${ROOT_PATH}/ansible/inventories/localhost" 13 | 14 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 15 | ansible -i "${localhost_inventory_path}" -D -c local "$@" 16 | set +x 17 | } 18 | -------------------------------------------------------------------------------- /scripts/includes/remote_ansible.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | remote_ansible_checks () { 4 | local environment="$1" 5 | 6 | local inventory_path="${ROOT_PATH}/ansible/inventories/${environment}" 7 | if [[ ! -f "${inventory_path}" ]]; then 8 | echo_red "Unknown ENVIRONMENT: ${environment}" 9 | echo_red "If it's a Terraform environment, please run: ./tads terraform ${environment} apply" 10 | echo_red "Otherwise, please create ${inventory_path}" 11 | exit 1 12 | fi 13 | } 14 | 15 | remote_ansible_playbook () { 16 | local environment="$1" 17 | shift 18 | 19 | remote_ansible_checks "${environment}" 20 | 21 | local vault_key_path="${ROOT_PATH}/ansible/vault_keys/${environment}" 22 | if [[ ! 
-f "${vault_key_path}" ]]; then 23 | echo_red "Vault key not found for ENVIRONMENT: ${environment}" 24 | echo_red "If it's a new project, run this command to create it: ./tads ansible-vault ${environment} init-key" 25 | echo_red "Otherwise, please create ${vault_key_path} and paste the key in it" 26 | exit 1 27 | fi 28 | 29 | local inventory_path="${ROOT_PATH}/ansible/inventories/${environment}" 30 | local vault_key_path="${ROOT_PATH}/ansible/vault_keys/${environment}" 31 | 32 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 33 | ansible-playbook -i "${inventory_path}" -D --vault-id "${environment}@${vault_key_path}" "$@" 34 | set +x 35 | } 36 | 37 | remote_ansible () { 38 | local environment="$1" 39 | shift 40 | 41 | remote_ansible_checks "${environment}" 42 | 43 | local inventory_path="${ROOT_PATH}/ansible/inventories/${environment}" 44 | 45 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 46 | ansible -i "${inventory_path}" -D "$@" 47 | set +x 48 | } 49 | -------------------------------------------------------------------------------- /scripts/includes/vagrant_ansible.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | vagrant_ansible_checks () { 4 | local vagrant_inventory_path="${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory" 5 | if [[ ! -f "${vagrant_inventory_path}" ]]; then 6 | echo_red "Impossible to find vagrant auto-generated inventory file" 7 | echo_red "Please run: ./tads vagrant up" 8 | echo_red "If you still get this error, you can try: ./tads vagrant provision" 9 | exit 1 10 | fi 11 | } 12 | 13 | vagrant_ansible_playbook () { 14 | local vagrant_inventory_path="${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory" 15 | 16 | vagrant_ansible_checks 17 | 18 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 19 | ansible-playbook -i "${vagrant_inventory_path}" -D "$@" 20 | set +x 21 | } 22 | 23 | vagrant_ansible () { 24 | local vagrant_inventory_path="${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory" 25 | 26 | vagrant_ansible_checks 27 | 28 | [[ "${TADS_VERBOSE:-}" == true ]] && set -x 29 | ansible -i "${vagrant_inventory_path}" -D "$@" 30 | set +x 31 | } 32 | -------------------------------------------------------------------------------- /scripts/tests/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:18.04 2 | 3 | RUN deps="jq" \ 4 | && apt-get update && apt-get install -y --no-install-recommends $deps && rm -rf /var/lib/apt/lists/* 5 | 6 | COPY ./entrypoint.sh /entrypoint.sh 7 | ENTRYPOINT [ "/entrypoint.sh" ] 8 | 9 | VOLUME [ "/tmp/tads" ] 10 | -------------------------------------------------------------------------------- /scripts/tests/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # Copy host files in the workdir 5 | cp -R /tmp/tads-host/* /tmp/tads 6 | 7 | # Clean dev files 8 | rm -rf /tmp/tads/terraform/environments/production/.terraform 9 | rm -f /tmp/tads/terraform/environments/production/terraform.tfstate 10 | rm -rf /tmp/tads/vagrant/.vagrant 11 | rm -f /tmp/tads/vagrant/vagrant.yml 12 | rm -f /tmp/tads/ansible/inventories/production 13 | rm -f /tmp/tads/ansible/vault_keys/* 14 | 15 | exec "$@" 16 | -------------------------------------------------------------------------------- /scripts/tests/launcher.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 6 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." && pwd)" 7 | 8 | # We run the tests with Docker so we have a fresh environment with nothing installed 9 | 10 | docker build -t tads-scripts-tests "${SELF_PATH}" 11 | 12 | docker run \ 13 | --rm \ 14 | -ti \ 15 | -e TESTS_DOCKER=true \ 16 | -e "DEBUG=${DEBUG:-}" \ 17 | -v "${ROOT_PATH}":/tmp/tads-host:ro \ 18 | tads-scripts-tests \ 19 | /tmp/tads/scripts/tests/tests.sh 20 | -------------------------------------------------------------------------------- /scripts/tests/shunit2.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # vim:et:ft=sh:sts=2:sw=2 3 | # 4 | # Copyright 2008-2019 Kate Ward. All Rights Reserved. 5 | # Released under the Apache 2.0 license. 6 | # http://www.apache.org/licenses/LICENSE-2.0 7 | # 8 | # shUnit2 -- Unit testing framework for Unix shell scripts. 9 | # https://github.com/kward/shunit2 10 | # 11 | # Author: kate.ward@forestent.com (Kate Ward) 12 | # 13 | # shUnit2 is a xUnit based unit test framework for Bourne shell scripts. It is 14 | # based on the popular JUnit unit testing framework for Java. 15 | # 16 | # $() are not fully portable (POSIX != portable). 17 | # shellcheck disable=SC2006 18 | # expr may be antiquated, but it is the only solution in some cases. 19 | # shellcheck disable=SC2003 20 | 21 | # Return if shunit2 already loaded. 22 | command [ -n "${SHUNIT_VERSION:-}" ] && exit 0 23 | SHUNIT_VERSION='2.1.8pre' 24 | 25 | # Return values that scripts can use. 26 | SHUNIT_TRUE=0 27 | SHUNIT_FALSE=1 28 | SHUNIT_ERROR=2 29 | 30 | # Logging functions. 31 | _shunit_warn() { 32 | ${__SHUNIT_CMD_ECHO_ESC} \ 33 | "${__shunit_ansi_yellow}shunit2:WARN${__shunit_ansi_none} $*" >&2 34 | } 35 | _shunit_error() { 36 | ${__SHUNIT_CMD_ECHO_ESC} \ 37 | "${__shunit_ansi_red}shunit2:ERROR${__shunit_ansi_none} $*" >&2 38 | } 39 | _shunit_fatal() { 40 | ${__SHUNIT_CMD_ECHO_ESC} \ 41 | "${__shunit_ansi_red}shunit2:FATAL${__shunit_ansi_none} $*" >&2 42 | exit ${SHUNIT_ERROR} 43 | } 44 | 45 | # Determine some reasonable command defaults. 46 | __SHUNIT_CMD_ECHO_ESC='echo -e' 47 | # shellcheck disable=SC2039 48 | command [ "`echo -e test`" = '-e test' ] && __SHUNIT_CMD_ECHO_ESC='echo' 49 | 50 | __SHUNIT_UNAME_S=`uname -s` 51 | case "${__SHUNIT_UNAME_S}" in 52 | BSD) __SHUNIT_CMD_EXPR='gexpr' ;; 53 | *) __SHUNIT_CMD_EXPR='expr' ;; 54 | esac 55 | __SHUNIT_CMD_TPUT='tput' 56 | 57 | # Commands a user can override if needed. 58 | SHUNIT_CMD_EXPR=${SHUNIT_CMD_EXPR:-${__SHUNIT_CMD_EXPR}} 59 | SHUNIT_CMD_TPUT=${SHUNIT_CMD_TPUT:-${__SHUNIT_CMD_TPUT}} 60 | 61 | # Enable color output. Options are 'never', 'always', or 'auto'. 62 | SHUNIT_COLOR=${SHUNIT_COLOR:-auto} 63 | 64 | # Specific shell checks. 65 | if command [ -n "${ZSH_VERSION:-}" ]; then 66 | setopt |grep "^shwordsplit$" >/dev/null 67 | if command [ $? -ne ${SHUNIT_TRUE} ]; then 68 | _shunit_fatal 'zsh shwordsplit option is required for proper operation' 69 | fi 70 | if command [ -z "${SHUNIT_PARENT:-}" ]; then 71 | _shunit_fatal "zsh does not pass \$0 through properly. 
please declare \ 72 | \"SHUNIT_PARENT=\$0\" before calling shUnit2" 73 | fi 74 | fi 75 | 76 | # 77 | # Constants 78 | # 79 | 80 | __SHUNIT_MODE_SOURCED='sourced' 81 | __SHUNIT_MODE_STANDALONE='standalone' 82 | __SHUNIT_PARENT=${SHUNIT_PARENT:-$0} 83 | 84 | # User provided test prefix to display in front of the name of the test being 85 | # executed. Define by setting the SHUNIT_TEST_PREFIX variable. 86 | __SHUNIT_TEST_PREFIX=${SHUNIT_TEST_PREFIX:-} 87 | 88 | # ANSI colors. 89 | __SHUNIT_ANSI_NONE='\033[0m' 90 | __SHUNIT_ANSI_RED='\033[1;31m' 91 | __SHUNIT_ANSI_GREEN='\033[1;32m' 92 | __SHUNIT_ANSI_YELLOW='\033[1;33m' 93 | __SHUNIT_ANSI_CYAN='\033[1;36m' 94 | 95 | # Set the constants readonly. 96 | __shunit_constants=`set |grep '^__SHUNIT_' |cut -d= -f1` 97 | echo "${__shunit_constants}" |grep '^Binary file' >/dev/null && \ 98 | __shunit_constants=`set |grep -a '^__SHUNIT_' |cut -d= -f1` 99 | for __shunit_const in ${__shunit_constants}; do 100 | if command [ -z "${ZSH_VERSION:-}" ]; then 101 | readonly "${__shunit_const}" 102 | else 103 | case ${ZSH_VERSION} in 104 | [123].*) readonly "${__shunit_const}" ;; 105 | *) readonly -g "${__shunit_const}" # Declare readonly constants globally. 106 | esac 107 | fi 108 | done 109 | unset __shunit_const __shunit_constants 110 | 111 | # 112 | # Internal variables. 113 | # 114 | 115 | # Variables. 116 | __shunit_lineno='' # Line number of executed test. 117 | __shunit_mode=${__SHUNIT_MODE_SOURCED} # Operating mode. 118 | __shunit_reportGenerated=${SHUNIT_FALSE} # Is report generated. 119 | __shunit_script='' # Filename of unittest script (standalone mode). 120 | __shunit_skip=${SHUNIT_FALSE} # Is skipping enabled. 121 | __shunit_suite='' # Suite of tests to execute. 122 | __shunit_clean=${SHUNIT_FALSE} # _shunit_cleanup() was already called. 123 | 124 | # ANSI colors (populated by _shunit_configureColor()). 125 | __shunit_ansi_none='' 126 | __shunit_ansi_red='' 127 | __shunit_ansi_green='' 128 | __shunit_ansi_yellow='' 129 | __shunit_ansi_cyan='' 130 | 131 | # Counts of tests. 132 | __shunit_testSuccess=${SHUNIT_TRUE} 133 | __shunit_testsTotal=0 134 | __shunit_testsPassed=0 135 | __shunit_testsFailed=0 136 | 137 | # Counts of asserts. 138 | __shunit_assertsTotal=0 139 | __shunit_assertsPassed=0 140 | __shunit_assertsFailed=0 141 | __shunit_assertsSkipped=0 142 | 143 | # 144 | # Macros. 145 | # 146 | 147 | # shellcheck disable=SC2016,SC2089 148 | _SHUNIT_LINENO_='eval __shunit_lineno=""; if command [ "${1:-}" = "--lineno" ]; then command [ -n "$2" ] && __shunit_lineno="[$2] "; shift 2; fi' 149 | 150 | #----------------------------------------------------------------------------- 151 | # Assertion functions. 152 | # 153 | 154 | # Assert that two values are equal to one another. 
155 | # 156 | # Args: 157 | # message: string: failure message [optional] 158 | # expected: string: expected value 159 | # actual: string: actual value 160 | # Returns: 161 | # integer: success (TRUE/FALSE/ERROR constant) 162 | assertEquals() { 163 | # shellcheck disable=SC2090 164 | ${_SHUNIT_LINENO_} 165 | if command [ $# -lt 2 -o $# -gt 3 ]; then 166 | _shunit_error "assertEquals() requires two or three arguments; $# given" 167 | _shunit_assertFail 168 | return ${SHUNIT_ERROR} 169 | fi 170 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 171 | 172 | shunit_message_=${__shunit_lineno} 173 | if command [ $# -eq 3 ]; then 174 | shunit_message_="${shunit_message_}$1" 175 | shift 176 | fi 177 | shunit_expected_=$1 178 | shunit_actual_=$2 179 | 180 | shunit_return=${SHUNIT_TRUE} 181 | if command [ "${shunit_expected_}" = "${shunit_actual_}" ]; then 182 | _shunit_assertPass 183 | else 184 | failNotEquals "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}" 185 | shunit_return=${SHUNIT_FALSE} 186 | fi 187 | 188 | unset shunit_message_ shunit_expected_ shunit_actual_ 189 | return ${shunit_return} 190 | } 191 | # shellcheck disable=SC2016,SC2034 192 | _ASSERT_EQUALS_='eval assertEquals --lineno "${LINENO:-}"' 193 | 194 | # Assert that two values are not equal to one another. 195 | # 196 | # Args: 197 | # message: string: failure message [optional] 198 | # expected: string: expected value 199 | # actual: string: actual value 200 | # Returns: 201 | # integer: success (TRUE/FALSE/ERROR constant) 202 | assertNotEquals() { 203 | # shellcheck disable=SC2090 204 | ${_SHUNIT_LINENO_} 205 | if command [ $# -lt 2 -o $# -gt 3 ]; then 206 | _shunit_error "assertNotEquals() requires two or three arguments; $# given" 207 | _shunit_assertFail 208 | return ${SHUNIT_ERROR} 209 | fi 210 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 211 | 212 | shunit_message_=${__shunit_lineno} 213 | if command [ $# -eq 3 ]; then 214 | shunit_message_="${shunit_message_}$1" 215 | shift 216 | fi 217 | shunit_expected_=$1 218 | shunit_actual_=$2 219 | 220 | shunit_return=${SHUNIT_TRUE} 221 | if command [ "${shunit_expected_}" != "${shunit_actual_}" ]; then 222 | _shunit_assertPass 223 | else 224 | failSame "${shunit_message_}" "${shunit_expected_}" "${shunit_actual_}" 225 | shunit_return=${SHUNIT_FALSE} 226 | fi 227 | 228 | unset shunit_message_ shunit_expected_ shunit_actual_ 229 | return ${shunit_return} 230 | } 231 | # shellcheck disable=SC2016,SC2034 232 | _ASSERT_NOT_EQUALS_='eval assertNotEquals --lineno "${LINENO:-}"' 233 | 234 | # Assert that a container contains a content. 
235 | # 236 | # Args: 237 | # message: string: failure message [optional] 238 | # container: string: container to analyze 239 | # content: string: content to find 240 | # Returns: 241 | # integer: success (TRUE/FALSE/ERROR constant) 242 | assertContains() { 243 | # shellcheck disable=SC2090 244 | ${_SHUNIT_LINENO_} 245 | if command [ $# -lt 2 -o $# -gt 3 ]; then 246 | _shunit_error "assertContains() requires two or three arguments; $# given" 247 | _shunit_assertFail 248 | return ${SHUNIT_ERROR} 249 | fi 250 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 251 | 252 | shunit_message_=${__shunit_lineno} 253 | if command [ $# -eq 3 ]; then 254 | shunit_message_="${shunit_message_}$1" 255 | shift 256 | fi 257 | shunit_container_=$1 258 | shunit_content_=$2 259 | 260 | shunit_return=${SHUNIT_TRUE} 261 | if echo "$shunit_container_" | grep -F -- "$shunit_content_" > /dev/null; then 262 | _shunit_assertPass 263 | else 264 | failNotFound "${shunit_message_}" "${shunit_content_}" 265 | shunit_return=${SHUNIT_FALSE} 266 | fi 267 | 268 | unset shunit_message_ shunit_container_ shunit_content_ 269 | return ${shunit_return} 270 | } 271 | # shellcheck disable=SC2016,SC2034 272 | _ASSERT_CONTAINS_='eval assertContains --lineno "${LINENO:-}"' 273 | 274 | # Assert that a container does not contain a content. 275 | # 276 | # Args: 277 | # message: string: failure message [optional] 278 | # container: string: container to analyze 279 | # content: string: content to look for 280 | # Returns: 281 | # integer: success (TRUE/FALSE/ERROR constant) 282 | assertNotContains() { 283 | # shellcheck disable=SC2090 284 | ${_SHUNIT_LINENO_} 285 | if command [ $# -lt 2 -o $# -gt 3 ]; then 286 | _shunit_error "assertNotContains() requires two or three arguments; $# given" 287 | _shunit_assertFail 288 | return ${SHUNIT_ERROR} 289 | fi 290 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 291 | 292 | shunit_message_=${__shunit_lineno} 293 | if command [ $# -eq 3 ]; then 294 | shunit_message_="${shunit_message_}$1" 295 | shift 296 | fi 297 | shunit_container_=$1 298 | shunit_content_=$2 299 | 300 | shunit_return=${SHUNIT_TRUE} 301 | if echo "$shunit_container_" | grep -F -- "$shunit_content_" > /dev/null; then 302 | failFound "${shunit_message_}" "${shunit_content_}" 303 | shunit_return=${SHUNIT_FALSE} 304 | else 305 | _shunit_assertPass 306 | fi 307 | 308 | unset shunit_message_ shunit_container_ shunit_content_ 309 | return ${shunit_return} 310 | } 311 | # shellcheck disable=SC2016,SC2034 312 | _ASSERT_NOT_CONTAINS_='eval assertNotContains --lineno "${LINENO:-}"' 313 | 314 | # Assert that a value is null (i.e. an empty string) 315 | # 316 | # Args: 317 | # message: string: failure message [optional] 318 | # actual: string: actual value 319 | # Returns: 320 | # integer: success (TRUE/FALSE/ERROR constant) 321 | assertNull() { 322 | # shellcheck disable=SC2090 323 | ${_SHUNIT_LINENO_} 324 | if command [ $# -lt 1 -o $# -gt 2 ]; then 325 | _shunit_error "assertNull() requires one or two arguments; $# given" 326 | _shunit_assertFail 327 | return ${SHUNIT_ERROR} 328 | fi 329 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 330 | 331 | shunit_message_=${__shunit_lineno} 332 | if command [ $# -eq 2 ]; then 333 | shunit_message_="${shunit_message_}$1" 334 | shift 335 | fi 336 | assertTrue "${shunit_message_}" "[ -z '$1' ]" 337 | shunit_return=$? 
338 | 339 | unset shunit_message_ 340 | return ${shunit_return} 341 | } 342 | # shellcheck disable=SC2016,SC2034 343 | _ASSERT_NULL_='eval assertNull --lineno "${LINENO:-}"' 344 | 345 | # Assert that a value is not null (i.e. a non-empty string) 346 | # 347 | # Args: 348 | # message: string: failure message [optional] 349 | # actual: string: actual value 350 | # Returns: 351 | # integer: success (TRUE/FALSE/ERROR constant) 352 | assertNotNull() { 353 | # shellcheck disable=SC2090 354 | ${_SHUNIT_LINENO_} 355 | if command [ $# -gt 2 ]; then # allowing 0 arguments as $1 might actually be null 356 | _shunit_error "assertNotNull() requires one or two arguments; $# given" 357 | _shunit_assertFail 358 | return ${SHUNIT_ERROR} 359 | fi 360 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 361 | 362 | shunit_message_=${__shunit_lineno} 363 | if command [ $# -eq 2 ]; then 364 | shunit_message_="${shunit_message_}$1" 365 | shift 366 | fi 367 | shunit_actual_=`_shunit_escapeCharactersInString "${1:-}"` 368 | test -n "${shunit_actual_}" 369 | assertTrue "${shunit_message_}" $? 370 | shunit_return=$? 371 | 372 | unset shunit_actual_ shunit_message_ 373 | return ${shunit_return} 374 | } 375 | # shellcheck disable=SC2016,SC2034 376 | _ASSERT_NOT_NULL_='eval assertNotNull --lineno "${LINENO:-}"' 377 | 378 | # Assert that two values are the same (i.e. equal to one another). 379 | # 380 | # Args: 381 | # message: string: failure message [optional] 382 | # expected: string: expected value 383 | # actual: string: actual value 384 | # Returns: 385 | # integer: success (TRUE/FALSE/ERROR constant) 386 | assertSame() { 387 | # shellcheck disable=SC2090 388 | ${_SHUNIT_LINENO_} 389 | if command [ $# -lt 2 -o $# -gt 3 ]; then 390 | _shunit_error "assertSame() requires two or three arguments; $# given" 391 | _shunit_assertFail 392 | return ${SHUNIT_ERROR} 393 | fi 394 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 395 | 396 | shunit_message_=${__shunit_lineno} 397 | if command [ $# -eq 3 ]; then 398 | shunit_message_="${shunit_message_}$1" 399 | shift 400 | fi 401 | assertEquals "${shunit_message_}" "$1" "$2" 402 | shunit_return=$? 403 | 404 | unset shunit_message_ 405 | return ${shunit_return} 406 | } 407 | # shellcheck disable=SC2016,SC2034 408 | _ASSERT_SAME_='eval assertSame --lineno "${LINENO:-}"' 409 | 410 | # Assert that two values are not the same (i.e. not equal to one another). 411 | # 412 | # Args: 413 | # message: string: failure message [optional] 414 | # expected: string: expected value 415 | # actual: string: actual value 416 | # Returns: 417 | # integer: success (TRUE/FALSE/ERROR constant) 418 | assertNotSame() { 419 | # shellcheck disable=SC2090 420 | ${_SHUNIT_LINENO_} 421 | if command [ $# -lt 2 -o $# -gt 3 ]; then 422 | _shunit_error "assertNotSame() requires two or three arguments; $# given" 423 | _shunit_assertFail 424 | return ${SHUNIT_ERROR} 425 | fi 426 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 427 | 428 | shunit_message_=${__shunit_lineno} 429 | if command [ $# -eq 3 ]; then 430 | shunit_message_="${shunit_message_:-}$1" 431 | shift 432 | fi 433 | assertNotEquals "${shunit_message_}" "$1" "$2" 434 | shunit_return=$? 435 | 436 | unset shunit_message_ 437 | return ${shunit_return} 438 | } 439 | # shellcheck disable=SC2016,SC2034 440 | _ASSERT_NOT_SAME_='eval assertNotSame --lineno "${LINENO:-}"' 441 | 442 | # Assert that a value or shell test condition is true. 443 | # 444 | # In shell, a value of 0 is true and a non-zero value is false. Any integer 445 | # value passed can thereby be tested. 
446 | # 447 | # Shell supports much more complicated tests though, and a means to support 448 | # them was needed. As such, this function tests that conditions are true or 449 | # false through evaluation rather than just looking for a true or false. 450 | # 451 | # The following test will succeed: 452 | # assertTrue 0 453 | # assertTrue "[ 34 -gt 23 ]" 454 | # The following test will fail with a message: 455 | # assertTrue 123 456 | # assertTrue "test failed" "[ -r '/non/existent/file' ]" 457 | # 458 | # Args: 459 | # message: string: failure message [optional] 460 | # condition: string: integer value or shell conditional statement 461 | # Returns: 462 | # integer: success (TRUE/FALSE/ERROR constant) 463 | assertTrue() { 464 | # shellcheck disable=SC2090 465 | ${_SHUNIT_LINENO_} 466 | if command [ $# -lt 1 -o $# -gt 2 ]; then 467 | _shunit_error "assertTrue() takes one or two arguments; $# given" 468 | _shunit_assertFail 469 | return ${SHUNIT_ERROR} 470 | fi 471 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 472 | 473 | shunit_message_=${__shunit_lineno} 474 | if command [ $# -eq 2 ]; then 475 | shunit_message_="${shunit_message_}$1" 476 | shift 477 | fi 478 | shunit_condition_=$1 479 | 480 | # See if condition is an integer, i.e. a return value. 481 | shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'` 482 | shunit_return=${SHUNIT_TRUE} 483 | if command [ -z "${shunit_condition_}" ]; then 484 | # Null condition. 485 | shunit_return=${SHUNIT_FALSE} 486 | elif command [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ] 487 | then 488 | # Possible return value. Treating 0 as true, and non-zero as false. 489 | command [ "${shunit_condition_}" -ne 0 ] && shunit_return=${SHUNIT_FALSE} 490 | else 491 | # Hopefully... a condition. 492 | ( eval "${shunit_condition_}" ) >/dev/null 2>&1 493 | command [ $? -ne 0 ] && shunit_return=${SHUNIT_FALSE} 494 | fi 495 | 496 | # Record the test. 497 | if command [ ${shunit_return} -eq ${SHUNIT_TRUE} ]; then 498 | _shunit_assertPass 499 | else 500 | _shunit_assertFail "${shunit_message_}" 501 | fi 502 | 503 | unset shunit_message_ shunit_condition_ shunit_match_ 504 | return ${shunit_return} 505 | } 506 | # shellcheck disable=SC2016,SC2034 507 | _ASSERT_TRUE_='eval assertTrue --lineno "${LINENO:-}"' 508 | 509 | # Assert that a value or shell test condition is false. 510 | # 511 | # In shell, a value of 0 is true and a non-zero value is false. Any integer 512 | # value passed can thereby be tested. 513 | # 514 | # Shell supports much more complicated tests though, and a means to support 515 | # them was needed. As such, this function tests that conditions are true or 516 | # false through evaluation rather than just looking for a true or false. 
517 | # 518 | # The following test will succeed: 519 | # assertFalse 1 520 | # assertFalse "[ 'apples' = 'oranges' ]" 521 | # The following test will fail with a message: 522 | # assertFalse 0 523 | # assertFalse "test failed" "[ 1 -eq 1 -a 2 -eq 2 ]" 524 | # 525 | # Args: 526 | # message: string: failure message [optional] 527 | # condition: string: integer value or shell conditional statement 528 | # Returns: 529 | # integer: success (TRUE/FALSE/ERROR constant) 530 | assertFalse() { 531 | # shellcheck disable=SC2090 532 | ${_SHUNIT_LINENO_} 533 | if command [ $# -lt 1 -o $# -gt 2 ]; then 534 | _shunit_error "assertFalse() requires one or two arguments; $# given" 535 | _shunit_assertFail 536 | return ${SHUNIT_ERROR} 537 | fi 538 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 539 | 540 | shunit_message_=${__shunit_lineno} 541 | if command [ $# -eq 2 ]; then 542 | shunit_message_="${shunit_message_}$1" 543 | shift 544 | fi 545 | shunit_condition_=$1 546 | 547 | # See if condition is an integer, i.e. a return value. 548 | shunit_match_=`expr "${shunit_condition_}" : '\([0-9]*\)'` 549 | shunit_return=${SHUNIT_TRUE} 550 | if command [ -z "${shunit_condition_}" ]; then 551 | # Null condition. 552 | shunit_return=${SHUNIT_FALSE} 553 | elif command [ -n "${shunit_match_}" -a "${shunit_condition_}" = "${shunit_match_}" ] 554 | then 555 | # Possible return value. Treating 0 as true, and non-zero as false. 556 | command [ "${shunit_condition_}" -eq 0 ] && shunit_return=${SHUNIT_FALSE} 557 | else 558 | # Hopefully... a condition. 559 | ( eval "${shunit_condition_}" ) >/dev/null 2>&1 560 | command [ $? -eq 0 ] && shunit_return=${SHUNIT_FALSE} 561 | fi 562 | 563 | # Record the test. 564 | if command [ "${shunit_return}" -eq "${SHUNIT_TRUE}" ]; then 565 | _shunit_assertPass 566 | else 567 | _shunit_assertFail "${shunit_message_}" 568 | fi 569 | 570 | unset shunit_message_ shunit_condition_ shunit_match_ 571 | return "${shunit_return}" 572 | } 573 | # shellcheck disable=SC2016,SC2034 574 | _ASSERT_FALSE_='eval assertFalse --lineno "${LINENO:-}"' 575 | 576 | #----------------------------------------------------------------------------- 577 | # Failure functions. 578 | # 579 | 580 | # Records a test failure. 581 | # 582 | # Args: 583 | # message: string: failure message [optional] 584 | # Returns: 585 | # integer: success (TRUE/FALSE/ERROR constant) 586 | fail() { 587 | # shellcheck disable=SC2090 588 | ${_SHUNIT_LINENO_} 589 | if command [ $# -gt 1 ]; then 590 | _shunit_error "fail() requires zero or one arguments; $# given" 591 | return ${SHUNIT_ERROR} 592 | fi 593 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 594 | 595 | shunit_message_=${__shunit_lineno} 596 | if command [ $# -eq 1 ]; then 597 | shunit_message_="${shunit_message_}$1" 598 | shift 599 | fi 600 | 601 | _shunit_assertFail "${shunit_message_}" 602 | 603 | unset shunit_message_ 604 | return ${SHUNIT_FALSE} 605 | } 606 | # shellcheck disable=SC2016,SC2034 607 | _FAIL_='eval fail --lineno "${LINENO:-}"' 608 | 609 | # Records a test failure, stating two values were not equal. 
610 | # 611 | # Args: 612 | # message: string: failure message [optional] 613 | # expected: string: expected value 614 | # actual: string: actual value 615 | # Returns: 616 | # integer: success (TRUE/FALSE/ERROR constant) 617 | failNotEquals() { 618 | # shellcheck disable=SC2090 619 | ${_SHUNIT_LINENO_} 620 | if command [ $# -lt 2 -o $# -gt 3 ]; then 621 | _shunit_error "failNotEquals() requires one or two arguments; $# given" 622 | return ${SHUNIT_ERROR} 623 | fi 624 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 625 | 626 | shunit_message_=${__shunit_lineno} 627 | if command [ $# -eq 3 ]; then 628 | shunit_message_="${shunit_message_}$1" 629 | shift 630 | fi 631 | shunit_expected_=$1 632 | shunit_actual_=$2 633 | 634 | shunit_message_=${shunit_message_%% } 635 | _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected:<${shunit_expected_}> but was:<${shunit_actual_}>" 636 | 637 | unset shunit_message_ shunit_expected_ shunit_actual_ 638 | return ${SHUNIT_FALSE} 639 | } 640 | # shellcheck disable=SC2016,SC2034 641 | _FAIL_NOT_EQUALS_='eval failNotEquals --lineno "${LINENO:-}"' 642 | 643 | # Records a test failure, stating a value was found. 644 | # 645 | # Args: 646 | # message: string: failure message [optional] 647 | # content: string: found value 648 | # Returns: 649 | # integer: success (TRUE/FALSE/ERROR constant) 650 | failFound() { 651 | # shellcheck disable=SC2090 652 | ${_SHUNIT_LINENO_} 653 | if command [ $# -lt 1 -o $# -gt 2 ]; then 654 | _shunit_error "failFound() requires one or two arguments; $# given" 655 | return ${SHUNIT_ERROR} 656 | fi 657 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 658 | 659 | shunit_message_=${__shunit_lineno} 660 | if command [ $# -eq 2 ]; then 661 | shunit_message_="${shunit_message_}$1" 662 | shift 663 | fi 664 | 665 | shunit_message_=${shunit_message_%% } 666 | _shunit_assertFail "${shunit_message_:+${shunit_message_} }Found" 667 | 668 | unset shunit_message_ 669 | return ${SHUNIT_FALSE} 670 | } 671 | # shellcheck disable=SC2016,SC2034 672 | _FAIL_FOUND_='eval failFound --lineno "${LINENO:-}"' 673 | 674 | # Records a test failure, stating a content was not found. 675 | # 676 | # Args: 677 | # message: string: failure message [optional] 678 | # content: string: content not found 679 | # Returns: 680 | # integer: success (TRUE/FALSE/ERROR constant) 681 | failNotFound() { 682 | # shellcheck disable=SC2090 683 | ${_SHUNIT_LINENO_} 684 | if command [ $# -lt 1 -o $# -gt 2 ]; then 685 | _shunit_error "failNotFound() requires one or two arguments; $# given" 686 | return ${SHUNIT_ERROR} 687 | fi 688 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 689 | 690 | shunit_message_=${__shunit_lineno} 691 | if command [ $# -eq 2 ]; then 692 | shunit_message_="${shunit_message_}$1" 693 | shift 694 | fi 695 | shunit_content_=$1 696 | 697 | shunit_message_=${shunit_message_%% } 698 | _shunit_assertFail "${shunit_message_:+${shunit_message_} }Not found:<${shunit_content_}>" 699 | 700 | unset shunit_message_ shunit_content_ 701 | return ${SHUNIT_FALSE} 702 | } 703 | # shellcheck disable=SC2016,SC2034 704 | _FAIL_NOT_FOUND_='eval failNotFound --lineno "${LINENO:-}"' 705 | 706 | # Records a test failure, stating two values should have been the same. 
707 | # 708 | # Args: 709 | # message: string: failure message [optional] 710 | # expected: string: expected value 711 | # actual: string: actual value 712 | # Returns: 713 | # integer: success (TRUE/FALSE/ERROR constant) 714 | failSame() 715 | { 716 | # shellcheck disable=SC2090 717 | ${_SHUNIT_LINENO_} 718 | if command [ $# -lt 2 -o $# -gt 3 ]; then 719 | _shunit_error "failSame() requires two or three arguments; $# given" 720 | return ${SHUNIT_ERROR} 721 | fi 722 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 723 | 724 | shunit_message_=${__shunit_lineno} 725 | if command [ $# -eq 3 ]; then 726 | shunit_message_="${shunit_message_}$1" 727 | shift 728 | fi 729 | 730 | shunit_message_=${shunit_message_%% } 731 | _shunit_assertFail "${shunit_message_:+${shunit_message_} }expected not same" 732 | 733 | unset shunit_message_ 734 | return ${SHUNIT_FALSE} 735 | } 736 | # shellcheck disable=SC2016,SC2034 737 | _FAIL_SAME_='eval failSame --lineno "${LINENO:-}"' 738 | 739 | # Records a test failure, stating two values were not equal. 740 | # 741 | # This is functionally equivalent to calling failNotEquals(). 742 | # 743 | # Args: 744 | # message: string: failure message [optional] 745 | # expected: string: expected value 746 | # actual: string: actual value 747 | # Returns: 748 | # integer: success (TRUE/FALSE/ERROR constant) 749 | failNotSame() { 750 | # shellcheck disable=SC2090 751 | ${_SHUNIT_LINENO_} 752 | if command [ $# -lt 2 -o $# -gt 3 ]; then 753 | _shunit_error "failNotSame() requires one or two arguments; $# given" 754 | return ${SHUNIT_ERROR} 755 | fi 756 | _shunit_shouldSkip && return ${SHUNIT_TRUE} 757 | 758 | shunit_message_=${__shunit_lineno} 759 | if command [ $# -eq 3 ]; then 760 | shunit_message_="${shunit_message_}$1" 761 | shift 762 | fi 763 | failNotEquals "${shunit_message_}" "$1" "$2" 764 | shunit_return=$? 765 | 766 | unset shunit_message_ 767 | return ${shunit_return} 768 | } 769 | # shellcheck disable=SC2016,SC2034 770 | _FAIL_NOT_SAME_='eval failNotSame --lineno "${LINENO:-}"' 771 | 772 | #----------------------------------------------------------------------------- 773 | # Skipping functions. 774 | # 775 | 776 | # Force remaining assert and fail functions to be "skipped". 777 | # 778 | # This function forces the remaining assert and fail functions to be "skipped", 779 | # i.e. they will have no effect. Each function skipped will be recorded so that 780 | # the total of asserts and fails will not be altered. 781 | # 782 | # Args: 783 | # None 784 | startSkipping() { __shunit_skip=${SHUNIT_TRUE}; } 785 | 786 | # Resume the normal recording behavior of assert and fail calls. 787 | # 788 | # Args: 789 | # None 790 | endSkipping() { __shunit_skip=${SHUNIT_FALSE}; } 791 | 792 | # Returns the state of assert and fail call skipping. 793 | # 794 | # Args: 795 | # None 796 | # Returns: 797 | # boolean: (TRUE/FALSE constant) 798 | isSkipping() { return ${__shunit_skip}; } 799 | 800 | #----------------------------------------------------------------------------- 801 | # Suite functions. 802 | # 803 | 804 | # Stub. This function should contains all unit test calls to be made. 805 | # 806 | # DEPRECATED (as of 2.1.0) 807 | # 808 | # This function can be optionally overridden by the user in their test suite. 809 | # 810 | # If this function exists, it will be called when shunit2 is sourced. If it 811 | # does not exist, shunit2 will search the parent script for all functions 812 | # beginning with the word 'test', and they will be added dynamically to the 813 | # test suite. 
814 | # 815 | # This function should be overridden by the user in their unit test suite. 816 | # Note: see _shunit_mktempFunc() for actual implementation 817 | # 818 | # Args: 819 | # None 820 | #suite() { :; } # DO NOT UNCOMMENT THIS FUNCTION 821 | 822 | # Adds a function name to the list of tests schedule for execution. 823 | # 824 | # This function should only be called from within the suite() function. 825 | # 826 | # Args: 827 | # function: string: name of a function to add to current unit test suite 828 | suite_addTest() { 829 | shunit_func_=${1:-} 830 | 831 | __shunit_suite="${__shunit_suite:+${__shunit_suite} }${shunit_func_}" 832 | __shunit_testsTotal=`expr ${__shunit_testsTotal} + 1` 833 | 834 | unset shunit_func_ 835 | } 836 | 837 | # Stub. This function will be called once before any tests are run. 838 | # 839 | # Common one-time environment preparation tasks shared by all tests can be 840 | # defined here. 841 | # 842 | # This function should be overridden by the user in their unit test suite. 843 | # Note: see _shunit_mktempFunc() for actual implementation 844 | # 845 | # Args: 846 | # None 847 | #oneTimeSetUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION 848 | 849 | # Stub. This function will be called once after all tests are finished. 850 | # 851 | # Common one-time environment cleanup tasks shared by all tests can be defined 852 | # here. 853 | # 854 | # This function should be overridden by the user in their unit test suite. 855 | # Note: see _shunit_mktempFunc() for actual implementation 856 | # 857 | # Args: 858 | # None 859 | #oneTimeTearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION 860 | 861 | # Stub. This function will be called before each test is run. 862 | # 863 | # Common environment preparation tasks shared by all tests can be defined here. 864 | # 865 | # This function should be overridden by the user in their unit test suite. 866 | # Note: see _shunit_mktempFunc() for actual implementation 867 | # 868 | # Args: 869 | # None 870 | #setUp() { :; } # DO NOT UNCOMMENT THIS FUNCTION 871 | 872 | # Note: see _shunit_mktempFunc() for actual implementation 873 | # Stub. This function will be called after each test is run. 874 | # 875 | # Common environment cleanup tasks shared by all tests can be defined here. 876 | # 877 | # This function should be overridden by the user in their unit test suite. 878 | # Note: see _shunit_mktempFunc() for actual implementation 879 | # 880 | # Args: 881 | # None 882 | #tearDown() { :; } # DO NOT UNCOMMENT THIS FUNCTION 883 | 884 | #------------------------------------------------------------------------------ 885 | # Internal shUnit2 functions. 886 | # 887 | 888 | # Create a temporary directory to store various run-time files in. 889 | # 890 | # This function is a cross-platform temporary directory creation tool. Not all 891 | # OSes have the `mktemp` function, so one is included here. 892 | # 893 | # Args: 894 | # None 895 | # Outputs: 896 | # string: the temporary directory that was created 897 | _shunit_mktempDir() { 898 | # Try the standard `mktemp` function. 899 | ( exec mktemp -dqt shunit.XXXXXX 2>/dev/null ) && return 900 | 901 | # The standard `mktemp` didn't work. Use our own. 902 | # shellcheck disable=SC2039 903 | if command [ -r '/dev/urandom' -a -x '/usr/bin/od' ]; then 904 | _shunit_random_=`/usr/bin/od -vAn -N4 -tx4 "${_shunit_file_}" 932 | #! 
/bin/sh 933 | exit ${SHUNIT_TRUE} 934 | EOF 935 | command chmod +x "${_shunit_file_}" 936 | done 937 | 938 | unset _shunit_file_ 939 | } 940 | 941 | # Final cleanup function to leave things as we found them. 942 | # 943 | # Besides removing the temporary directory, this function is in charge of the 944 | # final exit code of the unit test. The exit code is based on how the script 945 | # was ended (e.g. normal exit, or via Ctrl-C). 946 | # 947 | # Args: 948 | # name: string: name of the trap called (specified when trap defined) 949 | _shunit_cleanup() { 950 | _shunit_name_=$1 951 | 952 | case "${_shunit_name_}" in 953 | EXIT) ;; 954 | INT) _shunit_signal_=130 ;; # 2+128 955 | TERM) _shunit_signal_=143 ;; # 15+128 956 | *) 957 | _shunit_error "unrecognized trap value (${_shunit_name_})" 958 | _shunit_signal_=0 959 | ;; 960 | esac 961 | if command [ "${_shunit_name_}" != 'EXIT' ]; then 962 | _shunit_warn "trapped and now handling the (${_shunit_name_}) signal" 963 | fi 964 | 965 | # Do our work. 966 | if command [ ${__shunit_clean} -eq ${SHUNIT_FALSE} ]; then 967 | # Ensure tear downs are only called once. 968 | __shunit_clean=${SHUNIT_TRUE} 969 | 970 | tearDown 971 | command [ $? -eq ${SHUNIT_TRUE} ] \ 972 | || _shunit_warn "tearDown() returned non-zero return code." 973 | oneTimeTearDown 974 | command [ $? -eq ${SHUNIT_TRUE} ] \ 975 | || _shunit_warn "oneTimeTearDown() returned non-zero return code." 976 | 977 | command rm -fr "${__shunit_tmpDir}" 978 | fi 979 | 980 | if command [ "${_shunit_name_}" != 'EXIT' ]; then 981 | # Handle all non-EXIT signals. 982 | trap - 0 # Disable EXIT trap. 983 | exit ${_shunit_signal_} 984 | elif command [ ${__shunit_reportGenerated} -eq ${SHUNIT_FALSE} ]; then 985 | _shunit_assertFail 'unknown failure encountered running a test' 986 | _shunit_generateReport 987 | exit ${SHUNIT_ERROR} 988 | fi 989 | 990 | unset _shunit_name_ _shunit_signal_ 991 | } 992 | 993 | # configureColor based on user color preference. 994 | # 995 | # Args: 996 | # color: string: color mode (one of `always`, `auto`, or `none`). 997 | _shunit_configureColor() { 998 | _shunit_color_=${SHUNIT_FALSE} # By default, no color. 999 | case $1 in 1000 | 'always') _shunit_color_=${SHUNIT_TRUE} ;; 1001 | 'auto') 1002 | command [ "`_shunit_colors`" -ge 8 ] && _shunit_color_=${SHUNIT_TRUE} 1003 | ;; 1004 | 'none') ;; 1005 | *) _shunit_fatal "unrecognized color option '$1'" ;; 1006 | esac 1007 | 1008 | case ${_shunit_color_} in 1009 | ${SHUNIT_TRUE}) 1010 | __shunit_ansi_none=${__SHUNIT_ANSI_NONE} 1011 | __shunit_ansi_red=${__SHUNIT_ANSI_RED} 1012 | __shunit_ansi_green=${__SHUNIT_ANSI_GREEN} 1013 | __shunit_ansi_yellow=${__SHUNIT_ANSI_YELLOW} 1014 | __shunit_ansi_cyan=${__SHUNIT_ANSI_CYAN} 1015 | ;; 1016 | ${SHUNIT_FALSE}) 1017 | __shunit_ansi_none='' 1018 | __shunit_ansi_red='' 1019 | __shunit_ansi_green='' 1020 | __shunit_ansi_yellow='' 1021 | __shunit_ansi_cyan='' 1022 | ;; 1023 | esac 1024 | 1025 | unset _shunit_color_ _shunit_tput_ 1026 | } 1027 | 1028 | # colors returns the number of supported colors for the TERM. 1029 | _shunit_colors() { 1030 | _shunit_tput_=`${SHUNIT_CMD_TPUT} colors 2>/dev/null` 1031 | if command [ $? -eq 0 ]; then 1032 | echo "${_shunit_tput_}" 1033 | else 1034 | echo 16 1035 | fi 1036 | unset _shunit_tput_ 1037 | } 1038 | 1039 | # The actual running of the tests happens here. 1040 | # 1041 | # Args: 1042 | # None 1043 | _shunit_execSuite() { 1044 | for _shunit_test_ in ${__shunit_suite}; do 1045 | __shunit_testSuccess=${SHUNIT_TRUE} 1046 | 1047 | # Disable skipping. 
1048 | endSkipping 1049 | 1050 | # Execute the per-test setup function. 1051 | setUp 1052 | command [ $? -eq ${SHUNIT_TRUE} ] \ 1053 | || _shunit_fatal "setup() returned non-zero return code." 1054 | 1055 | # Execute the test. 1056 | echo "${__SHUNIT_TEST_PREFIX}${_shunit_test_}" 1057 | eval "${_shunit_test_}" 1058 | if command [ $? -ne ${SHUNIT_TRUE} ]; then 1059 | _shunit_error "${_shunit_test_}() returned non-zero return code." 1060 | __shunit_testSuccess=${SHUNIT_ERROR} 1061 | _shunit_incFailedCount 1062 | fi 1063 | 1064 | # Execute the per-test tear-down function. 1065 | tearDown 1066 | command [ $? -eq ${SHUNIT_TRUE} ] \ 1067 | || _shunit_fatal "tearDown() returned non-zero return code." 1068 | 1069 | # Update stats. 1070 | if command [ ${__shunit_testSuccess} -eq ${SHUNIT_TRUE} ]; then 1071 | __shunit_testsPassed=`expr ${__shunit_testsPassed} + 1` 1072 | else 1073 | __shunit_testsFailed=`expr ${__shunit_testsFailed} + 1` 1074 | fi 1075 | done 1076 | 1077 | unset _shunit_test_ 1078 | } 1079 | 1080 | # Generates the user friendly report with appropriate OK/FAILED message. 1081 | # 1082 | # Args: 1083 | # None 1084 | # Output: 1085 | # string: the report of successful and failed tests, as well as totals. 1086 | _shunit_generateReport() { 1087 | command [ "${__shunit_reportGenerated}" -eq ${SHUNIT_TRUE} ] && return 1088 | 1089 | _shunit_ok_=${SHUNIT_TRUE} 1090 | 1091 | # If no exit code was provided, determine an appropriate one. 1092 | command [ "${__shunit_testsFailed}" -gt 0 \ 1093 | -o ${__shunit_testSuccess} -eq ${SHUNIT_FALSE} ] \ 1094 | && _shunit_ok_=${SHUNIT_FALSE} 1095 | 1096 | echo 1097 | _shunit_msg_="Ran ${__shunit_ansi_cyan}${__shunit_testsTotal}${__shunit_ansi_none}" 1098 | if command [ "${__shunit_testsTotal}" -eq 1 ]; then 1099 | ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} test." 1100 | else 1101 | ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_} tests." 1102 | fi 1103 | 1104 | if command [ ${_shunit_ok_} -eq ${SHUNIT_TRUE} ]; then 1105 | _shunit_msg_="${__shunit_ansi_green}OK${__shunit_ansi_none}" 1106 | command [ "${__shunit_assertsSkipped}" -gt 0 ] \ 1107 | && _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none})" 1108 | else 1109 | _shunit_msg_="${__shunit_ansi_red}FAILED${__shunit_ansi_none}" 1110 | _shunit_msg_="${_shunit_msg_} (${__shunit_ansi_red}failures=${__shunit_assertsFailed}${__shunit_ansi_none}" 1111 | command [ "${__shunit_assertsSkipped}" -gt 0 ] \ 1112 | && _shunit_msg_="${_shunit_msg_},${__shunit_ansi_yellow}skipped=${__shunit_assertsSkipped}${__shunit_ansi_none}" 1113 | _shunit_msg_="${_shunit_msg_})" 1114 | fi 1115 | 1116 | echo 1117 | ${__SHUNIT_CMD_ECHO_ESC} "${_shunit_msg_}" 1118 | __shunit_reportGenerated=${SHUNIT_TRUE} 1119 | 1120 | unset _shunit_msg_ _shunit_ok_ 1121 | } 1122 | 1123 | # Test for whether a function should be skipped. 1124 | # 1125 | # Args: 1126 | # None 1127 | # Returns: 1128 | # boolean: whether the test should be skipped (TRUE/FALSE constant) 1129 | _shunit_shouldSkip() { 1130 | command [ ${__shunit_skip} -eq ${SHUNIT_FALSE} ] && return ${SHUNIT_FALSE} 1131 | _shunit_assertSkip 1132 | } 1133 | 1134 | # Records a successful test. 1135 | # 1136 | # Args: 1137 | # None 1138 | _shunit_assertPass() { 1139 | __shunit_assertsPassed=`expr ${__shunit_assertsPassed} + 1` 1140 | __shunit_assertsTotal=`expr ${__shunit_assertsTotal} + 1` 1141 | } 1142 | 1143 | # Records a test failure. 
1144 | # 1145 | # Args: 1146 | # message: string: failure message to provide user 1147 | _shunit_assertFail() { 1148 | __shunit_testSuccess=${SHUNIT_FALSE} 1149 | _shunit_incFailedCount 1150 | 1151 | \[ $# -gt 0 ] && ${__SHUNIT_CMD_ECHO_ESC} \ 1152 | "${__shunit_ansi_red}ASSERT:${__shunit_ansi_none}$*" 1153 | } 1154 | 1155 | # Increment the count of failed asserts. 1156 | # 1157 | # Args: 1158 | # none 1159 | _shunit_incFailedCount() { 1160 | __shunit_assertsFailed=`expr "${__shunit_assertsFailed}" + 1` 1161 | __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1` 1162 | } 1163 | 1164 | 1165 | # Records a skipped test. 1166 | # 1167 | # Args: 1168 | # None 1169 | _shunit_assertSkip() { 1170 | __shunit_assertsSkipped=`expr "${__shunit_assertsSkipped}" + 1` 1171 | __shunit_assertsTotal=`expr "${__shunit_assertsTotal}" + 1` 1172 | } 1173 | 1174 | # Prepare a script filename for sourcing. 1175 | # 1176 | # Args: 1177 | # script: string: path to a script to source 1178 | # Returns: 1179 | # string: filename prefixed with ./ (if necessary) 1180 | _shunit_prepForSourcing() { 1181 | _shunit_script_=$1 1182 | case "${_shunit_script_}" in 1183 | /*|./*) echo "${_shunit_script_}" ;; 1184 | *) echo "./${_shunit_script_}" ;; 1185 | esac 1186 | unset _shunit_script_ 1187 | } 1188 | 1189 | # Escape a character in a string. 1190 | # 1191 | # Args: 1192 | # c: string: unescaped character 1193 | # s: string: to escape character in 1194 | # Returns: 1195 | # string: with escaped character(s) 1196 | _shunit_escapeCharInStr() { 1197 | command [ -n "$2" ] || return # No point in doing work on an empty string. 1198 | 1199 | # Note: using shorter variable names to prevent conflicts with 1200 | # _shunit_escapeCharactersInString(). 1201 | _shunit_c_=$1 1202 | _shunit_s_=$2 1203 | 1204 | # Escape the character. 1205 | # shellcheck disable=SC1003,SC2086 1206 | echo ''${_shunit_s_}'' |command sed 's/\'${_shunit_c_}'/\\\'${_shunit_c_}'/g' 1207 | 1208 | unset _shunit_c_ _shunit_s_ 1209 | } 1210 | 1211 | # Escape a character in a string. 1212 | # 1213 | # Args: 1214 | # str: string: to escape characters in 1215 | # Returns: 1216 | # string: with escaped character(s) 1217 | _shunit_escapeCharactersInString() { 1218 | command [ -n "$1" ] || return # No point in doing work on an empty string. 1219 | 1220 | _shunit_str_=$1 1221 | 1222 | # Note: using longer variable names to prevent conflicts with 1223 | # _shunit_escapeCharInStr(). 1224 | for _shunit_char_ in '"' '$' "'" '`'; do 1225 | _shunit_str_=`_shunit_escapeCharInStr "${_shunit_char_}" "${_shunit_str_}"` 1226 | done 1227 | 1228 | echo "${_shunit_str_}" 1229 | unset _shunit_char_ _shunit_str_ 1230 | } 1231 | 1232 | # Extract list of functions to run tests against. 1233 | # 1234 | # Args: 1235 | # script: string: name of script to extract functions from 1236 | # Returns: 1237 | # string: of function names 1238 | _shunit_extractTestFunctions() { 1239 | _shunit_script_=$1 1240 | 1241 | # Extract the lines with test function names, strip of anything besides the 1242 | # function name, and output everything on a single line. 1243 | _shunit_regex_='^\s*((function test[A-Za-z0-9_-]*)|(test[A-Za-z0-9_-]* *\(\)))' 1244 | # shellcheck disable=SC2196 1245 | egrep "${_shunit_regex_}" "${_shunit_script_}" \ 1246 | |command sed 's/^[^A-Za-z0-9_-]*//;s/^function //;s/\([A-Za-z0-9_-]*\).*/\1/g' \ 1247 | |xargs 1248 | 1249 | unset _shunit_regex_ _shunit_script_ 1250 | } 1251 | 1252 | #------------------------------------------------------------------------------ 1253 | # Main. 
1254 | # 1255 | 1256 | # Determine the operating mode. 1257 | if command [ $# -eq 0 -o "${1:-}" = '--' ]; then 1258 | __shunit_script=${__SHUNIT_PARENT} 1259 | __shunit_mode=${__SHUNIT_MODE_SOURCED} 1260 | else 1261 | __shunit_script=$1 1262 | command [ -r "${__shunit_script}" ] || \ 1263 | _shunit_fatal "unable to read from ${__shunit_script}" 1264 | __shunit_mode=${__SHUNIT_MODE_STANDALONE} 1265 | fi 1266 | 1267 | # Create a temporary storage location. 1268 | __shunit_tmpDir=`_shunit_mktempDir` 1269 | 1270 | # Provide a public temporary directory for unit test scripts. 1271 | # TODO(kward): document this. 1272 | SHUNIT_TMPDIR="${__shunit_tmpDir}/tmp" 1273 | command mkdir "${SHUNIT_TMPDIR}" 1274 | 1275 | # Setup traps to clean up after ourselves. 1276 | trap '_shunit_cleanup EXIT' 0 1277 | trap '_shunit_cleanup INT' 2 1278 | trap '_shunit_cleanup TERM' 15 1279 | 1280 | # Create phantom functions to work around issues with Cygwin. 1281 | _shunit_mktempFunc 1282 | PATH="${__shunit_tmpDir}:${PATH}" 1283 | 1284 | # Make sure phantom functions are executable. This will bite if `/tmp` (or the 1285 | # current `$TMPDIR`) points to a path on a partition that was mounted with the 1286 | # 'noexec' option. The noexec command was created with `_shunit_mktempFunc()`. 1287 | noexec 2>/dev/null || _shunit_fatal \ 1288 | 'Please declare TMPDIR with path on partition with exec permission.' 1289 | 1290 | # We must manually source the tests in standalone mode. 1291 | if command [ "${__shunit_mode}" = "${__SHUNIT_MODE_STANDALONE}" ]; then 1292 | # shellcheck disable=SC1090 1293 | command . "`_shunit_prepForSourcing \"${__shunit_script}\"`" 1294 | fi 1295 | 1296 | # Configure default output coloring behavior. 1297 | _shunit_configureColor "${SHUNIT_COLOR}" 1298 | 1299 | # Execute the oneTimeSetUp function (if it exists). 1300 | oneTimeSetUp 1301 | command [ $? -eq ${SHUNIT_TRUE} ] \ 1302 | || _shunit_fatal "oneTimeSetUp() returned non-zero return code." 1303 | 1304 | # Command line selected tests or suite selected tests 1305 | if command [ "$#" -ge 2 ]; then 1306 | # Argument $1 is either the filename of tests or '--'; either way, skip it. 1307 | shift 1308 | # Remaining arguments ($2 .. $#) are assumed to be test function names. 1309 | # Interate through all remaining args in "$@" in a POSIX (likely portable) way. 1310 | # Helpful tip: https://unix.stackexchange.com/questions/314032/how-to-use-arguments-like-1-2-in-a-for-loop 1311 | for _shunit_arg_ do 1312 | suite_addTest "${_shunit_arg_}" 1313 | done 1314 | unset _shunit_arg_ 1315 | else 1316 | # Execute the suite function defined in the parent test script. 1317 | # DEPRECATED as of 2.1.0. 1318 | suite 1319 | fi 1320 | 1321 | # If no tests or suite specified, dynamically build a list of functions. 1322 | if command [ -z "${__shunit_suite}" ]; then 1323 | shunit_funcs_=`_shunit_extractTestFunctions "${__shunit_script}"` 1324 | for shunit_func_ in ${shunit_funcs_}; do 1325 | suite_addTest "${shunit_func_}" 1326 | done 1327 | fi 1328 | unset shunit_func_ shunit_funcs_ 1329 | 1330 | # Execute the suite of unit tests. 1331 | _shunit_execSuite 1332 | 1333 | # Execute the oneTimeTearDown function (if it exists). 1334 | oneTimeTearDown 1335 | command [ $? -eq ${SHUNIT_TRUE} ] \ 1336 | || _shunit_fatal "oneTimeTearDown() returned non-zero return code." 1337 | 1338 | # Generate a report summary. 1339 | _shunit_generateReport 1340 | 1341 | # That's it folks. 1342 | command [ "${__shunit_testsFailed}" -eq 0 ] 1343 | exit $? 
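# Minimal usage sketch (hypothetical test file, shown for illustration only; not part
# of the upstream shUnit2 distribution). Any function whose name starts with "test" is
# picked up and run automatically when this file is sourced last, as tests.sh does below:
#
#   #!/usr/bin/env bash
#   test_addition () {
#     assertEquals "1 + 1 should equal 2" 2 "$((1 + 1))"
#   }
#   source "./shunit2.sh"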
1344 | -------------------------------------------------------------------------------- /scripts/tests/tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -uo pipefail 3 | 4 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 6 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." && pwd)" 7 | 8 | [[ -z "${TESTS_DOCKER:-}" ]] \ 9 | && echo "Please run tests with launcher.sh!" && exit 1 10 | 11 | export TADS_ENV=test 12 | readonly _tads="${ROOT_PATH}/tads" 13 | 14 | # *** Functions 15 | test_is_version_gte () { 16 | assertTrue "is_version_gte 2.0.0 1" 17 | assertTrue "is_version_gte 2.0.0 1.0" 18 | assertTrue "is_version_gte 2.0.0 1.0.0" 19 | assertTrue "is_version_gte 2.0.0 1.9.0" 20 | assertTrue "is_version_gte 2.0.0 1.9.9" 21 | assertTrue "is_version_gte 2.0.0 2.0.0" 22 | 23 | assertFalse "is_version_gte 1 2.0.0" 24 | assertFalse "is_version_gte 1.0 2.0.0" 25 | assertFalse "is_version_gte 1.0.0 2.0.0" 26 | assertFalse "is_version_gte 1.9.0 2.0.0" 27 | assertFalse "is_version_gte 1.9.9 2.0.0" 28 | 29 | assertFalse "is_version_gte '' 2.0.0" 30 | assertFalse "is_version_gte 2.0.0" 31 | assertFalse "is_version_gte" 32 | assertFalse "is_version_gte string 2.0.0" 33 | assertFalse "is_version_gte 2.0.0 string" 34 | } 35 | 36 | # *** Common 37 | test_usage () { 38 | local result 39 | 40 | result="$(${_tads})" 41 | assertEquals "usage exit code should be 1" 1 "$?" 42 | assertContains "usage should be printed" \ 43 | "${result}" "Usage" 44 | } 45 | 46 | # *** Vagrant 47 | test_vagrant_usage () { 48 | local result 49 | 50 | result="$(${_tads} vagrant)" 51 | assertEquals "usage exit code should be 1" 1 "$?" 52 | assertContains "usage should be printed" \ 53 | "${result}" "Usage" 54 | } 55 | 56 | test_vagrant_up_not_installed () { 57 | local result 58 | 59 | result="$(${_tads} vagrant up)" 60 | assertEquals "Exit code should be 1" 1 "$?" 61 | assertContains "Warning msg should be printed" \ 62 | "${result}" "Vagrant must be installed on your local machine" 63 | } 64 | 65 | test_vagrant_up_outdated_version () { 66 | local result 67 | 68 | mock_command vagrant "Vagrant 1.0.0" 69 | 70 | result="$(${_tads} vagrant up)" 71 | assertEquals "Exit code should be 1" 1 "$?" 72 | assertContains "Warning msg should be printed" \ 73 | "${result}" "Please upgrade it to at least version 2.0" 74 | } 75 | 76 | test_vagrant_up_virtualbox_not_installed () { 77 | local result 78 | 79 | mock_command vagrant "Vagrant 2.0.0" 80 | 81 | result="$(${_tads} vagrant up)" 82 | assertEquals "Exit code should be 1" 1 "$?" 83 | assertContains "Warning msg should be printed" \ 84 | "${result}" "VirtualBox must be installed on your local machine" 85 | } 86 | 87 | test_vagrant_up_not_configured () { 88 | local result 89 | 90 | mock_command vagrant "Vagrant 2.0.0" 91 | mock_command vboxmanage 92 | 93 | result="$(${_tads} vagrant up)" 94 | assertEquals "Exit code should be 1" 1 "$?" 95 | assertContains "Warning msg should be printed" \ 96 | "${result}" "Please copy vagrant/vagrant.sample.yml to vagrant/vagrant.yml and edit it first!" 97 | } 98 | 99 | test_vagrant_up () { 100 | local result 101 | 102 | mock_command vagrant "Vagrant 2.0.0" 103 | mock_command vboxmanage 104 | mock_file "${ROOT_PATH}/vagrant/vagrant.yml" 105 | 106 | result="$(${_tads} vagrant up)" 107 | assertEquals "Exit code should be 0" 0 "$?" 
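# assertMockedCmdCalled (defined in utils.sh) checks the per-command call log written by
# the mocked binary, so the assertion below verifies that "./tads vagrant up" actually
# invoked `vagrant up`.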
108 | assertMockedCmdCalled "vagrant" "vagrant up" 109 | } 110 | 111 | # *** Terraform 112 | test_terraform_usage () { 113 | local result 114 | 115 | result="$(${_tads} terraform)" 116 | assertEquals "usage exit code should be 1" 1 "$?" 117 | assertContains "usage should be printed" \ 118 | "${result}" "Usage" 119 | } 120 | 121 | test_terraform_apply_not_installed () { 122 | local result 123 | 124 | result="$(${_tads} terraform production apply)" 125 | assertEquals "Exit code should be 1" 1 "$?" 126 | assertContains "Warning msg should be printed" \ 127 | "${result}" "Terraform must be installed on your local machine" 128 | } 129 | 130 | test_terraform_apply_outdated_version () { 131 | local result 132 | 133 | mock_command terraform "Terraform v0.1" 134 | 135 | result="$(${_tads} terraform production apply)" 136 | assertEquals "Exit code should be 1" 1 "$?" 137 | assertContains "Warning msg should be printed" \ 138 | "${result}" "Please upgrade it to at least version 0.12" 139 | } 140 | 141 | test_terraform_apply_unknown_env () { 142 | local result 143 | 144 | mock_command terraform "Terraform v0.12.12" 145 | 146 | result="$(${_tads} terraform test apply)" 147 | assertEquals "Exit code should be 1" 1 "$?" 148 | assertContains "Warning msg should be printed" \ 149 | "${result}" "Terraform ENVIRONMENT does not exist:" 150 | } 151 | 152 | test_terraform_apply () { 153 | local result 154 | 155 | local additional_mock_code 156 | read -r -d '' additional_mock_code <<'EOF' 157 | case "$@" in 158 | "output ssh_user") 159 | echo "ubuntu" 160 | ;; 161 | 162 | "output -json manager_ips") 163 | echo '["254.254.254.1","254.254.254.2","254.254.254.3"]' 164 | ;; 165 | 166 | "output -json worker_ips") 167 | echo '["254.254.254.4"]' 168 | ;; 169 | esac 170 | EOF 171 | 172 | local expected_inventory 173 | read -r -d '' expected_inventory <<'EOF' 174 | # Inventory file for production environment 175 | # Automatically generated by ./tads terraform 176 | 177 | # Manager nodes 178 | manager-1 ansible_user=ubuntu ansible_host=254.254.254.1 179 | manager-2 ansible_user=ubuntu ansible_host=254.254.254.2 180 | manager-3 ansible_user=ubuntu ansible_host=254.254.254.3 181 | 182 | # Worker nodes 183 | worker-1 ansible_user=ubuntu ansible_host=254.254.254.4 184 | 185 | [production] 186 | manager-[1:3] 187 | worker-[1:1] 188 | 189 | [docker:children] 190 | production 191 | 192 | [production_encrypted:children] 193 | production 194 | 195 | [dockerswarm_manager] 196 | manager-[1:3] 197 | 198 | [dockerswarm_worker] 199 | worker-[1:1] 200 | 201 | [docker:vars] 202 | dockerswarm_iface=eth0 203 | EOF 204 | 205 | mock_command terraform "Terraform v0.12.12" "${additional_mock_code}" 206 | 207 | result="$(${_tads} terraform production apply)" 208 | assertEquals "Exit code should be 0" 0 "$?" 209 | assertMockedCmdCalled "terraform" "terraform apply" 210 | assertFileContentEquals "${ROOT_PATH}/ansible/inventories/production" "${expected_inventory}" 211 | rm -f "${ROOT_PATH}/ansible/inventories/production" 212 | } 213 | 214 | # *** Ansible 215 | test_ansible_usage () { 216 | local result 217 | 218 | result="$(${_tads} ansible)" 219 | assertEquals "usage exit code should be 1" 1 "$?" 220 | assertContains "usage should be printed" \ 221 | "${result}" "Usage" 222 | } 223 | 224 | test_ansible_not_installed () { 225 | local result 226 | 227 | result="$(${_tads} ansible localhost -m command -a ls)" 228 | assertEquals "usage exit code should be 1" 1 "$?" 
229 | assertContains "Warning msg should be printed" \ 230 | "${result}" "Ansible must be installed on your local machine" 231 | } 232 | 233 | test_ansible_outdated_version () { 234 | local result 235 | 236 | mock_command ansible "ansible 2.7.0" 237 | 238 | result="$(${_tads} ansible localhost -m command -a ls)" 239 | assertEquals "usage exit code should be 1" 1 "$?" 240 | assertContains "Warning msg should be printed" \ 241 | "${result}" "Please upgrade it to at least version 2.8" 242 | } 243 | 244 | test_ansible_local () { 245 | local result 246 | 247 | mock_command ansible "ansible 2.9.1" 248 | 249 | result="$(${_tads} ansible localhost -m command -a ls)" 250 | assertEquals "Exit code should be 0" 0 "$?" 251 | assertMockedCmdCalled \ 252 | "ansible" \ 253 | "ansible -i ${ROOT_PATH}/ansible/inventories/localhost -D -c local -m command -a ls" 254 | } 255 | 256 | test_ansible_vagrant_not_created () { 257 | local result 258 | 259 | mock_command ansible "ansible 2.9.1" 260 | 261 | result="$(${_tads} ansible vagrant -m command -a ls)" 262 | assertEquals "Exit code should be 1" 1 "$?" 263 | assertContains "Warning msg should be printed" \ 264 | "${result}" "Impossible to find vagrant auto-generated inventory file" 265 | } 266 | 267 | test_ansible_vagrant () { 268 | local result 269 | 270 | mock_command ansible "ansible 2.9.1" 271 | mock_file "${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory" 272 | 273 | result="$(${_tads} ansible vagrant -m command -a ls)" 274 | assertEquals "Exit code should be 0" 0 "$?" 275 | assertMockedCmdCalled \ 276 | "ansible" \ 277 | "ansible -i ${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory -D -m command -a ls" 278 | } 279 | 280 | test_ansible_production_not_created () { 281 | local result 282 | 283 | mock_command ansible "ansible 2.9.1" 284 | 285 | result="$(${_tads} ansible production -m command -a ls)" 286 | assertEquals "Exit code should be 1" 1 "$?" 287 | assertContains "Warning msg should be printed" \ 288 | "${result}" "Unknown ENVIRONMENT: production" 289 | } 290 | 291 | test_ansible_production () { 292 | local result 293 | 294 | mock_command ansible "ansible 2.9.1" 295 | mock_file "${ROOT_PATH}/ansible/inventories/production" 296 | 297 | result="$(${_tads} ansible production -m command -a ls)" 298 | assertEquals "Exit code should be 0" 0 "$?" 299 | assertMockedCmdCalled "ansible" "ansible -i ${ROOT_PATH}/ansible/inventories/production -D -m command -a ls" 300 | } 301 | 302 | # *** Ansible-Playbook 303 | test_ansible_playbook_usage () { 304 | local result 305 | 306 | result="$(${_tads} ansible-playbook)" 307 | assertEquals "usage exit code should be 1" 1 "$?" 308 | assertContains "usage should be printed" \ 309 | "${result}" "Usage" 310 | } 311 | 312 | test_ansible_playbook_not_installed () { 313 | local result 314 | 315 | result="$(${_tads} ansible-playbook localhost all)" 316 | assertEquals "usage exit code should be 1" 1 "$?" 317 | assertContains "Warning msg should be printed" \ 318 | "${result}" "Ansible must be installed on your local machine" 319 | } 320 | 321 | test_ansible_playbook_outdated_version () { 322 | local result 323 | 324 | mock_command ansible "ansible 2.7.0" 325 | 326 | result="$(${_tads} ansible-playbook localhost all)" 327 | assertEquals "usage exit code should be 1" 1 "$?" 
328 | assertContains "Warning msg should be printed" \ 329 | "${result}" "Please upgrade it to at least version 2.8" 330 | } 331 | 332 | test_ansible_playbook_local () { 333 | local result 334 | 335 | mock_command ansible "ansible 2.9.1" 336 | mock_command ansible-galaxy 337 | mock_command ansible-playbook 338 | 339 | result="$(${_tads} ansible-playbook localhost all)" 340 | assertEquals "Exit code should be 0" 0 "$?" 341 | assertMockedCmdCalled \ 342 | "ansible-playbook" \ 343 | "ansible-playbook -i ${ROOT_PATH}/ansible/inventories/localhost -D -c local ${ROOT_PATH}/ansible/all.yml --ask-become-pass" 344 | } 345 | 346 | test_ansible_playbook_requirements () { 347 | local result 348 | 349 | mock_command ansible "ansible 2.9.1" 350 | mock_command ansible-galaxy 351 | mock_command ansible-playbook 352 | 353 | result="$(${_tads} ansible-playbook localhost all)" 354 | assertMockedCmdCalled \ 355 | "ansible-galaxy" \ 356 | "ansible-galaxy role install -r ${ROOT_PATH}/ansible/requirements.yml" 357 | } 358 | 359 | test_ansible_playbook_vagrant_not_created () { 360 | local result 361 | 362 | mock_command ansible "ansible 2.9.1" 363 | mock_command ansible-galaxy 364 | mock_command ansible-playbook 365 | 366 | result="$(${_tads} ansible-playbook vagrant all)" 367 | assertEquals "Exit code should be 1" 1 "$?" 368 | assertContains "Warning msg should be printed" \ 369 | "${result}" "Impossible to find vagrant auto-generated inventory file" 370 | } 371 | 372 | test_ansible_playbook_vagrant () { 373 | local result 374 | 375 | mock_command ansible "ansible 2.9.1" 376 | mock_command ansible-galaxy 377 | mock_command ansible-playbook 378 | mock_file "${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory" 379 | 380 | result="$(${_tads} ansible-playbook vagrant all)" 381 | assertEquals "Exit code should be 0" 0 "$?" 382 | assertMockedCmdCalled \ 383 | "ansible-playbook" \ 384 | "ansible-playbook -i ${ROOT_PATH}/vagrant/.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory -D ${ROOT_PATH}/ansible/all.yml --ask-become-pass" 385 | } 386 | 387 | test_ansible_playbook_production_not_created () { 388 | local result 389 | 390 | mock_command ansible "ansible 2.9.1" 391 | mock_command ansible-galaxy 392 | mock_command ansible-playbook 393 | 394 | result="$(${_tads} ansible-playbook production all)" 395 | assertEquals "Exit code should be 1" 1 "$?" 396 | assertContains "Warning msg should be printed" \ 397 | "${result}" "Unknown ENVIRONMENT: production" 398 | } 399 | 400 | test_ansible_playbook_production_no_key () { 401 | local result 402 | 403 | mock_command ansible "ansible 2.9.1" 404 | mock_command ansible-galaxy 405 | mock_command ansible-playbook 406 | mock_file "${ROOT_PATH}/ansible/inventories/production" 407 | 408 | result="$(${_tads} ansible-playbook production all)" 409 | assertEquals "Exit code should be 1" 1 "$?" 410 | assertContains "Warning msg should be printed" \ 411 | "${result}" "Vault key not found for ENVIRONMENT: production" 412 | } 413 | 414 | test_ansible_playbook_production () { 415 | local result 416 | 417 | mock_command ansible "ansible 2.9.1" 418 | mock_command ansible-galaxy 419 | mock_command ansible-playbook 420 | mock_file "${ROOT_PATH}/ansible/inventories/production" 421 | mock_file "${ROOT_PATH}/ansible/vault_keys/production" 422 | 423 | result="$(${_tads} ansible-playbook production all)" 424 | assertEquals "Exit code should be 0" 0 "$?" 
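# The literal /tmp/tads prefix expected in the --vault-id below is ROOT_PATH as seen
# inside the test container: launcher.sh mounts the repository at /tmp/tads-host and
# entrypoint.sh copies it to /tmp/tads before this suite runs.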
425 | assertMockedCmdCalled \ 426 | "ansible-playbook" \ 427 | "ansible-playbook -i ${ROOT_PATH}/ansible/inventories/production -D --vault-id production@/tmp/tads/ansible/vault_keys/production ${ROOT_PATH}/ansible/all.yml" 428 | } 429 | 430 | # *** Ansible-Vault 431 | test_ansible_vault_usage () { 432 | local result 433 | 434 | result="$(${_tads} ansible-vault)" 435 | assertEquals "usage exit code should be 1" 1 "$?" 436 | assertContains "usage should be printed" \ 437 | "${result}" "Usage" 438 | } 439 | 440 | test_ansible_vault_init_key () { 441 | local result 442 | 443 | mock_command ansible "ansible 2.9.1" 444 | 445 | result="$(${_tads} ansible-vault test init-key)" 446 | assertEquals "usage exit code should be 0" 0 "$?" 447 | assertFileExists "${ROOT_PATH}/ansible/vault_keys/test" 448 | 449 | local key 450 | key="$(cat "${ROOT_PATH}/ansible/vault_keys/test")" 451 | assertEquals "Key must be 256 characters long" ${#key} 256 452 | 453 | rm -f "${ROOT_PATH}/ansible/vault_keys/test" 454 | } 455 | 456 | test_ansible_vault_no_key () { 457 | local result 458 | 459 | mock_command ansible "ansible 2.9.1" 460 | 461 | result="$(${_tads} ansible-vault production view)" 462 | assertEquals "usage exit code should be 1" 1 "$?" 463 | assertContains "Warning msg should be printed" \ 464 | "${result}" "Vault key not found for ENVIRONMENT: production" 465 | } 466 | 467 | test_ansible_vault () { 468 | local result 469 | 470 | mock_command ansible "ansible 2.9.1" 471 | mock_command ansible-vault 472 | mock_file "${ROOT_PATH}/ansible/vault_keys/production" 473 | 474 | result="$(${_tads} ansible-vault production view)" 475 | assertEquals "usage exit code should be 0" 0 "$?" 476 | assertMockedCmdCalled \ 477 | "ansible-vault" \ 478 | "ansible-vault view --vault-id production@${ROOT_PATH}/ansible/vault_keys/production" 479 | } 480 | 481 | 482 | 483 | 484 | # *** shunit2 485 | oneTimeSetUp () { 486 | # shellcheck source=scripts/tests/utils.sh 487 | source "${SELF_PATH}/utils.sh" 488 | 489 | # shellcheck source=scripts/includes/common.sh 490 | source "${SELF_PATH}/../includes/common.sh" 491 | } 492 | 493 | setUp () { 494 | setup_mocking 495 | } 496 | 497 | tearDown () { 498 | teardown_mocking 499 | } 500 | 501 | # shellcheck source=scripts/tests/shunit2.sh 502 | source "${SELF_PATH}/shunit2.sh" 503 | -------------------------------------------------------------------------------- /scripts/tests/utils.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | readonly CMD_MOCKS_PATH=/tmp/cmd_mocks 4 | readonly CMD_MOCKS_LOGS_PATH=/tmp/cmd_mocks_logs 5 | 6 | setup_mocking () { 7 | # Add mocks directory to $PATH 8 | mkdir "${CMD_MOCKS_LOGS_PATH}" 9 | mkdir "${CMD_MOCKS_PATH}" 10 | PATH="$PATH:${CMD_MOCKS_PATH}" 11 | 12 | # Init file mocks array 13 | FILE_MOCKS=() 14 | } 15 | 16 | teardown_mocking () { 17 | # Debug 18 | if [[ "${TADS_DEBUG:-}" == true ]]; then 19 | local file 20 | for file in "${CMD_MOCKS_LOGS_PATH}"/*; do 21 | [[ "${file}" == "${CMD_MOCKS_LOGS_PATH}/*" ]] && break 22 | echo "** DEBUG | calls for $(basename "${file}"):" 23 | cat "${file}" 24 | done 25 | fi 26 | 27 | # Remove command mocks 28 | rm -rf "${CMD_MOCKS_LOGS_PATH}" 29 | rm -rf "${CMD_MOCKS_PATH}" 30 | 31 | # Remove file mocks 32 | for file_path in "${FILE_MOCKS[@]}"; do 33 | rm -rf "${file_path}" 34 | done 35 | } 36 | 37 | mock_command () { 38 | local command_name 39 | local command_path 40 | command_name="$1" 41 | command_path="${CMD_MOCKS_PATH}/${command_name}" 42 | 43 | local 
version_string 44 | version_string="${2:-}" 45 | 46 | local additional_code 47 | additional_code="${3:-}" 48 | 49 | cat <<EOT >> "${command_path}" 50 | #!/usr/bin/env bash 51 | set -euo pipefail 52 | 53 | if [[ "\${1:-}" == "--version" ]]; then 54 | echo "${version_string}" 55 | fi 56 | 57 | echo "\$(basename "\$0") \$@" >> ${CMD_MOCKS_LOGS_PATH}/${command_name} 58 | 59 | ${additional_code} 60 | EOT 61 | 62 | chmod u+x "${command_path}" 63 | } 64 | 65 | assertMockedCmdCalled () { 66 | local command_name 67 | local expected_result 68 | command_name="$1" 69 | expected_result="$2" 70 | 71 | local result 72 | [[ -f "${CMD_MOCKS_LOGS_PATH}/${command_name}" ]] \ 73 | && result="$(cat "${CMD_MOCKS_LOGS_PATH}/${command_name}")" || result="" 74 | 75 | assertContains "Mocked '${command_name}' should have been called" "${result}" "${expected_result}" 76 | } 77 | 78 | mock_file () { 79 | local file_path 80 | local file_dir 81 | local content 82 | file_path="$1" 83 | file_dir="$(dirname "${file_path}")" 84 | content="${2:-}" 85 | 86 | [[ ! -d "${file_dir}" ]] && mkdir -p "${file_dir}" 87 | 88 | echo "${content}" > "${file_path}" 89 | 90 | FILE_MOCKS+=("${file_path}") 91 | } 92 | 93 | assertFileExists () { 94 | local file_path 95 | file_path="$1" 96 | 97 | [[ ! -f "${file_path}" ]] && fail "File should exist: ${file_path}" 98 | } 99 | 100 | assertFileContentEquals () { 101 | local file_path 102 | local expected_content 103 | file_path="$1" 104 | expected_content="$2" 105 | 106 | assertFileExists "${file_path}" 107 | 108 | local content 109 | content="$(cat "${file_path}")" 110 | 111 | assertEquals "File content is not as expected" "${expected_content}" "${content}" 112 | } 113 | -------------------------------------------------------------------------------- /scripts/tests/watch.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 5 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 6 | readonly ROOT_PATH="$(cd "${SELF_PATH}/../.." && pwd)" 7 | 8 | "${ROOT_PATH}"/scripts/tests/launcher.sh 9 | while inotifywait -e close_write "${ROOT_PATH}"/scripts/**/*.sh "${ROOT_PATH}/tads"; do 10 | "${ROOT_PATH}"/scripts/tests/launcher.sh 11 | done 12 | -------------------------------------------------------------------------------- /tads: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # T.A.D.S. main script 3 | # 4 | # Usage: ./tads COMMAND 5 | # 6 | 7 | set -euo pipefail 8 | 9 | readonly SELF_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 10 | readonly SELF_NAME="$(basename "${BASH_SOURCE[0]}")" 11 | 12 | # shellcheck source=scripts/includes/common.sh 13 | source "${SELF_PATH}/scripts/includes/common.sh" 14 | 15 | if [[ "${TADS_ENV:-}" == "test" ]]; then 16 | export TADS_VERBOSE=false 17 | else 18 | export TADS_VERBOSE=true 19 | fi 20 | 21 | usage() { 22 | local error_msg=${1:-} 23 | local cmd="./${SELF_NAME}" 24 | 25 | if [[ -n $error_msg ]]; then 26 | echo "" 27 | echo_red "${error_msg}" 28 | fi 29 | 30 | cat <<- EOF 31 | 32 | Usage: ${cmd} COMMAND 33 | 34 | A companion CLI to perform Terraform, Ansible and Docker Swarm (T.A.D.S.) tasks easily 35 | 36 | COMMANDS: 37 | install-dependencies Install T.A.D.S. 
dependencies 38 | vagrant Manage your local VMs (for test purpose) 39 | terraform Manage your cloud VMs 40 | ansible Execute a custom Ansible module on your VMs 41 | ansible-vault Manage your sensitive files 42 | ansible-playbook Execute an Ansible playbook on your VMs (provision or deploy) 43 | 44 | Run ${cmd} COMMAND to get some help regarding a specific command 45 | 46 | EOF 47 | exit 1 48 | } 49 | 50 | main () { 51 | local command="${1:-}" 52 | local commands_path="${SELF_PATH}/scripts/commands" 53 | 54 | case "${command}" in 55 | install-dependencies) 56 | shift 57 | "${commands_path}/install-dependencies.sh" "$@" 58 | ;; 59 | vagrant) 60 | shift 61 | "${commands_path}/vagrant.sh" "$@" 62 | ;; 63 | terraform) 64 | shift 65 | "${commands_path}/terraform.sh" "$@" 66 | ;; 67 | ansible) 68 | shift 69 | "${commands_path}/ansible.sh" "$@" 70 | ;; 71 | ansible-vault) 72 | shift 73 | "${commands_path}/ansible-vault.sh" "$@" 74 | ;; 75 | ansible-playbook) 76 | shift 77 | "${commands_path}/ansible-playbook.sh" "$@" 78 | ;; 79 | "") 80 | usage 81 | ;; 82 | *) 83 | usage "Unknown COMMAND: ${command}" 84 | ;; 85 | esac 86 | } 87 | 88 | main "$@" 89 | -------------------------------------------------------------------------------- /terraform/environments/production/.gitignore: -------------------------------------------------------------------------------- 1 | .terraform 2 | *.tfstate.backup 3 | -------------------------------------------------------------------------------- /terraform/environments/production/main.tf: -------------------------------------------------------------------------------- 1 | ### 2 | # Terraform production AWS example 3 | ## 4 | 5 | # Do not hardcode credentials here 6 | # Use environment variables or AWS CLI profile 7 | provider "aws" { 8 | version = "~> 2.36" 9 | region = "us-east-1" 10 | } 11 | 12 | module "tads" { 13 | source = "../../modules/aws_tads" 14 | 15 | environment = "production" 16 | 17 | swarm_vpc_cidr = "172.21.0.0/16" 18 | swarm_vpc_subnets = [ 19 | "172.21.0.0/20", # AZ a 20 | "172.21.16.0/20", # AZ b 21 | "172.21.32.0/20" # AZ c 22 | ] 23 | swarm_nb_manager_nodes = 3 24 | swarm_nb_worker_nodes = 1 25 | aws_nodes_instance_type = "t2.micro" # free tier 26 | 27 | # @see other variables in modules/aws_tads/variables.tf 28 | } 29 | -------------------------------------------------------------------------------- /terraform/environments/production/outputs.tf: -------------------------------------------------------------------------------- 1 | output "elb_url" { 2 | value = "${module.tads.elb_url}" 3 | } 4 | 5 | output "manager_ips" { 6 | value = "${module.tads.manager_ips}" 7 | } 8 | 9 | output "worker_ips" { 10 | value = "${module.tads.worker_ips}" 11 | } 12 | 13 | output "ssh_user" { 14 | value = "${module.tads.ssh_user}" 15 | } 16 | -------------------------------------------------------------------------------- /terraform/modules/aws_tads/data.tf: -------------------------------------------------------------------------------- 1 | # Available AZs in the current region 2 | data "aws_availability_zones" "available" {} 3 | 4 | # Latest Ubuntu 18.04 LTS AMI in the current region 5 | data "aws_ami" "latest-ubuntu" { 6 | most_recent = true 7 | owners = ["099720109477"] # Canonical 8 | 9 | filter { 10 | name = "name" 11 | values = ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-*"] 12 | } 13 | 14 | filter { 15 | name = "virtualization-type" 16 | values = ["hvm"] 17 | } 18 | 19 | filter { 20 | name = "root-device-type" 21 | values = ["ebs"] 22 | } 23 | 24 
| filter { 25 | name = "architecture" 26 | values = ["x86_64"] 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /terraform/modules/aws_tads/elb.tf: -------------------------------------------------------------------------------- 1 | # A security group for the ELB so it is accessible via the web 2 | resource "aws_security_group" "swarm_elb" { 3 | name = "tads-${var.environment}-swarm-elb" 4 | vpc_id = "${aws_vpc.swarm.id}" 5 | 6 | # HTTP access from anywhere 7 | ingress { 8 | from_port = 80 9 | to_port = 80 10 | protocol = "tcp" 11 | cidr_blocks = ["0.0.0.0/0"] 12 | } 13 | 14 | # HTTPS access from anywhere 15 | ingress { 16 | from_port = 443 17 | to_port = 443 18 | protocol = "tcp" 19 | cidr_blocks = ["0.0.0.0/0"] 20 | } 21 | 22 | # Outbound internet access 23 | egress { 24 | from_port = 0 25 | to_port = 0 26 | protocol = "-1" 27 | cidr_blocks = ["0.0.0.0/0"] 28 | } 29 | } 30 | 31 | # The ELB (Classic) 32 | # Layer 4, no SSL termination, since it is handled by Traefik 33 | resource "aws_elb" "swarm" { 34 | name = "tads-${var.environment}-swarm" 35 | 36 | subnets = "${aws_subnet.swarm_nodes.*.id}" 37 | security_groups = ["${aws_security_group.swarm_elb.id}"] 38 | 39 | listener { 40 | instance_port = 80 41 | instance_protocol = "tcp" 42 | lb_port = 80 43 | lb_protocol = "tcp" 44 | } 45 | 46 | listener { 47 | instance_port = 443 48 | instance_protocol = "tcp" 49 | lb_port = 443 50 | lb_protocol = "tcp" 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /terraform/modules/aws_tads/nodes.tf: -------------------------------------------------------------------------------- 1 | # A security group for Swarm nodes 2 | resource "aws_security_group" "swarm_node" { 3 | name = "tads-${var.environment}-swarm-node" 4 | vpc_id = "${aws_vpc.swarm.id}" 5 | 6 | # Docker Swarm ports from this security group only 7 | ingress { 8 | description = "Docker container network discovery" 9 | from_port = 7946 10 | to_port = 7946 11 | protocol = "tcp" 12 | self = true 13 | } 14 | ingress { 15 | description = "Docker container network discovery" 16 | from_port = 7946 17 | to_port = 7946 18 | protocol = "udp" 19 | self = true 20 | } 21 | ingress { 22 | description = "Docker overlay network" 23 | from_port = 4789 24 | to_port = 4789 25 | protocol = "udp" 26 | self = true 27 | } 28 | 29 | # SSH for Ansible 30 | ingress { 31 | from_port = 22 32 | to_port = 22 33 | protocol = "tcp" 34 | cidr_blocks = "${var.ssh_cidr_blocks}" 35 | } 36 | 37 | # Outbound internet access 38 | egress { 39 | from_port = 0 40 | to_port = 0 41 | protocol = "-1" 42 | cidr_blocks = ["0.0.0.0/0"] 43 | } 44 | } 45 | 46 | # A security group for Swarm manager nodes only 47 | resource "aws_security_group" "swarm_manager_node" { 48 | name = "tads-${var.environment}-swarm-manager-node" 49 | vpc_id = "${aws_vpc.swarm.id}" 50 | 51 | # HTTP access from ELB 52 | ingress { 53 | from_port = 80 54 | to_port = 80 55 | protocol = "tcp" 56 | security_groups = ["${aws_security_group.swarm_elb.id}"] 57 | } 58 | 59 | # HTTPS access from ELB 60 | ingress { 61 | from_port = 443 62 | to_port = 443 63 | protocol = "tcp" 64 | security_groups = ["${aws_security_group.swarm_elb.id}"] 65 | } 66 | 67 | # Docker Swarm manager only 68 | ingress { 69 | description = "Docker Swarm management between managers" 70 | from_port = 2377 71 | to_port = 2377 72 | protocol = "tcp" 73 | security_groups = ["${aws_security_group.swarm_node.id}"] 74 | } 75 | } 76 | 77 | # Key Pair for SSH 78 | 
resource "aws_key_pair" "local" { 79 | key_name = "${var.ssh_pubkey_name}-${var.environment}" 80 | public_key = "${file(var.ssh_pubkey_path)}" 81 | } 82 | 83 | ## MANAGER NODES 84 | # Spread placement group for Swarm manager nodes 85 | resource "aws_placement_group" "swarm_manager_nodes" { 86 | name = "tads-${var.environment}-swarm-manager-nodes" 87 | strategy = "spread" 88 | } 89 | 90 | # Launch Configuration for Swarm manager nodes 91 | resource "aws_launch_configuration" "swarm_manager_node" { 92 | associate_public_ip_address = true 93 | image_id = "${data.aws_ami.latest-ubuntu.id}" 94 | instance_type = "${var.aws_nodes_instance_type}" 95 | name_prefix = "tads-${var.environment}-swarm-manager-node" 96 | security_groups = ["${aws_security_group.swarm_node.id}", "${aws_security_group.swarm_manager_node.id}"] 97 | key_name = "${aws_key_pair.local.id}" 98 | user_data = "${local.nodes_user_data}" 99 | 100 | lifecycle { 101 | create_before_destroy = true 102 | } 103 | } 104 | 105 | # Swarm manager nodes auto-scaling group 106 | resource "aws_autoscaling_group" "swarm_manager_nodes" { 107 | desired_capacity = "${var.swarm_nb_manager_nodes}" 108 | max_size = "${var.swarm_nb_manager_nodes}" 109 | min_size = "${var.swarm_nb_manager_nodes}" 110 | launch_configuration = "${aws_launch_configuration.swarm_manager_node.id}" 111 | name = "tads-${var.environment}-swarm-manager-nodes" 112 | vpc_zone_identifier = "${aws_subnet.swarm_nodes.*.id}" 113 | placement_group = "${aws_placement_group.swarm_manager_nodes.id}" 114 | load_balancers = ["${aws_elb.swarm.id}"] 115 | 116 | tag { 117 | key = "Name" 118 | value = "tads-${var.environment}-swarm-manager-node" 119 | propagate_at_launch = true 120 | } 121 | 122 | tag { 123 | key = "tads-environment" 124 | value = "${var.environment}" 125 | propagate_at_launch = true 126 | } 127 | 128 | tag { 129 | key = "tads-node-type" 130 | value = "manager" # we use this tag to output instances IPs 131 | propagate_at_launch = true 132 | } 133 | } 134 | 135 | ## WORKER NODES 136 | # Spread placement group for Swarm worker nodes 137 | resource "aws_placement_group" "swarm_worker_nodes" { 138 | name = "tads-${var.environment}-swarm-worker-nodes" 139 | strategy = "spread" 140 | } 141 | 142 | # Launch Configuration for Swarm worker nodes 143 | resource "aws_launch_configuration" "swarm_worker_node" { 144 | associate_public_ip_address = true 145 | image_id = "${data.aws_ami.latest-ubuntu.id}" 146 | instance_type = "${var.aws_nodes_instance_type}" 147 | name_prefix = "tads-${var.environment}-swarm-worker-node" 148 | security_groups = ["${aws_security_group.swarm_node.id}"] 149 | key_name = "${aws_key_pair.local.id}" 150 | user_data = "${local.nodes_user_data}" 151 | 152 | lifecycle { 153 | create_before_destroy = true 154 | } 155 | } 156 | 157 | # Swarm worker nodes auto-scaling group 158 | resource "aws_autoscaling_group" "swarm_worker_nodes" { 159 | desired_capacity = "${var.swarm_nb_worker_nodes}" 160 | max_size = "${var.swarm_nb_worker_nodes}" 161 | min_size = "${var.swarm_nb_worker_nodes}" 162 | launch_configuration = "${aws_launch_configuration.swarm_worker_node.id}" 163 | name = "tads-${var.environment}-swarm-worker-nodes" 164 | vpc_zone_identifier = "${aws_subnet.swarm_nodes.*.id}" 165 | placement_group = "${aws_placement_group.swarm_worker_nodes.id}" 166 | 167 | tag { 168 | key = "Name" 169 | value = "tads-${var.environment}-swarm-worker-node" 170 | propagate_at_launch = true 171 | } 172 | 173 | tag { 174 | key = "tads-environment" 175 | value = "${var.environment}" 176 | 
propagate_at_launch = true 177 | } 178 | 179 | tag { 180 | key = "tads-node-type" 181 | value = "worker" # we use this tag to output instances IPs 182 | propagate_at_launch = true 183 | } 184 | } 185 | 186 | # Bootstrap script for instances 187 | locals { 188 | nodes_user_data = < ["#{host_suffix}[1:#{nb_swarm_nodes}]"], 71 | "vagrant_overrides:children" => ["vagrant"], 72 | "dev:children" => ["vagrant"], 73 | "dev_overrides:children" => ["vagrant"], 74 | 75 | "docker:children" => ["vagrant"], 76 | "docker:vars" => { 77 | "dockerswarm_iface" => "eth1", 78 | "swarm_dev_ip" => "#{subnet}.11" # first host 79 | }, 80 | 81 | "dockerswarm_manager" => ["#{host_suffix}[1:#{nb_swarm_managers}]"], 82 | "dockerswarm_worker" => ansible_dockerswarm_worker_group 83 | } 84 | end 85 | end 86 | 87 | end 88 | end 89 | 90 | end 91 | -------------------------------------------------------------------------------- /vagrant/vagrant.sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This file should be copied to "vagrant.yml" and be customized for this specific machine 3 | 4 | # Subnet to use for the VMs 5 | # Each machine's eth1 interface will be connected to this subnet 6 | subnet: "192.168.78" 7 | 8 | # Number of Docker Swarm nodes 9 | nb_swarm_nodes: 3 10 | 11 | # Memory per Swarm node (in Mb) (you should decrease this value if you suffer memory issues) 12 | memory_per_node: 2048 13 | 14 | # CPUs per node (you should decrease this value if you suffer performance issues) 15 | cpus_per_node: 2 16 | --------------------------------------------------------------------------------