├── .gitattributes ├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── create-servers.sh ├── create-swarm.sh ├── daemon.json ├── dci-aws ├── .gitignore ├── COMMIT ├── ansible.cfg ├── ansible_inventory.tf ├── cloustor.tf ├── common.tf ├── docker.d │ └── conf.yaml ├── elb.tf ├── examples │ ├── terraform.tfvars.centos-7.example │ ├── terraform.tfvars.oraclelinux-7.3.example │ ├── terraform.tfvars.rhel-7.1.example │ ├── terraform.tfvars.rhel-7.2.example │ ├── terraform.tfvars.rhel-7.3.example │ ├── terraform.tfvars.rhel-7.4.example │ ├── terraform.tfvars.sles-12.2.example │ ├── terraform.tfvars.ubuntu-1404.example │ └── terraform.tfvars.ubuntu-1604.example ├── extras.yml ├── group_vars │ ├── all.example │ ├── linux │ ├── managers │ └── windows ├── iam.tf ├── install.yml ├── instances.tf ├── inventory │ └── 2.groups ├── ipallocator.yml ├── logs.yml ├── modules │ └── ansible │ │ ├── inventory.tpl │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── provider.tf │ │ └── variables.tf ├── outputs.tf ├── provider.tf ├── roles │ ├── ansible-requirements │ │ └── tasks │ │ │ └── main.yml │ ├── cloudstor-install │ │ └── tasks │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── Suse.yml │ │ │ └── main.yml │ ├── docker-dtr │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── get-running-replica-id.yml │ │ │ ├── install │ │ │ └── main.yml │ │ │ ├── join │ │ │ └── main.yml │ │ │ ├── logs │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── Suse.yml │ │ │ ├── Windows.yml │ │ │ └── main.yml │ │ │ ├── main.yml │ │ │ ├── uninstall │ │ │ └── main.yml │ │ │ └── update │ │ │ └── main.yml │ ├── docker-ee │ │ ├── .gitignore │ │ ├── .travis.yml │ │ ├── Makefile │ │ ├── defaults │ │ │ └── main.yml │ │ ├── filter_plugins │ │ │ ├── redhat.py │ │ │ └── redhat.pyc │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── install │ │ │ │ ├── .unix.post.yml │ │ │ │ ├── .unix.pre.yml │ │ │ │ ├── Debian.yml │ │ │ │ ├── Debian │ │ │ │ │ ├── Ubuntu.yml │ │ │ │ │ └── 
Ubuntu │ │ │ │ │ │ ├── 14.04.yml │ │ │ │ │ │ └── 16.04.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── Suse.yml │ │ │ │ ├── Windows.yml │ │ │ │ └── main.yml │ │ │ ├── logs │ │ │ │ ├── Debian.yml │ │ │ │ ├── Debian │ │ │ │ │ ├── Ubuntu.yml │ │ │ │ │ └── Ubuntu │ │ │ │ │ │ ├── 14.04.yml │ │ │ │ │ │ └── 16.04.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── RedHat │ │ │ │ │ ├── CentOS.yml │ │ │ │ │ ├── OracleLinux.yml │ │ │ │ │ └── RedHat.yml │ │ │ │ ├── Suse.yml │ │ │ │ ├── Windows.yml │ │ │ │ └── main.yml │ │ │ ├── main.yml │ │ │ ├── uninstall │ │ │ │ ├── .unix.yml │ │ │ │ ├── Debian.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── Suse.yml │ │ │ │ ├── Windows.yml │ │ │ │ └── main.yml │ │ │ └── update │ │ │ │ ├── .unix.pre.yml │ │ │ │ ├── Debian.yml │ │ │ │ ├── RedHat.yml │ │ │ │ ├── Suse.yml │ │ │ │ ├── Windows.yml │ │ │ │ └── main.yml │ │ ├── tests │ │ │ └── main.yml │ │ └── vars │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── RedHat │ │ │ ├── CentOS.yml │ │ │ ├── OracleLinux.yml │ │ │ └── RedHat.yml │ │ │ ├── Suse.yml │ │ │ ├── Windows.yml │ │ │ └── main.yml │ ├── docker-storage │ │ └── tasks │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── RedHat │ │ │ ├── CentOS.yml │ │ │ ├── OracleLinux.yml │ │ │ └── RedHat.yml │ │ │ ├── Suse.yml │ │ │ └── main.yml │ ├── docker-swarm │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── cleanup │ │ │ └── main.yml │ │ │ ├── init │ │ │ └── main.yml │ │ │ ├── join │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── Suse.yml │ │ │ ├── Windows.yml │ │ │ └── main.yml │ │ │ ├── leave │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── Suse.yml │ │ │ ├── Windows.yml │ │ │ └── main.yml │ │ │ ├── main.yml │ │ │ └── validate │ │ │ └── main.yml │ ├── docker-ucp │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ ├── .gitignore │ │ │ ├── .gitkeep │ │ │ ├── makecert.sh │ │ │ └── openssl.cnf.example │ │ ├── meta │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── install │ │ │ └── main.yml │ │ │ ├── logs │ │ │ ├── Debian.yml │ │ │ 
├── RedHat.yml │ │ │ ├── Suse.yml │ │ │ ├── Windows.yml │ │ │ └── main.yml │ │ │ ├── main.yml │ │ │ ├── uninstall │ │ │ └── main.yml │ │ │ ├── update │ │ │ └── main.yml │ │ │ ├── validate-managers │ │ │ └── main.yml │ │ │ └── validate-workers │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── Suse.yml │ │ │ ├── Windows.yml │ │ │ └── main.yml │ ├── host-readiness │ │ └── tasks │ │ │ └── main.yml │ ├── load-balancer │ │ ├── tasks │ │ │ ├── Debian.yml │ │ │ ├── RedHat.yml │ │ │ ├── Sles.yml │ │ │ └── main.yml │ │ └── templates │ │ │ └── haproxy.cfg.j2 │ └── validate │ │ └── tasks │ │ ├── Debian.yml │ │ ├── RedHat.yml │ │ ├── RedHat │ │ ├── CentOS.yml │ │ ├── OracleLinux.yml │ │ └── RedHat.yml │ │ ├── Suse.yml │ │ ├── Windows.yml │ │ └── main.yml ├── s3.tf ├── security-group.tf ├── setup-windows.ps1 ├── terraform.tfvars.example ├── uninstall.yml ├── update.yml ├── variables.tf └── vpc.tf ├── delete-servers.sh ├── enable-monitoring.sh ├── generate-some-votes.sh ├── ghost.dockerapp ├── hash-config-secret.sh ├── logstash.conf ├── make-data.py ├── menu-ee ├── Dockerfile └── index.html ├── menu.dockerapp ├── menu ├── Dockerfile └── index.html ├── old-stack-proxy.yml ├── posta ├── postb ├── secret-ghost-db ├── stack-ee-ghost.yml ├── stack-ee-menu.yml ├── stack-ee-voting.yml ├── stack-elk.override.yml ├── stack-elk.yml ├── stack-ghost.yml ├── stack-menu.yml ├── stack-portainer.yml ├── stack-proxy-global.yml ├── stack-proxy.yml ├── stack-prune.yml ├── stack-rexray.yml ├── stack-sqlite-ghost.yml ├── stack-swarmprom.yml ├── stack-visualizer.yml ├── stack-voting.yml └── voting.dockerapp /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | posta 2 | postb 3 | 4 | 5 | # Created by 
https://www.gitignore.io/api/vim,macos,windows 6 | 7 | ### macOS ### 8 | *.DS_Store 9 | .AppleDouble 10 | .LSOverride 11 | 12 | # Icon must end with two \r 13 | Icon 14 | 15 | # Thumbnails 16 | ._* 17 | 18 | # Files that might appear in the root of a volume 19 | .DocumentRevisions-V100 20 | .fseventsd 21 | .Spotlight-V100 22 | .TemporaryItems 23 | .Trashes 24 | .VolumeIcon.icns 25 | .com.apple.timemachine.donotpresent 26 | 27 | # Directories potentially created on remote AFP share 28 | .AppleDB 29 | .AppleDesktop 30 | Network Trash Folder 31 | Temporary Items 32 | .apdisk 33 | 34 | ### Vim ### 35 | # swap 36 | .sw[a-p] 37 | .*.sw[a-p] 38 | # session 39 | Session.vim 40 | # temporary 41 | .netrwhist 42 | *~ 43 | # auto-generated tag files 44 | tags 45 | 46 | ### Windows ### 47 | # Windows thumbnail cache files 48 | Thumbs.db 49 | ehthumbs.db 50 | ehthumbs_vista.db 51 | 52 | # Folder config file 53 | Desktop.ini 54 | 55 | # Recycle Bin used on file shares 56 | $RECYCLE.BIN/ 57 | 58 | # Windows Installer files 59 | *.cab 60 | *.msi 61 | *.msm 62 | *.msp 63 | 64 | # Windows shortcuts 65 | *.lnk 66 | 67 | 68 | # End of https://www.gitignore.io/api/vim,macos,windows 69 | 70 | # Created by https://www.gitignore.io/api/vim,macos,windows 71 | 72 | ### macOS ### 73 | *.DS_Store 74 | .AppleDouble 75 | .LSOverride 76 | 77 | # Icon must end with two \r 78 | Icon 79 | 80 | # Thumbnails 81 | ._* 82 | 83 | # Files that might appear in the root of a volume 84 | .DocumentRevisions-V100 85 | .fseventsd 86 | .Spotlight-V100 87 | .TemporaryItems 88 | .Trashes 89 | .VolumeIcon.icns 90 | .com.apple.timemachine.donotpresent 91 | 92 | # Directories potentially created on remote AFP share 93 | .AppleDB 94 | .AppleDesktop 95 | Network Trash Folder 96 | Temporary Items 97 | .apdisk 98 | 99 | ### Vim ### 100 | # swap 101 | .sw[a-p] 102 | .*.sw[a-p] 103 | # session 104 | Session.vim 105 | # temporary 106 | .netrwhist 107 | *~ 108 | # auto-generated tag files 109 | tags 110 | 111 | ### Windows 
### 112 | # Windows thumbnail cache files 113 | Thumbs.db 114 | ehthumbs.db 115 | ehthumbs_vista.db 116 | 117 | # Folder config file 118 | Desktop.ini 119 | 120 | # Recycle Bin used on file shares 121 | $RECYCLE.BIN/ 122 | 123 | # Windows Installer files 124 | *.cab 125 | *.msi 126 | *.msm 127 | *.msp 128 | 129 | # Windows shortcuts 130 | *.lnk 131 | 132 | 133 | # End of https://www.gitignore.io/api/vim,macos,windows 134 | 135 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "swarmprom"] 2 | path = swarmprom 3 | url = https://github.com/stefanprodan/swarmprom 4 | [submodule "docker-elk"] 5 | path = docker-elk 6 | url = https://github.com/deviantony/docker-elk 7 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Bret Fisher 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /create-servers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -x 3 | 4 | # create managers servers in digital ocean with pre-set environment vars 5 | # https://docs.docker.com/machine/drivers/digital-ocean/ 6 | 7 | # DO_TOKEN get the token from digitalocean.com (read/write) 8 | # DO_SIZE pick your droplet size from "doctl compute size list" 9 | # SSH_FINGERPRINT in the format of "8d:30:8a..." with a comand like "ssh-keygen -E md5 -lf ~/.ssh/id_rsa.pub" 10 | 11 | for server in {1..3}; do 12 | docker-machine create \ 13 | --driver=digitalocean \ 14 | --digitalocean-access-token="${DO_TOKEN}" \ 15 | --digitalocean-size="${DO_SIZE}" \ 16 | --digitalocean-ssh-key-fingerprint="${SSH_FINGERPRINT}" \ 17 | --digitalocean-tags=dogvscat \ 18 | --digitalocean-private-networking=true \ 19 | dvc${server} & 20 | done 21 | 22 | 23 | # if you wanted to create these locally in virtualbox, you might do this 24 | # remember to check if you have enough RAM 25 | # https://docs.docker.com/machine/drivers/virtualbox/ 26 | 27 | #for server in {1..3}; do 28 | #docker-machine create \ 29 | # --driver=virtualbox \ 30 | # --virtualbox-memory=2048 \ 31 | # dvc${server} & 32 | #done 33 | 34 | # if you wanted to create these locally in hyper-v (windows 10), you might do this from git bash 35 | # remember to check if you have enough RAM and if virtual switch is created 36 | # https://docs.docker.com/machine/drivers/hyper-v/ 37 | 38 | #for server in {1..3}; do 39 | #docker-machine create \ 40 | # --driver=hyperv \ 41 | # --hyperv-memory=2048 \ 42 | # 
--hyperv-virtual-switch="Primary Virtual Swtich" \ 43 | # dvc${server} & 44 | #done 45 | -------------------------------------------------------------------------------- /create-swarm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | # since we created droplets with a private NIC on eth1, lets use that for swarm comms 5 | LEADER_IP=$(docker-machine ssh dvc1 ifconfig eth1 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}') 6 | 7 | # create a swarm as all managers 8 | docker-machine ssh dvc1 docker swarm init --advertise-addr "$LEADER_IP" 9 | 10 | # note that if you use eth1 above (private network in digitalocean) it makes the below 11 | # a bit tricky, because docker-machine lists the public IP's but we need the 12 | # private IP of manager for join commands, so we can't simply envvar the token 13 | # like lots of scripts do... we'd need to fist get private IP of first node 14 | 15 | # TODO: provide flexable numbers at cli for x managers and x workers 16 | JOIN_TOKEN=$(docker-machine ssh dvc1 docker swarm join-token -q manager) 17 | 18 | for i in 2 3; do 19 | docker-machine ssh dvc$i docker swarm join --token "$JOIN_TOKEN" "$LEADER_IP":2377 20 | done 21 | 22 | docker-machine env dvc1 23 | 24 | -------------------------------------------------------------------------------- /daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "metrics-addr" : "0.0.0.0:9323", 3 | "experimental" : true 4 | } 5 | -------------------------------------------------------------------------------- /dci-aws/.gitignore: -------------------------------------------------------------------------------- 1 | # docker ee license 2 | docker_subscription.lic 3 | 4 | #terraform resources 5 | inventory/1.hosts 6 | 7 | # ansible resources 8 | all 9 | .logs 10 | 11 | # client bundles 12 | ucp-bundle-admin 13 | ucp-bundle* 14 | 15 | # Created by 
https://www.gitignore.io/api/vim,macos,ansible,windows,terraform 16 | 17 | ### Ansible ### 18 | *.retry 19 | 20 | ### macOS ### 21 | *.DS_Store 22 | .AppleDouble 23 | .LSOverride 24 | 25 | # Icon must end with two \r 26 | Icon 27 | 28 | # Thumbnails 29 | ._* 30 | 31 | # Files that might appear in the root of a volume 32 | .DocumentRevisions-V100 33 | .fseventsd 34 | .Spotlight-V100 35 | .TemporaryItems 36 | .Trashes 37 | .VolumeIcon.icns 38 | .com.apple.timemachine.donotpresent 39 | 40 | # Directories potentially created on remote AFP share 41 | .AppleDB 42 | .AppleDesktop 43 | Network Trash Folder 44 | Temporary Items 45 | .apdisk 46 | 47 | ### Terraform ### 48 | # Compiled files 49 | *.tfstate 50 | *.tfstate.* 51 | 52 | # Module directory 53 | .terraform/ 54 | 55 | # .tfvars files 56 | *.tfvars 57 | 58 | ### Vim ### 59 | # swap 60 | .sw[a-p] 61 | .*.sw[a-p] 62 | # session 63 | Session.vim 64 | # temporary 65 | .netrwhist 66 | *~ 67 | # auto-generated tag files 68 | tags 69 | 70 | ### Windows ### 71 | # Windows thumbnail cache files 72 | Thumbs.db 73 | ehthumbs.db 74 | ehthumbs_vista.db 75 | 76 | # Folder config file 77 | Desktop.ini 78 | 79 | # Recycle Bin used on file shares 80 | $RECYCLE.BIN/ 81 | 82 | # Windows Installer files 83 | *.cab 84 | *.msi 85 | *.msm 86 | *.msp 87 | 88 | # Windows shortcuts 89 | *.lnk 90 | 91 | 92 | # End of https://www.gitignore.io/api/vim,macos,ansible,windows,terraform 93 | 94 | -------------------------------------------------------------------------------- /dci-aws/COMMIT: -------------------------------------------------------------------------------- 1 | 201c8e4badeeb98fffee04334b27d04cf10ff69b 2 | -------------------------------------------------------------------------------- /dci-aws/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | forks = 12 4 | inventory = inventory 5 | squash_actions = 
apk,apt,dnf,homebrew,package,pacman,pkgng,shell,win_firewall_rule,win_shell,yum,zypper 6 | display_skipped_hosts = false 7 | any_errors_fatal = true 8 | 9 | [ssh_connection] 10 | pipelining = True 11 | control_path = /tmp/ansible-ssh-%%h-%%p-%%r 12 | ssh_args = -C -o ControlMaster=auto -o ControlPersist=1800s 13 | [defaults] 14 | callback_plugins = /etc/ansible/plugins 15 | callback_whitelist = logstash 16 | -------------------------------------------------------------------------------- /dci-aws/ansible_inventory.tf: -------------------------------------------------------------------------------- 1 | # Used in the md5 calculation for the EBS volume tagging. 2 | # Ensures that we can find the drive and that it's unique. 3 | resource "random_string" "aws_stack_id" { 4 | length = 16 5 | special = false 6 | } 7 | 8 | locals { 9 | aws_efs_gp = "${join("", aws_efs_file_system.cloudstor-gp.*.id)}" 10 | aws_efs_maxio = "${join("", aws_efs_file_system.cloudstor-maxio.*.id)}" 11 | } 12 | 13 | # Pass additional options 14 | data "template_file" "extra_opts" { 15 | template = < # Format: sub-xxx-xxx-xxx-xxx 14 | # docker_ee_package_version: 3:17.06.2~ee~7~3-0~ubuntu 15 | # 16 | # docker_ee_subscriptions_centos: # Format: sub-xxx-xxx-xxx-xxx 17 | # docker_ee_package_version: 17.06.2.ee.7-3.el7.centos 18 | # 19 | # docker_ee_subscriptions_redhat: # Format: sub-xxx-xxx-xxx-xxx 20 | # docker_ee_package_version: 17.06.2.ee.7-3.el7.rhel 21 | # 22 | # docker_ee_subscriptions_oracle: # Format: sub-xxx-xxx-xxx-xxx 23 | # docker_ee_package_version: 17.06.2.ee.8-3.el7 24 | # 25 | # docker_ee_subscriptions_sles: # Format: sub-xxx-xxx-xxx-xxx 26 | # docker_ee_package_version: 2:17.06.2.ee.7-3 27 | 28 | 29 | # docker_ee_package_version_win: 17.06.2-ee-7 30 | 31 | # 32 | # UCP. 33 | # 34 | 35 | # docker_ucp_version: 2.2.9 36 | 37 | # Subscription and certificate directories. 38 | # These paths are relative to the root folder (containing inventory/, group_vars/, etc...) 
39 | docker_ucp_certificate_directory: ssl_cert 40 | # docker_ucp_license_path: "{{ playbook_dir }}/docker_subscription.lic" 41 | 42 | # docker_ucp_admin_password: 43 | 44 | # DNS name of the UCP load balancer endpoint (e.g. "ucp.example.com"). 45 | # This is also used for the SAN (subject alternative names) in any generated UCP certificate, 46 | # and as the address which DTR nodes will use to contact UCP. 47 | # docker_ucp_lb: 48 | 49 | # 50 | # Docker storage volume. 51 | # 52 | # If this is set to a block device then the device will be formatted as ext4 (if not already formatted), 53 | # and mounted at /var/lib/docker. 54 | # docker_storage_volume="/dev/xvdb" 55 | # DTR 56 | # 57 | 58 | # docker_dtr_version: 2.4.3 59 | # docker_dtr_replica_id: # (A 12-character long hexadecimal number: e.g. 1234567890ab) 60 | 61 | # DNS name of the DTR load balancer endpoint (e.g. "dtr.example.com"). 62 | # This is also used for the SAN (subject alternative names) in any generated DTR certificate. 63 | # docker_dtr_lb: 64 | 65 | # The address the initial swarm leader will advertise. The default behavior is to use 66 | # the publicly routable address of `ansible_default_ipv4`. Override this to force swarm to 67 | # listen on a non-default interface 68 | # docker_swarm_listen_address: 69 | 70 | # 71 | # Cloudstor 72 | # 73 | 74 | # Set to "disabled" to prevent the plugin being installed (even if cloudstor_plugin_options is set). 
75 | cloudstor_plugin_version: 1.0 76 | 77 | # If defined and cloudstor_plugin_version is not "disabled", the playbook will 78 | # install the Cloudstor plugin with these settings on each node: 79 | # cloudstor_plugin_options: "CLOUD_PLATFORM=AWS EFS_ID_REGULAR=fs-AAA EFS_ID_MAXIO=fs-BBB EFS_SUPPORTED=1 AWS_STACK_ID=S2UCVL19bO2DOhi6" 80 | -------------------------------------------------------------------------------- /dci-aws/group_vars/linux: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_become: yes 3 | ansible_user: ubuntu 4 | -------------------------------------------------------------------------------- /dci-aws/group_vars/managers: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BretFisher/dogvscat/fc16b7359b2e06002b0dc8d67ce8115fbe30a4f5/dci-aws/group_vars/managers -------------------------------------------------------------------------------- /dci-aws/group_vars/windows: -------------------------------------------------------------------------------- 1 | # it is suggested that these be encrypted with ansible-vault: 2 | # ansible-vault edit group_vars/windows.yml 3 | 4 | windows_enabled: yes 5 | ansible_connection: winrm 6 | ansible_winrm_server_cert_validation: ignore 7 | ansible_become: no 8 | ansible_winrm_operation_timeout_sec: 250 9 | ansible_winrm_read_timeout_sec: 360 10 | ansible_winrm_message_encryption: never 11 | 12 | #ansible_port: 5986 13 | #ansible_winrm_transport: ntlm 14 | #ansible_user: Administrator 15 | #ansible_password: 16 | -------------------------------------------------------------------------------- /dci-aws/iam.tf: -------------------------------------------------------------------------------- 1 | # IAM: 2 | 3 | # Create an IAM role for the Web Servers. 
4 | resource "aws_iam_role" "dtr_iam_role" { 5 | name = "${var.deployment}_dtr_iam_role" 6 | 7 | assume_role_policy = < 0 }}" 12 | - name: "Check if Secret is set." 13 | shell: "docker secret ls -q --filter name=azure_ucp_admin.toml" 14 | register: _aznicips_already_set 15 | - name: "Set _aznicips_already_set fact." 16 | set_fact: 17 | aznicips_already_set: "{{ _aznicips_already_set.stdout_lines | length > 0 }}" 18 | - name: "Setup and Create az-nic-ips secret" 19 | when: "not aznicips_already_set" 20 | block: 21 | - name: "Create Docker secret with Azure Service Principal" 22 | shell: docker secret create azure_ucp_admin.toml - 23 | args: 24 | stdin: | 25 | AZURE_CLIENT_ID = "{{ azure_app_id }}" 26 | AZURE_CLIENT_SECRET = "{{ azure_app_secret }}" 27 | AZURE_TENANT_ID = "{{ azure_tenant_id }}" 28 | AZURE_SUBSCRIPTION_ID = "{{ azure_subscription_id }}" 29 | - name: "Setup and Create az-nic-ips service" 30 | when: "not aznicips_already_running" 31 | block: 32 | - name: "Deploy global service to execute on all nodes" 33 | shell: > 34 | docker service create 35 | --mode=global 36 | --name az-nic-ips 37 | --secret=azure_ucp_admin.toml 38 | --log-driver json-file 39 | --log-opt max-size=1m 40 | --env IPCOUNT=128 41 | --name ipallocator 42 | docker4x/az-nic-ips:latest -------------------------------------------------------------------------------- /dci-aws/logs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Get the logs. 
4 | # 5 | # TODO: Add load-balancer log collection 6 | - hosts: all:!load-balancers 7 | roles: 8 | - { role: docker-ee, docker_ee_role_mode: "logs" } 9 | - { role: docker-ucp, docker_ucp_role_mode: "logs" } 10 | - { role: docker-dtr, docker_dtr_role_mode: "logs" } 11 | tags: 12 | - logs 13 | - always 14 | no_log: True 15 | -------------------------------------------------------------------------------- /dci-aws/modules/ansible/inventory.tpl: -------------------------------------------------------------------------------- 1 | # This hostfile has been generated by terraform. 2 | 3 | [linux-ucp-manager-primary] 4 | ${linux_manager_primary} 5 | 6 | [linux-ucp-manager-replicas] 7 | ${linux_manager_replicas} 8 | 9 | [linux-dtr-worker-primary] 10 | ${linux_dtr_primary} 11 | 12 | [linux-dtr-worker-replicas] 13 | ${linux_dtr_replicas} 14 | 15 | [linux-workers] 16 | ${linux_workers} 17 | 18 | [windows-workers] 19 | ${windows_workers} 20 | 21 | [linux-databases] 22 | ${linux_databases} 23 | 24 | [linux-build-servers] 25 | ${linux_build} 26 | 27 | [windows-databases] 28 | ${windows_databases} 29 | 30 | [windows-build-servers] 31 | ${windows_build} 32 | 33 | [all:vars] 34 | infra_stack=${infra_stack} 35 | ${load_balancers} 36 | ${extra_vars} 37 | 38 | [ucp-load-balancer] 39 | ${linux_ucp_lbs} 40 | 41 | [dtr-load-balancer] 42 | ${linux_dtr_lbs} 43 | -------------------------------------------------------------------------------- /dci-aws/modules/ansible/main.tf: -------------------------------------------------------------------------------- 1 | # Split Primary and Replicas 2 | locals { 3 | linux_ucp_manager_primary_name = "${element(var.linux_ucp_manager_names, 0)}" # Primary manager 4 | linux_ucp_manager_primary_ip = "${element(var.linux_ucp_manager_ips, 0)}" # Primary manager 5 | 6 | linux_ucp_manager_replica_names = "${slice(var.linux_ucp_manager_names, 1, length(var.linux_ucp_manager_names))}" 7 | linux_ucp_manager_replica_ips = "${slice(var.linux_ucp_manager_ips, 1, 
length(var.linux_ucp_manager_ips))}" 8 | 9 | linux_dtr_worker_primary_name = "${element(var.linux_dtr_worker_names, 0)}" # Linux is always primary DTR 10 | linux_dtr_worker_primary_ip = "${element(var.linux_dtr_worker_ips, 0)}" 11 | 12 | linux_dtr_worker_replica_names = "${slice(var.linux_dtr_worker_names, 1, length(var.linux_dtr_worker_names))}" # Linux DTR replica 13 | linux_dtr_worker_replica_ips = "${slice(var.linux_dtr_worker_ips, 1, length(var.linux_dtr_worker_ips))}" # Linux DTR replica 14 | 15 | load_balancers = "${var.docker_ucp_lb == "" ? "#" : ""}docker_ucp_lb=${var.docker_ucp_lb}\n${var.docker_dtr_lb == "" ? "#" : ""}docker_dtr_lb=${var.docker_dtr_lb}" 16 | } 17 | 18 | # Template for ansible inventory 19 | data "template_file" "inventory" { 20 | template = "${file("${path.module}/inventory.tpl")}" 21 | 22 | vars { 23 | linux_manager_primary = "${format("%s ansible_user=%s ansible_host=%s", local.linux_ucp_manager_primary_name, var.linux_user, local.linux_ucp_manager_primary_ip)}" 24 | linux_manager_replicas = "${join("\n", formatlist("%s ansible_user=%s ansible_host=%s", local.linux_ucp_manager_replica_names, var.linux_user, local.linux_ucp_manager_replica_ips))}" 25 | linux_dtr_primary = "${format("%s ansible_user=%s ansible_host=%s", local.linux_dtr_worker_primary_name, var.linux_user, local.linux_dtr_worker_primary_ip)}" 26 | linux_dtr_replicas = "${join("\n", formatlist("%s ansible_user=%s ansible_host=%s", local.linux_dtr_worker_replica_names, var.linux_user, local.linux_dtr_worker_replica_ips))}" 27 | linux_workers = "${length(var.linux_worker_names) > 0 ? join("\n", formatlist("%s ansible_user=%s ansible_host=%s", var.linux_worker_names, var.linux_user, var.linux_worker_ips)) : ""}" 28 | 29 | windows_workers = "${length(var.windows_worker_names) > 0 30 | ? 
join("\n", formatlist("%s ansible_host=%s ansible_user=${var.windows_user} ansible_password='%s'", var.windows_worker_names, var.windows_worker_ips, var.windows_worker_passwords)) 31 | : ""}" 32 | 33 | linux_ucp_lbs = "${length(var.linux_ucp_lb_names) > 0 ? join("\n", formatlist("%s ansible_user=%s ansible_host=%s", var.linux_ucp_lb_names, var.linux_user, var.linux_ucp_lb_ips)) : ""}" 34 | linux_dtr_lbs = "${length(var.linux_dtr_lb_names) > 0 ? join("\n", formatlist("%s ansible_user=%s ansible_host=%s", var.linux_dtr_lb_names, var.linux_user, var.linux_dtr_lb_ips)) : ""}" 35 | 36 | # extra configs 37 | linux_databases = "${length(var.linux_database_names) > 0 ? join("\n", formatlist("%s ansible_user=%s ansible_host=%s", var.linux_database_names, var.linux_user, var.linux_database_ips)) : ""}" 38 | linux_build = "${length(var.linux_build_server_names) > 0 ? join("\n", formatlist("%s ansible_user=%s ansible_host=%s", var.linux_build_server_names, var.linux_user, var.linux_build_server_ips)) : ""}" 39 | windows_databases = "${length(var.windows_database_names) > 0 ? join("\n", formatlist("%s ansible_host=%s", var.windows_database_names, var.windows_database_ips)) : ""}" 40 | windows_build = "${length(var.windows_build_server_names) > 0 ? 
join("\n", formatlist("%s ansible_host=%s", var.windows_build_server_names, var.windows_build_server_ips)) : ""}" 41 | infra_stack = "${var.infra_stack}" 42 | load_balancers = "${local.load_balancers}" 43 | extra_vars = "${var.extra_vars}" 44 | } 45 | } 46 | 47 | resource "local_file" "ansible_inventory" { 48 | content = "${data.template_file.inventory.rendered}" 49 | filename = "${var.inventory_file}" 50 | } 51 | -------------------------------------------------------------------------------- /dci-aws/modules/ansible/outputs.tf: -------------------------------------------------------------------------------- 1 | # Display inventory 2 | output "hosts_content" { 3 | value = "${data.template_file.inventory.rendered}" 4 | } 5 | 6 | # Display inventory file location 7 | output "hosts_file" { 8 | value = "Ansible hosts file location: ${var.inventory_file}" 9 | } 10 | -------------------------------------------------------------------------------- /dci-aws/modules/ansible/provider.tf: -------------------------------------------------------------------------------- 1 | provider "local" { 2 | version = "~> 1.1" 3 | } 4 | 5 | provider "null" { 6 | version = "~> 1.0" 7 | } 8 | 9 | provider "template" { 10 | version = "~> 1.0" 11 | } 12 | -------------------------------------------------------------------------------- /dci-aws/modules/ansible/variables.tf: -------------------------------------------------------------------------------- 1 | # Ansible Inventory File Path 2 | variable "inventory_file" { 3 | description = "Ansible-compatible inventory file used to store the list of hosts" 4 | default = "hosts" 5 | } 6 | 7 | # Linux User 8 | variable "linux_user" { 9 | description = "The user to setup and use within the Linux vm" 10 | default = "docker" 11 | } 12 | 13 | # Windows User 14 | variable "windows_user" { 15 | description = "The user to setup and use within the Windows vm" 16 | default = "Administrator" 17 | } 18 | 19 | variable "windows_worker_passwords" { 20 | 
description = "The passwords to use within the Windows VMs" 21 | type = "list" 22 | } 23 | 24 | # Linux UCP Managers 25 | variable "linux_ucp_manager_names" { 26 | description = "The list of Linux UCP Manager names" 27 | type = "list" 28 | default = [] 29 | } 30 | 31 | variable "linux_ucp_manager_ips" { 32 | description = "The list of Linux UCP Manager IPs" 33 | type = "list" 34 | default = [] 35 | } 36 | 37 | # Linux DTR Workers 38 | variable "linux_dtr_worker_names" { 39 | description = "The list of Linux DTR names" 40 | type = "list" 41 | default = [] 42 | } 43 | 44 | variable "linux_dtr_worker_ips" { 45 | description = "The list of Linux DTR IPs" 46 | type = "list" 47 | default = [] 48 | } 49 | 50 | # Linux Workers 51 | variable "linux_worker_names" { 52 | description = "The list of Linux Worker names" 53 | type = "list" 54 | default = [] 55 | } 56 | 57 | variable "linux_worker_ips" { 58 | description = "The list of Linux Worker IPs" 59 | type = "list" 60 | default = [] 61 | } 62 | 63 | # Windows Workers 64 | variable "windows_worker_names" { 65 | description = "The list of Windows Worker names" 66 | type = "list" 67 | default = [] 68 | } 69 | 70 | variable "windows_worker_ips" { 71 | description = "The list of Windows Worker IPs" 72 | type = "list" 73 | default = [] 74 | } 75 | 76 | ## Extra Instances 77 | # Linux Database Server 78 | variable "linux_database_names" { 79 | description = "The list of Linux Database names" 80 | type = "list" 81 | default = [] 82 | } 83 | 84 | variable "linux_database_ips" { 85 | description = "The list of Linux Database IPs" 86 | type = "list" 87 | default = [] 88 | } 89 | 90 | # Linux Build Server 91 | variable "linux_build_server_names" { 92 | description = "The list of Linux Build Server names" 93 | type = "list" 94 | default = [] 95 | } 96 | 97 | variable "linux_build_server_ips" { 98 | description = "The list of Linux Build Server IPs" 99 | type = "list" 100 | default = [] 101 | } 102 | 103 | # Windows Database Server 104 | 
variable "windows_database_names" { 105 | description = "The list of Windows Database names" 106 | type = "list" 107 | default = [] 108 | } 109 | 110 | variable "windows_database_ips" { 111 | description = "The list of Windows Database IPs" 112 | type = "list" 113 | default = [] 114 | } 115 | 116 | # Windows Build Server 117 | variable "windows_build_server_names" { 118 | description = "The list of Windows Build Server names" 119 | type = "list" 120 | default = [] 121 | } 122 | 123 | variable "windows_build_server_ips" { 124 | description = "The list of Windows Build Server IPs" 125 | type = "list" 126 | default = [] 127 | } 128 | 129 | # Load balancers 130 | 131 | variable "docker_ucp_lb" { 132 | description = "UCP load balancer DNS name" 133 | default = "" 134 | } 135 | 136 | variable "docker_dtr_lb" { 137 | description = "DTR load balancer DNS name" 138 | default = "" 139 | } 140 | 141 | variable "linux_ucp_lb_ips" { 142 | description = "UCP load balancer IPs" 143 | default = [] 144 | } 145 | 146 | variable "linux_ucp_lb_names" { 147 | description = "UCP load balancer names" 148 | default = [] 149 | } 150 | 151 | variable "linux_dtr_lb_ips" { 152 | description = "DTR load balancer DNS name" 153 | default = [] 154 | } 155 | 156 | variable "linux_dtr_lb_names" { 157 | description = "DTR load balancer names" 158 | default = [] 159 | } 160 | 161 | # Additional vars 162 | variable "infra_stack" { 163 | description = "The infra stack being deployed" 164 | default = "" 165 | } 166 | 167 | variable "extra_vars" { 168 | description = "Any additional vars to add" 169 | default = "" 170 | } 171 | -------------------------------------------------------------------------------- /dci-aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "UCPDNSTarget" { 2 | description = "Use this name to update your DNS records" 3 | value = "${aws_elb.ucp.dns_name}" 4 | } 5 | 6 | output "AppDNSTarget" { 7 | description = "Use this name to 
update your DNS records" 8 | value = "${aws_elb.apps.dns_name}" 9 | } 10 | 11 | output "DTRDNSTarget" { 12 | description = "Use this name to update your DNS records" 13 | value = "${aws_elb.dtr.dns_name}" 14 | } 15 | -------------------------------------------------------------------------------- /dci-aws/provider.tf: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # CREATE ALL THE RESOURCES TO DEPLOY AN APP IN AN AUTO SCALING GROUP WITH AN ELB 3 | # This template runs a simple "Hello, World" web server in Auto Scaling Group (ASG) with an Elastic Load Balancer 4 | # (ELB) in front of it to distribute traffic across the EC2 Instances in the ASG. 5 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6 | 7 | # ------------------------------------------------------------------------------ 8 | # CONFIGURE OUR AWS CONNECTION 9 | # ------------------------------------------------------------------------------ 10 | 11 | provider "aws" { 12 | version = "~> 1.13" 13 | 14 | region = "${var.region}" 15 | } 16 | 17 | # --------------------------------------------------------------------------------------------------------------------- 18 | # GET THE LIST OF AVAILABILITY ZONES IN THE CURRENT REGION 19 | # Every AWS accout has slightly different availability zones in each region. For example, one account might have 20 | # us-east-1a, us-east-1b, and us-east-1c, while another will have us-east-1a, us-east-1b, and us-east-1d. This resource 21 | # queries AWS to fetch the list for the current account and region. 
22 | # --------------------------------------------------------------------------------------------------------------------- 23 | 24 | data "aws_availability_zones" "available" {} 25 | 26 | # Use this data source to get the access to the effective Account ID, User ID, and ARN in which Terraform is authorized. 27 | data "aws_caller_identity" "current" {} 28 | -------------------------------------------------------------------------------- /dci-aws/roles/ansible-requirements/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Debian/Ubuntu. 3 | # 4 | 5 | - name: Check if apt-get is installed. 6 | raw: apt-get --version 7 | failed_when: False 8 | register: has_apt 9 | 10 | - name: Install python2 [apt-get]. 11 | raw: apt-get update -q && apt-get install -qy python-minimal python-pip 12 | register: installed_via_apt 13 | when: has_apt.rc == 0 14 | 15 | - name: install certain python modules for docker 16 | pip: 17 | name: "{{ item.name }}" 18 | version: "{{ item.version }}" 19 | state: present 20 | with_items: 21 | - { name: docker, version: 3.3.0 } 22 | 23 | # 24 | # RHEL/CentOS/Oracle. 25 | # 26 | 27 | - name: Check if yum is installed. 28 | raw: yum --version 29 | register: has_yum 30 | failed_when: False 31 | 32 | - name: Clean yum 33 | raw: yum clean all 34 | when: has_yum.rc == 0 and installed_via_apt.get('skipped') 35 | 36 | - name: Install python2 [yum]. 37 | raw: yum install -y python 38 | register: installed_via_yum 39 | when: has_yum.rc == 0 and installed_via_apt.get('skipped') 40 | 41 | # 42 | # SLES. 43 | # 44 | 45 | - name: Check if zypper is installed. 46 | raw: zypper --version 47 | failed_when: False 48 | register: has_zypper 49 | 50 | - name: Install python2 [zypper]. 
51 | raw: zypper install -f -y -n python 52 | register: installed_via_zypper 53 | when: has_zypper.rc == 0 and installed_via_apt.get('skipped') and installed_via_yum.get('skipped') 54 | 55 | # 56 | # Verify 57 | # 58 | 59 | - name: Ensure python2 has been installed properly. 60 | raw: test -e /usr/bin/python 61 | -------------------------------------------------------------------------------- /dci-aws/roles/cloudstor-install/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install NFS support" 3 | become: true 4 | apt: 5 | name: nfs-common 6 | state: present 7 | update_cache: yes 8 | when: 9 | - cloudstor_plugin_options is defined 10 | tags: 11 | - nfs 12 | -------------------------------------------------------------------------------- /dci-aws/roles/cloudstor-install/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install NFS support" 3 | become: true 4 | yum: 5 | name: nfs-utils 6 | state: present 7 | update_cache: yes 8 | when: 9 | - cloudstor_plugin_options is defined 10 | tags: 11 | - nfs 12 | -------------------------------------------------------------------------------- /dci-aws/roles/cloudstor-install/tasks/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install NFS support" 3 | become: true 4 | zypper: 5 | name: nfs-client 6 | state: present 7 | update_cache: yes 8 | when: 9 | - cloudstor_plugin_options is defined 10 | tags: 11 | - nfs 12 | -------------------------------------------------------------------------------- /dci-aws/roles/cloudstor-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if plugin is already registered." 
3 | become: true 4 | shell: "docker plugin inspect cloudstor" 5 | register: cloudstor_plugin_status 6 | failed_when: cloudstor_plugin_status.rc > 1 7 | when: cloudstor_plugin_options is defined 8 | 9 | - include_tasks: "{{ os_family }}.yml" 10 | when: ansible_os_family == os_family 11 | tags: 12 | - nfs 13 | with_items: 14 | - "Debian" 15 | - "RedHat" 16 | - "Suse" 17 | loop_control: 18 | loop_var: os_family 19 | ignore_errors: yes 20 | 21 | - name: "Install Cloudstor plugin" 22 | become: true 23 | shell: docker plugin install --alias cloudstor --grant-all-permissions docker4x/cloudstor:{{cloudstor_plugin_version}} {{cloudstor_plugin_options}} 24 | when: 25 | - cloudstor_plugin_options is defined and 26 | cloudstor_plugin_status.rc == 1 27 | ignore_errors: yes 28 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Package. 3 | docker_dtr_image_repository: docker 4 | docker_dtr_version: latest 5 | 6 | # UCP. 7 | docker_ucp_username: admin 8 | # Don't use UCP's loadbalancer when installing DTR. 9 | # See: https://success.docker.com/article/How_to_install_backup_or_upgrade_DTR_when_receiving_a_container_wait_error 10 | docker_ucp_lb: "{{ hostvars[groups[ucp_nodes][0]]['ansible_host'] }}" 11 | 12 | # Parameters. 13 | docker_dtr_https_port: 443 14 | docker_dtr_http_port: 80 15 | docker_dtr_lb: "{{ ansible_host }}" # Fall back to host address (for testing only) 16 | infra_stack: "unknown" # If "aws", configures for S3 storage. 17 | 18 | # Logs. 19 | docker_logs_directory: "{{ playbook_dir }}/.logs" 20 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Antony Méchin 3 | description: Install Docker Trusted Registry (DTR). 
4 | company: Docker, Inc. 5 | 6 | min_ansible_version: 2.2 7 | 8 | platforms: 9 | #- name: EL 10 | # versions: 11 | # - all 12 | # - 5 13 | # - 6 14 | # - 7 15 | #- name: GenericUNIX 16 | # versions: 17 | # - all 18 | # - any 19 | #- name: Solaris 20 | # versions: 21 | # - all 22 | # - 10 23 | # - 11.0 24 | # - 11.1 25 | # - 11.2 26 | # - 11.3 27 | #- name: Fedora 28 | # versions: 29 | # - all 30 | # - 16 31 | # - 17 32 | # - 18 33 | # - 19 34 | # - 20 35 | # - 21 36 | # - 22 37 | # - 23 38 | #- name: Windows 39 | # versions: 40 | # - all 41 | # - 2012R2 42 | #- name: SmartOS 43 | # versions: 44 | # - all 45 | # - any 46 | #- name: opensuse 47 | # versions: 48 | # - all 49 | # - 12.1 50 | # - 12.2 51 | # - 12.3 52 | # - 13.1 53 | # - 13.2 54 | #- name: Amazon 55 | # versions: 56 | # - all 57 | # - 2013.03 58 | # - 2013.09 59 | #- name: GenericBSD 60 | # versions: 61 | # - all 62 | # - any 63 | #- name: FreeBSD 64 | # versions: 65 | # - all 66 | # - 8.0 67 | # - 8.1 68 | # - 8.2 69 | # - 8.3 70 | # - 8.4 71 | # - 9.0 72 | # - 9.1 73 | # - 9.1 74 | # - 9.2 75 | # - 9.3 76 | # - 10.0 77 | # - 10.1 78 | # - 10.2 79 | - name: Ubuntu 80 | versions: 81 | # - all 82 | # - lucid 83 | # - maverick 84 | # - natty 85 | # - oneiric 86 | # - precise 87 | # - quantal 88 | # - raring 89 | # - saucy 90 | - trusty 91 | # - utopic 92 | # - vivid 93 | # - wily 94 | #- name: SLES 95 | # versions: 96 | # - all 97 | # - 10SP3 98 | # - 10SP4 99 | # - 11 100 | # - 11SP1 101 | # - 11SP2 102 | # - 11SP3 103 | #- name: GenericLinux 104 | # versions: 105 | # - all 106 | # - any 107 | #- name: Debian 108 | # versions: 109 | # - all 110 | # - etch 111 | # - jessie 112 | # - lenny 113 | # - squeeze 114 | # - wheezy 115 | 116 | dependencies: [] 117 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/get-running-replica-id.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get 
running dtr-registry-." 3 | shell: bash -c "{% raw %} docker ps --filter name=dtr-registry- -q | xargs docker inspect --format '{{ json .Config.Env }}' | grep -o -e 'DTR_REPLICA_ID=.\{12\}' | cut -d= -f2 {% endraw %}" 4 | register: _replica_id 5 | 6 | - name: "Store replica id" 7 | set_fact: 8 | current_replica_id: "{{ _replica_id.stdout_lines[0] | default('not a replica') }}" 9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/join/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Pull DTR (version: {{ docker_dtr_version }})." 3 | shell: > 4 | docker image pull 5 | "{{ docker_dtr_image_repository }}/dtr:{{ docker_dtr_version }}" 6 | register: pulled 7 | retries: 10 8 | until: pulled is succeeded 9 | 10 | # FIXME: Give a safer filter (e.g. including the replica-id). 11 | - name: "Check if DTR is already running." 12 | shell: "docker container ls -q --filter name=dtr-registry-" 13 | register: _already_running 14 | 15 | - name: "Set is_already_running fact." 16 | set_fact: 17 | already_running: "{{ _already_running.stdout_lines | length > 0 }}" 18 | docker_primary_ucp_ip_address: "{{ hostvars[groups[ucp_nodes][0]]['ansible_host'] }}" 19 | existing_replica_id: "{{ hostvars[groups[dtr_primary][0]]['current_replica_id'] | default(docker_dtr_replica_id) }}" 20 | 21 | - name: "Join DTR replica (version: {{ docker_dtr_version }})." 
22 | shell: > 23 | docker run 24 | --rm 25 | --name dtr 26 | {{ docker_dtr_image_repository }}/dtr:{{ docker_dtr_version }} 27 | join 28 | --ucp-url "{{ docker_primary_ucp_ip_address }}" 29 | --ucp-node "{{ ansible_nodename }}" 30 | --ucp-username "{{ docker_ucp_username }}" 31 | --ucp-password "{{ docker_ucp_admin_password }}" 32 | --ucp-insecure-tls 33 | --existing-replica-id "{{ existing_replica_id }}" 34 | when: not already_running 35 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/logs/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if DTR is already running." 3 | shell: docker container ls -q --filter name=dtr-registry- --format '{% raw %}{{ .Names }} {% endraw %}' 4 | register: _running 5 | ignore_errors: True 6 | 7 | - name: "Set 'running' fact." 8 | set_fact: 9 | running: "{{ _running.stdout_lines | length > 0 }}" 10 | 11 | - name: "Get replica id" 12 | shell: "echo {{ _running.stdout_lines[0] }} | cut -d'-' -f3" 13 | register: _replica_id 14 | when: running 15 | 16 | - name: "Set 'current_replica_id' fact." 17 | set_fact: 18 | current_replica_id: "{{ _replica_id.stdout_lines[0] }}" 19 | when: running 20 | 21 | - name: "Get logs." 
22 | shell: "docker logs {{ container }}" 23 | register: _logs 24 | with_items: 25 | - "dtr-api-{{ current_replica_id }}" 26 | - "dtr-registry-{{ current_replica_id }}" 27 | - "dtr-garant-{{ current_replica_id }}" 28 | ignore_errors: True 29 | loop_control: 30 | loop_var: container 31 | when: running 32 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/logs/RedHat.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/logs/Suse.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/logs/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if DTR is already running." 3 | win_shell: docker container ls -q --filter name=dtr-registry- --format '{% raw %}{{ .Names }} {% endraw %}' 4 | register: _running 5 | ignore_errors: True 6 | 7 | - name: "Set 'running' fact." 8 | set_fact: 9 | running: "{{ _running.stdout_lines | length > 0 }}" 10 | 11 | - name: "Get replica id" 12 | win_shell: "{{ _running.stdout_lines[0] }}.split('-')[2]" 13 | register: _replica_id 14 | when: running 15 | 16 | - name: "Set 'current_replica_id' fact." 17 | set_fact: 18 | current_replica_id: "{{ _replica_id.stdout_lines[0] }}" 19 | when: running 20 | 21 | - name: "Get logs." 
22 | win_shell: "docker logs {{ container }}" 23 | register: _logs 24 | with_items: 25 | - "dtr-api-{{ current_replica_id }}" 26 | - "dtr-registry-{{ current_replica_id }}" 27 | - "dtr-garant-{{ current_replica_id }}" 28 | ignore_errors: True 29 | loop_control: 30 | loop_var: container 31 | when: running 32 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/logs/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Get logs. 4 | # 5 | - name: "Get." 6 | include_tasks: "{{ os_family }}.yml" 7 | when: ansible_os_family == os_family 8 | with_items: 9 | - "Debian" 10 | - "RedHat" 11 | - "Suse" 12 | - "Windows" 13 | loop_control: 14 | loop_var: os_family 15 | 16 | - name: "Get logs." 17 | when: _logs is succeeded 18 | block: 19 | # 20 | # Format. 21 | # 22 | - set_fact: 23 | out: "{{ out|default({}) | combine( {item.container: item.stdout_lines | join('\n') | string } ) }}" 24 | err: "{{ err|default({}) | combine( {item.container: item.stderr_lines | join('\n') | string } ) }}" 25 | when: item.rc == 0 26 | with_items: "{{ _logs.get('results', []) }}" 27 | 28 | # 29 | # Create folders. 30 | # 31 | - name: "Create folders." 32 | local_action: 33 | module: file 34 | path: "{{ docker_logs_directory }}/{{ ansible_nodename }}/dtr" 35 | state: directory 36 | 37 | # 38 | # Write to disk. 39 | # 40 | - name: "Save logs (stdout) to file." 41 | when: "item.value | length > 0" 42 | local_action: 43 | module: copy 44 | content: "{{ item.value }}" 45 | dest: "{{ docker_logs_directory }}/{{ ansible_nodename }}/dtr/{{ item.key }}.out.log" 46 | with_dict: "{{ out | default({}) }}" 47 | 48 | - name: "Save logs (stderr) to file." 
49 | when: "item.value | length > 0" 50 | local_action: 51 | module: copy 52 | content: "{{ item.value }}" 53 | dest: "{{ docker_logs_directory }}/{{ ansible_nodename }}/dtr/{{ item.key }}.err.log" 54 | with_dict: "{{ err | default({}) }}" 55 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Install. 4 | # 5 | - name: "Install." 6 | include_tasks: "install/main.yml" 7 | when: docker_dtr_role_mode == "install" 8 | 9 | # 10 | # Update. 11 | # 12 | - name: "Update." 13 | include_tasks: "update/main.yml" 14 | when: docker_dtr_role_mode == "update" 15 | 16 | # 17 | # Join. 18 | # 19 | - name: "Join." 20 | include_tasks: "join/main.yml" 21 | when: docker_dtr_role_mode == "join" 22 | 23 | # 24 | # Get-Running-Replica-Id. 25 | # 26 | - name: "Get running replica id." 27 | include_tasks: "get-running-replica-id.yml" 28 | when: docker_dtr_role_mode == "get-running-replica-id" 29 | 30 | # 31 | # Uninstall. 32 | # 33 | - name: "Uninstall." 34 | include_tasks: "uninstall/main.yml" 35 | when: docker_dtr_role_mode == "uninstall" 36 | 37 | # 38 | # Pull logs. 39 | # 40 | - name: "Logs." 41 | include_tasks: "logs/main.yml" 42 | when: docker_dtr_role_mode == "logs" 43 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/uninstall/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if DTR is already running." 3 | shell: docker container ls -q --filter name=dtr-registry- --format '{% raw %}{{ .Names }} {% endraw %}' 4 | register: _running 5 | ignore_errors: True 6 | 7 | - name: "Set 'running' fact." 8 | set_fact: 9 | running: "{{ _running.stdout_lines | length > 0 }}" 10 | 11 | - name: "Uninstall." 
12 | when: running 13 | block: 14 | - include_tasks: "../get-running-replica-id.yml" 15 | 16 | - set_fact: 17 | existing_replica_id: "{{ hostvars[groups[dtr_primary][0]]['current_replica_id'] | default(docker_dtr_replica_id) }}" 18 | 19 | - name: "Remove node from DTR." 20 | shell: > 21 | docker run 22 | --rm 23 | --name dtr-remove 24 | {{ docker_dtr_image_repository }}/dtr:{{ docker_dtr_version }} 25 | remove 26 | --ucp-insecure-tls 27 | --ucp-url "{{ docker_ucp_lb }}" 28 | --ucp-username "{{ docker_ucp_username }}" 29 | --ucp-password "{{ docker_ucp_admin_password }}" 30 | --replica-ids "{{ current_replica_id }}" 31 | --existing-replica-id "{{ existing_replica_id }}" 32 | register: removed 33 | ignore_errors: true 34 | when: current_replica_id != existing_replica_id 35 | 36 | - name: "Destroy DTR." 37 | shell: > 38 | docker run 39 | --rm 40 | --name dtr-destroy 41 | {{ docker_dtr_image_repository }}/dtr:{{ docker_dtr_version }} 42 | destroy 43 | --ucp-insecure-tls 44 | --ucp-url "{{ docker_ucp_lb }}" 45 | --ucp-username "{{ docker_ucp_username }}" 46 | --ucp-password "{{ docker_ucp_admin_password }}" 47 | --replica-id "{{ current_replica_id }}" 48 | when: removed|failed or removed|skipped 49 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-dtr/tasks/update/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if DTR is running." 3 | shell: docker container ls -q --filter name=dtr-registry- --format '{% raw %}{{ .Names }} {% endraw %}' 4 | register: _running 5 | ignore_errors: True 6 | 7 | - name: "Set is_already_running fact." 8 | set_fact: 9 | running: "{{ _running.stdout_lines | length > 0 }}" 10 | 11 | # 12 | # Exit if DTR is not running. 13 | # 14 | - name: "Check if DTR is running." 15 | meta: end_play 16 | when: not running 17 | 18 | # 19 | # Compare versions. 20 | # 21 | - name: "Get currently running version." 
22 | shell: docker container ls --filter name=dtr-registry --format {% raw %} '{{with .Image -}}{{index (split . ":") 1 }}{{- end}}' {% endraw %} 23 | register: extracted_version 24 | 25 | - name: "Check if new version is higher (is {{extracted_version.stdout}} < {{docker_dtr_version}})" 26 | set_fact: 27 | need_update: "{{ extracted_version.stdout | version_compare(docker_dtr_version, '<')}}" 28 | 29 | - name: "Update needed" 30 | when: need_update 31 | block: 32 | - name: "Pull desired version of DTR ({{ docker_dtr_version }})." 33 | shell: > 34 | docker image pull 35 | "{{ docker_dtr_image_repository }}/dtr:{{ docker_dtr_version }}" 36 | register: pulled 37 | retries: 10 38 | until: pulled is succeeded 39 | 40 | - name: "Get replica id" 41 | shell: "echo {{ _running.stdout_lines[0] }} | cut -d'-' -f3" 42 | register: replica_id 43 | 44 | - name: "Upgrade DTR." 45 | shell: > 46 | docker run 47 | --rm 48 | --name dtr 49 | --volume /var/run/docker.sock:/var/run/docker.sock 50 | upgrade 51 | --existing-replica-id "{{ replica_id.stdout }}" 52 | --ucp-url "{{ docker_ucp_lb }}" 53 | --ucp-insecure-tls 54 | --ucp-username "{{ docker_ucp_username }}" 55 | --ucp-password "{{ docker_ucp_admin_password }}" 56 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/.gitignore: -------------------------------------------------------------------------------- 1 | tests/subscriptions.json 2 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/.travis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | language: python 3 | python: "2.7" 4 | 5 | sudo: required 6 | 7 | services: 8 | - docker 9 | 10 | before_install: 11 | - echo -e "{\"Ubuntu\": \"$UBUNTU_SUBSCRIPTION\", \"CentOS\": \"$CENTOS_SUBSCRIPTION\"}" > tests/subscriptions.json 12 | 13 | script: 14 | - make test 15 | 16 | notifications: 17 | webhooks: 
https://galaxy.ansible.com/api/v1/notifications/ 18 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/Makefile: -------------------------------------------------------------------------------- 1 | # Test the role (docker-ee-install). 2 | 3 | PWD=$(shell pwd) 4 | VERSION ?= latest 5 | ROLE_NAME = docker-ee-install 6 | ROLE_PATH = /etc/ansible/roles/$(ROLE_NAME) 7 | EEs ?= 8 | TEST_VERSION = ansible --version 9 | TEST_SYNTAX = ansible-playbook -vv -i 'localhost,' -c local $(ROLE_PATH)/tests/main.yml --syntax-check 10 | TEST_SUBSCRIPTIONS = $(shell cat $(PWD)/tests/subscriptions.json) 11 | TEST_PLAYBOOK = ansible-playbook -vv -i 'localhost,' -c local -e '$(TEST_SUBSCRIPTIONS)' -e docker_ee_version=$(VERSION) $(EEs) $(ROLE_PATH)/tests 12 | TEST_CMD = $(TEST_VERSION) && $(TEST_SYNTAX) && $(TEST_PLAYBOOK) 13 | TEST_IMAGE_UBUNTU ?= williamyeh/ansible:ubuntu 14 | TEST_IMAGE_CENTOS ?= williamyeh/ansible:centos 15 | DOCKER_COMMAND = docker run --rm -e "ROLE_NAME=$(ROLE_NAME)" -v /var/run/docker.sock:/var/run/docker.sock -v $(PWD):$(ROLE_PATH) 16 | 17 | # Supported Docker EE versions. 18 | VERSIONS = 17.06 17.03 latest 19 | 20 | # Supported distributions (and versions). 21 | DISTROS = ubuntu centos 22 | CENTOS_VERSIONS = 7 23 | UBUNTU_VERSIONS = 16.04 14.04 24 | 25 | .PHONY: test 26 | 27 | # 28 | # Pull. 29 | # 30 | 31 | pull/ubuntu/%: 32 | docker image pull "$(TEST_IMAGE_UBUNTU)$*" 33 | 34 | pull/centos/%: 35 | docker image pull "$(TEST_IMAGE_CENTOS)$*" 36 | 37 | pull/ubuntu: $(addprefix pull/ubuntu/, $(UBUNTU_VERSIONS)) 38 | 39 | pull/centos: $(addprefix pull/centos/, $(CENTOS_VERSIONS)) 40 | 41 | pull: pull/ubuntu pull/centos 42 | 43 | # 44 | # Test. 45 | # 46 | 47 | # The following pattern uses VERSION, DISTROS, _VERSIONS 48 | # to generate rules like test/latest/ubuntu/16.04. 49 | # 50 | # $(1): Docker EE version to test. 51 | # $(2): Distro to test upon. 52 | # $(3): Distro version to test upon. 
53 | define generate_test 54 | test/install/$(1)/$(2)/%: pull/$(2)/% 55 | @echo "$$@..." 56 | @mkdir -p .$$(@D) 57 | @if ! $$(DOCKER_COMMAND) "$$(TEST_IMAGE_$(shell echo $(2) | tr a-z A-Z))$$*" /bin/bash -c "$$(TEST_CMD)"/main.yml > .$$@.log; then \ 58 | cat .$$@.log; \ 59 | exit 1; \ 60 | fi 61 | 62 | test/update/$(2)/%: pull/$(2)/% 63 | @echo "$$@..." 64 | @mkdir -p .$$(@D) 65 | @if ! $$(DOCKER_COMMAND) "$$(TEST_IMAGE_$(shell echo $(2) | tr a-z A-Z))$$*" /bin/bash -c "$$(TEST_CMD)"/update.yml > .$$@.log; then \ 66 | cat .$$@.log; \ 67 | exit 1; \ 68 | fi 69 | 70 | test/install/$(1)/$(2): $$(addprefix test/install/$(1)/$(2)/, $$($(shell echo $(2) | tr a-z A-Z)_VERSIONS)) 71 | 72 | endef 73 | 74 | define generate_test_for_version 75 | test/$(1): $$(addprefix test/$(1)/, $$(DISTROS)) 76 | 77 | endef 78 | 79 | # Generate a rule for every combination (version, distro, distro_version). 80 | $(eval \ 81 | $(foreach docker, $(VERSIONS), \ 82 | $(call generate_test_for_version,$(docker)) \ 83 | $(foreach distro, $(DISTROS), \ 84 | $(call generate_test,$(docker),$(distro))))) 85 | 86 | # Add convenient rule -> test: test/... 87 | test: $(addprefix test/, $(VERSIONS)) 88 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_ee_version: "latest" 3 | docker_ee_package_versions: "{{ {} }}" 4 | docker_ee_package_version: "{{ docker_ee_package_versions[ansible_distribution] | default(docker_ee_package_versions[ansible_os_family]) | default(docker_ee_version) }}" 5 | docker_ee_release_channel: "stable" 6 | docker_ee_subscription: "{{ docker_ee_subscriptions[ansible_distribution] | default(docker_ee_subscriptions[ansible_os_family] | default(omit)) }}" 7 | docker_ucp_image_repository: docker 8 | infra_stack: unknown 9 | docker_ee_role_mode: "install" 10 | 11 | # Logs. 
12 | docker_logs_directory: "{{ playbook_dir }}/.logs" 13 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/filter_plugins/redhat.py: -------------------------------------------------------------------------------- 1 | def release(string, size = 3): 2 | """ 3 | Turn the return of /etc/redhat-release (e.g. "CentOS Linux release 7.4.1708 (Core)") to a version (e.g. 7, 7.4 or 7.4.1708). 4 | """ 5 | import re 6 | version = re.match(re.compile(r'^.* ([\d.]+) \(.+\)$'), string.strip()).group(1) 7 | return '.'.join(str(version).split('.')[:size]) 8 | 9 | class FilterModule(object): 10 | """Filter for turning a full yum version to it's simple representation""" 11 | 12 | def filters(self): 13 | return { 14 | 'release': release, 15 | } 16 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/filter_plugins/redhat.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BretFisher/dogvscat/fc16b7359b2e06002b0dc8d67ce8115fbe30a4f5/dci-aws/roles/docker-ee/filter_plugins/redhat.pyc -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Antony Méchin 3 | description: Install Docker Enterprise Edition (EE). 4 | company: Docker, Inc. 
5 | 6 | min_ansible_version: 2.4 7 | 8 | platforms: 9 | #- name: EL 10 | # versions: 11 | # - all 12 | # - 5 13 | # - 6 14 | # - 7 15 | #- name: GenericUNIX 16 | # versions: 17 | # - all 18 | # - any 19 | #- name: Solaris 20 | # versions: 21 | # - all 22 | # - 10 23 | # - 11.0 24 | # - 11.1 25 | # - 11.2 26 | # - 11.3 27 | #- name: Fedora 28 | # versions: 29 | # - all 30 | # - 16 31 | # - 17 32 | # - 18 33 | # - 19 34 | # - 20 35 | # - 21 36 | # - 22 37 | # - 23 38 | - name: Windows 39 | versions: 40 | # - all 41 | - Microsoft Windows Server 2016 Datacenter 42 | # - 2012R2 43 | #- name: SmartOS 44 | # versions: 45 | # - all 46 | # - any 47 | #- name: opensuse 48 | # versions: 49 | # - all 50 | # - 12.1 51 | # - 12.2 52 | # - 12.3 53 | # - 13.1 54 | # - 13.2 55 | #- name: Amazon 56 | # versions: 57 | # - all 58 | # - 2013.03 59 | # - 2013.09 60 | #- name: GenericBSD 61 | # versions: 62 | # - all 63 | # - any 64 | #- name: FreeBSD 65 | # versions: 66 | # - all 67 | # - 8.0 68 | # - 8.1 69 | # - 8.2 70 | # - 8.3 71 | # - 8.4 72 | # - 9.0 73 | # - 9.1 74 | # - 9.1 75 | # - 9.2 76 | # - 9.3 77 | # - 10.0 78 | # - 10.1 79 | # - 10.2 80 | - name: Ubuntu 81 | versions: 82 | # - all 83 | # - lucid 84 | # - maverick 85 | # - natty 86 | # - oneiric 87 | # - precise 88 | # - quantal 89 | # - raring 90 | # - saucy 91 | - trusty 92 | # - utopic 93 | # - vivid 94 | # - wily 95 | #- name: SLES 96 | # versions: 97 | # - all 98 | # - 10SP3 99 | # - 10SP4 100 | # - 11 101 | # - 11SP1 102 | # - 11SP2 103 | # - 11SP3 104 | #- name: GenericLinux 105 | # versions: 106 | # - all 107 | # - any 108 | #- name: Debian 109 | # versions: 110 | # - all 111 | # - etch 112 | # - jessie 113 | # - lenny 114 | # - squeeze 115 | # - wheezy 116 | 117 | dependencies: [] 118 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/.unix.post.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 
3 | # Check installation. 4 | # 5 | 6 | - name: "Ensure /etc/docker dir exists." 7 | file: 8 | path: /etc/docker 9 | state: directory 10 | 11 | 12 | # 13 | # Set additional daemon options 14 | # 15 | 16 | - name: "Configure additional engine options" 17 | copy: 18 | content: "{{ docker_ee_daemon_options | to_nice_json }}" 19 | dest: /etc/docker/daemon.json 20 | mode: 0644 21 | when: docker_ee_daemon_options is defined 22 | register: daemon_options 23 | 24 | - name: "Remove daemon config when empty" 25 | file: 26 | path: /etc/docker/daemon.json 27 | state: absent 28 | when: docker_ee_daemon_options is not defined 29 | register: daemon_options 30 | 31 | # 32 | # Permissions. 33 | # 34 | 35 | - name: "Get current user name (no sudo)." 36 | become: False 37 | set_fact: 38 | current_user: "{{ ansible_user }}" 39 | 40 | - name: "Adding existing user {{ current_user }} to group docker." 41 | user: 42 | name: "{{ current_user }}" 43 | groups: docker 44 | append: yes 45 | when: 46 | - current_user != "root" 47 | 48 | # 49 | # Check if docker works correctly. 50 | # 51 | 52 | - name: "Test Docker." 53 | shell: "docker version" 54 | register: run_docker 55 | failed_when: False 56 | 57 | - name: "Reload service docker (if not already running)" 58 | systemd: 59 | name: docker 60 | state: restarted 61 | when: run_docker.rc != 0 or daemon_options.changed 62 | 63 | - name: "Enable service docker." 64 | service: 65 | name: docker 66 | enabled: yes 67 | state: running 68 | 69 | - name: "Test Docker." 70 | shell: "docker version" 71 | when: run_docker.rc != 0 72 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/.unix.pre.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Set facts. 4 | # 5 | 6 | - name: "Set 'docker_ee_repository' fact." 
7 | set_fact: 8 | docker_ee_repository: "{{ docker_ee_release_channel }}{% if docker_ee_version != 'latest' %}-{{ docker_ee_version }}{% endif %}" 9 | 10 | - name: "Set 'docker_ee_package' fact." 11 | set_fact: 12 | docker_ee_package: "docker-ee{% if docker_ee_package_version != 'latest' %}-{{ docker_ee_package_version }}{% endif %}" 13 | 14 | # 15 | # Remove previously installed versions of Docker. 16 | # 17 | - name: "Ensure old versions of Docker are not installed." 18 | package: 19 | name: '{{ package }}' 20 | state: absent 21 | with_items: 22 | - containerd 23 | - docker 24 | - docker-ce 25 | - docker-common 26 | - docker-engine 27 | - docker-libnetwork 28 | - docker-runc 29 | - yast2-docker 30 | - runc 31 | loop_control: 32 | loop_var: package 33 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/Debian/Ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "Ubuntu/{{ distribution_version }}.yml" 3 | when: ansible_distribution_version == distribution_version 4 | with_items: 5 | - "14.04" 6 | - "16.04" 7 | loop_control: 8 | loop_var: distribution_version 9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/Debian/Ubuntu/14.04.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Discover where /var/lib/ is mounted" 3 | shell: 4 | "df -h /var/lib/ | grep /dev/ | awk '{print $6}'" 5 | register: root_mount_point_check 6 | 7 | - set_fact: 8 | root_mount_point: "{{ root_mount_point_check.stdout }}" 9 | 10 | - name: "Check if {{ root_mount_point }} is a shared mount" 11 | shell: 12 | "cat /proc/1/mountinfo | grep shared | grep '/ {{ root_mount_point }} ' | awk '{ print $5 }'" 13 | register: root_shared_mountpoint_check 14 | 15 | - set_fact: 16 | root_shared_mountpoint: "{{ root_shared_mountpoint_check.stdout 
}}" 17 | 18 | - name: "Make {{ root_mount_point }} a shared mount" 19 | become: true 20 | when: root_mount_point != root_shared_mountpoint 21 | shell: 22 | "mount --make-shared {{ root_mount_point }}" 23 | 24 | - name: "Create upstart to root shared on startup" 25 | become: true 26 | when: root_mount_point != root_shared_mountpoint 27 | copy: 28 | content: | 29 | # This makes the root filesystem shared so the kubelet 30 | # containers can properly bind mount volumes 31 | description "Make root's volume shared" 32 | start on startup 33 | task 34 | exec mount --make-shared {{ root_mount_point }} 35 | dest: /etc/init/mount-shared-root.conf 36 | 37 | - name: "Verify {{ root_mount_point }} is shared" 38 | shell: 39 | "cat /proc/1/mountinfo | grep shared | grep '/ {{ root_mount_point }} ' | awk '{ print $5 }'" 40 | register: verify_shared_check 41 | failed_when: verify_shared_check.stdout != root_mount_point 42 | 43 | - name: "Discover where /var/lib/docker is mounted" 44 | shell: 45 | "df -h /var/lib/docker | grep /dev/ | awk '{print $6}'" 46 | register: docker_mount_point_check 47 | 48 | - set_fact: 49 | docker_mount_point: "{{ docker_mount_point_check.stdout }}" 50 | 51 | - name: "Check if {{ docker_mount_point }} is a shared mount" 52 | shell: 53 | "cat /proc/1/mountinfo | grep shared | grep '/ {{ docker_mount_point }} ' | awk '{ print $5 }'" 54 | register: shared_mountpoint_check 55 | 56 | - set_fact: 57 | shared_mountpoint: "{{ shared_mountpoint_check.stdout }}" 58 | 59 | - name: "Make {{ docker_mount_point }} a shared mount" 60 | become: true 61 | when: docker_mount_point != shared_mountpoint 62 | shell: 63 | "mount --make-shared {{ docker_mount_point }}" 64 | 65 | - name: "Create upstart to make root volume shared on startup" 66 | become: true 67 | when: docker_mount_point != shared_mountpoint 68 | copy: 69 | content: | 70 | # This makes the root filesystem shared so the kubelet 71 | # containers can properly bind mount volumes 72 | description "Make root volume 
shared" 73 | start on startup 74 | task 75 | exec mount --make-shared {{ docker_mount_point }} 76 | dest: /etc/init/mount-shared-docker.conf 77 | 78 | - name: "Verify {{ docker_mount_point }} is shared" 79 | shell: 80 | "cat /proc/1/mountinfo | grep shared | grep '/ {{ docker_mount_point }} ' | awk '{ print $5 }'" 81 | register: verify_shared_check 82 | failed_when: verify_shared_check.stdout != docker_mount_point 83 | 84 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/Debian/Ubuntu/16.04.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Enable Windows Container feature." 3 | win_feature: 4 | name: "Containers" 5 | state: present 6 | register: win_container_feat 7 | 8 | - name: "Restart" 9 | win_reboot: 10 | force: yes 11 | when: win_container_feat.reboot_required 12 | 13 | - name: "Wait for system to become reachable over WinRM" 14 | wait_for_connection: 15 | timeout: 1200 16 | 17 | - name: "Install NuGet 2.8.5.201." 
18 | win_shell: Install-PackageProvider -Name NuGet -MinimumVersion 2.8.5.201 -Force 19 | 20 | # 21 | # Windows firewall 22 | # 23 | - name: "Check for Windows Firewall" 24 | win_shell: Invoke-Command {% raw %} {Get-NetFirewallProfile -Profile Domain | Select-Object -ExpandProperty Enabled} {% endraw %} 25 | register: windows_firewall 26 | 27 | - name: "Setting fact for Windows Firewall" 28 | set_fact: 29 | windows_firewall_active: "{{ windows_firewall.stdout_lines[0] == 'True'}}" 30 | 31 | - name: "Opening windows ports" 32 | win_firewall_rule: 33 | name: "{{ item.name }}" 34 | localport: "{{ item.port }}" 35 | protocol: "{{ item.protocol }}" 36 | direction: in 37 | action: allow 38 | when: windows_firewall_active 39 | with_items: 40 | - name: "Port for the UCP web UI and API" 41 | port: 80 42 | protocol: tcp 43 | - name: "Port for the UCP web UI and API" 44 | port: 443 45 | protocol: tcp 46 | - name: "Port for the Docker Swarm manager. Used for backwards compatibility" 47 | port: 2376 48 | protocol: tcp 49 | - name: "Port for communication between swarm nodes" 50 | port: 2377 51 | protocol: tcp 52 | - name: "Port for overlay networking (udp)" 53 | port: 4789 54 | protocol: udp 55 | - name: "Port for gossip-based clustering (tcp)" 56 | port: 7946 57 | protocol: tcp 58 | - name: "Port for gossip-based clustering (udp)" 59 | port: 7946 60 | protocol: udp 61 | - name: "Port for a TLS proxy that provides access to UCP, Docker Engine, and Docker Swarm" 62 | port: 12376 63 | protocol: tcp 64 | - name: "Port for internal node configuration, cluster configuration, and HA" 65 | port: 12379 66 | protocol: tcp 67 | - name: "Port for internal node configuration, cluster configuration, and HA" 68 | port: 12380 69 | protocol: tcp 70 | - name: "Port for the certificate authority" 71 | port: 12381 72 | protocol: tcp 73 | - name: "Port for the UCP certificate authority" 74 | port: 12382 75 | protocol: tcp 76 | - name: "Port for the authentication storage backend" 77 | port: 12383
78 | protocol: tcp 79 | - name: "Port for the authentication storage backend for replication across managers" 80 | port: 12384 81 | protocol: tcp 82 | - name: "Port for the authentication service API" 83 | port: 12385 84 | protocol: tcp 85 | - name: "Port for the authentication worker" 86 | port: 12386 87 | protocol: tcp 88 | - name: "Port for the metrics service" 89 | port: 12387 90 | protocol: tcp 91 | 92 | - name: "Install Docker provider." 93 | win_psmodule: 94 | name: "DockerProvider" 95 | state: present 96 | 97 | - name: "Install Docker {{ docker_ee_package_version_win }}" 98 | win_shell: Install-Package Docker -ProviderName DockerProvider -Force {% if docker_ee_package_version_win is defined %} -RequiredVersion {{ docker_ee_package_version_win }} {% endif %} 99 | args: 100 | creates: 'C:\Program Files\Docker\' 101 | 102 | - name: "Configure additional engine options" 103 | win_copy: 104 | content: "{{ docker_ee_daemon_options_win | to_nice_json }}" 105 | dest: 'C:\Program Files\Docker\config\daemon.json' 106 | when: docker_ee_daemon_options_win is defined 107 | 108 | - name: "Remove daemon config when empty" 109 | win_file: 110 | path: 'C:\Program Files\Docker\config\daemon.json' 111 | state: absent 112 | when: docker_ee_daemon_options_win is not defined 113 | 114 | - name: "Start docker service" 115 | win_service: 116 | name: "docker" 117 | state: started 118 | start_mode: auto 119 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/install/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Installation ({{ ansible_os_family }})." 
3 | include_tasks: "{{ os_family }}.yml" 4 | when: ansible_os_family == os_family 5 | with_items: 6 | - "Debian" 7 | - "RedHat" 8 | - "Suse" 9 | - "Windows" 10 | loop_control: 11 | loop_var: os_family 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "Debian/{{ distribution }}.yml" 3 | when: ansible_distribution == distribution 4 | with_items: 5 | - "Ubuntu" 6 | loop_control: 7 | loop_var: distribution 8 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/Debian/Ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "Ubuntu/{{ distribution_version }}.yml" 3 | when: ansible_distribution_version == distribution_version 4 | with_items: 5 | - "14.04" 6 | - "16.04" 7 | loop_control: 8 | loop_var: distribution_version 9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/Debian/Ubuntu/14.04.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 3 | command: bash -c "cat /var/log/syslog | grep -i docker" 4 | register: _logs 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/Debian/Ubuntu/16.04.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 
3 | command: "journalctl --no-pager -u docker.service" 4 | register: _logs 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "RedHat/{{ distribution }}.yml" 3 | when: ansible_distribution == distribution 4 | with_items: 5 | - "CentOS" 6 | - "OracleLinux" 7 | - "RedHat" 8 | loop_control: 9 | loop_var: distribution 10 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/RedHat/CentOS.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 3 | command: "journalctl --no-pager -u docker.service" 4 | register: _logs 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/RedHat/OracleLinux.yml: -------------------------------------------------------------------------------- 1 | RedHat.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/RedHat/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 3 | command: bash -c "cat /var/log/messages | grep -i docker" 4 | register: _logs 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 3 | command: bash -c "cat /var/log/messages | grep -i docker" 4 | register: _logs 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 
3 | win_shell: Get-EventLog -LogName Application -Source Docker -After (Get-Date).AddHours(-1) # | Sort-Object Time 4 | register: _logs 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/logs/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs ({{ ansible_os_family }})." 3 | include_tasks: "{{ os_family }}.yml" 4 | when: ansible_os_family == os_family 5 | with_items: 6 | - "Debian" 7 | - "RedHat" 8 | - "Suse" 9 | - "Windows" 10 | loop_control: 11 | loop_var: os_family 12 | 13 | - name: "Create folders." 14 | local_action: 15 | module: file 16 | path: "{{ docker_logs_directory }}/{{ ansible_nodename }}" 17 | state: directory 18 | 19 | - name: "Save logs to file." 20 | local_action: 21 | module: copy 22 | content: "{{ _logs.stdout_lines | join('\n') | string }}" 23 | dest: "{{ docker_logs_directory }}/{{ ansible_nodename }}/engine.log" 24 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Include global and platform-specific variables. 4 | # 5 | - include_vars: "main.yml" 6 | 7 | - include_vars: "{{ os_family }}.yml" 8 | when: ansible_os_family == os_family 9 | with_items: 10 | - "Debian" 11 | - "Windows" 12 | - "RedHat" 13 | - "Suse" 14 | loop_control: 15 | loop_var: os_family 16 | 17 | - include_vars: "RedHat/{{ distribution }}.yml" 18 | when: distribution == ansible_distribution 19 | with_items: 20 | - "CentOS" 21 | - "RedHat" 22 | - "OracleLinux" 23 | loop_control: 24 | loop_var: distribution 25 | 26 | # 27 | # Modes. 28 | # 29 | - name: "Install." 30 | include_tasks: "install/main.yml" 31 | when: docker_ee_role_mode == "install" 32 | 33 | - name: "Uninstall." 
34 | include_tasks: "uninstall/main.yml" 35 | when: docker_ee_role_mode == "uninstall" 36 | 37 | - name: "Update." 38 | include_tasks: "update/main.yml" 39 | when: docker_ee_role_mode == "update" 40 | 41 | - name: "Logs." 42 | include_tasks: "logs/main.yml" 43 | when: docker_ee_role_mode == "logs" 44 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/uninstall/.unix.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Remove installed versions of Docker. 4 | # 5 | - name: "Remove installed versions of Docker." 6 | package: 7 | name: '{{ package }}' 8 | state: absent 9 | with_items: 10 | - docker 11 | - docker-ce 12 | - docker-common 13 | - docker-engine 14 | - docker-ee 15 | loop_control: 16 | loop_var: package 17 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/uninstall/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: ".unix.yml" 3 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/uninstall/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: ".unix.yml" 3 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/uninstall/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: ".unix.yml" 3 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/uninstall/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Uninstall Docker." 
3 | win_shell: Uninstall-Package Docker -ProviderName DockerProvider -Force 4 | args: 5 | removes: C:\Program Files\docker\dockerd.exe 6 | poll: 5 7 | async: 60 8 | 9 | - name: "Remove Docker provider." 10 | win_psmodule: 11 | name: DockerProvider 12 | state: absent 13 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/uninstall/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Uninstall ({{ ansible_os_family }})." 3 | include_tasks: "{{ os_family }}.yml" 4 | when: ansible_os_family == os_family 5 | with_items: 6 | - "Debian" 7 | - "RedHat" 8 | - "Suse" 9 | - "Windows" 10 | loop_control: 11 | loop_var: os_family 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/update/.unix.pre.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Stop Docker service." 3 | systemd: 4 | name: docker 5 | state: stopped 6 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/update/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Pre installation tasks. 4 | # 5 | - include_tasks: ".unix.pre.yml" 6 | 7 | # 8 | # Include system specific tasks. 9 | # 10 | - include_tasks: "../install/Debian.yml" 11 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/update/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Pre installation tasks. 4 | # 5 | - include_tasks: ".unix.pre.yml" 6 | 7 | # 8 | # Include system specific tasks. 
9 | # 10 | - include_tasks: "../install/RedHat.yml" 11 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/update/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Pre installation tasks. 4 | # 5 | - include_tasks: ".unix.pre.yml" 6 | 7 | # 8 | # Include system specific tasks. 9 | # 10 | - include_tasks: "../install/Suse.yml" 11 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/update/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Stop docker service." 3 | win_service: 4 | name: docker 5 | state: stopped 6 | 7 | - name: "Install Docker ({{ docker_ee_package_version_win }})." 8 | win_shell: Install-Package -Name docker -ProviderName DockerProvider -Update -Force {% if docker_ee_package_version_win is defined %} -RequiredVersion {{ docker_ee_package_version_win }} {% endif %} 9 | 10 | - name: "Restart docker service." 11 | win_service: 12 | name: docker 13 | state: started 14 | start_mode: auto 15 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tasks/update/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Update ({{ ansible_os_family }})." 
3 | include_tasks: "{{ os_family }}.yml" 4 | when: ansible_os_family == os_family 5 | with_items: 6 | - "Debian" 7 | - "RedHat" 8 | - "Suse" 9 | - "Windows" 10 | loop_control: 11 | loop_var: os_family 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/tests/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | roles: 4 | - docker-ee-install 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # The last {{ ansible_distribution | lower }} will work for all debian-based 3 | # distribution (as long as {{ ansible_distribution }} is correctly set). 4 | docker_ee_repository_url: "https://storebits.docker.com/ee/{{ ansible_distribution | lower }}/{{ docker_ee_subscription }}/{{ ansible_distribution | lower }}" 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # See RedHat/CentOS.yml (CentOS) and RedHat/RedHat.yml (RHEL). 
3 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/RedHat/CentOS.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_ee_repository_url: "https://storebits.docker.com/ee/{{ ansible_distribution | lower }}/{{ docker_ee_subscription }}/centos" 3 | docker_ee_extras_name: "{{ docker_ee_repository_url }}/centos/docker-ee.repo" 4 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/RedHat/OracleLinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # {{ ansible_distribution }} on RHEL is `RedHat`, not `RHEL`. 3 | docker_ee_repository_url: "https://storebits.docker.com/ee/{{ ansible_distribution | lower }}/{{ docker_ee_subscription }}/oraclelinux" 4 | docker_ee_extras_name: "{% if infra_stack == \"aws\" %}rhui-REGION-rhel-server-extras{% else %}rhel-7-server-extras-rpms{% endif %}" 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/RedHat/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # {{ ansible_distribution }} on RHEL is `RedHat`, not `RHEL`. 
3 | docker_ee_repository_url: "https://storebits.docker.com/ee/{{ ansible_distribution | lower }}/{{ docker_ee_subscription }}/rhel" 4 | docker_ee_extras_name: "{% if infra_stack == \"aws\" %}rhui-REGION-rhel-server-extras{% else %}rhel-7-server-extras-rpms{% endif %}" 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # {{ ansible_distribution }} 3 | docker_ee_repository_url: "https://storebits.docker.com/ee/{{ ansible_distribution | lower }}/{{ docker_ee_subscription }}/sles" 4 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ee/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Install AUFS dependencies (Ubuntu 14.04)." 4 | when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04' 5 | block: 6 | - shell: "uname -r" 7 | register: uname_out 8 | - name: "Install linux-image-extra-{{ uname_out.stdout }}." 9 | apt: 10 | name: "linux-image-extra-{{ uname_out.stdout }}" 11 | state: present 12 | - name: "Install linux-image-extra-virtual." 
13 | apt: 14 | name: linux-image-extra-virtual 15 | state: present 16 | 17 | - set_fact: 18 | docker_storage_fstype: ext4 19 | docker_storage_driver: aufs3 20 | 21 | # if EE2.0 22 | #set_fact: 23 | # docker_storage_fstype: ext4 24 | # docker_storage_driver: overlay2 25 | # when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '16.04' 26 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "RedHat/{{ distribution }}.yml" 3 | when: distribution == ansible_distribution 4 | with_items: 5 | - "CentOS" 6 | - "RedHat" 7 | - "OracleLinux" 8 | loop_control: 9 | loop_var: distribution 10 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/RedHat/CentOS.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - set_fact: 4 | docker_storage_fstype: xfs 5 | docker_storage_driver: devicemapper 6 | 7 | # if EE2.0 8 | #set_fact: 9 | # docker_storage_fstype: xfs 10 | # docker_storage_driver: overlay2 11 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/RedHat/OracleLinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - set_fact: 4 | docker_storage_fstype: btrfs 5 | docker_storage_driver: devicemapper 6 | 7 | # if EE2.0 8 | #set_fact: 9 | # docker_storage_fstype: xfs 10 | # docker_storage_driver: overlay2 11 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/RedHat/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - set_fact: 4 | docker_storage_fstype: xfs 5 | docker_storage_driver: overlay2 6 | 
-------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - set_fact: 4 | docker_storage_fstype: btrfs 5 | docker_storage_driver: btrfs 6 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-storage/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - meta: end_play 4 | when: docker_storage_volume is not defined 5 | 6 | - name: "Installation ( {{ ansible_os_family }} )." 7 | include_tasks: "{{ os_family }}.yml" 8 | when: ansible_os_family == os_family 9 | with_items: 10 | - "Debian" 11 | - "RedHat" 12 | - "Suse" 13 | loop_control: 14 | loop_var: os_family 15 | 16 | ## EE 17.06 17 | # 18 | #if rhel 19 | # docker_storage_fstype = xfs 20 | # docker_storage_driver = overlay2 21 | #if centos7 22 | # docker_storage_fstype = xfs 23 | # docker_storage_driver = devicemapper 24 | #if ubuntu 25 | # docker_storage_fstype = ext4 26 | # docker_storage_driver = aufs3 27 | #if sles 28 | # docker_storage_fstype = btrfs 29 | # docker_storage_driver = btrfs 30 | #if oracle 31 | # docker_storage_fstype = btrfs 32 | # docker_storage_driver = devicemapper 33 | # 34 | ## EE 2.0 35 | # 36 | #if rhel 37 | # docker_storage_fstype = xfs 38 | # docker_storage_driver = overlay2 39 | #if centos7 40 | # docker_storage_fstype = xfs 41 | # docker_storage_driver = overlay2 42 | #if ubuntu 14.04 43 | # docker_storage_fstype = ext4 44 | # docker_storage_driver = aufs3 45 | #if ubuntu 16.04 46 | # docker_storage_fstype = ext4 47 | # docker_storage_driver = overlay2 48 | #if sles 49 | # docker_storage_fstype = btrfs 50 | # docker_storage_driver = btrfs 51 | #if oracle 52 | # docker_storage_fstype = xfs 53 | # docker_storage_driver = overlay2 54 | 55 | - set_fact: 56 | mkfs_opts: "" 57 | 58 | - name: "mkfs opts" 59 | when: 
docker_storage_fstype == 'xfs' 60 | set_fact: 61 | mkfs_opts: "-n ftype=1" 62 | 63 | - name: "mkfs.{{ docker_storage_fstype }}" 64 | filesystem: 65 | dev: "{{ docker_storage_volume }}" 66 | force: "no" 67 | opts: "{{ mkfs_opts }}" 68 | fstype: "{{ docker_storage_fstype }}" 69 | 70 | - name: "Mount /var/lib/docker" 71 | mount: 72 | fstype: "{{ docker_storage_fstype }}" 73 | path: "/var/lib/docker" 74 | src: "{{ docker_storage_volume }}" 75 | state: "mounted" 76 | 77 | - when: docker_ee_daemon_options is not defined 78 | set_fact: 79 | docker_ee_daemon_options: { } 80 | 81 | - name: "Add storage-driver={{ docker_storage_driver }} to docker_ee_daemon_options." 82 | set_fact: 83 | docker_ee_daemon_options: "{{ docker_ee_daemon_options|combine({ 'storage-driver': docker_storage_driver }) }}" 84 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_ucp_image_repository: docker 3 | docker_ucp_version: latest 4 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Antony Méchin 3 | description: Initialize a Docker Swarm cluster. 4 | company: Docker, Inc. 
5 | 6 | min_ansible_version: 1.9 7 | 8 | platforms: 9 | #- name: EL 10 | # versions: 11 | # - all 12 | # - 5 13 | # - 6 14 | # - 7 15 | #- name: GenericUNIX 16 | # versions: 17 | # - all 18 | # - any 19 | #- name: Solaris 20 | # versions: 21 | # - all 22 | # - 10 23 | # - 11.0 24 | # - 11.1 25 | # - 11.2 26 | # - 11.3 27 | #- name: Fedora 28 | # versions: 29 | # - all 30 | # - 16 31 | # - 17 32 | # - 18 33 | # - 19 34 | # - 20 35 | # - 21 36 | # - 22 37 | # - 23 38 | #- name: Windows 39 | # versions: 40 | # - all 41 | # - 2012R2 42 | #- name: SmartOS 43 | # versions: 44 | # - all 45 | # - any 46 | #- name: opensuse 47 | # versions: 48 | # - all 49 | # - 12.1 50 | # - 12.2 51 | # - 12.3 52 | # - 13.1 53 | # - 13.2 54 | #- name: Amazon 55 | # versions: 56 | # - all 57 | # - 2013.03 58 | # - 2013.09 59 | #- name: GenericBSD 60 | # versions: 61 | # - all 62 | # - any 63 | #- name: FreeBSD 64 | # versions: 65 | # - all 66 | # - 8.0 67 | # - 8.1 68 | # - 8.2 69 | # - 8.3 70 | # - 8.4 71 | # - 9.0 72 | # - 9.1 73 | # - 9.1 74 | # - 9.2 75 | # - 9.3 76 | # - 10.0 77 | # - 10.1 78 | # - 10.2 79 | - name: Ubuntu 80 | versions: 81 | # - all 82 | # - lucid 83 | # - maverick 84 | # - natty 85 | # - oneiric 86 | # - precise 87 | # - quantal 88 | # - raring 89 | # - saucy 90 | - trusty 91 | # - utopic 92 | # - vivid 93 | # - wily 94 | #- name: SLES 95 | # versions: 96 | # - all 97 | # - 10SP3 98 | # - 10SP4 99 | # - 11 100 | # - 11SP1 101 | # - 11SP2 102 | # - 11SP3 103 | #- name: GenericLinux 104 | # versions: 105 | # - all 106 | # - any 107 | #- name: Debian 108 | # versions: 109 | # - all 110 | # - etch 111 | # - jessie 112 | # - lenny 113 | # - squeeze 114 | # - wheezy 115 | 116 | dependencies: [] 117 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/cleanup/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Clean up down nodes, if any." 
3 | command: bash -c "docker node rm $(docker node ls | grep \" Down \" | cut -d' ' -f1)" 4 | failed_when: False 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/init/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Check Docker Swarm current state. 4 | # 5 | - name: Check if already active 6 | command: "docker info --format {% raw %} '{{ .Swarm.LocalNodeState }}' {% endraw %}" 7 | register: activity_status 8 | 9 | # 10 | # Get eth0 advertise IP for Docker Swarm. 11 | # 12 | - name: Get advertise IP for swarm. 13 | shell: "ip route get 1.1.1.1 | awk 'NR==1 {print $NF}'" 14 | args: 15 | executable: /bin/bash 16 | register: eth0_ip 17 | 18 | - name: Show the IP we found 19 | debug: 20 | var: eth0_ip 21 | 22 | - name: Advertise the eth0 to be used 23 | debug: 24 | msg: "Advertised IP is: {{ eth0_ip.stdout }}" 25 | 26 | # 27 | # Initialize Docker Swarm. 28 | # 29 | - name: Initialize swarm. 30 | command: docker swarm init --advertise-addr {{ docker_swarm_listen_address | default(eth0_ip.stdout) }} 31 | when: activity_status.stdout == "inactive" 32 | 33 | # 34 | # Export tokens. 35 | # 36 | - name: Export manager token. 37 | command: docker swarm join-token manager -q 38 | register: swarm-manager-token 39 | 40 | - name: Export worker token. 41 | command: docker swarm join-token worker -q 42 | register: swarm-worker-token 43 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/join/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if already active. 3 | command: "docker info --format '{{ '{{' }}.Swarm.LocalNodeState {{ '}}' }}'" 4 | register: _swarm_node_status 5 | 6 | - name: "Host is not a part the swarm yet." 7 | when: _swarm_node_status.stdout == "inactive" 8 | block: 9 | - name: Join Docker Swarm. 
10 | command: docker swarm join --token "{{ hostvars[groups[docker_swarm_leader][0]][docker_swarm_token_type].stdout }}" "{{ docker_swarm_listen_address | default(hostvars[groups[docker_swarm_leader][0]]['ansible_default_ipv4']['address']) }}" 11 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/join/RedHat.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/join/Suse.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/join/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if Windows Node is in a swarm." 3 | win_shell: "docker info --format '{{ '{{' }}.Swarm.LocalNodeState {{ '}}' }}'" 4 | register: swarm_node_status 5 | 6 | - name: "Prepare Windows node to join swarm" 7 | when: "swarm_node_status.stdout_lines[0] == 'inactive'" 8 | win_scheduled_task: 9 | name: swarm-join-prep-task 10 | username: SYSTEM 11 | actions: 12 | - path: PowerShell.exe 13 | arguments: > 14 | docker run 15 | --rm {{ docker_ucp_image_repository }}/ucp-agent-win:{{ docker_ucp_version }} 16 | windows-script 17 | {% if use_dev_version | default(False) %} --image-version dev: {% endif %} 18 | | powershell -noprofile -noninteractive -command 'Invoke-Expression -Command $input' 19 | - path: cmd.exe 20 | arguments: /c schtasks.exe /Delete /TN "swarm-join-prep-task" /F 21 | triggers: 22 | - type: registration 23 | 24 | - name: "Wait for Windows node to be prepared to join swarm" 25 | when: "swarm_node_status.stdout_lines[0] == 'inactive'" 26 | win_scheduled_task_stat: 27 | name: swarm-join-prep-task 28 | register: prepare_task_stat 29 | 
until: (prepare_task_stat.state is defined and prepare_task_stat.state.status != "TASK_STATE_RUNNING") or (prepare_task_stat.task_exists == False) 30 | retries: 1000 31 | delay: 10 32 | 33 | - name: "Join Windows node to swarm" 34 | when: "swarm_node_status.stdout_lines[0] == 'inactive'" 35 | win_scheduled_task: 36 | name: swarm-join-task 37 | username: SYSTEM 38 | actions: 39 | - path: PowerShell.exe 40 | arguments: > 41 | docker swarm join 42 | --token {{ hostvars[groups[docker_swarm_leader][0]][docker_swarm_token_type].stdout }} 43 | {{ docker_swarm_listen_address | default(hostvars[groups[docker_swarm_leader][0]]['ansible_default_ipv4']['address']) }} 44 | - path: cmd.exe 45 | arguments: /c schtasks.exe /Delete /TN "swarm-join-task" /F 46 | triggers: 47 | - type: registration 48 | 49 | - name: "Wait for the Windows node network to stabilize" 50 | wait_for_connection: 51 | delay: 20 52 | 53 | - name: "Wait for the Windows node to join the swarm" 54 | when: "swarm_node_status.stdout_lines[0] == 'inactive'" 55 | win_scheduled_task_stat: 56 | name: swarm-join-task 57 | register: task_stat 58 | until: (task_stat.state is defined and task_stat.state.status != "TASK_STATE_RUNNING") or (task_stat.task_exists == False) 59 | retries: 30 60 | delay: 10 61 | 62 | - name: "Check if join succeeded" 63 | when: "swarm_node_status.stdout_lines[0] == 'inactive'" 64 | win_shell: "docker info --format '{{ '{{' }}.Swarm.LocalNodeState {{ '}}' }}'" 65 | register: swarm_node_status_after_join 66 | failed_when: "swarm_node_status_after_join.stdout_lines[0] == 'inactive'" 67 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/join/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Joining ({{ ansible_os_family }})." 
3 | include_tasks: "{{ os_family }}.yml" 4 | when: ansible_os_family == os_family 5 | with_items: 6 | - "Debian" 7 | - "RedHat" 8 | - "Suse" 9 | - "Windows" 10 | loop_control: 11 | loop_var: os_family 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/leave/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if the node is member of a swarm." 3 | shell: "docker info | grep \"Swarm: active\"" 4 | register: active 5 | ignore_errors: true 6 | 7 | - name: "Leave swarm." 8 | when: active is succeeded and active.stdout != "" 9 | block: 10 | - include_tasks: "../cleanup/main.yml" 11 | 12 | - name: "Demote node if it is a manager" 13 | command: "docker node demote {{ ansible_nodename }}" 14 | failed_when: False 15 | 16 | - name: "Leave swarm." 17 | command: "docker swarm leave" 18 | failed_when: False 19 | register: left 20 | 21 | #Ignoring errors b/c https://github.com/moby/moby/issues/34140 22 | - name: "Leave swarm (--force)." 23 | command: "docker swarm leave --force" 24 | ignore_errors: true 25 | when: left.rc != 0 26 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/leave/RedHat.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/leave/Suse.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/leave/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if the node is member of a swarm." 
3 | win_shell: "docker info | Select-String -Pattern \"Swarm: active\"" 4 | register: active 5 | ignore_errors: true 6 | 7 | - name: "Leave swarm." 8 | when: active is succeeded and active.stdout != "" 9 | block: 10 | - name: "Demote node if it is a manager" 11 | win_shell: "docker node demote {{ ansible_nodename }}" 12 | failed_when: False 13 | 14 | # Leave swarm asynchronously. 15 | # 16 | # Because docker swarm leave does something funky with network interfaces, 17 | # WinRM gets disconnected. 18 | - name: "Leave swarm." 19 | win_command: "docker swarm leave" 20 | poll: 5 21 | async: 45 22 | register: left 23 | failed_when: False 24 | 25 | - name: "Leave swarm (--force)." 26 | win_command: "docker swarm leave --force" 27 | poll: 5 28 | async: 45 29 | when: left.rc is defined and left.rc != 0 30 | failed_when: False 31 | 32 | - name: "Check if the node is member of a swarm." 33 | win_shell: "docker info | Select-String -Pattern \"Swarm: active\"" 34 | register: active 35 | ignore_errors: true 36 | 37 | - assert: 38 | that: 39 | - active is failed or active.stdout == "" 40 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/leave/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Leaving ({{ ansible_os_family }})." 3 | include_tasks: "{{ os_family }}.yml" 4 | when: ansible_os_family == os_family 5 | with_items: 6 | - "Debian" 7 | - "RedHat" 8 | - "Suse" 9 | - "Windows" 10 | loop_control: 11 | loop_var: os_family 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Init. 4 | # 5 | - name: "Initialize." 6 | include_tasks: "init/main.yml" 7 | when: docker_swarm_role_mode == "init" 8 | 9 | # 10 | # Join. 11 | # 12 | - name: "Join." 
13 | include_tasks: "join/main.yml" 14 | when: docker_swarm_role_mode == "join" 15 | 16 | # 17 | # Validate. 18 | # 19 | - name: "Validate." 20 | include_tasks: "validate/main.yml" 21 | when: docker_swarm_role_mode == "validate" 22 | 23 | # 24 | # Leave. 25 | # 26 | - name: "Leave." 27 | include_tasks: "leave/main.yml" 28 | when: docker_swarm_role_mode == "leave" 29 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-swarm/tasks/validate/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "../cleanup/main.yml" 3 | 4 | - name: Get number of Docker Swarm workers. 5 | shell: expr `docker info --format '{% raw %}{{.Swarm.Nodes}}{% endraw %}'` - `docker info --format '{% raw %}{{.Swarm.Managers}}{% endraw %}'` 6 | register: _number_of_swarm_workers 7 | 8 | - name: Get number of Docker Swarm managers. 9 | shell: docker info --format '{% raw %}{{.Swarm.Managers}}{% endraw %}' 10 | register: _number_of_swarm_managers 11 | 12 | - assert: 13 | that: 14 | - "_number_of_swarm_workers.stdout == expected_docker_swarm_workers" 15 | - "_number_of_swarm_managers.stdout == expected_docker_swarm_managers" 16 | msg: "'_number_of_swarm_managers' ({{ _number_of_swarm_managers.stdout }}) must be exactly {{ expected_docker_swarm_managers }} and '_number_of_swarm_workers' ({{ _number_of_swarm_workers.stdout }}) must be exactly {{ expected_docker_swarm_workers }}." 17 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Package. 3 | docker_ucp_image_repository: docker 4 | docker_ucp_version: latest 5 | 6 | # Subscription path and certificates directory path. 
7 | docker_ucp_certificate_directory: "{{ playbook_dir }}/ssl_certificates" 8 | docker_ucp_license_path: "{{ playbook_dir }}/docker_subscription.lic" 9 | 10 | # 11 | docker_ucp_admin_username: admin 12 | docker_ucp_lb: "{{ ansible_host }}" 13 | 14 | # 15 | docker_logs_directory: "{{ playbook_dir }}/.logs" 16 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/files/.gitignore: -------------------------------------------------------------------------------- 1 | *.pem 2 | *.crt 3 | *.key 4 | openssl.cnf 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/files/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/BretFisher/dogvscat/fc16b7359b2e06002b0dc8d67ce8115fbe30a4f5/dci-aws/roles/docker-ucp/files/.gitkeep -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/files/makecert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | openssl req -nodes -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 3650 -subj "/C=US/ST=Somewhere/L=Anywhere/O=Computer/CN=*.dogvscat.biz" -config openssl.cnf 3 | # cat cert.crt key.key > full.pem 4 | cp cert.pem ca.pem 5 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Antony Méchin 3 | description: Install Docker Universal Control Plane (UCP). 4 | company: Docker, Inc. 
5 | 6 | min_ansible_version: 2.2 7 | 8 | platforms: 9 | #- name: EL 10 | # versions: 11 | # - all 12 | # - 5 13 | # - 6 14 | # - 7 15 | #- name: GenericUNIX 16 | # versions: 17 | # - all 18 | # - any 19 | #- name: Solaris 20 | # versions: 21 | # - all 22 | # - 10 23 | # - 11.0 24 | # - 11.1 25 | # - 11.2 26 | # - 11.3 27 | #- name: Fedora 28 | # versions: 29 | # - all 30 | # - 16 31 | # - 17 32 | # - 18 33 | # - 19 34 | # - 20 35 | # - 21 36 | # - 22 37 | # - 23 38 | #- name: Windows 39 | # versions: 40 | # - all 41 | # - 2012R2 42 | #- name: SmartOS 43 | # versions: 44 | # - all 45 | # - any 46 | #- name: opensuse 47 | # versions: 48 | # - all 49 | # - 12.1 50 | # - 12.2 51 | # - 12.3 52 | # - 13.1 53 | # - 13.2 54 | #- name: Amazon 55 | # versions: 56 | # - all 57 | # - 2013.03 58 | # - 2013.09 59 | #- name: GenericBSD 60 | # versions: 61 | # - all 62 | # - any 63 | #- name: FreeBSD 64 | # versions: 65 | # - all 66 | # - 8.0 67 | # - 8.1 68 | # - 8.2 69 | # - 8.3 70 | # - 8.4 71 | # - 9.0 72 | # - 9.1 73 | # - 9.1 74 | # - 9.2 75 | # - 9.3 76 | # - 10.0 77 | # - 10.1 78 | # - 10.2 79 | - name: Ubuntu 80 | versions: 81 | # - all 82 | # - lucid 83 | # - maverick 84 | # - natty 85 | # - oneiric 86 | # - precise 87 | # - quantal 88 | # - raring 89 | # - saucy 90 | - trusty 91 | # - utopic 92 | # - vivid 93 | # - wily 94 | #- name: SLES 95 | # versions: 96 | # - all 97 | # - 10SP3 98 | # - 10SP4 99 | # - 11 100 | # - 11SP1 101 | # - 11SP2 102 | # - 11SP3 103 | #- name: GenericLinux 104 | # versions: 105 | # - all 106 | # - any 107 | #- name: Debian 108 | # versions: 109 | # - all 110 | # - etch 111 | # - jessie 112 | # - lenny 113 | # - squeeze 114 | # - wheezy 115 | 116 | dependencies: [] 117 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/install/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if UCP is running." 
3 | shell: "docker ps -q --filter name=ucp-controller" 4 | register: _ucp_already_running 5 | 6 | - name: "Set ucp_already_running fact." 7 | set_fact: 8 | ucp_already_running: "{{ _ucp_already_running.stdout_lines | length > 0 }}" 9 | 10 | - name: "Install." 11 | when: "not ucp_already_running" 12 | block: 13 | - name: "Pull UCP (version: {{ docker_ucp_version }})." 14 | shell: > 15 | docker image pull 16 | "{{ docker_ucp_image_repository }}/ucp:{{ docker_ucp_version }}" 17 | register: pulled 18 | retries: 10 19 | until: pulled is succeeded 20 | 21 | - name: "Check for certificate (at {{ docker_ucp_certificate_directory }}/cert.pem)." 22 | local_action: stat path="{{ docker_ucp_certificate_directory }}/cert.pem" 23 | become: no 24 | register: certificate_stat 25 | 26 | - name: "Import certificates." 27 | when: certificate_stat.stat.exists == true 28 | block: 29 | # Start block (certificates exist). 30 | - name: "Create volume for certificates." 31 | docker_volume: 32 | name: ucp-controller-server-certs 33 | state: present 34 | register: volume_created 35 | 36 | - name: "Get volume path." 37 | shell: "docker volume inspect ucp-controller-server-certs --format {% raw %}'{{ .Mountpoint }}'{% endraw %}" 38 | register: certificates_volume 39 | when: volume_created is succeeded 40 | 41 | - name: "Copy certificate" 42 | copy: 43 | src: "{{ docker_ucp_certificate_directory }}/{{ file }}" 44 | dest: "{{ certificates_volume.stdout_lines[0] }}" 45 | with_items: 46 | - "ca.pem" 47 | - "cert.pem" 48 | - "key.pem" 49 | loop_control: 50 | loop_var: file 51 | when: volume_created is succeeded 52 | # End block (certificates exist). 53 | 54 | - name: "Check for license file (at {{ docker_ucp_license_path }})." 55 | local_action: stat path="{{ docker_ucp_license_path }}" 56 | become: no 57 | register: license_stat 58 | 59 | - set_fact: 60 | subscription_file: "{{ docker_ucp_license_path | basename }}" 61 | 62 | - name: "Read license file." 
63 | when: license_stat.stat.exists == true 64 | block: 65 | # Start block (license file exists). 66 | - name: "Copy subscription." 67 | copy: 68 | src: "{{ docker_ucp_license_path }}" 69 | dest: "/tmp/" 70 | 71 | - block: 72 | # Start block (rescue). 73 | - name: "Install UCP (version: {{ docker_ucp_version }})." 74 | shell: > 75 | docker run 76 | --rm 77 | --name ucp 78 | --volume /var/run/docker.sock:/var/run/docker.sock 79 | {% if license_stat.stat.exists == true %} --volume "/tmp/{{ subscription_file}}:/config/docker_subscription.lic:ro" {% endif %} 80 | --env "UCP_ADMIN_USERNAME={{ docker_ucp_admin_username }}" 81 | --env "UCP_ADMIN_PASSWORD={{ docker_ucp_admin_password }}" 82 | {{ docker_ucp_image_repository }}/ucp:{{ docker_ucp_version }} 83 | install 84 | {% if use_dev_version | default(False) %} --image-version "dev:" {% endif %} 85 | --host-address "{{ docker_swarm_listen_address | default(ansible_default_ipv4['address']) }}" 86 | --san "{{ docker_ucp_lb }}" 87 | {% if ansible_host != docker_ucp_lb %} --san "{{ ansible_host }}" {% endif %} 88 | {% if certificate_stat.stat.exists == true %} --external-server-cert {% endif %} 89 | # End block (rescue). 90 | rescue: 91 | - shell: docker logs ucp-reconcile | tee ucp-reconcile.log 92 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/logs/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 
3 | command: "docker logs {{ container }}" 4 | register: _logs 5 | with_items: 6 | - "ucp-controller" 7 | - "ucp-reconcile" 8 | - "ucp-swarm-manager" 9 | failed_when: False 10 | loop_control: 11 | loop_var: container 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/logs/RedHat.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/logs/Suse.yml: -------------------------------------------------------------------------------- 1 | Debian.yml -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/logs/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get logs." 3 | win_shell: "docker logs {{ container }}" 4 | register: _logs 5 | with_items: 6 | - "ucp-controller" 7 | - "ucp-reconcile" 8 | - "ucp-swarm-manager" 9 | ignore_errors: True 10 | loop_control: 11 | loop_var: container 12 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/logs/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Get logs. 4 | # 5 | - name: "Get." 6 | include_tasks: "{{ os_family }}.yml" 7 | when: ansible_os_family == os_family 8 | with_items: 9 | - "Debian" 10 | - "RedHat" 11 | - "Suse" 12 | - "Windows" 13 | loop_control: 14 | loop_var: os_family 15 | 16 | - name: "Get logs." 17 | when: _logs is succeeded 18 | block: 19 | # 20 | # Format. 
21 | # 22 | - set_fact: 23 | out: "{{ out|default({}) | combine( {item.container: item.stdout_lines | join('\n') | string } ) }}" 24 | err: "{{ err|default({}) | combine( {item.container: item.stderr_lines | join('\n') | string } ) }}" 25 | when: item.rc == 0 26 | with_items: "{{ _logs.results }}" 27 | 28 | # 29 | # Create folders. 30 | # 31 | - name: "Create folders." 32 | local_action: 33 | module: file 34 | path: "{{ docker_logs_directory }}/{{ ansible_nodename }}/ucp" 35 | state: directory 36 | 37 | # 38 | # Write to disk. 39 | # 40 | - name: "Save logs (stdout) to file." 41 | when: "item.value | length > 0" 42 | local_action: 43 | module: copy 44 | content: "{{ item.value }}" 45 | dest: "{{ docker_logs_directory }}/{{ ansible_nodename }}/ucp/{{ item.key }}.out.log" 46 | with_dict: "{{ out | default({}) }}" 47 | 48 | - name: "Save logs (stderr) to file." 49 | when: "item.value | length > 0" 50 | local_action: 51 | module: copy 52 | content: "{{ item.value }}" 53 | dest: "{{ docker_logs_directory }}/{{ ansible_nodename }}/ucp/{{ item.key }}.err.log" 54 | with_dict: "{{ err | default({}) }}" 55 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Install. 4 | # 5 | - name: "Install." 6 | include_tasks: "install/main.yml" 7 | when: docker_ucp_role_mode == "install" 8 | 9 | # 10 | # Validate managers. 11 | # 12 | - name: "Validate managers." 13 | include_tasks: "validate-managers/main.yml" 14 | when: docker_ucp_role_mode == "validate-managers" 15 | 16 | # 17 | # Validate workers. 18 | # 19 | - name: "Validate workers." 20 | include_tasks: "validate-workers/main.yml" 21 | when: docker_ucp_role_mode == "validate-workers" 22 | 23 | # 24 | # Uninstall. 25 | # 26 | - name: "Uninstall UCP." 
27 | include_tasks: "uninstall/main.yml" 28 | when: docker_ucp_role_mode == "uninstall" 29 | 30 | # 31 | # Update. 32 | # 33 | - name: "Update UCP." 34 | include_tasks: "update/main.yml" 35 | when: docker_ucp_role_mode == "update" 36 | 37 | # 38 | # Pull logs. 39 | # 40 | - name: "Logs." 41 | include_tasks: "logs/main.yml" 42 | when: docker_ucp_role_mode == "logs" 43 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/uninstall/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get InstanceID if UCP is running." 3 | shell: "docker container inspect ucp-controller --format {% raw %} '{{ index .Config.Labels \"com.docker.ucp.InstanceID\" }}' {% endraw %}" 4 | register: id 5 | ignore_errors: true 6 | 7 | - name: "Leave UCP." 8 | when: id is succeeded 9 | block: 10 | - name: "Leave UCP." 11 | shell: > 12 | docker run 13 | --rm 14 | --name ucp 15 | --volume /var/run/docker.sock:/var/run/docker.sock 16 | {{ docker_ucp_image_repository }}/ucp:{{ docker_ucp_version }} 17 | uninstall-ucp 18 | {% if use_dev_version | default(False) %} --image-version "dev:" {% endif %} 19 | --id "{{ id.stdout_lines[0] }}" 20 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/update/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Check if UCP is running." 3 | shell: "docker service ls --filter name=ucp-agent" 4 | register: _running 5 | 6 | - name: "Set is_already_running fact." 7 | set_fact: 8 | running: "{{ _running.stdout_lines | length > 0 }}" 9 | 10 | # 11 | # Exit if UCP is not running. 12 | # 13 | - name: "Check if UCP is running." 14 | meta: end_play 15 | when: not running 16 | 17 | # 18 | # Compare versions. 19 | # 20 | - name: "Get currently running version." 
21 | shell: docker container ls --filter name=ucp-controller --format {% raw %} '{{with .Image -}}{{index (split . ":") 1 }}{{- end}}' {% endraw %} 22 | register: extracted_version 23 | 24 | - name: "Check if new version is higher (is {{extracted_version.stdout}} < {{docker_ucp_version}})" 25 | set_fact: 26 | need_update: "{{ extracted_version.stdout | version_compare(docker_ucp_version, '<')}}" 27 | 28 | - name: "Update needed" 29 | when: need_update 30 | block: 31 | - name: "Pull desired version of UCP ({{ docker_ucp_version }})." 32 | shell: > 33 | docker image pull 34 | "{{ docker_ucp_image_repository }}/ucp:{{ docker_ucp_version }}" 35 | register: pulled 36 | retries: 10 37 | until: pulled is succeeded 38 | 39 | - name: "Get InstanceID if UCP is running." 40 | shell: "docker container inspect ucp-controller --format {% raw %} '{{ index .Config.Labels \"com.docker.ucp.InstanceID\" }}' {% endraw %}" 41 | register: id 42 | 43 | - name: "Upgrade UCP." 44 | shell: > 45 | docker run 46 | --rm 47 | --name ucp 48 | --volume /var/run/docker.sock:/var/run/docker.sock 49 | --env "UCP_ADMIN_USERNAME={{ docker_ucp_admin_username }}" 50 | --env "UCP_ADMIN_PASSWORD={{ docker_ucp_admin_password }}" 51 | {{ docker_ucp_image_repository }}/ucp:{{ docker_ucp_version }} upgrade 52 | --id "{{ id.stdout }}" 53 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/validate-managers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Wait for Docker UCP to be accessible on 443." 3 | wait_for: 4 | port: 443 5 | timeout: 600 6 | 7 | # Sometimes, even if the port is open, we get a HTTP 500 (Internal Server 8 | # Error). Sleep a few seconds to make sure we aren't too fast for UCP. 9 | - name: "Sleep (5 seconds)." 10 | wait_for: 11 | timeout: 5 12 | 13 | - name: "Wait for Docker UCP to be accessible via {{ ansible_host }}." 
14 | uri: 15 | url: "https://{{ ansible_host }}/_ping" 16 | validate_certs: False 17 | register: health 18 | until: health.status == 200 19 | retries: 20 20 | delay: 10 21 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/validate-workers/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Validate that the ucp-proxy container is running 4 | # 5 | - name: Check for ucp-proxy container. 6 | shell: docker inspect ucp-proxy 7 | # Note[Stephen Lane-Walsh]: This _should_ use the docker_container module, but as of now it will not 8 | # work the way we want with it, so shell commands it is. 9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/validate-workers/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Validate that the ucp-proxy container is running 4 | # 5 | - name: Check for ucp-proxy container. 6 | shell: docker inspect ucp-proxy 7 | # Note[Stephen Lane-Walsh]: This _should_ use the docker_container module, but as of now it will not 8 | # work the way we want with it, so shell commands it is. 9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/validate-workers/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Validate that the ucp-proxy container is running 4 | # 5 | - name: Check for ucp-proxy container. 6 | shell: docker inspect ucp-proxy 7 | # Note[Stephen Lane-Walsh]: This _should_ use the docker_container module, but as of now it will not 8 | # work the way we want with it, so shell commands it is. 
9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/validate-workers/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Validate that the ucp-proxy container is running 4 | # 5 | #- name: Check for ucp-proxy container. 6 | # win_shell: docker inspect ucp-proxy 7 | ## Note[Stephen Lane-Walsh]: This _should_ use the docker_container module, but as of now it will not 8 | # work the way we want with it, so shell commands it is. 9 | -------------------------------------------------------------------------------- /dci-aws/roles/docker-ucp/tasks/validate-workers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Include system specific tasks. 4 | # 5 | 6 | - include_tasks: "{{ os_family }}.yml" 7 | when: ansible_os_family == os_family 8 | with_items: 9 | - "Debian" 10 | - "RedHat" 11 | - "Suse" 12 | - "Windows" 13 | loop_control: 14 | loop_var: os_family 15 | -------------------------------------------------------------------------------- /dci-aws/roles/host-readiness/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # 2 | # First boot may not be complete. 3 | # Give it 5 minutes to come up. 4 | # 5 | - name: Wait for host to be available 6 | wait_for_connection: 7 | timeout: 600 8 | -------------------------------------------------------------------------------- /dci-aws/roles/load-balancer/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check for ufw." 4 | shell: "systemctl is-active ufw --quiet" 5 | register: ufw_active_result 6 | failed_when: False 7 | 8 | - name: "Update firewall (ufw)." 
9 | ufw: 10 | port: "{{ item }}" 11 | rule: "allow" 12 | with_items: "{{ [ 8181 ] | union(ports) | union(ssl_ports) }}" 13 | -------------------------------------------------------------------------------- /dci-aws/roles/load-balancer/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "Check for firewalld." 4 | shell: "systemctl is-active firewalld --quiet" 5 | register: firewalld_active_result 6 | failed_when: False 7 | 8 | - name: "Update firewall (firewalld)." 9 | firewalld: 10 | port: "{{ item }}/tcp" 11 | permanent: true 12 | state: enabled 13 | with_items: "{{ [ 8181 ] | union(ports) | union(ssl_ports) }}" 14 | -------------------------------------------------------------------------------- /dci-aws/roles/load-balancer/tasks/Sles.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Update firewall (SuSEfirewall2)" 3 | shell: 4 | /sbin/SuSEfirewall2 open EXT TCP "{{ item }}" 5 | with_items: "{{ [ 8181 ] | union(ports) | union(ssl_ports) }}" 6 | -------------------------------------------------------------------------------- /dci-aws/roles/load-balancer/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - set_fact: 3 | cert: "" 4 | cert_bindmount: "" 5 | 6 | - name: "Check for certificate." 7 | local_action: stat path="{{ role_path }}/../../{{ docker_ucp_certificate_directory }}/cert.pem" 8 | become: False 9 | register: certificate_stat 10 | 11 | - name: "Copy certificate." 12 | copy: 13 | src: "{{ role_path }}/../../{{ docker_ucp_certificate_directory }}/{{ item }}" 14 | dest: "/etc/ssl/{{ item }}" 15 | with_items: 16 | - "ca.pem" 17 | - "cert.pem" 18 | - "key.pem" 19 | when: certificate_stat.stat.exists == true 20 | 21 | # HAProxy is very particular about its certs 22 | - name: "Generate combined certificate." 
23 | shell: "cat /etc/ssl/key.pem /etc/ssl/cert.pem /etc/ssl/ca.pem > /etc/ssl/combined.pem" 24 | when: certificate_stat.stat.exists == true 25 | 26 | - set_fact: 27 | cert: "crt /etc/ssl/combined.pem" 28 | cert_bindmount: "--volume /etc/ssl/combined.pem:/etc/ssl/combined.pem:ro" 29 | when: certificate_stat.stat.exists == true 30 | 31 | - name: "Configure HAProxy." 32 | template: 33 | src: haproxy.cfg.j2 34 | dest: /etc/haproxy.cfg 35 | mode: 0644 36 | register: config 37 | 38 | - include_tasks: "{{ os_family }}.yml" 39 | when: ansible_os_family == os_family 40 | tags: 41 | - jq 42 | with_items: 43 | - "Debian" 44 | - "RedHat" 45 | loop_control: 46 | loop_var: os_family 47 | 48 | - set_fact: portlist="" 49 | 50 | - name: "Generate Port List." 51 | set_fact: portlist="{{portlist}} --expose {{port}} --publish {{port}}:{{port}}" 52 | with_items: "{{ [ 8181 ] | union(ports) | union(ssl_ports) }}" 53 | loop_control: 54 | loop_var: port 55 | 56 | - name: "Remove existing HAProxy Container." 57 | shell: "docker stop --time 60 load-balancer; docker rm -f load-balancer" 58 | failed_when: False 59 | 60 | - name: "Start HAProxy Container." 
61 | shell: "docker run --name load-balancer --detach --restart unless-stopped {{ cert_bindmount }} --volume /etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro {{ portlist }} haproxy:1.8-alpine haproxy -d -f /usr/local/etc/haproxy/haproxy.cfg" 62 | -------------------------------------------------------------------------------- /dci-aws/roles/load-balancer/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local0 3 | log /dev/log local1 notice 4 | 5 | defaults 6 | log global 7 | mode http 8 | option httplog 9 | option dontlognull 10 | timeout connect 5000 11 | timeout client 50000 12 | timeout server 50000 13 | 14 | listen stats 15 | bind :8181 16 | mode http 17 | stats enable 18 | stats hide-version 19 | stats realm Haproxy\ Statistics 20 | stats uri / 21 | 22 | {% for port in ports %} 23 | frontend {{ port }} 24 | mode tcp 25 | option tcplog 26 | bind :{{ port }} 27 | default_backend upstream_servers_{{ port }} 28 | 29 | backend upstream_servers_{{ port }} 30 | mode tcp 31 | option httpchk GET /_ping HTTP/1.1\r\nHost:\ {{ frontend }} 32 | {% for backend in backends %} 33 | server {{ backend }} {{ hostvars[backend].ansible_host }}:{{ port }} weight 100 check 34 | {% endfor %} 35 | {% endfor %} 36 | 37 | {% for port in ssl_ports %} 38 | frontend {{ port }} 39 | mode tcp 40 | option tcplog 41 | bind :{{ port }} {{ cert }} 42 | default_backend upstream_servers_{{ port }} 43 | 44 | backend upstream_servers_{{ port }} 45 | mode tcp 46 | option httpchk GET /_ping HTTP/1.1\r\nHost:\ {{ frontend }} 47 | {% for backend in backends %} 48 | server {{ backend }} {{ hostvars[backend].ansible_host }}:{{ port }} weight 100 check check-ssl verify none 49 | {% endfor %} 50 | {% endfor %} 51 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/Debian.yml: -------------------------------------------------------------------------------- 1 
| --- 2 | - assert: 3 | that: 4 | - docker_ee_subscriptions_ubuntu is defined 5 | msg: "You need 'docker_ee_subscriptions_ubuntu' defined in order to install Docker EE on Ubuntu-based hosts." 6 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "RedHat/{{ ansible_distribution }}.yml" 3 | when: distribution == ansible_distribution 4 | with_items: 5 | - "CentOS" 6 | - "RedHat" 7 | - "OracleLinux" 8 | loop_control: 9 | loop_var: distribution 10 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/RedHat/CentOS.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - assert: 3 | that: 4 | - docker_ee_subscriptions_centos is defined 5 | msg: "You need 'docker_ee_subscriptions_centos' defined in order to install Docker EE on CentOS hosts." 6 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/RedHat/OracleLinux.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - assert: 3 | that: 4 | - docker_ee_subscriptions_oracle is defined 5 | msg: "You need 'docker_ee_subscriptions_oracle' defined in order to install Docker EE on OracleLinux hosts." 6 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/RedHat/RedHat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - assert: 3 | that: 4 | - docker_ee_subscriptions_redhat is defined 5 | msg: "You need 'docker_ee_subscriptions_redhat' defined in order to install Docker EE on RHEL hosts." 
6 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/Suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - assert: 3 | that: 4 | - docker_ee_subscriptions_sles is defined 5 | msg: "You need 'docker_ee_subscriptions_sles' defined in order to install Docker EE on Suse-based hosts." 6 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/Windows.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /dci-aws/roles/validate/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: "{{ ansible_os_family }}.yml" 3 | when: ansible_os_family == os_family 4 | with_items: 5 | - "Debian" 6 | - "Windows" 7 | - "RedHat" 8 | - "Suse" 9 | loop_control: 10 | loop_var: os_family 11 | 12 | - assert: 13 | that: docker_ucp_admin_password is defined 14 | msg: "'docker_ucp_admin_password' is mandatory." 
15 | -------------------------------------------------------------------------------- /dci-aws/s3.tf: -------------------------------------------------------------------------------- 1 | # S3 Bucket: 2 | 3 | resource "aws_s3_bucket" "dtr_storage_bucket" { 4 | bucket_prefix = "${lower(var.deployment)}-dtrstorage-" 5 | acl = "private" 6 | 7 | tags { 8 | Name = "${var.deployment}-DTRStorage" 9 | Environment = "${var.deployment}" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /dci-aws/security-group.tf: -------------------------------------------------------------------------------- 1 | # Security Groups: 2 | 3 | resource "aws_security_group" "ddc" { 4 | name = "${var.deployment}_ddc-default" 5 | description = "Default Security Group for Docker EE" 6 | vpc_id = "${local.vpc_id}" 7 | 8 | ingress { 9 | from_port = 22 10 | to_port = 22 11 | protocol = "tcp" 12 | cidr_blocks = ["0.0.0.0/0"] 13 | } 14 | 15 | ingress { 16 | from_port = 443 17 | to_port = 443 18 | protocol = "tcp" 19 | cidr_blocks = ["0.0.0.0/0"] 20 | } 21 | 22 | # Kubernetes API 23 | ingress { 24 | from_port = 6443 25 | to_port = 6443 26 | protocol = "tcp" 27 | cidr_blocks = ["0.0.0.0/0"] 28 | } 29 | 30 | # WinRM HTTP & HTTPS remote access - needed for Ansible 31 | ingress { 32 | from_port = 5985 33 | to_port = 5985 34 | protocol = "tcp" 35 | cidr_blocks = ["0.0.0.0/0"] 36 | } 37 | 38 | ingress { 39 | from_port = 5986 40 | to_port = 5986 41 | protocol = "tcp" 42 | cidr_blocks = ["0.0.0.0/0"] 43 | } 44 | 45 | # best to comment RDP access out after initial deployment testing! 
46 | ingress { 47 | from_port = 3389 48 | to_port = 3389 49 | protocol = "tcp" 50 | cidr_blocks = ["0.0.0.0/0"] 51 | } 52 | 53 | ingress { 54 | from_port = 0 55 | to_port = 0 56 | protocol = "-1" 57 | self = true 58 | } 59 | 60 | egress { 61 | from_port = 0 62 | to_port = 0 63 | protocol = "-1" 64 | cidr_blocks = ["0.0.0.0/0"] 65 | } 66 | 67 | timeouts { 68 | delete = "1h" 69 | } 70 | } 71 | 72 | resource "aws_security_group" "apps" { 73 | name = "${var.deployment}_apps-default" 74 | description = "Default Security Group for Docker EE applications" 75 | vpc_id = "${local.vpc_id}" 76 | 77 | ingress { 78 | from_port = 443 79 | to_port = 443 80 | protocol = "tcp" 81 | cidr_blocks = ["0.0.0.0/0"] 82 | } 83 | 84 | ingress { 85 | from_port = 8080 86 | to_port = 8080 87 | protocol = "tcp" 88 | cidr_blocks = ["0.0.0.0/0"] 89 | } 90 | 91 | egress { 92 | from_port = 0 93 | to_port = 0 94 | protocol = "-1" 95 | cidr_blocks = ["0.0.0.0/0"] 96 | } 97 | 98 | timeouts { 99 | delete = "1h" 100 | } 101 | } 102 | 103 | resource "aws_security_group" "elb" { 104 | name = "${var.deployment}_elb-default" 105 | description = "Default Security Group for Docker EE ELBs" 106 | vpc_id = "${local.vpc_id}" 107 | 108 | ingress { 109 | from_port = 443 110 | to_port = 443 111 | protocol = "tcp" 112 | cidr_blocks = ["0.0.0.0/0"] 113 | } 114 | 115 | # Kubernetes API 116 | ingress { 117 | from_port = 6443 118 | to_port = 6443 119 | protocol = "tcp" 120 | cidr_blocks = ["0.0.0.0/0"] 121 | } 122 | 123 | egress { 124 | from_port = 0 125 | to_port = 0 126 | protocol = "-1" 127 | cidr_blocks = ["0.0.0.0/0"] 128 | } 129 | 130 | timeouts { 131 | delete = "1h" 132 | } 133 | } 134 | 135 | resource "aws_security_group" "dtr" { 136 | name = "${var.deployment}_dtr-default" 137 | description = "Default Security Group for Docker EE DTR ELB" 138 | vpc_id = "${local.vpc_id}" 139 | 140 | ingress { 141 | from_port = 443 142 | to_port = 443 143 | protocol = "tcp" 144 | cidr_blocks = ["0.0.0.0/0"] 145 | } 146 | 147 | 
ingress { 148 | from_port = 80 149 | to_port = 80 150 | protocol = "tcp" 151 | cidr_blocks = ["0.0.0.0/0"] 152 | } 153 | 154 | egress { 155 | from_port = 0 156 | to_port = 0 157 | protocol = "-1" 158 | cidr_blocks = ["0.0.0.0/0"] 159 | } 160 | 161 | timeouts { 162 | delete = "1h" 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /dci-aws/setup-windows.ps1: -------------------------------------------------------------------------------- 1 | # Based on https://stackoverflow.com/questions/41383840/attach-ebs-volume-to-windows-ec2-with-powershell#41384463 2 | $diskNumber = (Get-Disk | Where-Object { ($_.OperationalStatus -eq "Offline") -and ($_."PartitionStyle" -eq "RAW") }).Number 3 | Initialize-Disk -Number $diskNumber -PartitionStyle "MBR" 4 | $part = New-Partition -DiskNumber $diskNumber -UseMaximumSize -IsActive -AssignDriveLetter 5 | Format-Volume -DriveLetter $part.DriveLetter -Confirm:$FALSE 6 | 7 | # Remove the old version of Docker that is installed by default 8 | Uninstall-Package Docker -ProviderName DockerMsftProvider 9 | 10 | # Configure Docker to use new location 11 | Set-Content -Encoding String "C:\ProgramData\Docker\config\daemon.json" "{ `"data-root`": `"d:\\`" }" 12 | -------------------------------------------------------------------------------- /dci-aws/terraform.tfvars.example: -------------------------------------------------------------------------------- 1 | # Docker EE Cluster Topology 2 | #################################### 3 | linux_ucp_manager_count = 3 4 | linux_ucp_worker_count = 3 5 | linux_dtr_count = 3 6 | windows_ucp_worker_count = 0 7 | 8 | deployment = "docker-ee" # VM/Hostname prefix string. Prepended to all resources. 
9 | 10 | # Docker EE Configuration 11 | #################################### 12 | ansible_inventory = "inventory/1.hosts" 13 | ucp_license_path = "./docker_subscription.lic" 14 | ucp_admin_password = "" # If unset, check $ansible_inventory for generated value 15 | 16 | # AWS Credentials 17 | ################################### 18 | # Nota Bene: you need to have ~/.aws/credentials in place 19 | region = "" # The region to deploy (e.g. us-east-2) 20 | key_name = "" # (a short string identifying your AWS SSH key, e.g. "alice") 21 | 22 | # Make sure this key has no password to enable automatic retrieval of Windows instance passwords. 23 | private_key_path = ".../key.pem" # The path to the private key corresponding to key_name 24 | 25 | 26 | # AWS AMIs for Docker EE 27 | ################################### 28 | # Linux instances will use the newest AMI matching this pattern 29 | linux_ami_name = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180306" 30 | linux_ami_owner = "099720109477" # OwnerID from 'aws ec2 describe-images' 31 | 32 | # Windows instances will use the newest AMI matching this pattern 33 | windows_ami_name = "Windows_Server-2016-English-Full-Containers-2017.11.29" 34 | windows_ami_owner = "801119661308" # OwnerID from 'aws ec2 describe-images' 35 | 36 | # VM Credentials and Domains 37 | #################################### 38 | linux_user = "" 39 | #windows_user = "Administrator" 40 | #windows_admin_password = "" 41 | 42 | # Docker EE VM Settings 43 | #################################### 44 | #See instances.auto.tfvars to change Instance Types 45 | #linux_manager_volume_size = 100 46 | #linux_worker_volume_size = 20 47 | #windows_worker_volume_size = 100 48 | #dtr_instance_volume_size = 100 49 | 50 | # Docker EE Uninstallation 51 | #################################### 52 | #linux_ucp_uninstall_command = "" 53 | #windows_ucp_uninstall_command = "" 54 | #linux_dtr_uninstall_command = "" 55 | 56 | # Load balancer DNS names 57 | 
#################################### 58 | #docker_ucp_lb = "ucp.example.com" 59 | #docker_dtr_lb = "dtr.example.com" 60 | 61 | # AWS Configuration Options 62 | ################################### 63 | # VPC configuration 64 | # By default, a new VPC will be created with all appropriate routes and subnets. 65 | # However, if you wish to use a pre-existing VPC then: 66 | # 1. Set the existing VPC ID here 67 | # 2. Allocate a block of IP addresses within, as vpc_cidr. 68 | # Terraform will create one subnet within this block for each availablility zone. 69 | # This will take up a further 4 bits in the address, so vpc_cidr can't be larger than a /22. 70 | # After running terraform, you'll need to associate the new subsets with some routing 71 | # table and ensure that they have Internet access. 72 | #vpc_id = "vpc-0417d63da5aeb8ed6" 73 | #vpc_cidr = "10.0.16.0/20" 74 | 75 | efs_supported = 1 # 1 if the region supports EFS (0 if not) 76 | 77 | # OPTIONAL: Database VM 78 | #################################### 79 | #linux_database_count = 0 80 | #windows_database_count = 0 81 | 82 | # OPTIONAL: Build VM 83 | #################################### 84 | #linux_build_server_count = 0 85 | #windows_build_server_count = 0 86 | 87 | -------------------------------------------------------------------------------- /dci-aws/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get replica id" 3 | hosts: dtr-primary 4 | roles: 5 | - role: docker-dtr 6 | tags: 7 | - dtr 8 | - DTR 9 | vars: 10 | docker_dtr_role_mode: "get-running-replica-id" 11 | 12 | - name: "Uninstall DTR" 13 | hosts: dtr:!dtr-primary 14 | roles: 15 | - role: docker-dtr 16 | tags: 17 | - dtr 18 | - DTR 19 | serial: 1 20 | vars: 21 | docker_dtr_role_mode: "uninstall" 22 | ucp_nodes: managers 23 | dtr_primary: dtr-primary 24 | 25 | - hosts: dtr-primary 26 | roles: 27 | - role: docker-dtr 28 | tags: 29 | - dtr 30 | - DTR 31 | vars: 32 | docker_dtr_role_mode: 
"uninstall" 33 | ucp_nodes: managers 34 | dtr_primary: dtr-primary 35 | 36 | - name: "Uninstall UCP" 37 | hosts: ucp-primary 38 | roles: 39 | - role: docker-ucp 40 | tags: 41 | - ucp 42 | - UCP 43 | vars: 44 | docker_ucp_role_mode: "uninstall" 45 | 46 | - name: "Leave swarm" 47 | hosts: workers 48 | roles: 49 | - role: docker-swarm 50 | tags: 51 | - swarm 52 | vars: 53 | docker_swarm_role_mode: "leave" 54 | 55 | - hosts: managers 56 | roles: 57 | - role: docker-swarm 58 | tags: 59 | - swarm 60 | vars: 61 | docker_swarm_role_mode: "leave" 62 | 63 | - name: "Uninstall Docker EE" 64 | hosts: all 65 | roles: 66 | - role: docker-ee 67 | tags: 68 | - engine 69 | vars: 70 | docker_ee_role_mode: "uninstall" 71 | -------------------------------------------------------------------------------- /dci-aws/update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install Docker EE" 3 | hosts: all 4 | roles: 5 | - role: docker-ee 6 | tags: 7 | - engine 8 | vars: 9 | docker_ee_subscriptions: 10 | Ubuntu: "{{ docker_ee_subscriptions_ubuntu|default(omit) }}" 11 | RedHat: "{{ docker_ee_subscriptions_redhat|default(omit) }}" 12 | CentOS: "{{ docker_ee_subscriptions_centos|default(omit) }}" 13 | OracleLinux: "{{ docker_ee_subscriptions_oracle|default(omit) }}" 14 | SLES: "{{ docker_ee_subscriptions_sles|default(omit) }}" 15 | docker_ee_role_mode: "update" 16 | 17 | - name: "Update UCP" 18 | hosts: ucp 19 | roles: 20 | - role: docker-ucp 21 | tags: 22 | - ucp 23 | - UCP 24 | vars: 25 | docker_ucp_role_mode: "update" 26 | 27 | - name: "Update DTR" 28 | hosts: dtr 29 | roles: 30 | - role: docker-dtr 31 | serial: 1 32 | tags: 33 | - dtr 34 | - DTR 35 | vars: 36 | docker_dtr_role_mode: "update" 37 | ucp_nodes: managers 38 | 39 | # 40 | # Get the logs. 
41 | # 42 | - import_playbook: "logs.yml" 43 | -------------------------------------------------------------------------------- /dci-aws/variables.tf: -------------------------------------------------------------------------------- 1 | # Cloudstor requirement 2 | variable "efs_supported" { 3 | description = "Set to '1' if the AWS region supports EFS, or 0 if not (see https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/)." 4 | } 5 | 6 | variable "vpc_id" { 7 | description = "If set, create sub-nets within a pre-existing VPC instead of creating a new one." 8 | default = "" 9 | } 10 | 11 | variable "vpc_cidr" { 12 | description = "CIDR block for the VPC created, or for the Docker EE allocation within an existing VPC. Another 4 bits will be used as the subnet ID, so a /22 is about the maximum possible." 13 | default = "172.31.0.0/16" 14 | } 15 | 16 | # --------------------------------------------------------------------------------------------------------------------- 17 | # OPTIONAL PARAMETERS 18 | # --------------------------------------------------------------------------------------------------------------------- 19 | 20 | variable "key_name" { 21 | description = "The name of the key pair to associate with the instance" 22 | } 23 | 24 | variable "private_key_path" { 25 | description = "The private key corresponding to 'key_name'" 26 | } 27 | 28 | # Linux nodes disk 29 | variable "linux_manager_volume_size" { 30 | description = "The volume size in GB for Linux managers" 31 | default = "100" 32 | } 33 | 34 | variable "linux_worker_volume_size" { 35 | description = "The volume size in GB for Linux workers" 36 | default = "20" 37 | } 38 | 39 | variable "dtr_instance_volume_size" { 40 | description = "The volume size in GB for DTR instances" 41 | default = "100" 42 | } 43 | 44 | # Windows nodes disk 45 | variable "windows_worker_volume_size" { 46 | description = "The volume size in GB for Windows workers" 47 | default = "100" 48 | } 49 | 50 
| # AMIs 51 | variable "linux_ami_owner" { 52 | description = "The OwnerID of the Linux AMI (from 'aws ec2 describe-images')" 53 | default = "099720109477" 54 | } 55 | 56 | variable "linux_ami_name" { 57 | description = "Linux instances will use the newest AMI matching this pattern" 58 | default = "ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20180306" 59 | } 60 | 61 | variable "windows_ami_owner" { 62 | description = "The OwnerID of the Windows AMI (from 'aws ec2 describe-images')" 63 | default = "801119661308" 64 | } 65 | 66 | variable "windows_ami_name" { 67 | description = "Windows instances will use the newest AMI matching this pattern" 68 | default = "Windows_Server-2016-English-Full-Containers-2017.11.29" 69 | } 70 | -------------------------------------------------------------------------------- /dci-aws/vpc.tf: -------------------------------------------------------------------------------- 1 | # Create the stack VPC 2 | resource "aws_vpc" "docker" { 3 | count = "${local.create_vpc}" 4 | cidr_block = "${var.vpc_cidr}" 5 | enable_dns_support = true 6 | enable_dns_hostnames = true 7 | 8 | tags = { 9 | Name = "${format("%s-vpc", "${var.deployment}")}" 10 | } 11 | } 12 | 13 | locals { 14 | create_vpc = "${length(var.vpc_id) == 0 ? 1 : 0}" 15 | vpc_id = "${local.create_vpc ? 
join("", aws_vpc.docker.*.id) : var.vpc_id}" 16 | } 17 | 18 | # Create the associated subnet - Need to loop based on Count of AZ 19 | # CIDR block mapping from vars 20 | resource "aws_subnet" "pubsubnet" { 21 | vpc_id = "${local.vpc_id}" 22 | count = "${length("${data.aws_availability_zones.available.names}")}" 23 | 24 | cidr_block = "${cidrsubnet("${var.vpc_cidr}", 4, count.index)}" 25 | map_public_ip_on_launch = true 26 | availability_zone = "${element(data.aws_availability_zones.available.names, count.index)}" 27 | 28 | tags = { 29 | Name = "${format("%s-Subnet-%d", "${var.deployment}", count.index + 1)}" 30 | } 31 | } 32 | 33 | ## Public route table association 34 | resource "aws_route_table_association" "public" { 35 | count = "${length("${data.aws_availability_zones.available.names}") * local.create_vpc}" 36 | subnet_id = "${element(aws_subnet.pubsubnet.*.id, count.index)}" 37 | route_table_id = "${aws_route_table.public_igw.id}" 38 | } 39 | 40 | resource "aws_internet_gateway" "igw" { 41 | vpc_id = "${local.vpc_id}" 42 | count = "${local.create_vpc}" 43 | 44 | tags { 45 | Name = "InternetGateway" 46 | } 47 | } 48 | 49 | resource "aws_route_table" "public_igw" { 50 | vpc_id = "${local.vpc_id}" 51 | count = "${local.create_vpc}" 52 | 53 | route { 54 | cidr_block = "0.0.0.0/0" 55 | gateway_id = "${aws_internet_gateway.igw.id}" 56 | } 57 | 58 | tags { 59 | Name = "${format("%s-rt", "${var.deployment}")}" 60 | } 61 | } 62 | 63 | resource "aws_route" "internet_access" { 64 | count = "${local.create_vpc}" 65 | route_table_id = "${aws_route_table.public_igw.id}" 66 | destination_cidr_block = "0.0.0.0/0" 67 | gateway_id = "${aws_internet_gateway.igw.id}" 68 | 69 | timeouts { 70 | create = "15m" 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /delete-servers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | # delete all docker machines starting 
with dvc 5 | for server in {1..3}; do 6 | docker-machine rm -y dvc${server} & 7 | done 8 | 9 | # delete all storage in DO (be sure you are ok deleting ALL storage in an account) 10 | # doctl compute volume ls --format ID --no-header | while read -r id; do doctl compute volume rm -f "$id"; done 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /enable-monitoring.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | # enable monitoring 5 | for server in {1..3}; do 6 | docker-machine scp daemon.json dvc${server}:/etc/docker/ && 7 | docker-machine ssh dvc${server} systemctl restart docker & 8 | done 9 | -------------------------------------------------------------------------------- /generate-some-votes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # this short script will post some HTTP data to the Voting app to generate votes 4 | # this makes for a better demo and if you can see the results then you know the solution works 5 | 6 | # create POST data files with ab friendly formats 7 | python make-data.py 8 | 9 | # create 3000 votes 10 | ab -n 1000 -c 50 -p posta -T "application/x-www-form-urlencoded" http://vote.dogvs.cat/ 11 | ab -n 1000 -c 50 -p postb -T "application/x-www-form-urlencoded" http://vote.dogvs.cat/ 12 | ab -n 1000 -c 50 -p posta -T "application/x-www-form-urlencoded" http://vote.dogvs.cat/ 13 | -------------------------------------------------------------------------------- /ghost.dockerapp: -------------------------------------------------------------------------------- 1 | # This section contains your application metadata. 2 | # Version of the application 3 | version: 0.1.0 4 | # Name of the application 5 | name: ghost 6 | # A short description of the application 7 | description: 8 | # Namespace to use when pushing to a registry. This is typically your Hub username. 
9 | namespace: dogvscat 10 | # List of application maintainers with name and email for each 11 | maintainers: 12 | - name: Bret Fisher 13 | email: bret@bretfisher.com 14 | 15 | --- 16 | # This section contains the Compose file that describes your application services. 17 | version: '3.5' 18 | 19 | services: 20 | 21 | ghost: 22 | image: ghost:2-alpine 23 | networks: 24 | - proxy 25 | - ghost 26 | volumes: 27 | - content:/var/lib/ghost/content 28 | environment: 29 | # see https://docs.ghost.org/docs/config#section-running-ghost-with-config-env-variables 30 | NODE_ENV: production 31 | # database__client: mysql 32 | # database__connection__host: db 33 | # database__connection__user: root 34 | # database__connection__password: YOURDBPASSWORDhereee 35 | # database__connection__database: ghost 36 | url: http://ghost.dogvs.cat 37 | deploy: 38 | replicas: 1 39 | labels: 40 | traefik.http.routers.ghost.rule: Host(`ghost.dogvs.cat`) 41 | traefik.http.routers.ghost.entryPoints: websecure 42 | traefik.http.routers.ghost.service: ghost 43 | traefik.http.services.ghost.loadbalancer.server.port: 2368 44 | # logging: 45 | # driver: "gelf" 46 | # options: 47 | # gelf-address: "udp://127.0.0.1:5000" 48 | 49 | # TODO: backup for static content 50 | 51 | 52 | networks: 53 | ghost: {} 54 | proxy: 55 | external: true 56 | 57 | volumes: 58 | content: 59 | driver: rexray/dobs 60 | driver_opts: 61 | size: 1 62 | 63 | 64 | 65 | --- 66 | # This section contains the default values for your application settings. 
67 | {} 68 | -------------------------------------------------------------------------------- /hash-config-secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | # this is part of a solution to Swarm Stack Configs that change 5 | # TODO: explain how this works in readme 6 | 7 | export LOGSTASH_CONF=$(shasum logstash.conf -a 512 | cut -c1-16) 8 | 9 | -------------------------------------------------------------------------------- /logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | gelf { 3 | port => 5000 4 | } 5 | } 6 | 7 | ## Add your filters / logstash plugins configuration here 8 | 9 | output { 10 | elasticsearch { 11 | hosts => "elasticsearch:9200" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /make-data.py: -------------------------------------------------------------------------------- 1 | # this creates urlencode-friendly files without EOL 2 | import urllib 3 | outfile = open('postb', 'w') 4 | params = ({ 'vote': 'b' }) 5 | encoded = urllib.urlencode(params) 6 | outfile.write(encoded) 7 | outfile.close() 8 | outfile = open('posta', 'w') 9 | params = ({ 'vote': 'a' }) 10 | encoded = urllib.urlencode(params) 11 | outfile.write(encoded) 12 | outfile.close() 13 | 14 | -------------------------------------------------------------------------------- /menu-ee/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:alpine 2 | 3 | COPY index.html /usr/share/nginx/html/index.html 4 | 5 | -------------------------------------------------------------------------------- /menu-ee/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Dog vs. Cat 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 | 28 | Fork me on GitHub 29 | 30 | 31 | -------------------------------------------------------------------------------- /menu.dockerapp: -------------------------------------------------------------------------------- 1 | # This section contains your application metadata. 2 | # Version of the application 3 | version: 0.1.0 4 | # Name of the application 5 | name: menu 6 | # A short description of the application 7 | description: 8 | # Namespace to use when pushing to a registry. This is typically your Hub username. 9 | namespace: dogvscat 10 | # List of application maintainers with name and email for each 11 | maintainers: 12 | - name: Bret Fisher 13 | email: bret@bretfisher.com 14 | 15 | --- 16 | # This section contains the Compose file that describes your application services. 17 | version: '3.4' 18 | 19 | services: 20 | 21 | menu: 22 | image: bretfisher/dogvscat-menu 23 | build: 24 | context: menu 25 | deploy: 26 | replicas: 3 27 | labels: 28 | traefik.http.routers.dogvs.rule: Host(`www.dogvs.cat`) || Host(`dogvs.cat`) 29 | traefik.http.routers.dogvs.entryPoints: websecure 30 | traefik.http.routers.dogvs.service: dogvs 31 | traefik.http.services.dogvs.loadbalancer.server.port: 80 32 | # logging: 33 | # driver: "gelf" 34 | # options: 35 | # gelf-address: "udp://127.0.0.1:5000" 36 | networks: 37 | - proxy 38 | 39 | networks: 40 | proxy: 41 | external: true 42 | 43 | 44 | --- 45 | # This section contains the default values for your application settings. 46 | {} 47 | -------------------------------------------------------------------------------- /menu/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:alpine 2 | 3 | COPY index.html /usr/share/nginx/html/index.html 4 | 5 | -------------------------------------------------------------------------------- /menu/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Dog vs. 
Cat 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 |
15 | 37 | Fork me on GitHub 38 | 39 | 40 | -------------------------------------------------------------------------------- /old-stack-proxy.yml: -------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | # this is an alternate proxy setup using haproxy 4 | 5 | services: 6 | 7 | proxy: 8 | image: vfarcic/docker-flow-proxy 9 | ports: 10 | - '80:80' 11 | - '443:443' 12 | # high public port for healthchecks from external LB 13 | # ping url: /v1/docker-flow-proxy/ping 14 | - '9433:8080' 15 | networks: 16 | - proxy 17 | environment: 18 | LISTENER_ADDRESS: "swarm-listener" 19 | MODE: "swarm" 20 | DEBUG: "true" 21 | # secrets: 22 | # - cert-docker-dev 23 | deploy: 24 | mode: global 25 | healthcheck: 26 | test: ["CMD", "wget", "-qO-", "http://localhost:8080/v1/docker-flow-proxy/ping"] 27 | 28 | swarm-listener: 29 | image: vfarcic/docker-flow-swarm-listener 30 | networks: 31 | - proxy 32 | volumes: 33 | - '/var/run/docker.sock:/var/run/docker.sock' 34 | environment: 35 | - >- 36 | DF_NOTIFY_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure 37 | - >- 38 | DF_NOTIFY_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove 39 | deploy: 40 | placement: 41 | constraints: 42 | - node.role == manager 43 | 44 | networks: 45 | proxy: 46 | external: true 47 | # secrets: 48 | # cert-docker-dev: 49 | # file: ./nginx/dev-full.pem 50 | -------------------------------------------------------------------------------- /posta: -------------------------------------------------------------------------------- 1 | vote=a -------------------------------------------------------------------------------- /postb: -------------------------------------------------------------------------------- 1 | vote=b -------------------------------------------------------------------------------- /secret-ghost-db: -------------------------------------------------------------------------------- 1 | YOURDBPASSWORDhere 2 | 
-------------------------------------------------------------------------------- /stack-ee-ghost.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | ghost: 15 | <<: *default-opts 16 | image: ghost:1-alpine 17 | environment: 18 | # see https://docs.ghost.org/docs/config#section-running-ghost-with-config-env-variables 19 | database__client: mysql 20 | database__connection__host: db 21 | database__connection__user: root 22 | database__connection__password: YOURDBPASSWORDhere 23 | database__connection__database: ghost 24 | url: http://ghost.dogvscat.biz 25 | deploy: 26 | replicas: 1 27 | labels: 28 | com.docker.lb.port: 2368 29 | com.docker.lb.hosts: ghost.dogvscat.biz 30 | 31 | 32 | db: 33 | <<: *default-opts 34 | image: mysql:5.7 35 | volumes: 36 | - db:/var/lib/mysql 37 | secrets: 38 | - ghost-db-password 39 | deploy: 40 | endpoint_mode: dnsrr 41 | environment: 42 | MYSQL_ROOT_PASSWORD_FILE: /run/secrets/ghost-db-password 43 | 44 | db-backup: 45 | <<: *default-opts 46 | image: mysql:5.7 47 | command: sh -c "while true; do /usr/bin/mysqldump -u root --password=$$(< $$MYSQL_ROOT_PASSWORD_FILE) --all-databases --host=db > /backup/backup$$(date +'%H').sql ; sleep 3600; done" 48 | volumes: 49 | - db-backup:/backup 50 | secrets: 51 | - ghost-db-password 52 | environment: 53 | MYSQL_ROOT_PASSWORD_FILE: /run/secrets/ghost-db-password 54 | 55 | #TODO: backup for static content 56 | 57 | 58 | volumes: 59 | db: 60 | driver: cloudstor:latest 61 | driver_opts: 62 | size: 1 63 | db-backup: 64 | driver: cloudstor:latest 65 | driver_opts: 66 | size: 1 67 | 68 | secrets: 69 | ghost-db-password: 70 | external: true 71 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /stack-ee-menu.yml: 
-------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | menu: 6 | image: bretfisher/dogvscat-menu:ee 7 | build: 8 | context: menu 9 | networks: 10 | - menu 11 | deploy: 12 | replicas: 3 13 | labels: 14 | com.docker.lb.port: 80 15 | com.docker.lb.hosts: www.dogvscat.biz 16 | com.docker.lb.network: menu 17 | 18 | networks: 19 | menu: {} -------------------------------------------------------------------------------- /stack-ee-voting.yml: -------------------------------------------------------------------------------- 1 | version: '3.2' 2 | 3 | services: 4 | 5 | redis: 6 | image: redis:alpine 7 | command: redis-server --appendonly yes 8 | volumes: 9 | - redis-data:/data 10 | networks: 11 | - frontend 12 | deploy: 13 | endpoint_mode: dnsrr 14 | 15 | db: 16 | image: postgres:9.6 17 | volumes: 18 | - db-data:/var/lib/postgresql/data 19 | networks: 20 | - backend 21 | deploy: 22 | endpoint_mode: dnsrr 23 | 24 | vote: 25 | image: bretfisher/examplevotingapp_vote 26 | networks: 27 | - frontend 28 | deploy: 29 | replicas: 1 30 | labels: 31 | com.docker.lb.port: 80 32 | com.docker.lb.hosts: vote.dogvscat.biz 33 | com.docker.lb.network: frontend 34 | 35 | result: 36 | image: bretfisher/examplevotingapp_result 37 | networks: 38 | - backend 39 | deploy: 40 | labels: 41 | com.docker.lb.port: 80 42 | com.docker.lb.hosts: result.dogvscat.biz 43 | com.docker.lb.network: backend 44 | 45 | worker: 46 | image: bretfisher/examplevotingapp_worker:java 47 | networks: 48 | - frontend 49 | - backend 50 | deploy: 51 | replicas: 1 52 | 53 | 54 | networks: 55 | frontend: {} 56 | backend: {} 57 | 58 | volumes: 59 | db-data: 60 | driver: cloudstor:latest 61 | driver_opts: 62 | size: 1 63 | redis-data: 64 | driver: cloudstor:latest 65 | driver_opts: 66 | size: 1 67 | 68 | -------------------------------------------------------------------------------- /stack-elk.override.yml: 
-------------------------------------------------------------------------------- 1 | version: '3.3' 2 | 3 | services: 4 | logstash: 5 | ports: 6 | - "12201:12201/udp" 7 | 8 | configs: 9 | logstash_pipeline: 10 | file: logstash.conf 11 | -------------------------------------------------------------------------------- /stack-elk.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | elasticsearch: 15 | <<: *default-opts 16 | image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.4.0 17 | configs: 18 | - source: elastic_config 19 | target: /usr/share/elasticsearch/config/elasticsearch.yml 20 | environment: 21 | ES_JAVA_OPTS: "-Xmx256m -Xms256m" 22 | networks: 23 | - elk 24 | volumes: 25 | - elasticsearch:/usr/share/elasticsearch/data 26 | deploy: 27 | mode: replicated 28 | replicas: 1 29 | 30 | logstash: 31 | <<: *default-opts 32 | image: docker.elastic.co/logstash/logstash-oss:6.4.0 33 | ports: 34 | - "5000:5000" 35 | # - "5000:5000/udp" 36 | - "9600:9600" 37 | configs: 38 | - source: logstash_config 39 | target: /usr/share/logstash/config/logstash.yml 40 | - source: logstash_pipeline 41 | target: /usr/share/logstash/pipeline/logstash.conf 42 | environment: 43 | LS_JAVA_OPTS: "-Xmx256m -Xms256m" 44 | networks: 45 | - elk 46 | deploy: 47 | mode: replicated 48 | replicas: 1 49 | 50 | kibana: 51 | <<: *default-opts 52 | image: docker.elastic.co/kibana/kibana-oss:6.4.0 53 | configs: 54 | - source: kibana_config 55 | target: /usr/share/kibana/config/kibana.yml 56 | networks: 57 | - elk 58 | - proxy 59 | deploy: 60 | mode: replicated 61 | replicas: 1 62 | labels: 63 | traefik.http.routers.kibana.rule: Host(`kibana.dogvs.cat`) 64 | traefik.http.routers.kibana.entryPoints: websecure 65 | traefik.http.routers.kibana.service: 
kibana 66 | traefik.http.routers.kibana.middlewares: kibana-auth 67 | traefik.http.services.kibana.loadbalancer.server.port: 5601 68 | traefik.http.middlewares.kibana-auth.basicauth.users: ${KIBANA_USER:-admin}:${KIBANA_PASSWORD:-$$2y$$05$$oZcj4FgdSFEYNCH0EH/3Vu7Hp72gkvEadHzMsDhGFkbvS/ZrKMKfK} 69 | # NOTE: literal $ in the bcrypt hash must be escaped as $$ so compose interpolation leaves it intact (same convention as stack-proxy.yml) 70 | # TODO: make secrets secret 71 | 72 | configs: 73 | 74 | elastic_config: 75 | file: ./docker-elk/elasticsearch/config/elasticsearch.yml 76 | logstash_config: 77 | file: ./docker-elk/logstash/config/logstash.yml 78 | logstash_pipeline: 79 | file: ./logstash.conf 80 | kibana_config: 81 | file: ./docker-elk/kibana/config/kibana.yml 82 | 83 | networks: 84 | elk: 85 | proxy: 86 | external: true 87 | 88 | volumes: 89 | elasticsearch: 90 | driver: rexray/dobs 91 | driver_opts: 92 | size: 10 93 | -------------------------------------------------------------------------------- /stack-ghost.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | ghost: 15 | <<: *default-opts 16 | image: ghost:alpine 17 | networks: 18 | - proxy 19 | - ghost 20 | environment: 21 | # see https://docs.ghost.org/docs/config#section-running-ghost-with-config-env-variables 22 | database__client: mysql 23 | database__connection__host: db 24 | database__connection__user: root 25 | database__connection__password: YOURDBPASSWORDhere 26 | database__connection__database: ghost 27 | url: http://ghost.dogvs.cat 28 | deploy: 29 | replicas: 1 30 | labels: 31 | traefik.http.routers.ghost.rule: Host(`ghost.dogvs.cat`) 32 | traefik.http.routers.ghost.entryPoints: websecure 33 | traefik.http.routers.ghost.service: ghost 34 | traefik.http.services.ghost.loadbalancer.server.port: 2368 35 | 36 | db: 37 | <<: *default-opts 38 | image: mysql:5.7 39 | volumes: 40 | - db:/var/lib/mysql
41 | secrets: 42 | - db-password 43 | networks: 44 | - ghost 45 | deploy: 46 | endpoint_mode: dnsrr 47 | environment: 48 | MYSQL_ROOT_PASSWORD_FILE: /run/secrets/db-password 49 | 50 | # NOTE: table locking is a concern. Maybe use --single-transaction=TRUE for innoDB 51 | db-backup: 52 | <<: *default-opts 53 | image: mysql:5.7 54 | command: sh -c "while true; do /usr/bin/mysqldump -u root --password=$$(< $$MYSQL_ROOT_PASSWORD_FILE) --all-databases --host=db > /backup/backup$$(date +'%H').sql ; sleep 3600; done" 55 | volumes: 56 | - db-backup:/backup 57 | secrets: 58 | - db-password 59 | networks: 60 | - ghost 61 | environment: 62 | MYSQL_ROOT_PASSWORD_FILE: /run/secrets/db-password 63 | deploy: 64 | restart_policy: 65 | delay: 600s 66 | 67 | #TODO: backup for static content 68 | 69 | 70 | networks: 71 | ghost: {} 72 | proxy: 73 | external: true 74 | 75 | volumes: 76 | db: 77 | driver: rexray/dobs 78 | driver_opts: 79 | size: 1 80 | db-backup: 81 | driver: rexray/dobs 82 | driver_opts: 83 | size: 1 84 | 85 | secrets: 86 | db-password: 87 | file: ./secret-ghost-db 88 | name: db-password 89 | 90 | 91 | 92 | -------------------------------------------------------------------------------- /stack-menu.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | menu: 15 | <<: *default-opts 16 | image: dogvscat/menu 17 | build: 18 | context: menu 19 | deploy: 20 | replicas: 3 21 | labels: 22 | traefik.http.routers.dogvs.rule: Host(`www.dogvs.cat`) || Host(`dogvs.cat`) 23 | traefik.http.routers.dogvs.entryPoints: websecure 24 | traefik.http.routers.dogvs.service: dogvs 25 | traefik.http.services.dogvs.loadbalancer.server.port: 80 26 | networks: 27 | - proxy 28 | 29 | networks: 30 | proxy: 31 | external: true 32 | 33 | 
-------------------------------------------------------------------------------- /stack-portainer.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | web: 14 | <<: *default-opts 15 | image: portainer/portainer 16 | # command: -H "tcp://tasks.agent:9001" 17 | deploy: 18 | replicas: 1 19 | # placement: 20 | # constraints: [node.role == manager] 21 | # place on managers when not using agent, place on workers if using agent 22 | labels: 23 | traefik.http.routers.portainer.rule: Host(`portainer.dogvs.cat`) 24 | # router name must match the other portainer.* labels (was routers.dogvs, copy-pasted from stack-menu.yml) 25 | traefik.http.routers.portainer.entryPoints: websecure 26 | traefik.http.routers.portainer.service: portainer 27 | traefik.http.services.portainer.loadbalancer.server.port: 9000 28 | networks: 29 | - proxy 30 | - portainer 31 | volumes: 32 | - web:/data 33 | agent: 34 | <<: *default-opts 35 | image: portainer/agent 36 | networks: 37 | - portainer 38 | volumes: 39 | - /var/run/docker.sock:/var/run/docker.sock 40 | deploy: 41 | mode: global 42 | environment: 43 | AGENT_CLUSTER_ADDR: tasks.agent 44 | 45 | networks: 46 | proxy: 47 | external: true 48 | portainer: {} 49 | 50 | 51 | volumes: 52 | web: 53 | driver: rexray/dobs 54 | driver_opts: 55 | size: 1 56 | -------------------------------------------------------------------------------- /stack-proxy.yml: -------------------------------------------------------------------------------- 1 | # simple proxy example using traefik single container 2 | # for a more full-featured solution with multi-node proxy, 3 | # HA, consul backend, and more, see stack-proxy-global.yml 4 | 5 | version: '3.7' 6 | 7 | x-default-opts: 8 | &default-opts 9 | logging: 10 | options: 11 | max-size: "1m" 12 | # driver: "gelf" 13 | # options: 14 | # gelf-address: "udp://127.0.0.1:5000" 15 | 16 | services: 17 |
18 | traefik: 19 | <<: *default-opts 20 | image: traefik:2.2 21 | networks: 22 | - proxy 23 | volumes: 24 | - acme:/etc/traefik/acme 25 | - /var/run/docker.sock:/var/run/docker.sock 26 | ports: 27 | - "80:80" 28 | - "443:443" 29 | command: 30 | # Use your favourite settings here, but add: 31 | - --providers.docker 32 | - --providers.docker.swarmMode 33 | - --providers.docker.network=proxy 34 | - --providers.docker.watch 35 | - --api 36 | - --entryPoints.web.address=:80 37 | - --entryPoints.websecure.address=:443 38 | - --entrypoints.websecure.http.tls=true 39 | # - --entrypoints.web.http.redirections.entryPoint.to=websecure # force HTTPS 40 | # - --entrypoints.websecure.http.tls.certresolver=default 41 | ## optional LetsEncrypt settings 42 | # - --certificatesResolvers.default.acme.email=${TRAEFIK_ACME_EMAIL} 43 | # - --certificatesResolvers.default.acme.storage=/etc/traefik/acme/acme.json 44 | # - --certificatesResolvers.default.acme.tlsChallenge=true 45 | # - --certificatesResolvers.default.acme.caServer=https://acme-staging-v02.api.letsencrypt.org/directory 46 | # - --certificatesResolvers.default.acme.caServer=https://acme-v02.api.letsencrypt.org/directory 47 | deploy: 48 | placement: 49 | constraints: [node.role == manager] 50 | # Dynamic Configuration 51 | labels: 52 | traefik.http.routers.api.rule: Host(`traefik.dogvs.cat`) 53 | traefik.http.routers.api.entryPoints: websecure 54 | traefik.http.routers.api.service: api@internal 55 | traefik.http.routers.api.middlewares: auth 56 | traefik.http.services.dummy.loadbalancer.server.port: 4242 57 | # sample users/password test/test and test2/test2 58 | traefik.http.middlewares.auth.basicauth.users: test:$$apr1$$H6uskkkW$$IgXLP6ewTrSuBkTrqE8wj/,test2:$$apr1$$d9hr9HBB$$4HxwgUir3HP4EsggP/QNo0 59 | 60 | volumes: 61 | acme: 62 | driver: rexray/dobs 63 | driver_opts: 64 | size: 1 65 | 66 | networks: 67 | proxy: 68 | driver: overlay 69 | name: proxy 70 | 
-------------------------------------------------------------------------------- /stack-prune.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | image-prune: 14 | <<: *default-opts 15 | image: docker 16 | command: sh -c "while true; do docker image prune -af --filter \"until=24h\"; sleep 86400; done" 17 | networks: 18 | - bridge 19 | volumes: 20 | - /var/run/docker.sock:/var/run/docker.sock 21 | deploy: 22 | mode: global 23 | 24 | networks: 25 | bridge: 26 | external: true 27 | name: bridge 28 | -------------------------------------------------------------------------------- /stack-rexray.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | # 2. TODO: would prefer on-failure restart_policy, but need to run this in script to look 4 | # for if plugin exists first before reinstalling 5 | # 3. 
TODO: would prefer this picks a driver version, and support driver updates 6 | 7 | x-default-opts: 8 | &default-opts 9 | logging: 10 | options: 11 | max-size: "1m" 12 | # driver: "gelf" 13 | # options: 14 | # gelf-address: "udp://127.0.0.1:5000" 15 | 16 | services: 17 | plugin-rexray: 18 | <<: *default-opts 19 | image: mavenugo/swarm-exec:17.03.0-ce 20 | volumes: 21 | - /var/run/docker.sock:/var/run/docker.sock 22 | secrets: 23 | - rexray_do_token 24 | environment: 25 | - REXRAY_DO_TOKEN_FILE=/run/secrets/rexray_do_token 26 | command: sh -c "docker plugin install --grant-all-permissions rexray/dobs DOBS_REGION=nyc3 DOBS_TOKEN=$$(cat $$REXRAY_DO_TOKEN_FILE) DOBS_CONVERTUNDERSCORES=true" 27 | deploy: 28 | mode: global 29 | restart_policy: 30 | condition: none 31 | 32 | secrets: 33 | rexray_do_token: 34 | external: true 35 | name: rexray_do_token 36 | 37 | -------------------------------------------------------------------------------- /stack-sqlite-ghost.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | ghost: 15 | <<: *default-opts 16 | image: ghost:2-alpine 17 | networks: 18 | - proxy 19 | - ghost 20 | volumes: 21 | - content:/var/lib/ghost/content 22 | environment: 23 | # see https://docs.ghost.org/docs/config#section-running-ghost-with-config-env-variables 24 | NODE_ENV: production 25 | # database__client: mysql 26 | # database__connection__host: db 27 | # database__connection__user: root 28 | # database__connection__password: YOURDBPASSWORDhereee 29 | # database__connection__database: ghost 30 | url: http://ghost.dogvs.cat 31 | deploy: 32 | replicas: 1 33 | labels: 34 | traefik.http.routers.ghost.rule: Host(`ghost.dogvs.cat`) 35 | traefik.http.routers.ghost.entryPoints: websecure 36 | 
traefik.http.routers.ghost.service: ghost 37 | traefik.http.services.ghost.loadbalancer.server.port: 2368 38 | 39 | # TODO: backup for static content 40 | 41 | 42 | networks: 43 | ghost: {} 44 | proxy: 45 | external: true 46 | 47 | volumes: 48 | content: 49 | driver: rexray/dobs 50 | driver_opts: 51 | size: 1 52 | 53 | 54 | -------------------------------------------------------------------------------- /stack-visualizer.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | viz: 15 | <<: *default-opts 16 | image: dockersamples/visualizer 17 | deploy: 18 | replicas: 1 19 | placement: 20 | constraints: [node.role == manager] 21 | volumes: 22 | - /var/run/docker.sock:/var/run/docker.sock 23 | ports: 24 | - 4040:8080 25 | 26 | -------------------------------------------------------------------------------- /stack-voting.yml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | x-default-opts: 4 | &default-opts 5 | logging: 6 | options: 7 | max-size: "1m" 8 | # driver: "gelf" 9 | # options: 10 | # gelf-address: "udp://127.0.0.1:5000" 11 | 12 | services: 13 | 14 | redis: 15 | <<: *default-opts 16 | image: redis:alpine 17 | command: redis-server --appendonly yes 18 | volumes: 19 | - redis-data:/data 20 | networks: 21 | - frontend 22 | deploy: 23 | endpoint_mode: dnsrr 24 | 25 | db: 26 | <<: *default-opts 27 | image: postgres:9.6 28 | volumes: 29 | - db-data:/var/lib/postgresql/data 30 | networks: 31 | - backend 32 | deploy: 33 | endpoint_mode: dnsrr 34 | 35 | vote: 36 | <<: *default-opts 37 | image: bretfisher/examplevotingapp_vote 38 | networks: 39 | - frontend 40 | - proxy 41 | deploy: 42 | replicas: 2 43 | labels: 44 | traefik.http.routers.vote.rule: 
Host(`vote.dogvs.cat`) 45 | traefik.http.routers.vote.entryPoints: websecure 46 | traefik.http.routers.vote.service: vote 47 | traefik.http.services.vote.loadbalancer.server.port: 80 48 | 49 | result: 50 | <<: *default-opts 51 | image: bretfisher/examplevotingapp_result 52 | networks: 53 | - backend 54 | - proxy 55 | deploy: 56 | replicas: 2 57 | labels: 58 | traefik.http.routers.result.rule: Host(`result.dogvs.cat`) 59 | traefik.http.routers.result.entryPoints: websecure 60 | traefik.http.routers.result.service: result 61 | traefik.http.services.result.loadbalancer.server.port: 80 62 | traefik.http.services.result.loadbalancer.sticky: "true" 63 | 64 | worker: 65 | <<: *default-opts 66 | image: bretfisher/examplevotingapp_worker:java 67 | networks: 68 | - frontend 69 | - backend 70 | deploy: 71 | replicas: 2 72 | labels: 73 | traefik.enable: "true" 74 | 75 | networks: 76 | frontend: {} 77 | backend: {} 78 | proxy: 79 | external: true 80 | 81 | volumes: 82 | db-data: 83 | driver: ${DOCKER_VOL_DRIVER:-local} 84 | # for example set DOCKER_VOL_DRIVER="rexray/dobs" 85 | driver_opts: 86 | size: 1 87 | redis-data: 88 | driver: ${DOCKER_VOL_DRIVER:-local} 89 | # for example set DOCKER_VOL_DRIVER="rexray/dobs" 90 | driver_opts: 91 | size: 1 92 | 93 | -------------------------------------------------------------------------------- /voting.dockerapp: -------------------------------------------------------------------------------- 1 | # This section contains your application metadata. 2 | # Version of the application 3 | version: 0.1.0 4 | # Name of the application 5 | name: voting 6 | # A short description of the application 7 | description: 8 | # Namespace to use when pushing to a registry. This is typically your Hub username. 
9 | namespace: dogvscat 10 | # List of application maintainers with name and email for each 11 | maintainers: 12 | - name: Bret Fisher 13 | email: bret@bretfisher.com 14 | 15 | --- 16 | # This section contains the Compose file that describes your application services. 17 | version: '3.7' 18 | 19 | services: 20 | 21 | redis: 22 | image: redis:alpine 23 | command: redis-server --appendonly yes 24 | volumes: 25 | - redis-data:/data 26 | networks: 27 | - frontend 28 | deploy: 29 | endpoint_mode: dnsrr 30 | # logging: 31 | # driver: "gelf" 32 | # options: 33 | # gelf-address: "udp://127.0.0.1:5000" 34 | 35 | db: 36 | image: postgres:9.6 37 | volumes: 38 | - db-data:/var/lib/postgresql/data 39 | networks: 40 | - backend 41 | deploy: 42 | endpoint_mode: dnsrr 43 | # logging: 44 | # driver: "gelf" 45 | # options: 46 | # gelf-address: "udp://127.0.0.1:5000" 47 | 48 | vote: 49 | image: bretfisher/examplevotingapp_vote 50 | networks: 51 | - frontend 52 | - proxy 53 | deploy: 54 | replicas: 2 55 | labels: 56 | traefik.http.routers.vote.rule: Host(`vote.dogvs.cat`) 57 | traefik.http.routers.vote.entryPoints: websecure 58 | traefik.http.routers.vote.service: vote 59 | traefik.http.services.vote.loadbalancer.server.port: 80 60 | # logging: 61 | # driver: "gelf" 62 | # options: 63 | # gelf-address: "udp://127.0.0.1:5000" 64 | 65 | result: 66 | image: bretfisher/examplevotingapp_result 67 | networks: 68 | - backend 69 | - proxy 70 | deploy: 71 | replicas: 2 72 | labels: 73 | traefik.http.routers.result.rule: Host(`result.dogvs.cat`) 74 | traefik.http.routers.result.entryPoints: websecure 75 | traefik.http.routers.result.service: result 76 | traefik.http.services.result.loadbalancer.server.port: 80 77 | traefik.http.services.result.loadbalancer.sticky: "true" 78 | # logging: 79 | # driver: "gelf" 80 | # options: 81 | # gelf-address: "udp://127.0.0.1:5000" 82 | 83 | worker: 84 | image: bretfisher/examplevotingapp_worker:java 85 | networks: 86 | - frontend 87 | - backend 88 | deploy: 
89 | replicas: 2 90 | # logging: 91 | # driver: "gelf" 92 | # options: 93 | # gelf-address: "udp://127.0.0.1:5000" 94 | 95 | networks: 96 | frontend: {} 97 | backend: {} 98 | proxy: 99 | external: true 100 | 101 | volumes: 102 | db-data: 103 | driver: rexray/dobs 104 | driver_opts: 105 | size: 1 106 | redis-data: 107 | driver: rexray/dobs 108 | driver_opts: 109 | size: 1 110 | 111 | 112 | --- 113 | # This section contains the default values for your application settings. 114 | {} 115 | --------------------------------------------------------------------------------