├── .gitignore
├── Vagrantfile
├── ansible.cfg
├── demo.yml
├── provision
├── cleanup.retry
├── cleanup.yml
├── configure_common.retry
├── configure_common.yml
├── configure_develop.yml
├── configure_host.retry
├── configure_host.yml
├── configure_masters.retry
├── configure_masters.yml
├── configure_repos.yml
├── configure_slaves.retry
├── configure_slaves.yml
├── configure_stack.yml
├── development
├── development.retry
├── files
│ ├── front-proxy-ca.csr
│ ├── front-proxy-client.csr
│ ├── reg.dev.twleansw.com.crt
│ ├── thoughtworks.csr
│ └── vault.j2
├── group_vars
│ ├── all
│ │ ├── rancher-catalog.yml
│ │ ├── rancher-mysql.yml
│ │ ├── rancher-nginx.yml
│ │ ├── rancher-security.yml
│ │ ├── rancher.yml
│ │ └── vault.yml
│ ├── master.yml
│ ├── repository.yml
│ └── slaves.yml
├── init_ansible.yml
├── init_certs.yml
├── init_vars.yml
├── library
│ ├── dnspod.py
│ ├── kube.py
│ ├── kubectl.py
│ ├── marathon.py
│ ├── nexus.py
│ ├── nsupdate.py
│ ├── rancher.py
│ ├── rancher_compose.py
│ └── xml.py
├── roles
│ ├── artifactory
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── common
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── nsenter
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── additional.yml
│ │ │ ├── config.yml
│ │ │ ├── config
│ │ │ │ ├── debian.yml
│ │ │ │ ├── redhat.yml
│ │ │ │ └── suse.yml
│ │ │ ├── install.yml
│ │ │ ├── install
│ │ │ │ ├── debian.yml
│ │ │ │ ├── redhat.yml
│ │ │ │ └── suse.yml
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── hosts.j2
│ │ └── vars
│ │ │ ├── debian.yml
│ │ │ ├── redhat.yml
│ │ │ └── suse.yml
│ ├── develop
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── bower.j2
│ │ │ ├── gradle.j2
│ │ │ ├── maven.j2
│ │ │ └── npm.j2
│ ├── docker
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── check-docker.sh
│ │ │ ├── reg.dev.twleansw.com.crt
│ │ │ └── suse
│ │ │ │ ├── containerd-0.2.4+git0366d7e-28.1.x86_64.rpm
│ │ │ │ ├── docker-1.12.3-158.1.x86_64.rpm
│ │ │ │ ├── docker-client-1.7.1-5.2.x86_64.rpm
│ │ │ │ ├── docker-image-migrator-1.0.2-14.2.x86_64.rpm
│ │ │ │ ├── docker.key
│ │ │ │ ├── python-backports.ssl_match_hostname-3.5.0.1-1.1.noarch.rpm
│ │ │ │ ├── python-docker-py-1.10.4-7.1.noarch.rpm
│ │ │ │ ├── python-docker-pycreds-0.2.1-5.1.noarch.rpm
│ │ │ │ ├── python-ipaddress-1.0.16-2.1.noarch.rpm
│ │ │ │ ├── python-requests-2.9.1-1.1.noarch.rpm
│ │ │ │ ├── python-setuptools-20.2.2-2.1.noarch.rpm
│ │ │ │ ├── python-six-1.10.0-1.1.noarch.rpm
│ │ │ │ ├── python-websocket-client-0.32.0-3.1.noarch.rpm
│ │ │ │ └── runc-0.1.1+git02f8fa7-21.1.x86_64.rpm
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── additional.yml
│ │ │ ├── config.yml
│ │ │ ├── config
│ │ │ │ ├── debian.yml
│ │ │ │ ├── redhat.yml
│ │ │ │ └── suse.yml
│ │ │ ├── install.yml
│ │ │ ├── install
│ │ │ │ ├── debian.yml
│ │ │ │ ├── redhat.yml
│ │ │ │ └── suse.yml
│ │ │ ├── main.yml
│ │ │ ├── manage.yml
│ │ │ ├── service.yml
│ │ │ └── swarm.yml
│ │ ├── templates
│ │ │ ├── config.json.j2
│ │ │ ├── debian
│ │ │ │ ├── default
│ │ │ │ │ └── docker.j2
│ │ │ │ ├── init
│ │ │ │ │ └── docker.conf.j2
│ │ │ │ └── systemd
│ │ │ │ │ └── docker-systemd-service.j2
│ │ │ ├── redhat
│ │ │ │ ├── sysconfig
│ │ │ │ │ └── docker.j2
│ │ │ │ └── systemd
│ │ │ │ │ └── docker.conf.j2
│ │ │ └── suse
│ │ │ │ ├── default
│ │ │ │ └── docker.j2
│ │ │ │ └── sysconfig
│ │ │ │ └── docker.j2
│ │ └── vars
│ │ │ ├── centos.yml
│ │ │ ├── debian.yml
│ │ │ ├── main.yml
│ │ │ ├── redhat.yml
│ │ │ ├── suse.yml
│ │ │ └── ubuntu.yml
│ ├── etcd
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── vault.sh
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── gogs
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── app.ini.j2
│ │ │ └── gogs.j2
│ ├── kubernetes-agent
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── certs.yml
│ │ │ ├── config.yml
│ │ │ ├── install.yml
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── kubeadm-init.yml
│ │ │ └── ssl
│ │ │ ├── apiserver-kubelet-client.cnf
│ │ │ ├── apiserver.cnf
│ │ │ ├── ca-config.json
│ │ │ ├── front-proxy-ca.cnf
│ │ │ └── front-proxy-client.cnf
│ ├── kubernetes-balancing
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── kube-balancing.cfg
│ ├── kubernetes
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── certs.yml
│ │ │ ├── config.yml
│ │ │ ├── install.yml
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── kube-haproxy.cfg
│ │ │ ├── kube-init.yml
│ │ │ └── ssl
│ │ │ ├── apiserver-kubelet-client.cnf
│ │ │ ├── apiserver.cnf
│ │ │ ├── ca-config.json
│ │ │ ├── front-proxy-ca.cnf
│ │ │ └── front-proxy-client.cnf
│ ├── ldap
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── mysql
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── nexus
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── groovy
│ │ │ │ ├── admin.groovy
│ │ │ │ ├── initialize.groovy
│ │ │ │ ├── privileges.groovy
│ │ │ │ ├── repositories.groovy
│ │ │ │ ├── roles.groovy
│ │ │ │ ├── setup_anonymous.groovy
│ │ │ │ ├── setup_base_url.groovy
│ │ │ │ ├── setup_capability.groovy
│ │ │ │ ├── setup_ldap.groovy
│ │ │ │ └── users.groovy
│ │ │ └── provision.groovy
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── config.yml
│ │ │ ├── config
│ │ │ ├── api.yml
│ │ │ ├── initialize.yml
│ │ │ ├── privileges.yml
│ │ │ ├── repositories.yml
│ │ │ ├── roles.yml
│ │ │ └── users.yml
│ │ │ ├── domain.yml
│ │ │ ├── install.yml
│ │ │ └── main.yml
│ ├── nginx
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── nginx.conf.j2
│ │ │ └── vhost.conf.j2
│ ├── rancher-agent
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── default
│ │ │ └── docker.j2
│ │ │ ├── init
│ │ │ └── docker.conf.j2
│ │ │ ├── portworx.j2
│ │ │ └── project.json.j2
│ ├── rancher-balancing
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── kube-balancing.cfg
│ ├── rancher-stack
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── jenkins.txt
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── jenkins.j2
│ │ │ ├── jenkins
│ │ │ ├── docker-compose.yml
│ │ │ └── rancher-compose.yml
│ │ │ └── portworx.j2
│ ├── rancher
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── meta
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── config.yml
│ │ │ ├── install.yml
│ │ │ └── main.yml
│ │ └── templates
│ │ │ ├── apikey.json.j2
│ │ │ ├── catalog.json.j2
│ │ │ ├── openldap.json.j2
│ │ │ └── user.json.j2
│ └── vault
│ │ ├── defaults
│ │ └── main.yml
│ │ ├── files
│ │ └── vault.sh
│ │ ├── handlers
│ │ └── main.yml
│ │ ├── meta
│ │ └── main.yml
│ │ └── tasks
│ │ └── main.yml
├── scaleworks
├── scaleworks.retry
└── test.yml
└── thoughtworks.csr
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
/.bundle
.vscode
.vagrant
.DS_Store
/db/*.sqlite3
/log/*.log
/tmp
/public/system
.vagrant
.sass-cache
.idea/*
*.key
*.pem
*.crt
*.retry
tags
coverage
/db/schema.rb
/db/projects
/db/gab.yaml
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# cluster configure
cluster = {
  "master-1" => { :ip => "10.245.6.2", :cpus => 2, :memory => 4096 },
  "slave-1" => { :ip => "10.245.6.3", :cpus => 1, :memory => 2048 },
  "slave-2" => { :ip => "10.245.6.4", :cpus => 1, :memory => 2048 },
}

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  ###############################################################################
  # Global plugin settings                                                      #
  ###############################################################################
  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
    config.cache.auto_detect = true
    config.cache.synced_folder_opts = {
      type: :nfs,
      mount_options: ['rw', 'vers=3', 'tcp', 'nolock']
    }
  end
  if Vagrant.has_plugin?("vagrant-vbguest")
    config.vbguest.auto_update = false
    config.vbguest.no_remote = true
  end
  if Vagrant.has_plugin?("vagrant-hostmanager")
    config.hostmanager.enabled = false
    config.hostmanager.manage_host = true
    config.hostmanager.ignore_private_ip = false
  end

  # box
  config.vm.box = "debian/jessie64"
  #config.vm.box = "ubuntu/trusty64"
  #config.vm.box = "opensuse/openSUSE-42.1-x86_64"
  #config.vm.box = "trueability/sles-12-sp1"
  #config.vm.box = "centos/7"
  config.vm.box_check_update = false
  # ssh
  config.ssh.username = 'vagrant'
  config.ssh.insert_key = false
  config.ssh.forward_agent = true
  config.ssh.private_key_path = ["#{ENV['HOME']}/.ssh/id_rsa", "#{ENV['HOME']}/.vagrant.d/insecure_private_key"]
  ## synced folders
  config.vm.synced_folder ".", "/vagrant", disabled: true


  cluster.each_with_index do |(hostname, info), index|
    config.vm.define hostname do |cfg|

      # virtualbox
      cfg.vm.provider :virtualbox do |vb|
        vb.name = "#{hostname}"
        vb.cpus = info[:cpus]
        vb.memory = info[:memory]
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
      end

      # network
      cfg.vm.network :private_network, ip: "#{info[:ip]}", nictype: "virtio"

      # provision
      if index == cluster.size - 1
        cfg.vm.provision "ansible" do |ansible|
          ansible.limit = "all"
          ansible.playbook = "provision/development.yml"
          ansible.inventory_path = "provision/development"
        end
      end
    end
  end
end
--------------------------------------------------------------------------------
/demo.yml:
--------------------------------------------------------------------------------
TfAUTVH7xsrKioHkAokYvx1yAEk57Fyepa6d5G5r
--------------------------------------------------------------------------------
/provision/cleanup.retry:
--------------------------------------------------------------------------------
toc-deliflow02
toc-deliflow03
toc-deliflow04
--------------------------------------------------------------------------------
/provision/cleanup.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -e "RANCHER_PROJECT_NAME=kube" -i provision/scaleworks provision/cleanup.yml
- hosts: "{{ RANCHER_PROJECT_NAME }}"
  gather_facts: yes
  tasks:
    - name: stop all docker containers
      shell: docker stop $(docker ps -a -q)
      ignore_errors: true
      become: yes
    - name: remove all docker containers
      shell: docker rm $(docker ps -a -q)
      ignore_errors: true
      become: yes
    - name: remove all docker images
      shell: docker rmi -f $(docker images -q)
      ignore_errors: true
      become: yes
    - name: remove all volumes
      shell: docker volume rm $(docker volume ls -qf dangling=true)
      ignore_errors: true
      become: yes
    - name: stop docker
      shell: service docker stop
      ignore_errors: true
      become: yes
    - name: remove all volumes
      shell: rm -rf /var/lib/docker && rm -rf /data/lib/*
      ignore_errors: true
      become: yes
--------------------------------------------------------------------------------
/provision/configure_common.retry:
--------------------------------------------------------------------------------
toc-common01
--------------------------------------------------------------------------------
/provision/configure_common.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -i provision/scaleworks provision/configure_common.yml
- hosts: common
  become: false
  gather_facts: yes
  vars:
    certs_data_dir: "{{ inventory_dir }}/files"
    nginx_vhosts:
      - listen: "8080"
        server_name: "localhost"
        ssl: false
        extra_parameters: |
          location / {
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_pass http://rancher;
            proxy_http_version 1.1;
            proxy_read_timeout 900s;
          }
      - listen: "443"
        server_name: "localhost"
        ssl: true
        ssl_certificate: /etc/nginx/ssl/thoughtworks.pem
        ssl_certificate_key: /etc/nginx/ssl/thoughtworks.key
        extra_parameters: |
          location / {
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_pass http://rancher;
            proxy_http_version 1.1;
            proxy_read_timeout 900s;
          }
    nginx_upstreams:
      - name: rancher
        servers: "{% set vars = [] %}{% for host in rancher_masters_host %}{{ vars.append(hostvars[host]['ansible_host'] + ':8080') }}{% endfor %}{{ vars }}"
    mysql_data_dir: /data/lib/mysql
  pre_tasks:
    - name: ensure nginx ssl directory
      file:
        path: "/etc/nginx/ssl"
        state: directory
        owner: root
        group: root
      become: yes
    - name: copy certs
      copy:
        src: "{{ certs_data_dir }}/{{ item }}"
        dest: "/etc/nginx/ssl/{{ item }}"
      with_items:
        - thoughtworks.key
        - thoughtworks.pem
      become: true
  roles:
    - nginx
    - kubernetes-balancing
    - mysql
--------------------------------------------------------------------------------
/provision/configure_develop.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -e "RANCHER_PROJECT_NAME=infra" -i provision/scaleworks provision/configure_develop.yml
- hosts: "{{ RANCHER_PROJECT_NAME }}"
  gather_facts: yes
  roles:
    - develop
--------------------------------------------------------------------------------
/provision/configure_host.retry:
--------------------------------------------------------------------------------
toc-demo01
--------------------------------------------------------------------------------
/provision/configure_host.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -e "RANCHER_PROJECT_NAME=kube" -i provision/scaleworks provision/configure_host.yml
- hosts: "{{ RANCHER_PROJECT_NAME }}"
  become: true
  gather_facts: yes
  vars:
    additional_disks:
      - disk: /dev/sdb
        fstype: ext4
        mount_options: defaults
        mount: /data
    docker_opts: "-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 --registry-mirror=http://thoughtworks.io:5000 --insecure-registry=thoughtworks.io:5000 --insecure-registry=thoughtworks.io:5001 --insecure-registry=thoughtworks.io:5002"
    docker_certs:
      - host: reg.dev.twleansw.com
        cert: reg.dev.twleansw.com.crt
    docker_registries:
      - host: "https://index.docker.io/v1/"
        auth: "Zmx5MndpbmQ6cHdjNTAycg=="
        email: "fly2wind@gmail.com"
      - host: "http://thoughtworks.io:5000/"
        auth: "Zmx5MndpbmQ6cHdjNTAycg=="
        email: "fly2wind@gmail.com"
      - host: "reg.dev.twleansw.com"
        auth: "YWRtaW46TGVhbnN3ZGV2MQ=="
        email: "admin@admin.com"
    docker_additonal_directory: /data/lib/docker
  roles:
    - common
    - docker
--------------------------------------------------------------------------------
/provision/configure_masters.retry:
--------------------------------------------------------------------------------
toc-master01
toc-master02
toc-master03
--------------------------------------------------------------------------------
/provision/configure_masters.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -i provision/scaleworks provision/configure_masters.yml
- hosts: masters
  become: true
  gather_facts: yes
  vars:
    etcd_data_dir: /data/lib/etcd
  pre_tasks:
    - copy: src=~/.ssh/id_rsa dest=/root/.ssh/id_rsa owner=root group=root mode=0600
  roles:
    - rancher
--------------------------------------------------------------------------------
/provision/configure_repos.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -i provision/scaleworks provision/configure_repos.yml
- hosts: repository
  gather_facts: yes
  vars:
    certs_data_dir: "{{ inventory_dir }}/files"
    gogs_web_port: 3000
    gogs_ssh_port: 10022
    gogs_data_dir: /data/lib/gogs
    nexus_port: 8081
    nexus_data_dir: "/data/lib/nexus"
    nexus_local_users:
      - username: fly2wind
        first_name: Docker
        last_name: User
        email: fly2wind@gmail.com
        password: pwc502r
        roles:
          - nx-admin
    nginx_vhosts:
      - listen: "80"
        server_name: "repo.thoughtworks.io"
        ssl: false
        extra_parameters: |
          location / {
            proxy_pass http://repo;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
          }
      - listen: "443"
        server_name: "thoughtworks.io"
        ssl: true
        ssl_certificate: /etc/nginx/ssl/thoughtworks.pem
        ssl_certificate_key: /etc/nginx/ssl/thoughtworks.key
        extra_parameters: |
          location / {
            proxy_pass http://registry;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
          }
      - listen: "443"
        server_name: "repo.thoughtworks.io"
        ssl: true
        ssl_certificate: /etc/nginx/ssl/thoughtworks.pem
        ssl_certificate_key: /etc/nginx/ssl/thoughtworks.key
        extra_parameters: |
          location / {
            proxy_pass http://repo;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
          }
      - listen: "80"
        server_name: "git.thoughtworks.io"
        ssl: false
        extra_parameters: |
          location / {
            proxy_pass http://git;
            proxy_set_header Host $host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Port $server_port;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
          }
    nginx_upstreams:
      - name: repo
        servers:
          - "{{ hostvars[groups['repository'][0]]['ansible_host'] }}:{{ nexus_port }}"
      - name: registry
        servers:
          - "{{ hostvars[groups['repository'][0]]['ansible_host'] }}:5000"
      - name: git
        servers:
          - "{{ hostvars[groups['repository'][0]]['ansible_host'] }}:{{ gogs_web_port }}"
  pre_tasks:
    - name: ensure nginx ssl directory
      file:
        path: "/etc/nginx/ssl"
        state: directory
        owner: root
        group: root
      become: yes
    - name: copy certs
      copy:
        src: "{{ certs_data_dir }}/{{ item }}"
        dest: "/etc/nginx/ssl/{{ item }}"
      with_items:
        - thoughtworks.key
        - thoughtworks.pem
      become: true
  roles:
    - gogs
    - nexus
    - nginx
--------------------------------------------------------------------------------
/provision/configure_slaves.retry:
--------------------------------------------------------------------------------
toc-infra01
toc-infra02
toc-infra03
--------------------------------------------------------------------------------
/provision/configure_slaves.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -e "RANCHER_PROJECT_NAME=infra" -i provision/scaleworks provision/configure_slaves.yml
- hosts: "{{ RANCHER_PROJECT_NAME }}"
  gather_facts: yes
  roles:
    - rancher-agent
--------------------------------------------------------------------------------
/provision/configure_stack.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -e "RANCHER_PROJECT_NAME=infra" -e "RANCHER_STACK_NAME=jenkins" -i provision/scaleworks provision/configure_stack.yml
- hosts: "{{ RANCHER_PROJECT_NAME }}"
  gather_facts: yes
  roles:
    - rancher-stack
--------------------------------------------------------------------------------
/provision/development:
--------------------------------------------------------------------------------
[master]
master-1 ansible_host=10.245.6.2 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa

[slaves]
slave-1 ansible_host=10.245.6.3 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa
slave-2 ansible_host=10.245.6.4 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa

[database]
master-1 ansible_host=10.245.6.2 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa

[repository]
master-1 ansible_host=10.245.6.2 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa
--------------------------------------------------------------------------------
/provision/development.retry:
--------------------------------------------------------------------------------
master-1
slave-1
slave-2
--------------------------------------------------------------------------------
/provision/files/front-proxy-ca.csr:
--------------------------------------------------------------------------------
-----BEGIN CERTIFICATE REQUEST-----
MIICWjCCAUICAQAwFTETMBEGA1UEAwwKa3ViZXJuZXRlczCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBALPINUD1T8zNCpi6CIrmQzig3IEvMEedg02li5X9
W1qEmjr/nlUz2vWsgdSah3Qz5e6CFpwto1q+zlB7GJ7xoS58oA1S84aCxc1W/lxG
GxKA5LmKhqJwvqRV79vLrMUZX1K59hFdmZaWoCFwwJGNYv/RQf5pwoLvyk9yIU4p
mgh80cBL+FCmlntEN2brJ+smdhyVmQF3XiCgktC2bqFBPmzfc/JzwaAw9Pt+YX4+
P7N8I9ijGU/QM7wHqCXttiXd2SFZaTxhfbqN4ly73wGphXypCDyWghxRsfpfHoYL
PnJG8jy1PzQA9zNMOrBeEWhBPveOFDKTK+d6SsBXZ5UvJ8kCAwEAAaAAMA0GCSqG
SIb3DQEBBQUAA4IBAQB0Ia8CRY6AamWYZAb1jDGCHGqkMz7KyveSLAq92+Fc6p+0
uRM/bVe4rv+LpqPUYwIq6JxeVXIsjUjLstFL9x0Uo7kDeZVqg9f56DXPI8gyk5Pq
pb9lA2h59jX70IMf4/0lhfLjpEE/83MzJzLP99C9mIzW33NtGaFiWuYbNN82JAj7
wcoxXFVSV4wWIqYfkeJkydi+H9TPOgpbdQ1NLS79dltQGKap2AiLFNNUjbC8yNQL
ekiA/PRUTCZV7McKUfrV+r2AcwBpKaLr9rxCqdGf3SxlRSq8PKt6+6+PCbXvPMhp
FA4NlW3NtmXZ9JTnsn9ifZ53nHudCoTdCdmJvFXN
-----END CERTIFICATE REQUEST-----
--------------------------------------------------------------------------------
/provision/files/front-proxy-client.csr:
--------------------------------------------------------------------------------
-----BEGIN CERTIFICATE REQUEST-----
MIICYjCCAUoCAQAwHTEbMBkGA1UEAwwSZnJvbnQtcHJveHktY2xpZW50MIIBIjAN
BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArzKjmCv6+QYTt7iy8qxVoQ+RsTyo
xQi/8D6qgg4mQFhnRYraW/MK1JujfpJWbITrTufx0HOiD4Emw0ojfuHdmQkJlceY
fHpyY19kn28wzbwTCo+02N6Oz3h51fJGdrKL3fj0IJUNKSbdlSlQdh4RI+OnBXzD
qWNOSgT4Xj3+gtrhZsyXpoW/iWrDT6N23oRuVgxbOb996MwFCeaEHM5CfzDxCxRQ
ajMK+cgIUch3d1GogU93zQ9oTtXgIgXmXslJS+XNekdhvkPHzQIwHjxe/avN3jFv
tkL/axeav6M4ut9YR0fAYT3pMMvQ9tnbRRB+qpuk8/pPmVVj3D1OYR/fcwIDAQAB
oAAwDQYJKoZIhvcNAQEFBQADggEBACi/nnN/ze9gkETdkYSt99Bl8D4c7BaIR0l9
lSPxMD+5SF8x30O7yqw4tsG2hiLlzP7ATo2UUWovPl3MVzWqwgUnBs0hNWFfh4KH
uiwrjY5HyZGqoC7D1mgf1rCMg2BnA835f8iAbjUxif1XyRcjJlw49KnP/slXjGyR
a9JdmkbRxSsn/i9vSFKJPNTFXqsDzsAKWCCYIRhMAYRxmIHIC1tiUa6/KLaCQqJV
frQI4CORsKW4MH8Pn4ZBh3yPQY6tl2pq8tnYgSgBfyAycBC/yXgCgo/sjoqajAJ1
8JtiAq8F+bFx+hIk3iS8jF09vzRdAtV7poyS5fsdfgMpdPSgFII=
-----END CERTIFICATE REQUEST-----
--------------------------------------------------------------------------------
/provision/files/reg.dev.twleansw.com.crt:
--------------------------------------------------------------------------------
-----BEGIN CERTIFICATE-----
MIICoTCCAgoCCQCeLGuu05cCLDANBgkqhkiG9w0BAQUFADCBlDELMAkGA1UEBhMC
Q04xETAPBgNVBAgTCFNoYW5nSGFpMQswCQYDVQQHEwJTSDERMA8GA1UEChMIRGVs
aWZsb3cxDzANBgNVBAsTBlBlb3BsZTEbMBkGA1UEAxQSKi5kZXYudHdsZWFuc3cu
Y29tMSQwIgYJKoZIhvcNAQkBFhV5bGxpQHRob3VnaHR3b3Jrcy5jb20wHhcNMTYw
OTA3MDc0NjU3WhcNMTcwOTA3MDc0NjU3WjCBlDELMAkGA1UEBhMCQ04xETAPBgNV
BAgTCFNoYW5nSGFpMQswCQYDVQQHEwJTSDERMA8GA1UEChMIRGVsaWZsb3cxDzAN
BgNVBAsTBlBlb3BsZTEbMBkGA1UEAxQSKi5kZXYudHdsZWFuc3cuY29tMSQwIgYJ
KoZIhvcNAQkBFhV5bGxpQHRob3VnaHR3b3Jrcy5jb20wgZ8wDQYJKoZIhvcNAQEB
BQADgY0AMIGJAoGBAOOCYA2xrw7Fl6gxKROdZaVVu/R1HdX9TXbbKXsWKdLvYgIC
dRwjYeVDEPSGEfrRlvFN2B7aGxn2aFleKzcylkNTvrFIx6hG5g/Iqz77myujWXCY
ddp00XtiZiAJqBDva2hQZzBG6N4jy/ADnLhLNpdx5wEssNLJxO0loH1pnHWpAgMB
AAEwDQYJKoZIhvcNAQEFBQADgYEAix74u3m8U6fcQrQ4H/tBizmFtQhetoQ0hFxP
RsmD76mE1xaIN9+nRD97MqiQOBSr/+vXzPl9WHgmBZxkBe1qKjccWv6L5oZMlxF3
X/rLuCcHjO+RKgqhwiUmkpekwsYWAqX7EDbo86r2FPV3xaF4xFA4iCYnfgfB+xBa
3meUfAw=
-----END CERTIFICATE-----
--------------------------------------------------------------------------------
/provision/files/thoughtworks.csr:
--------------------------------------------------------------------------------
-----BEGIN CERTIFICATE REQUEST-----
MIIC8DCCAdgCAQAwgZExCzAJBgNVBAYTAkNOMRAwDgYDVQQIEwdTSEFOTlhJMQ4w
DAYDVQQHEwVYSSdBTjEVMBMGA1UEChMMVGhvdWdodFdvcmtzMQwwCgYDVQQLEwNU
T0MxGDAWBgNVBAMTD3Rob3VnaHR3b3Jrcy5pbzEhMB8GCSqGSIb3DQEJARYSZmx5
MndpbmRAZ21haWwuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA
0r2T4/BcqmepLczNlgFsbAX2juS+eZ4tdenyfF2s8QwrIipO9wrlLhGJCy7mRjIP
yf1msPTWLpoy5WxPVY5IbI3Iz06AYNRaBejrdsWu7CbfbMbG6nQJIAXyr8iTzhMD
TmaJvG3of8Tp8luw6SliFTnLs6YtUwe/AMmrIAqL5XhVSd6bZd7F121J3Q0t8RHm
VlBtlgw2TQtRPVeY1t7jZ6x2NFxFNPlLVNv+UOByXSXPYAL+O/Dcb8e7jH4uEKas
0mg7+g8vb03vVlD85Crf3zuARC+mc9RrBqM9R8SDPmiKHCbgwBSTX1t+0dzxQRjB
kdljL6Hzshw6AZRnamvCKQIDAQABoBkwFwYJKoZIhvcNAQkHMQoTCDEyMzQ1Njc4
MA0GCSqGSIb3DQEBBQUAA4IBAQBRmaF42BT5LJ6WB4jh+IQTf4TxElc74Tunf9Cg
4cATtmJoJzuVBB6hKmQyjssgwDZ7d8byjwlr6QxU/cWPe4l0do1uG7W1fGRlaigm
BZ8G/WngOkynVeHTbp/A+DzNIGN1l5pxN3fnEshwbNJt2nr9flHEwr5UjamRUrD6
RkmypveWMOol12VPXNoEV2KDWN18SCSWoPtrf9HznsdAucbqG7NAxRqBh0csz8r0
3g11n0OkZHB3HiHCknTDObFpxy7UiIZpFpUHNbeh6RyNfUp/oTubLJ+0Z3ySsRHS
JaXTmvGNUSgIWWLQ+NBb89+Y5n6LRO+Pz+s40gDXSkTHSPdA
-----END CERTIFICATE REQUEST-----
--------------------------------------------------------------------------------
/provision/files/vault.j2:
--------------------------------------------------------------------------------
VAULT_ROOT_TOKEN: {{ vault_token.stdout }}
--------------------------------------------------------------------------------
/provision/group_vars/all/rancher-catalog.yml:
--------------------------------------------------------------------------------
rancher_catalogs:
  catalogs:
    library:
      url: https://git.rancher.io/rancher-catalog.git
      branch: master
    community:
      url: https://git.rancher.io/community-catalog.git
      branch: master
    ThoughtWorks:
      url: https://github.com/fly2wind/basecatalog.git
      branch: master
    LeanSW:
      url: https://github.com/tw-leansw/leansw-rancher-catalog.git
      branch: master
--------------------------------------------------------------------------------
/provision/group_vars/all/rancher-mysql.yml:
--------------------------------------------------------------------------------
MYSQL_ROOT_USER: root
MYSQL_ROOT_PASS: "P@ss123456"


RANCHER_MYSQL_HOST: "{{ hostvars[groups['common'][0]]['ansible_host'] }}"
RANCHER_MYSQL_PORT: 3306
RANCHER_MYSQL_USER: cattle
RANCHER_MYSQL_PASS: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  36333636373734393735633566666536663331663333343331363632313431363738333430356632
  3764396564366131633337643762656238346132306264660a646661653639383365353935316139
  62653036353236373562353130623831633338383135393735393066376132363430666566323664
  3932636338663461380a613630386166373436323666366232353166346261313738666363383432
  3735
RANCHER_MYSQL_DATABASE: cattle
--------------------------------------------------------------------------------
/provision/group_vars/all/rancher-nginx.yml:
--------------------------------------------------------------------------------
rancher_masters_host: "{{ groups['masters'] }}"
--------------------------------------------------------------------------------
/provision/group_vars/all/rancher-security.yml:
--------------------------------------------------------------------------------
LDAP_HOST: ldap.thoughtworks.io
LDAP_PORT: 389
LDAP_USER: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31323165313766366661376161303163343138343336646661333432303833343362643531313130
  3939386432613033336562386530636161373062316239650a653532656134656337323739326234
  38333538616433393831626131386439383866643838343037303637633339353538323965393061
  3539643565363037630a376337656632386265653031303333393563643333316133633130383333
  3233
LDAP_BIND_DN: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  63386565623065613433333130313230356437383432623237386262343464333530346531663236
  3162356537393130353363636437636261343134316536350a656231366363356163373265633233
  61343837393230653936363831346165616533353561316661616161656363393462623239306465
  6363343834303236610a623433623565616130396339313637363261356265356261613233626263
  38653637346536653066343037343061366462663132653331373631653231623661653162633763
  66343330613766623765373861616130356232613636303432666639373336643166333338633239
  30623265383262333338646435303036633735356564346130313637616434366631326233616232
  33333238346265343165633630663031633230343131613461613465306532306265623062336332
  31623361656132326635343064336532353238313661613064346430353862623962
LDAP_BIND_PASS: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  33663837383661303761316535383566343736616332306539393466633135303665343962313834
  6434653765613566646138616637373261653039343965640a343236323731396436313834663935
  65643865306530353666663533396430376562623663383437376236663163363334623436313363
  6430303837346536360a383438396431376465666331623137303961656132396362303464643931
  3739
LDAP_BIND_ENCRYPT: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  64343533623832363732396366383837636631363764663330643638633538393064666564343030
  3863363634626534643136643631343662393164366464320a333965666330656235393136626136
  36613739653138633431643533343664333566313865393238636165643530643839386535393735
  3862613764323532660a633631663662643231376165626637643161323964313930643532646565
  38633939653533613535393035303466383263363333313631353037356661323631
LDAP_DOMAIN: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  31386237356433386635366166656264643633616139356561343662333134646436656566383032
  3663393331336463386239383434353065303339373662300a333365353035643737396532616537
  35336238326161303637376531353134346435353334616338363263393837353632666639326634
  3736373836343361650a383165633166323539326637636530313861653661646133643363336361
  36656263393335613030376237613766326239633435636436623038313935643034
LDAP_BASE_DN: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  62666534373165323239383130373839396433386637383839653431653936336230626333663063
  3537396164346638613961323861393639623331356537300a626633656565633935633961653763
  62303564316266656236633063313331356336363361363039396430643234343362613634613964
  3031356230396461640a386465313939666163383965666637333663323136313862663361363931
  64356661323637363334666632366664393662663730623161656236636162333438646362323235
  38316531616565613163303766353936643838343539323839313532643539343931346364663731
  303139356163373466393738313139333534
LDAP_USER_SEARCH_BASE: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  35633535633963616137363064646434363266633532306464346434623533353561613531613864
  6537333335623962366430623731653836633932393736300a333737666463656238383563363535
  33663030363666373666613563623263386336623564626136306637336365306666373239623036
  3264616434343038660a323932343737323037336463663637346166626535643162656435306631
  3831
LDAP_USER_SEARCH_FILTER: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  30353361323361383264636232343066663064636530396434363134313532613066303630643937
  6266366266623762626233386261643335373362306537320a316262643733336533623062633363
  33373937306438373962613361303430343134356164323538616266643238663638383263383732
  6539663037316337300a316238323666633935363837353765316162373435396430303639623337
  31613832393461353234643364336535346262396132653234333466376365383665
LDAP_GROUP_SEARCH_BASE: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  34353865633134393738306133636334313439376137396565633536643834396538646662383465
  3931653731656633623832343239613736613839343864660a306361346436323365363064303035
  35353633346532636230346565383238616261373161336363333061356266326262663565643739
  3734623934656466310a316363653330336335356265643937376237626632303030636637656565
  3239
LDAP_HROUP_SEARCH_FILTER: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  64376238643335366138396333343437356365383332613931623035396336653263353164356461
  3638333266366530633333363663323364393738373333380a376564623039326163336437616465
  61633264333334386132333066316434616565646635613337373032303865393933633831313261
  6166366339646361630a373662633466396330633030323235313430316665336332313166646538
  39356566306665633336646237333731353963363438336564316234303035396461633332613030
  3332663536623137633464313064346633323831646634613563
--------------------------------------------------------------------------------
/provision/group_vars/all/rancher.yml:
--------------------------------------------------------------------------------
RANCHER_APIKEY_PUBLIC: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  34313061326262383966656535656333376537646166616437373035353930333735613334373865
  3662623435326530363561383631376362333439316331320a663734356664346462613363616234
  63636166646461366334353432616665363336623163333033333938646639333836653963313337
  3532646338633137620a316132633661373565393738303231323039376162633331666339633534
  63653133643566663138353839633864666536313933653666303366313832653636
RANCHER_APIKEY_SECRET: !vault |
  $ANSIBLE_VAULT;1.1;AES256
  61323634643162366432386265346336633535633434646133623462343361353965303031363337
  6239663966336132336338623261663864313137393364630a346263633431386636633634373530
  30336662393961333533343832646432353665353764366239333937363537336266303538343562
  3237313133346637330a353064346661366239633862323339333136613639383036326535383332
  39633061373037363238316338303339616438663233356664336435386664396266303634643735
  3164353464396563303134633265373831336534363538663038

RANCHER_MASTER_HOST: "{{ hostvars[groups['common'][0]]['ansible_host'] }}"
RANCHER_MASTER_PORT: 8080


# dns
host_dns: true
host_dns_domain: thoughtworks.local

#
etcd_cluster_group: masters
--------------------------------------------------------------------------------
/provision/group_vars/all/vault.yml:
--------------------------------------------------------------------------------
VAULT_ROOT_TOKEN: 4b0cb3af-7b44-3e42-0064-347be9378cf0
--------------------------------------------------------------------------------
/provision/group_vars/master.yml:
--------------------------------------------------------------------------------
---
# docker
docker_opts: ""
docker_registries:
  - host: "https://index.docker.io/v1/"
    auth: "Zmx5MndmmbmQ6cHdjNTAycg=="
    email: "fly2wind@xxxxx.com"
docker_rpmpackage: no
--------------------------------------------------------------------------------
/provision/group_vars/repository.yml:
--------------------------------------------------------------------------------
---
docker_opts: ""
docker_registries:
  - host: "https://index.docker.io/v1/"
    auth: "Zmx5MndpbmQ6cmmdjNTAycg=="
    email: "xxx@xxx.com"
docker_rpmpackage: no


gogs_domain: git.thoughtworks.io

GOGS_MYSQL_LOGIN_HOST: "{{ hostvars[groups['common'][0]]['ansible_host'] }}"
GOGS_MYSQL_LOGIN_PORT: 3306
GOGS_MYSQL_LOGIN_USER: root
GOGS_MYSQL_LOGIN_PASS: "P@ss123456"
--------------------------------------------------------------------------------
/provision/group_vars/slaves.yml:
--------------------------------------------------------------------------------
---
# docker
docker_opts: "--registry-mirror=http://master-1:5000 --insecure-registry=master-1:5000"
docker_registries:
  - host: "https://index.docker.io/v1/"
    auth: "xxxxxx=="
    email: "xxx@xxx.com"
  - host: "http://master-1:5000"
    auth: "xxxxxxxxx=="
    email: "xxx@xxx.com"
docker_rpmpackage: no
--------------------------------------------------------------------------------
/provision/init_ansible.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -i provision/scaleworks provision/init_ansible.yml
- hosts: all
  become: true
  gather_facts: no
  pre_tasks:
    - name: install python
      raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
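# The raw task above runs with gather_facts disabled because a freshly
# provisioned host may not have Python yet, and every regular Ansible module
# needs /usr/bin/python on the target; the apt commands assume Debian/Ubuntu
# hosts. Once it has run, connectivity can be verified with an ad-hoc ping:
#   ansible -i provision/scaleworks all -m ping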
--------------------------------------------------------------------------------
/provision/init_certs.yml:
--------------------------------------------------------------------------------
---
# ansible-playbook -i provision/scaleworks provision/init_certs.yml
- hosts: localhost
  become: no
  gather_facts: no
  vars:
    certs_name: certs
    certs_data_dir: "{{ inventory_dir }}/files"
    certs_key_file: "{{ certs_data_dir }}/{{ certs_name }}-key.pem"
    certs_csr_file: "{{ certs_data_dir }}/{{ certs_name }}-csr.pem"
    certs_cert_file: "{{ certs_data_dir }}/{{ certs_name }}-cert.pem"
    certs_dhparam_file: "{{ certs_data_dir }}/{{ certs_name }}-dhparam.pem"
    certs_key_length: "2048"
    certs_dhparam_length: "2048"
    certs_domain: "*.thoughtworks.io"
    certs_country: "CN"
    certs_state: "Shaanxi"
    certs_city: "Xi‘an"
    certs_org: "ThoughtWorks"
    certs_subject: "/C={{ certs_country }}/ST={{ certs_state }}/L={{ certs_city }}/O={{ certs_org }}/CN={{ certs_domain }}"
    certs_lifetime: 1825
  tasks:
    - name: generate rsa key
      command: openssl genrsa -out {{ certs_key_file }} {{ certs_key_length }}
      args:
        creates: "{{ certs_key_file }}"
    - name: generate csr
      command: openssl req -new -subj {{ certs_subject }} -key {{ certs_key_file }} -out {{ certs_csr_file }}
      args:
        creates: "{{ certs_csr_file }}"
    - name: generate self-signed certificate
      command: openssl req -x509 -extensions v3_ca -days {{ certs_lifetime }} -key {{ certs_key_file }} -in {{ certs_csr_file }} -out {{ certs_cert_file }}
      args:
        creates: "{{ certs_cert_file }}"
--------------------------------------------------------------------------------
/provision/init_vars.yml:
--------------------------------------------------------------------------------
---
- hosts: localhost
  vars_prompt:
    - name: "ldap_server"
      prompt: "LDAP server address"
      default: "ldap.thoughtworks.io"
    - name: "ldap_port"
      prompt: "LDAP server port"
      default: "389"
    - name: "ldap_basedn"
      prompt: "LDAP base dn"
      default: "OU=Enterprise,OU=Principal,DC=corporate,DC=thoughtworks,DC=com"
    - name: "ldap_binddn"
      prompt: "LDAP bind dn"
      default: "CN=Qiang Shawn Ma,OU=Xian,OU=Employees,OU=Enterprise,OU=Principal,DC=corporate,DC=thoughtworks,DC=com"
    - name: "ldap_bindpass"
      prompt: "LDAP bind password"
      encrypt: "sha512_crypt"
      confirm: yes
  tasks:
    - debug: msg="{{ ldap_bindpass }}"
--------------------------------------------------------------------------------
/provision/library/kubectl.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, Nandaja Varma
#
#
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
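#
# This module assembles a kubectl command line from the requested action
# ('create', 'get', 'delete', 'exec', 'stop', 'run') and its options;
# main() then executes that command through module.run_command.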

import json

class Kubectl:

    def __init__(self, module):
        self.module = module
        self.action = self._validated_params('action')


    def run(self):
        action_func = {
            'create': self.kubectl_create,
            'get': self.kubectl_get,
            'delete': self.kubectl_delete,
            'exec': self.kubectl_exec,
            'stop': self.kubectl_stop,
            'run': self.kubectl_run
        }.get(self.action)

        try:
            return action_func()
        except:
            msg = "No method found for given action"
            self.get_output(rc=3, out=msg, err=msg)


    def kubectl_create(self):
        filename = self.module.params['filename']
        if filename:
            return "cat %s | kubectl %s -f -" % (filename, self.action)

    def kubectl_run(self):
        name = self._validated_params('name')
        image = self._validated_params('image')
        return "kubectl %s %s --image=%s" % (self.action,
                                             name, image)

    def kubectl_exec(self):
        pod = self._validated_params('pod')
        container = self.module.params['container']
        c_opts = '-c %s' % container if container else ''
        command = self._validated_params('command')
        if ',' in command:
            command = command.replace(',', ';')
        return "kubectl %s %s %s %s" % (self.action, pod, c_opts,
                                        command)

    def kubectl_delete(self):
        filename = self.module.params['filename']
        if filename:
            return "cat %s | kubectl %s -f -" % (filename, self.action)
        ropts = self._validated_params('type') or ''
        label = self.module.params['label'] or ''
        if label:
            return "kubectl %s %s -l %s" % (self.action, ropts, label)
        name = self.module.params['name']
        name = ' '.join(name.split(',')) if name else '--all'
        return "kubectl %s %s %s" % (self.action, ropts, name)


    def kubectl_stop(self):
        filename = self.module.params['filename']
        if filename:
            return "cat %s | kubectl %s -f -" % (filename, self.action)
        ropts = self._validated_params('type') or ''
        uid = self.module.params['uid'] or ''
        if uid:
            return "kubectl %s %s %s" % (self.action, ropts, uid)
        label = self.module.params['label'] or ''
        if label:
            return "kubectl %s %s -l %s" % (self.action, ropts, label)
        name = self.module.params['name']
        name = ' '.join(name.split(',')) if name else '--all'
        return "kubectl %s %s %s" % (self.action, ropts, name)



    def kubectl_get(self):
        res_type = self.module.params['type']
        ropts = res_type if res_type else ''

        name = self.module.params['name']
        nopts = name if (name and ropts) else ''

        filename = self.module.params['filename']
        fopts = '-f %s' % filename if (filename and not ropts) else ''

        output = self.module.params['output']
        o_opts = '-o %s' % output if output else ''

        options = filter(None, [ropts, nopts, fopts, o_opts])
        return 'kubectl {0} {1}'.format(self.action, ' '.join(options))

    def _validated_params(self, opt):
        value = self.module.params[opt]
        if value is None:
            msg = "Please provide %s option in the playbook!" % opt
            self.module.fail_json(msg=msg)
        return value

    def get_output(self, rc=0, out=None, err=None):
        if rc:
            self.module.fail_json(msg=err, rc=rc, err=err, out=out)
        else:
            self.module.exit_json(changed=1, msg=json.dumps(out))

def main():
    module = AnsibleModule(
        argument_spec = dict(
            action = dict(required=True, choices=["create",
                          "stop", "run", "exec", "get", "delete"]),
            name = dict(required=False),
            type = dict(required=False),
            filename = dict(required=False),
            label = dict(required=False),
            uid = dict(required=False),
            container = dict(required=False),
            command = dict(required=False),
            pod = dict(required=False),
            output = dict(required=False),
            image = dict(required=False)
        ),
        supports_check_mode = True
    )


    kube = Kubectl(module)
    cmd = kube.run()
    rc, out, err = module.run_command(cmd, use_unsafe_shell=True)
    kube.get_output(rc, out, err)



from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/provision/library/nsupdate.py:
--------------------------------------------------------------------------------
#!/usr/bin/python

try:
    import dns.update
    import dns.query
    import dns.tsigkeyring
    import dns.message
    import dns.resolver
    HAVE_DNSPYTHON=True
except:
    HAVE_DNSPYTHON=False


class Record(object):
    def __init__(self, module):
        self.module = module
        self.state = module.params['state']
        self.server = module.params['server']
        if module.params['zone'][-1] != '.':
            self.zone = module.params['zone'] + '.'
        else:
            self.zone = module.params['zone']
        self.record = module.params['record']
        self.type = module.params['type']
        self.ttl = module.params['ttl']
        self.value = module.params['value']
        if module.params['key_name']:
            self.keyring = dns.tsigkeyring.from_text({
                module.params['key_name'] : module.params['key_secret']
            })
        else:
            self.keyring = None

    def create_record(self):
        update = dns.update.Update(self.zone, keyring=self.keyring)
        update.add(self.record, self.ttl, self.type, self.value)

        try:
            response = dns.query.tcp(update, self.server, timeout=10)
            if dns.message.Message.rcode(response) == 0:
                return True
            else:
                return False
        except:
            self.module.fail_json(msg='Connection to DNS server failed')

    def modify_record(self):
        update = dns.update.Update(self.zone, keyring=self.keyring)
        update.replace(self.record, self.ttl, self.type, self.value)

        try:
            response = dns.query.tcp(update, self.server, timeout=10)
            if dns.message.Message.rcode(response) == 0:
                return True
            else:
                return False
        except:
            self.module.fail_json(msg='Connection to DNS server failed')

    def remove_record(self):
        update = dns.update.Update(self.zone, keyring=self.keyring)
        update.delete(self.record, self.type)

        try:
            response = dns.query.tcp(update, self.server, timeout=10)
            if dns.message.Message.rcode(response) == 0:
                return True
            else:
                return False
        except:
            self.module.fail_json(msg='Connection to DNS server failed')

    def record_exists(self):
        update = dns.update.Update(self.zone, keyring=self.keyring)
        update.present(self.record, self.type)

        try:
            response = dns.query.tcp(update, self.server, timeout=10)
            if dns.message.Message.rcode(response) == 0:
                update.present(self.record, self.type, self.value)
                response = dns.query.tcp(update, self.server, timeout=10)
                if dns.message.Message.rcode(response) == 0:
                    return True
                else:
                    return 2
            else:
                return False
        except:
            self.module.fail_json(msg='Connection to DNS server failed')

def main():
    module = AnsibleModule(
        argument_spec = dict(
            state=dict(required=False, default='present', choices=['present', 'absent'], type='str'),
            server=dict(required=True, type='str'),
            key_name=dict(required=False, type='str'),
            key_secret=dict(required=False, type='str'),
            zone=dict(required=True, type='str'),
            record=dict(required=True, type='str'),
            type=dict(required=False, default='A', type='str'),
            ttl=dict(required=False, default=3600, type='int'),
            value=dict(required=False, default=None, type='str')
        ),
        supports_check_mode=True
    )

    record = Record(module)

    success = None
    result = {}
    result['server'] = record.server
    result['zone'] = record.zone
    result['record'] = record.record
    result['type'] = record.type
    result['ttl'] = record.ttl
    result['value'] = record.value
    result['state'] = record.state

    if not HAVE_DNSPYTHON:
        module.fail_json(msg='python library dnspython required: pip install dnspython')

    exists = record.record_exists()

    if record.state == 'absent':
        if exists:
            if module.check_mode:
                module.exit_json(changed=True)
            success = record.remove_record()
            if success != True:
                module.fail_json(msg='Failed to delete DNS record')
            result['changed'] = True
    elif record.state == 'present':
        if not exists:
            if module.check_mode:
                module.exit_json(changed=True)
            success = record.create_record()
            result['changed'] = True
        elif exists == 2:
            success = record.modify_record()
            result['changed'] = True
        else:
            result['changed'] = False
        if success is not None and success != True:
            module.fail_json(msg='Failed to update DNS record')

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *
main()
--------------------------------------------------------------------------------
/provision/library/rancher.py:
--------------------------------------------------------------------------------
#!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
import gdapi
import json

def main():
    module = AnsibleModule(
        argument_spec = dict(
            url = dict(required=False),
            access_key = dict(required=False),
            secret_key = dict(required=False),
            name = dict(required=False),
            account = dict(required=False),
            publicValue = dict(required=False),
            secretValue = dict(required=False),
            state = dict(default='present', choices=['present', 'absent'])
        ),
        supports_check_mode = True
    )

    url = module.params['url']
    access_key = module.params['access_key']
    secret_key = module.params['secret_key']
    name = module.params['name']
    account = module.params['account']
    publicValue = module.params['publicValue']
    secretValue = module.params['secretValue']
    state = module.params['state']

    try:
        client = gdapi.Client(url=url, access_key=access_key, secret_key=secret_key)
        apikey = False
        for entry in client.list_api_key():
            if entry.name == name and entry.publicValue == publicValue:
                apikey = entry
                break

        changed = False
        if state == 'present':
            if apikey:
                changed = True
                response = client.update(apikey, {'accountId': account, 'name': name, 'publicValue': publicValue, 'secretValue': secretValue})
                module.exit_json(changed=True, key=module.params)
            else:
                response = client.create_api_key({'accountId': account, 'name': name, 'publicValue': publicValue, 'secretValue': secretValue})
                module.exit_json(changed=True, key=module.params)
        elif state == 'absent':
            if apikey:
                apikey.deactivate()
                apikey.remove()
                module.exit_json(changed=True)
            else:
                module.exit_json(changed=False)

        module.exit_json(changed=False)
    except gdapi.ApiError as e:
        module.fail_json(msg=str(e.error))
    except Exception as e:
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/provision/roles/artifactory/defaults/main.yml:
--------------------------------------------------------------------------------
---
# image
artifactory_name: repository-server
artifactory_version: 4

# network
artifactory_license: "cHJvZHVjdHM6CiAgYXJ0aWZhY3Rvcnk6CiAgICBwcm9kdWN0OiBaWGh3YVhKbGN6b2dNakF4TmkweE1DMHlNMVF3TlRvME1Ub3dOQzQzTkRaYUNtbGtPaUJrTm1ZMk56UTVNQzFoWkRJMUxUUXlZamN0T0RNeVlTMWhPRGRqWWpKa05UUTNOVElLYjNkdVpYSTZJRlJoYVdKaGFRcHdjbTl3WlhKMGFXVnpPaUI3ZlFwemFXZHVZWFIxY21VNklHNTFiR3dLZEhKcFlXdzZJSFJ5ZFdVS2RIbHdaVG9nVkZKSlFVd0tkbUZzYVdSR2NtOXRPaUF5TURFMkxUQTVMVEl6VkRBMU9qUXhPakEwTGpjME5sb0sKICAgIHNpZ25hdHVyZTogTFhGQjNTTTBobWlpRlNHeGFyemRBbDJqRUY5UkxVNVdDWFFuVVVGdTNCc3JWT1JLUlZLOXpJaDJ3cG9VL0xPWDhaZUhYaFJyZzBCMWFuUVYzRjlMc0liUUFRWnd0aXIzb2hHSDJidEJObzg3dVB4MEQvbG9HVnJKMkp0QzI0YkhQd1VnNHFpSHQxeEFZMi8vR2VvdW02N0FXY1JqdllrSHpGV1U1NzlPTE1vajY1Ukk1QllRYXJYTmgrNVMyUzJsbUQ2RU5CN0tNS1p5VkJDZ0hNdXpUY3ltb2pGZkhkMGdZeHZzNEVjWlBnTTVWMWlwZExiZnFDS3UvSU8xa3ZXM3Qvd0F5bGJmMExQV0F4T0lmQWp4Q3Avb05jZGFxWUVGZ2FVQldFSkFvMG5TcmVBdVRTK0o2VHIyc09heEpDWVF2S245eTRrMys3OEtiTEluQVEzcTFBPT0KdmVyc2lvbjogMQo="
--------------------------------------------------------------------------------
/provision/roles/artifactory/handlers/main.yml:
--------------------------------------------------------------------------------
---
--------------------------------------------------------------------------------
/provision/roles/artifactory/meta/main.yml:
--------------------------------------------------------------------------------
---
galaxy_info:
  author: "Shawn Ma"
  company: ThoughtWorks, Inc.
  license: BSD
  min_ansible_version: 2.0
  platforms:
    - name: Ubuntu
      versions:
        - trusty
  categories:
    - docker
dependencies: []
--------------------------------------------------------------------------------
/provision/roles/artifactory/tasks/main.yml:
--------------------------------------------------------------------------------
---
- name: start artifactory server container
  docker:
    name: "repository-server"
    image: "baselibrary/artifactory:4"
    ports:
      - "8081:8081"
    volumes:
      - "/var/lib/artifactory/etc:/var/opt/jfrog/artifactory/etc"
      - "/var/lib/artifactory/logs:/var/opt/jfrog/artifactory/logs"
      - "/var/lib/artifactory/data:/var/opt/jfrog/artifactory/data"
      - "/var/lib/artifactory/backup:/var/opt/jfrog/artifactory/backup"
    restart_policy: always
  become: yes
--------------------------------------------------------------------------------
/provision/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
---
host_dns: no
host_dns_domain: thoughtworks.local
--------------------------------------------------------------------------------
/provision/roles/common/files/nsenter:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/common/files/nsenter
--------------------------------------------------------------------------------
/provision/roles/common/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: apt update
  apt: update_cache=yes cache_valid_time=86400
--------------------------------------------------------------------------------
/provision/roles/common/tasks/additional.yml:
--------------------------------------------------------------------------------
---
- name: partition additional disks
  shell: |
    if
      [ -b {{ item.disk }} ]
    then
1MiB 100% 8 | fi 9 | args: 10 | creates: "{{ item.disk }}1" 11 | executable: "/bin/bash" 12 | with_items: "{{ additional_disks }}" 13 | become: yes 14 | 15 | - name: create filesystem on the first partition 16 | filesystem: 17 | dev: "{{ item.disk }}1" 18 | force: "{{ item.force|d(omit) }}" 19 | fstype: "{{ item.fstype }}" 20 | opts: "{{ item.fsopts|d(omit) }}" 21 | with_items: "{{ additional_disks }}" 22 | become: yes 23 | 24 | - name: ensure the mount directory exists 25 | file: 26 | path: "{{ item.mount }}" 27 | owner: "{{ disk_user | default('root') }}" 28 | group: "{{ disk_group | default('root') }}" 29 | state: directory 30 | with_items: "{{ additional_disks }}" 31 | become: yes 32 | 33 | - name: "get uuid for partition" 34 | command: blkid -s UUID -o value "{{ item.disk }}1" 35 | register: uuid_disks 36 | with_items: "{{ additional_disks }}" 37 | changed_when: False 38 | become: yes 39 | 40 | - name: mount additional disk 41 | mount: 42 | name: "{{ item.0.mount }}" 43 | fstype: "{{ item.0.fstype }}" 44 | opts: "{{ item.0.mount_options|d(omit) }}" 45 | passno: "0" 46 | src: "UUID={{ item.1.stdout }}" 47 | state: "{{ item.0.mount_state|d('mounted') }}" 48 | with_together: 49 | - "{{ additional_disks }}" 50 | - "{{ uuid_disks.results }}" 51 | become: yes 52 |
-------------------------------------------------------------------------------- /provision/roles/common/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "config {{ ansible_distribution|lower }} system" 3 | include: "config/{{ ansible_os_family|lower }}.yml" 4 | 5 | - name: configure system authorized key 6 | authorized_key: 7 | key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" 8 | user: root 9 | become: yes 10 | 11 | - name: configure system hostname 12 | hostname: 13 | name: "{{ inventory_hostname }}" 14 | become: yes 15 | 16 | - name: configure system hosts 17 | template: 18 | src: hosts.j2 19 | dest: /etc/hosts 20 | mode: 0644 21 | become: yes 22 |
-------------------------------------------------------------------------------- /provision/roles/common/tasks/config/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
-------------------------------------------------------------------------------- /provision/roles/common/tasks/config/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
-------------------------------------------------------------------------------- /provision/roles/common/tasks/config/suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
-------------------------------------------------------------------------------- /provision/roles/common/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "install common package in {{ ansible_distribution|lower }} system" 3 | include: "install/{{ ansible_os_family|lower }}.yml" 4 | tags: 5 | - development 6 | - common 7 | - install 8 |
-------------------------------------------------------------------------------- /provision/roles/common/tasks/install/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install common package 3 | apt: 4 | name: "{{ item }}" 5 | state: present 6 | become: yes 7 | with_items: 8 | - ntp 9 | - ntpdate 10 | - ca-certificates 11 | - apt-transport-https 12 |
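13 | # A minimal sketch (not shipped with the role) of the `additional_disks`
14 | # list consumed by common/tasks/additional.yml above; the key names come
15 | # from that file, while the device, filesystem and mount point are assumed
16 | # example values:
17 | # additional_disks:
18 | #   - disk: /dev/sdb
19 | #     fstype: ext4
20 | #     mount: /var/lib/docker
21 | #     mount_options: "defaults,noatime"
22 | #     mount_state: mounted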
-------------------------------------------------------------------------------- /provision/roles/common/tasks/install/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add epel repository 3 | yum_repository: 4 | name: epel 5 | description: "EPEL YUM repo" 6 | baseurl: "http://download.fedoraproject.org/pub/epel/$releasever/$basearch/" 7 | gpgkey: https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-$releasever 8 | enabled: yes 9 | become: yes 10 | 11 | - name: install common package 12 | yum: 13 | name: "{{ item }}" 14 | state: present 15 | become: yes 16 | with_items: 17 | - python-pip 18 | -------------------------------------------------------------------------------- /provision/roles/common/tasks/install/suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #- name: install common package 3 | # zypper: 4 | # name: "{{ item }}" 5 | # state: present 6 | # become: yes 7 | # with_items: 8 | # - ntp 9 | # - python-pip 10 | -------------------------------------------------------------------------------- /provision/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: gather os specific variables 3 | include_vars: "{{ item }}" 4 | with_first_found: 5 | - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" 6 | - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" 7 | - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" 8 | - "{{ ansible_distribution|lower }}.yml" 9 | - "{{ ansible_os_family|lower }}.yml" 10 | - main.yml 11 | 12 | - include: additional.yml 13 | when: additional_disks is defined 14 | tags: 15 | - development 16 | - common 17 | - additional 18 | 19 | - include: install.yml 20 | tags: 21 | - development 22 | - common 23 | - install 24 | 25 | - include: config.yml 26 | tags: 27 | - development 28 | - common 29 | - config 30 | -------------------------------------------------------------------------------- /provision/roles/common/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost 2 | {% for item in groups['all'] %} 3 | {% if hostvars[item].private_ipv4 is defined %} 4 | {{ hostvars[item].private_ipv4 }} {% if host_dns %}{{ item }}.{{ host_dns_domain }}{% endif %} {{ item }} 5 | {% endif %} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /provision/roles/common/vars/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/common/vars/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/common/vars/suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/develop/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/develop/handlers/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/develop/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 |
-------------------------------------------------------------------------------- /provision/roles/develop/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure config directories exist 3 | file: 4 | path: "/root/{{ item }}" 5 | state: directory 6 | become: yes 7 | with_items: 8 | - ".m2" 9 | - ".gradle" 10 | 11 | - name: config maven 12 | template: 13 | src: "maven.j2" 14 | dest: "/root/.m2/settings.xml" 15 | become: yes 16 | 17 | - name: config gradle 18 | template: 19 | src: "gradle.j2" 20 | dest: "/root/.gradle/init.gradle" 21 | become: yes 22 | 23 | - name: config npm 24 | template: 25 | src: "npm.j2" 26 | dest: "/root/.npmrc" 27 | become: yes 28 | 29 | - name: config bower 30 | template: 31 | src: "bower.j2" 32 | dest: "/root/.bowerrc" 33 | become: yes 34 |
-------------------------------------------------------------------------------- /provision/roles/develop/templates/bower.j2: -------------------------------------------------------------------------------- 1 | { 2 | "registry" : { 3 | "search" : [ 4 | "http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/bower-public" 5 | ], 6 | "register" : "http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/bower-public" 7 | }, 8 | "resolvers" : [ "bower-nexus3-resolver" ] 9 | } 10 |
-------------------------------------------------------------------------------- /provision/roles/develop/templates/gradle.j2: -------------------------------------------------------------------------------- 1 | allprojects{ 2 | repositories { 3 | def REPOSITORY_URL = 'http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/maven-public/' 4 | all { ArtifactRepository repo -> 5 | if(repo instanceof MavenArtifactRepository) { 6 | def url = repo.url.toString() 7 | if(url.startsWith('https://repo1.maven.org/maven2') || url.startsWith('https://jcenter.bintray.com')) { 8 | project.logger.lifecycle "Project repository ${repo.url} removed. Only $REPOSITORY_URL is allowed" 9 | remove repo 10 | } 11 | } 12 | } 13 | maven { 14 | url REPOSITORY_URL 15 | } 16 | } 17 | } 18 | 19 | buildscript{ 20 | repositories { 21 | def REPOSITORY_URL = 'http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/maven-public/' 22 | all { ArtifactRepository repo -> 23 | if(repo instanceof MavenArtifactRepository) { 24 | def url = repo.url.toString() 25 | if(url.startsWith('https://repo1.maven.org/maven2') || url.startsWith('https://jcenter.bintray.com')) { 26 | project.logger.lifecycle "Project repository ${repo.url} removed. Only $REPOSITORY_URL is allowed" 27 | remove repo 28 | } 29 | } 30 | } 31 | maven { 32 | url REPOSITORY_URL 33 | } 34 | } 35 | } 36 |
-------------------------------------------------------------------------------- /provision/roles/develop/templates/maven.j2: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="UTF-8"?> 2 | <settings xmlns="http://maven.apache.org/SETTINGS/1.0.0" 3 | xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 4 | xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd"> 5 | <mirrors> 6 | <mirror> 7 | <id>thoughtworks</id> 8 | <name>ThoughtWorks Maven Repository Mirror.</name> 9 | <url>http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/maven-public/</url> 10 | <mirrorOf>external:*</mirrorOf> 11 | </mirror> 12 | </mirrors> 13 | <profiles> 14 | <profile> 15 | <id>thoughtworks</id> 16 | <repositories> 17 | <repository> 18 | <id>thoughtworks-repository</id> 19 | <name>ThoughtWorks Maven Repository Group</name> 20 | <url>http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/maven-public/</url> 21 | </repository> 22 | </repositories> 23 | </profile> 24 | </profiles> 25 | <activeProfiles> 26 | <activeProfile>thoughtworks</activeProfile> 27 | </activeProfiles> 28 | </settings> 29 |
-------------------------------------------------------------------------------- /provision/roles/develop/templates/npm.j2: -------------------------------------------------------------------------------- 1 | registry=http://{{ hostvars[groups['repository'][0]]['ansible_host'] }}/repository/npm-public/ 2 |
-------------------------------------------------------------------------------- /provision/roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # rpm install 3 | docker_rpmpackage: no 4 | 5 | # daemon options 6 | docker_opts: --registry-mirror=http://fly2wind.m.alauda.cn 7 | 8 | # 9 | # certs 10 | # 11 | # A list of certs. e.g. 12 | # docker_certs: 13 | # - "ca.crt" 14 | docker_certs: [] 15 | # 16 | # registry 17 | # 18 | # A list of registries (rendered into /root/.docker/config.json). e.g. 19 | # docker_registries: 20 | # - host: docker.chatid.com 21 | # auth: "dXNlcjpwYXNz" 22 | docker_registries: [] 23 | 24 | # additional directory 25 | # docker_additonal_directory: 26 | 27 | # You can set any interface that the docker engine listens on. 28 | docker_swarm_interface: "{{ ansible_default_ipv4['interface'] }}" 29 | docker_swarm_addr: "{{ hostvars[inventory_hostname]['ansible_' + docker_swarm_interface]['ipv4']['address'] }}" 30 | docker_swarm_port: 2377 31 | 32 | # Switches disabling the docker-engine, docker group and swarm-mode setup.
33 | skip_engine: false 34 | skip_swarm: false 35 | skip_docker_py: false 36 | -------------------------------------------------------------------------------- /provision/roles/docker/files/check-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | N=`ps aux | grep /usr/bin/docker | grep -v grep | wc -l` 3 | if [ $N = 0 ]; then 4 | exit 1 5 | fi 6 | exit 0 -------------------------------------------------------------------------------- /provision/roles/docker/files/reg.dev.twleansw.com.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICoTCCAgoCCQCeLGuu05cCLDANBgkqhkiG9w0BAQUFADCBlDELMAkGA1UEBhMC 3 | Q04xETAPBgNVBAgTCFNoYW5nSGFpMQswCQYDVQQHEwJTSDERMA8GA1UEChMIRGVs 4 | aWZsb3cxDzANBgNVBAsTBlBlb3BsZTEbMBkGA1UEAxQSKi5kZXYudHdsZWFuc3cu 5 | Y29tMSQwIgYJKoZIhvcNAQkBFhV5bGxpQHRob3VnaHR3b3Jrcy5jb20wHhcNMTYw 6 | OTA3MDc0NjU3WhcNMTcwOTA3MDc0NjU3WjCBlDELMAkGA1UEBhMCQ04xETAPBgNV 7 | BAgTCFNoYW5nSGFpMQswCQYDVQQHEwJTSDERMA8GA1UEChMIRGVsaWZsb3cxDzAN 8 | BgNVBAsTBlBlb3BsZTEbMBkGA1UEAxQSKi5kZXYudHdsZWFuc3cuY29tMSQwIgYJ 9 | KoZIhvcNAQkBFhV5bGxpQHRob3VnaHR3b3Jrcy5jb20wgZ8wDQYJKoZIhvcNAQEB 10 | BQADgY0AMIGJAoGBAOOCYA2xrw7Fl6gxKROdZaVVu/R1HdX9TXbbKXsWKdLvYgIC 11 | dRwjYeVDEPSGEfrRlvFN2B7aGxn2aFleKzcylkNTvrFIx6hG5g/Iqz77myujWXCY 12 | ddp00XtiZiAJqBDva2hQZzBG6N4jy/ADnLhLNpdx5wEssNLJxO0loH1pnHWpAgMB 13 | AAEwDQYJKoZIhvcNAQEFBQADgYEAix74u3m8U6fcQrQ4H/tBizmFtQhetoQ0hFxP 14 | RsmD76mE1xaIN9+nRD97MqiQOBSr/+vXzPl9WHgmBZxkBe1qKjccWv6L5oZMlxF3 15 | X/rLuCcHjO+RKgqhwiUmkpekwsYWAqX7EDbo86r2FPV3xaF4xFA4iCYnfgfB+xBa 16 | 3meUfAw= 17 | -----END CERTIFICATE----- 18 | -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/containerd-0.2.4+git0366d7e-28.1.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/containerd-0.2.4+git0366d7e-28.1.x86_64.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/docker-1.12.3-158.1.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/docker-1.12.3-158.1.x86_64.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/docker-client-1.7.1-5.2.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/docker-client-1.7.1-5.2.x86_64.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/docker-image-migrator-1.0.2-14.2.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/docker-image-migrator-1.0.2-14.2.x86_64.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/docker.key: -------------------------------------------------------------------------------- 1 | 
-----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQENBFImAl0BCACkjaXGvVLHBGTVXVP0khtpUVHqFvCRtaIIMHaX/5oTr3nyehDQ 5 | Ex9VLsSRcNa0QxtnCHFRQzjWWqe+i6pBginnSjucgmjnIKyJsF4l6R+rwAiinHQX 6 | C4s6Lqg/wH9xDPRBrMYFqlc/7MVf0Glhk1+lAxgQjolMt+5AbbrWlBbwc/i+++zl 7 | ES3MaeH8aiwup/ogjhmk0SbCQQ/ib21p3XWBwx2oz/KM6Voq9tKDvMczjzNRY3ZT 8 | 6Di3FsUSKI7kgljiNiuN+675YwqEqxWEJgdE5a7Zb67giH1Ik08b5wQiF5jSAICD 9 | DxW7/ibWBvZJnqhqQT2xJpLC5VaJqwkN8o83ABEBAAG0PlZpcnR1YWxpemF0aW9u 10 | IE9CUyBQcm9qZWN0IDxWaXJ0dWFsaXphdGlvbkBidWlsZC5vcGVuc3VzZS5vcmc+ 11 | iQE8BBMBAgAmBQJWMX0aAhsDBQkIKiq9BgsJCAcDAgQVAggDBBYCAwECHgECF4AA 12 | CgkQoZP7tXIXT8I4uQf/QDe252+Duq358xt9qQINqIzFrc5qAIzrGM1Rb2YbzhRG 13 | QE6UokAmUd/sievba210z5RWSjFNDRswIR39tYTqCrPG2j0+ne/WaEx0HKqrSXYS 14 | W3LA6iir232bGSdWY4LbANJk9HUI+QuqgSZol5wKIMRtQU4yzyRtapO7huNQRp91 15 | 1rnEkd51Crt4f9MAqvdp7Pi07eUFFVJAcw561SJdROxTHkZW5jWQy92ElTO+RyZw 16 | g4uWf0dFEp3XsYA+PkHi7mDgPIJiltb/yvpmVr0kbe6XTjl+dSBx/C6dDFzncJBg 17 | r0zK45+m0DhhoSTrxesO463bwd+HH0FfCGL5c/f/LYhGBBMRAgAGBQJSJgJdAAoJ 18 | EDswEbdrnWUjucgAoK+hIu0GfjwJoODK+XW/uDN2nqQBAJ4lN90WkygQhpMEIupB 19 | 6+FWiCNvxA== 20 | =hSz3 21 | -----END PGP PUBLIC KEY BLOCK----- 22 | -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-backports.ssl_match_hostname-3.5.0.1-1.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-backports.ssl_match_hostname-3.5.0.1-1.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-docker-py-1.10.4-7.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-docker-py-1.10.4-7.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-docker-pycreds-0.2.1-5.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-docker-pycreds-0.2.1-5.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-ipaddress-1.0.16-2.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-ipaddress-1.0.16-2.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-requests-2.9.1-1.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-requests-2.9.1-1.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-setuptools-20.2.2-2.1.noarch.rpm: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-setuptools-20.2.2-2.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-six-1.10.0-1.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-six-1.10.0-1.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/python-websocket-client-0.32.0-3.1.noarch.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/python-websocket-client-0.32.0-3.1.noarch.rpm -------------------------------------------------------------------------------- /provision/roles/docker/files/suse/runc-0.1.1+git02f8fa7-21.1.x86_64.rpm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/files/suse/runc-0.1.1+git02f8fa7-21.1.x86_64.rpm -------------------------------------------------------------------------------- /provision/roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | service: name=docker state=restarted -------------------------------------------------------------------------------- /provision/roles/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 
5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: []
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/additional.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure docker directory 3 | file: 4 | path: "{{ docker_additonal_directory }}" 5 | owner: root 6 | group: root 7 | mode: 0755 8 | state: directory 9 | when: docker_additonal_directory is defined 10 | become: yes 11 | 12 | - name: link docker directory to additional disks 13 | file: 14 | src: "{{ docker_additonal_directory }}" 15 | dest: /var/lib/docker 16 | state: link 17 | when: docker_additonal_directory is defined 18 | become: yes 19 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "config docker in {{ ansible_distribution|lower }} system" 3 | include: "config/{{ ansible_os_family|lower }}.yml" 4 | 5 | - name: ensure docker certs dir exists 6 | file: 7 | path: /etc/docker/certs.d 8 | state: directory 9 | become: yes 10 | when: docker_certs is defined 11 | 12 | - name: ensure docker certs host dir exists 13 | file: 14 | path: "/etc/docker/certs.d/{{ item.host }}" 15 | state: directory 16 | with_items: "{{ docker_certs }}" 17 | become: yes 18 | when: docker_certs is defined 19 | 20 | - name: copy docker registry certs 21 | copy: 22 | src: "{{ item.cert }}" 23 | dest: "/etc/docker/certs.d/{{ item.host }}/ca.crt" 24 | with_items: "{{ docker_certs }}" 25 | become: yes 26 | when: docker_certs is defined 27 | 28 | - name: ensure docker config dir exists 29 | file: 30 | path: /root/.docker 31 | state: directory 32 | become: yes 33 | when: docker_registries is defined 34 | 35 | - name: config docker login 36 | template: 37 | src: config.json.j2 38 | dest: /root/.docker/config.json 39 | owner: root 40 | group: root 41 | mode: 0644 42 | become: yes 43 | when: docker_registries is defined 44 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/config/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: cleanup docker files 3 | file: 4 | path: /etc/init.d/docker 5 | state: absent 6 | become: yes 7 | 8 | - name: configure docker upstart service 9 | template: 10 | src: debian/default/docker.j2 11 | dest: /etc/default/docker 12 | owner: root 13 | group: root 14 | mode: 0644 15 | when: ansible_service_mgr == "upstart" 16 | notify: 17 | - restart docker 18 | become: yes 19 | 20 | - name: ensure docker systemd service directory 21 | file: 22 | path: /etc/systemd/system/docker.service.d 23 | state: directory 24 | when: ansible_service_mgr == "systemd" 25 | become: yes 26 | 27 | - name: configure docker systemd service 28 | template: 29 | src: debian/systemd/docker-systemd-service.j2 30 | dest: /etc/systemd/system/docker.service.d/override.conf 31 | owner: root 32 | group: root 33 | mode: 0644 34 | when: ansible_service_mgr == "systemd" 35 | register: dropinfile 36 | notify: 37 | - restart docker 38 | become: yes 39 | 40 | - name: reload systemd 41 | command: 'systemctl daemon-reload' 42 | when: ansible_service_mgr == "systemd" and dropinfile.changed 43 | become: yes 44 | --------------------------------------------------------------------------------
/provision/roles/docker/tasks/config/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create docker config directory 3 | file: 4 | path: "/etc/systemd/system/docker.service.d" 5 | owner: root 6 | group: root 7 | mode: 0755 8 | state: directory 9 | become: yes 10 | 11 | - name: configure docker config file 12 | template: 13 | src: redhat/sysconfig/docker.j2 14 | dest: /etc/sysconfig/docker 15 | owner: root 16 | group: root 17 | mode: 0644 18 | become: yes 19 | notify: 20 | - restart docker 21 | 22 | - name: configure docker service 23 | template: 24 | src: redhat/systemd/docker.conf.j2 25 | dest: /etc/systemd/system/docker.service.d/docker.conf 26 | owner: root 27 | group: root 28 | mode: 0644 29 | become: yes 30 | notify: 31 | - restart docker 32 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/config/suse.yml: -------------------------------------------------------------------------------- 1 | - name: configure docker package 2 | template: 3 | src: suse/sysconfig/docker.j2 4 | dest: /etc/sysconfig/docker 5 | owner: root 6 | group: root 7 | mode: 0644 8 | notify: 9 | - restart docker 10 | become: yes 11 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "install docker-engine in {{ ansible_distribution|lower }} system" 3 | include: "install/{{ ansible_os_family|lower }}.yml" 4 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/install/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add docker apt key 3 | apt_key: 4 | id: 58118E89F3A912897C070ADBF76221572C52609D 5 | keyserver: keyserver.ubuntu.com 6 | state: present 7 | become: yes 8 | 9 | - name: add docker apt repository 10 | apt_repository: 11 | repo: 'deb https://apt.dockerproject.org/repo {{ansible_distribution|lower}}-{{ ansible_distribution_release }} main' 12 | update_cache: yes 13 | state: present 14 | become: yes 15 | 16 | - name: install docker dependencies 17 | apt: 18 | name: "{{ item }}" 19 | state: present 20 | become: yes 21 | with_items: "{{ docker_required_packages }}" 22 | 23 | - name: install docker package 24 | apt: 25 | name: "{{ docker_package_name }}={{ docker_package_version }}" 26 | state: present 27 | become: yes 28 | 29 | - name: install pip 30 | easy_install: 31 | name: pip 32 | state: latest 33 | become: yes 34 | 35 | - name: install docker client 36 | pip: 37 | name: "docker-py==1.9.0" 38 | state: present 39 | become: yes 40 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/install/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add docker yum repository 3 | yum_repository: 4 | name: "docker" 5 | description: "Docker Repository" 6 | baseurl: "https://yum.dockerproject.org/repo/main/centos/$releasever/" 7 | gpgkey: "https://yum.dockerproject.org/gpg" 8 | enabled: yes 9 | become: yes 10 | 11 | - name: install docker package 12 | yum: 13 | name: "{{ docker_package_name }}-{{ docker_package_version }}" 14 | state: present 15 | become: yes 16 | 17 | - name: install docker client 18 | pip: 19 | name: "docker-py==1.9.0" 20 | state: present 21 | become: yes 22 |
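23 | # A minimal usage sketch (an assumed play, not part of this repository):
24 | # wires the docker role's documented defaults to a host group; the mirror
25 | # URL, auth string and email below are illustrative assumptions.
26 | # - hosts: slaves
27 | #   roles:
28 | #     - role: docker
29 | #       docker_opts: "--registry-mirror=http://mirror.example.com"
30 | #       docker_registries:
31 | #         - host: reg.dev.twleansw.com
32 | #           auth: "dXNlcjpwYXNz"
33 | #           email: "ops@example.com"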
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/install/suse.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - block: 3 | - name: add docker rpm key 4 | rpm_key: 5 | key: http://download.opensuse.org/repositories/Virtualization:/containers/openSUSE_Leap_42.1/repodata/repomd.xml.key 6 | state: present 7 | become: yes 8 | 9 | - name: add docker zypper repository 10 | zypper_repository: 11 | name: Virtualization:containers.repo 12 | repo: http://download.opensuse.org/repositories/Virtualization:/containers/openSUSE_Leap_42.1/ 13 | state: present 14 | become: yes 15 | 16 | - name: install docker dependencies 17 | zypper: 18 | name: "{{ item }}" 19 | state: present 20 | become: yes 21 | with_items: "{{ docker_required_packages }}" 22 | 23 | - name: install docker package 24 | zypper: 25 | name: "{{ docker_package_name }}={{ docker_package_version }}" 26 | oldpackage: yes 27 | state: present 28 | become: yes 29 | 30 | - name: install pip 31 | easy_install: 32 | name: pip 33 | state: latest 34 | become: yes 35 | 36 | - name: install docker client 37 | pip: 38 | name: docker-py 39 | state: present 40 | become: yes 41 | when: not docker_rpmpackage 42 | 43 | - block: 44 | - name: prepare docker packages 45 | copy: 46 | src: "{{ ansible_os_family|lower }}/{{ item }}" 47 | dest: "/tmp/{{ item }}" 48 | owner: root 49 | group: root 50 | mode: 0755 51 | force: no 52 | with_items: "{{ docker_rpm_packages }}" 53 | become: yes 54 | 55 | - name: install docker dependencies 56 | zypper: 57 | name: "{{ item }}" 58 | state: present 59 | with_items: "{{ docker_required_packages }}" 60 | become: yes 61 | 62 | - name: install docker packages 63 | zypper: 64 | name: "/tmp/{{ item }}" 65 | disable_gpg_check: yes 66 | force: yes 67 | state: present 68 | with_items: "{{ docker_rpm_packages }}" 69 | ignore_errors: yes 70 | become: yes 71 | when: docker_rpmpackage 72 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: gather os specific variables 3 | include_vars: "{{ item }}" 4 | with_first_found: 5 | - "{{ ansible_distribution|lower }}-{{ ansible_distribution_version|lower|replace('/', '_') }}.yml" 6 | - "{{ ansible_distribution|lower }}-{{ ansible_distribution_release }}.yml" 7 | - "{{ ansible_distribution|lower }}-{{ ansible_distribution_major_version|lower|replace('/', '_') }}.yml" 8 | - "{{ ansible_distribution|lower }}.yml" 9 | - "{{ ansible_os_family|lower }}.yml" 10 | - main.yml 11 | 12 | - include: additional.yml 13 | tags: 14 | - development 15 | - docker 16 | - additional 17 | 18 | - include: install.yml 19 | tags: 20 | - development 21 | - docker 22 | - install 23 | 24 | - include: config.yml 25 | tags: 26 | - development 27 | - docker 28 | - config 29 | 30 | - include: service.yml 31 | tags: 32 | - development 33 | - docker 34 | - service 35 | 36 | #- include: swarm.yml 37 | # tags: 38 | # - development 39 | # - docker 40 | # - swarm 41 | 42 | #- include: manage.yml 43 | # tags: 44 | # - development 45 | # - docker 46 | # - manage 47 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/manage.yml: -------------------------------------------------------------------------------- 1 | --- 2 | --------------------------------------------------------------------------------
/provision/roles/docker/tasks/service.yml: -------------------------------------------------------------------------------- 1 | - name: register docker service 2 | service: 3 | name: docker 4 | enabled: yes 5 | state: started 6 | become: yes 7 |
-------------------------------------------------------------------------------- /provision/roles/docker/tasks/swarm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check the docker info 3 | shell: docker info 4 | changed_when: False 5 | register: docker_info 6 | 7 | - name: init "swarm mode" on the first swarm manager 8 | shell: docker swarm init --listen-addr {{ docker_swarm_addr }}:{{ docker_swarm_port }} --advertise-addr {{ docker_swarm_addr }} 9 | when: "docker_info.stdout.find('Swarm: active') == -1 and inventory_hostname == groups['master'][0]" 10 | 11 | - name: get the swarm worker join-token 12 | shell: docker swarm join-token -q worker 13 | changed_when: false 14 | register: docker_worker_token 15 | delegate_to: "{{ groups['master'][0] }}" 16 | delegate_facts: true 17 | when: "'slaves' in group_names" 18 | 19 | - name: get the swarm manager join-token 20 | shell: docker swarm join-token -q manager 21 | changed_when: false 22 | register: docker_manager_token 23 | delegate_to: "{{ groups['master'][0] }}" 24 | delegate_facts: true 25 | when: "'master' in group_names and inventory_hostname != groups['master'][0]" 26 | 27 | - name: declare the address of the first swarm manager 28 | set_fact: 29 | docker_swarm_manager_address: "{{ docker_swarm_addr }}:{{ docker_swarm_port }}" 30 | when: "inventory_hostname == groups['master'][0]" 31 | 32 | - name: distribute the fact containing address of the first swarm manager 33 | set_fact: 34 | docker_swarm_manager_address: "{{ hostvars[groups['master'][0]]['docker_swarm_manager_address'] }}" 35 | when: "inventory_hostname != groups['master'][0]" 36 | 37 | 38 | - name: join the pending swarm worker nodes 39 | shell: docker swarm join --listen-addr {{ docker_swarm_addr }}:{{ docker_swarm_port }} --advertise-addr {{ docker_swarm_addr }} --token "{{ docker_worker_token.stdout }}" {{ docker_swarm_manager_address }} 40 | changed_when: false 41 | when: "docker_info.stdout.find('Swarm: active') == -1 and docker_info.stdout.find('Swarm: pending') == -1 and 'slaves' in group_names" 42 | 43 | - name: join the pending swarm manager nodes 44 | shell: docker swarm join --listen-addr {{ docker_swarm_addr }}:{{ docker_swarm_port }} --advertise-addr {{ docker_swarm_addr }} --token "{{ docker_manager_token.stdout }}" {{ docker_swarm_manager_address }} 45 | changed_when: false 46 | when: "docker_info.stdout.find('Swarm: active') == -1 and docker_info.stdout.find('Swarm: pending') == -1 and 'master' in group_names and inventory_hostname != groups['master'][0]" 47 |
-------------------------------------------------------------------------------- /provision/roles/docker/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "auths": { 3 | {% for info in docker_registries %} 4 | "{{ info.host }}": { 5 | "auth": "{{ info.auth }}", 6 | "email": "{{ info.email }}" 7 | }{% if not loop.last -%},{%- endif %} 8 | 9 | {% endfor %} 10 | } 11 | } 12 |
-------------------------------------------------------------------------------- /provision/roles/docker/templates/debian/default/docker.j2: -------------------------------------------------------------------------------- 1 | # Docker Upstart and SysVinit configuration
file 2 | 3 | # 4 | # THIS FILE DOES NOT APPLY TO SYSTEMD 5 | # 6 | # Please see the documentation for "systemd drop-ins": 7 | # https://docs.docker.com/engine/articles/systemd/ 8 | # 9 | 10 | # Customize location of Docker binary (especially for development testing). 11 | #DOCKERD="/usr/local/bin/dockerd" 12 | 13 | # Use DOCKER_OPTS to modify the daemon startup options. 14 | DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 {{ docker_opts }}" 15 | 16 | # If you need Docker to use an HTTP proxy, it can also be specified here. 17 | #export http_proxy="http://127.0.0.1:3128/" 18 | 19 | # This is also a handy place to tweak where Docker's temporary files go. 20 | #export TMPDIR="/mnt/bigdrive/docker-tmp" 21 | -------------------------------------------------------------------------------- /provision/roles/docker/templates/debian/init/docker.conf.j2: -------------------------------------------------------------------------------- 1 | description "Docker daemon" 2 | 3 | start on (filesystem and net-device-up IFACE!=lo) 4 | stop on runlevel [!2345] 5 | limit nofile 524288 1048576 6 | limit nproc 524288 1048576 7 | 8 | respawn 9 | 10 | kill timeout 20 11 | 12 | pre-start script 13 | # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount 14 | if grep -v '^#' /etc/fstab | grep -q cgroup \ 15 | || [ ! -e /proc/cgroups ] \ 16 | || [ ! -d /sys/fs/cgroup ]; then 17 | exit 0 18 | fi 19 | if ! mountpoint -q /sys/fs/cgroup; then 20 | mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup 21 | fi 22 | ( 23 | cd /sys/fs/cgroup 24 | for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do 25 | mkdir -p $sys 26 | if ! mountpoint -q $sys; then 27 | if ! mount -n -t cgroup -o $sys cgroup $sys; then 28 | rmdir $sys || true 29 | fi 30 | fi 31 | done 32 | ) 33 | end script 34 | 35 | script 36 | # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) 37 | DOCKERD=/usr/bin/docker 38 | DOCKER_OPTS= 39 | if [ -f /etc/default/$UPSTART_JOB ]; then 40 | . /etc/default/$UPSTART_JOB 41 | fi 42 | exec "$DOCKERD" daemon $DOCKER_OPTS 43 | end script 44 | 45 | # Don't emit "started" event until docker.sock is ready. 46 | # See https://github.com/docker/docker/issues/6647 47 | post-start script 48 | DOCKER_OPTS= 49 | DOCKER_SOCKET= 50 | if [ -f /etc/default/$UPSTART_JOB ]; then 51 | . /etc/default/$UPSTART_JOB 52 | fi 53 | 54 | if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then 55 | DOCKER_SOCKET=/var/run/docker.sock 56 | else 57 | DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)') 58 | fi 59 | 60 | if [ -n "$DOCKER_SOCKET" ]; then 61 | while ! 
[ -e "$DOCKER_SOCKET" ]; do 62 | initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 63 | echo "Waiting for $DOCKER_SOCKET" 64 | sleep 0.1 65 | done 66 | echo "$DOCKER_SOCKET is up" 67 | fi 68 | end script
-------------------------------------------------------------------------------- /provision/roles/docker/templates/debian/systemd/docker-systemd-service.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | ExecStart= 3 | ExecStart=/usr/bin/dockerd -H fd:// {{ docker_opts }} 4 |
-------------------------------------------------------------------------------- /provision/roles/docker/templates/redhat/sysconfig/docker.j2: -------------------------------------------------------------------------------- 1 | ## Path : System/Management 2 | ## Description : Extra cli switches for docker daemon 3 | ## Type : string 4 | ## Default : "" 5 | ## ServiceRestart : docker 6 | # 7 | DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 {{ docker_opts }}" 8 |
-------------------------------------------------------------------------------- /provision/roles/docker/templates/redhat/systemd/docker.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | EnvironmentFile=-/etc/sysconfig/docker 3 | ExecStart= 4 | ExecStart=/usr/bin/docker daemon $DOCKER_OPTS 5 |
-------------------------------------------------------------------------------- /provision/roles/docker/templates/suse/default/docker.j2: -------------------------------------------------------------------------------- 1 | 2 | ## Path : System/Management 3 | ## Description : Extra cli switches for docker daemon 4 | ## Type : string 5 | ## Default : "" 6 | ## ServiceRestart : docker 7 | # 8 | DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 {{ docker_opts }}" 9 |
-------------------------------------------------------------------------------- /provision/roles/docker/templates/suse/sysconfig/docker.j2: -------------------------------------------------------------------------------- 1 | 2 | ## Path : System/Management 3 | ## Description : Extra cli switches for docker daemon 4 | ## Type : string 5 | ## Default : "" 6 | ## ServiceRestart : docker 7 | # 8 | DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 {{ docker_opts }}" 9 |
-------------------------------------------------------------------------------- /provision/roles/docker/vars/centos.yml: -------------------------------------------------------------------------------- 1 | # docker-engine is the default package name 2 | docker_package_name: docker-engine 3 | docker_package_version: "1.12.6-0.el7.centos" 4 |
-------------------------------------------------------------------------------- /provision/roles/docker/vars/debian.yml: -------------------------------------------------------------------------------- 1 | # docker-engine is the default package name 2 | docker_package_name: docker-engine 3 | docker_package_version: "1.12.6-0~{{ ansible_distribution|lower}}-{{ ansible_distribution_release }}" 4 | 5 | # required 6 | docker_required_packages: 7 | - ca-certificates 8 | - apt-transport-https 9 | - python-pip 10 |
-------------------------------------------------------------------------------- /provision/roles/docker/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | --------------------------------------------------------------------------------
/provision/roles/docker/vars/redhat.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/baselibrary/infrastructure/42461a91044857e872a123fefceed0861412fcd4/provision/roles/docker/vars/redhat.yml -------------------------------------------------------------------------------- /provision/roles/docker/vars/suse.yml: -------------------------------------------------------------------------------- 1 | # docker-engine is the default package name 2 | docker_package_name: docker 3 | docker_package_version: "1.12.3-158.1" 4 | 5 | # required 6 | docker_required_packages: 7 | - libapparmor1 8 | - bridge-utils 9 | - python-setuptools 10 | 11 | # rpm package 12 | docker_rpm_packages: 13 | - docker-1.12.3-158.1.x86_64.rpm 14 | - containerd-0.2.4+git0366d7e-28.1.x86_64.rpm 15 | - runc-0.1.1+git02f8fa7-21.1.x86_64.rpm 16 | - python-backports.ssl_match_hostname-3.5.0.1-1.1.noarch.rpm 17 | - python-websocket-client-0.32.0-3.1.noarch.rpm 18 | - python-ipaddress-1.0.16-2.1.noarch.rpm 19 | - python-requests-2.9.1-1.1.noarch.rpm 20 | - python-docker-py-1.10.4-7.1.noarch.rpm 21 | -------------------------------------------------------------------------------- /provision/roles/docker/vars/ubuntu.yml: -------------------------------------------------------------------------------- 1 | # docker-engine is the default package name 2 | docker_package_name: docker-engine 3 | docker_package_version: "1.12.6-0~{{ ansible_distribution|lower}}-{{ ansible_distribution_release }}" 4 | 5 | # required 6 | docker_required_packages: 7 | - ca-certificates 8 | - apt-transport-https 9 | - linux-image-extra-{{ ansible_kernel }} 10 | - linux-image-extra-virtual 11 | - python-setuptools 12 | -------------------------------------------------------------------------------- /provision/roles/etcd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | etcd_name: etcd-server 4 | etcd_image: quay.io/coreos/etcd 5 | etcd_version: v3.2 6 | 7 | # group 8 | etcd_cluster_group: masters 9 | 10 | # storage 11 | etcd_data_dir: "/var/lib/etcd" 12 | 13 | # network 14 | etcd_peer_port: 2380 15 | etcd_peer_scheme: http 16 | etcd_client_port: 2379 17 | etcd_client_scheme: http 18 | 19 | # config 20 | etcd_listen_peer_urls: "{{ etcd_peer_scheme }}://0.0.0.0:{{ etcd_peer_port }}" 21 | etcd_listen_client_urls: "{{ etcd_client_scheme }}://0.0.0.0:{{ etcd_client_port }}" 22 | 23 | etcd_advertise_client_urls: "{{ etcd_client_scheme }}://{{ hostvars[inventory_hostname]['ansible_host'] }}:{{ etcd_client_port }}" 24 | etcd_initial_advertise_peer_urls: "{{ etcd_peer_scheme }}://{{ hostvars[inventory_hostname]['ansible_host'] }}:{{ etcd_peer_port }}" 25 | 26 | etcd_initial_cluster: "{% for host in groups[etcd_cluster_group] -%}{{ host }}={{ etcd_peer_scheme }}://{{ hostvars[host]['ansible_host'] }}:{{ etcd_peer_port }}{% if not loop.last %},{% endif %}{% endfor %}" 27 | etcd_initial_cluster_token: "etcd-cluster" 28 | etcd_initial_cluster_state: "new" 29 | -------------------------------------------------------------------------------- /provision/roles/etcd/files/vault.sh: -------------------------------------------------------------------------------- 1 | # Store Secrets using Hashicorp Vault 2 | 3 | # Learn how to store and manage secrets using Hashicorp Vault 4 | 5 | # Step 1 - Configuration 6 | 7 | cat vault.hcl 8 | backend "consul" { 9 | address = "consul:8500" 10 | advertise_addr = "consul:8300" 11 | scheme = "http" 12 | } 13 
| listener "tcp" { 14 | address = "0.0.0.0:8200" 15 | tls_disable = 1 16 | } 17 | disable_mlock = true 18 | 19 | # Create Data Container 20 | # To store the configuration we'll create a container. This will be used by Vault and Consul to read the required configuration files. 21 | 22 | docker create -v /config --name config busybox; docker cp vault.hcl config:/config/; 23 | 24 | # Step 2 - Launch 25 | # With the configuration data container created we can launch the required processes to start Vault. 26 | # Launch Services 27 | 28 | docker run -d --name consul \ 29 | -p 8500:8500 \ 30 | consul:v0.6.4 \ 31 | agent -dev -client=0.0.0.0 32 | 33 | # Our Vault instance can now use Consul to store the data. All data stored within Consul will be encrypted. 34 | 35 | docker run -d --name vault-dev \ 36 | --link consul:consul \ 37 | -p 8200:8200 \ 38 | --volumes-from config \ 39 | cgswong/vault:latest server -config=/config/vault.hcl 40 | 41 | # Step 3 - Initialise 42 | # With a vault instance running we can now configure our environment and initialise the Vault. 43 | # Configure Environment 44 | 45 | alias vault='docker exec -it vault-dev vault "$@"' 46 | export VAULT_ADDR=http://127.0.0.1:8200 47 | 48 | # Initialise Vault 49 | # With the alias in place, we can make calls to the CLI. The first step is to initialise the vault using the init command. 50 | 51 | vault init -address=${VAULT_ADDR} > keys.txt 52 | cat keys.txt 53 | 54 | # Step 4 - Unseal Vault 55 | 56 | vault unseal -address=${VAULT_ADDR} $(grep 'Key 1:' keys.txt | awk '{print $NF}') 57 | vault unseal -address=${VAULT_ADDR} $(grep 'Key 2:' keys.txt | awk '{print $NF}') 58 | vault unseal -address=${VAULT_ADDR} $(grep 'Key 3:' keys.txt | awk '{print $NF}') 59 | vault status -address=${VAULT_ADDR} 60 | 61 | # Step 5 - Vault Tokens 62 | # You can use this token to login to vault. 63 | 64 | export VAULT_TOKEN=$(grep 'Initial Root Token:' keys.txt | awk '{print substr($NF, 1, length($NF)-1)}') 65 | vault auth -address=${VAULT_ADDR} ${VAULT_TOKEN} 66 | 67 | # Step 6 - Read/Write Data 68 | # Save Data 69 | # To store data, we use the write CLI command. In this case, we have a key named secret/api-key with the value 12345678 70 | 71 | vault write -address=${VAULT_ADDR} \ 72 | secret/api-key value=12345678 73 | 74 | # Read Data 75 | # Reading the key will output the value, along with other information such as the lease duration. 76 | vault read -address=${VAULT_ADDR} \ 77 | secret/api-key 78 | 79 | # You can also use the -field flag to extract the value from the secret data. 80 | 81 | vault read -address=${VAULT_ADDR} \ 82 | -field=value secret/api-key 83 | 84 | # Step 7 - HTTP API 85 | # Using the command-line tool jq we can parse the response and extract the value for our key. 86 | 87 | curl -H "X-Vault-Token:$VAULT_TOKEN" \ 88 | -XGET http://docker:8200/v1/secret/api-key 89 | 90 | curl -s -H "X-Vault-Token:$VAULT_TOKEN" \ 91 | -XGET http://docker:8200/v1/secret/api-key \ 92 | | jq -r .data.value 93 | 94 | # Step 8 - Consul Data 95 | # As Vault stores all the data as encrypted key/values in Consul, you can use the Consul UI to see the encrypted data.
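96 | # A short follow-up sketch (assumed, reusing the consul container and jq from
97 | # the steps above): Vault keeps its encrypted entries under the vault/ prefix
98 | # of Consul's KV store, so they can be listed over Consul's HTTP API.
99 | curl -s http://docker:8500/v1/kv/vault/?keys | jq .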
100 |
-------------------------------------------------------------------------------- /provision/roles/etcd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
-------------------------------------------------------------------------------- /provision/roles/etcd/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 |
-------------------------------------------------------------------------------- /provision/roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: start etcd server container 3 | docker_container: 4 | name: "{{ etcd_name }}" 5 | image: "{{ etcd_image }}:{{ etcd_version }}" 6 | ports: 7 | - "{{ etcd_peer_port }}:{{ etcd_peer_port }}" 8 | - "{{ etcd_client_port }}:{{ etcd_client_port }}" 9 | volumes: 10 | - "{{ etcd_data_dir }}:/var/lib/etcd" 11 | env: 12 | ETCD_NAME: "{{ inventory_hostname }}" 13 | ETCD_LISTEN_PEER_URLS: "{{ etcd_listen_peer_urls }}" 14 | ETCD_LISTEN_CLIENT_URLS: "{{ etcd_listen_client_urls }}" 15 | ETCD_ADVERTISE_CLIENT_URLS: "{{ etcd_advertise_client_urls }}" 16 | ETCD_INITIAL_ADVERTISE_PEER_URLS: "{{ etcd_initial_advertise_peer_urls }}" 17 | ETCD_INITIAL_CLUSTER: "{{ etcd_initial_cluster }}" 18 | ETCD_INITIAL_CLUSTER_TOKEN: "{{ etcd_initial_cluster_token }}" 19 | ETCD_INITIAL_CLUSTER_STATE: "{{ etcd_initial_cluster_state }}" 20 | restart_policy: unless-stopped 21 | become: yes 22 | 23 | - debug: msg="{{ etcd_initial_cluster }}" 24 |
-------------------------------------------------------------------------------- /provision/roles/gogs/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | gogs_name: gogs-server 4 | gogs_image: gogs/gogs 5 | gogs_version: latest 6 | 7 | # domain 8 | gogs_domain: "{{ hostvars[inventory_hostname]['ansible_host'] }}" 9 | 10 | # network 11 | gogs_web_port: 3000 12 | gogs_ssh_port: 10022 13 | 14 | # storage 15 | gogs_data_dir: "/var/lib/gogs" 16 | 17 | # database 18 | gogs_mysql_user: gogs 19 | gogs_mysql_pass: fsBLqWQFk7yJ 20 | gogs_mysql_database: gogs 21 |
-------------------------------------------------------------------------------- /provision/roles/gogs/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 |
-------------------------------------------------------------------------------- /provision/roles/gogs/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 |
-------------------------------------------------------------------------------- /provision/roles/gogs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create mysql user 3 | mysql_user: 4 | name: "{{ gogs_mysql_user }}" 5 | password: "{{ gogs_mysql_pass }}" 6 | priv: "*.*:ALL" 7 | host: "%" 8 | login_host: "{{ GOGS_MYSQL_LOGIN_HOST }}" 9 | login_port: "{{ GOGS_MYSQL_LOGIN_PORT }}" 10 | login_user: "{{ GOGS_MYSQL_LOGIN_USER }}" 11 | login_password: "{{ GOGS_MYSQL_LOGIN_PASS }}" 12 | state: present 13 | become: no 14 | run_once: yes 15 | delegate_to: localhost 16 | 17 | - name: create mysql database 18 | mysql_db: 19 | name: "{{ gogs_mysql_database }}" 20 | login_host: "{{ GOGS_MYSQL_LOGIN_HOST }}" 21 | login_port: "{{ GOGS_MYSQL_LOGIN_PORT }}" 22 | login_user: "{{ GOGS_MYSQL_LOGIN_USER }}" 23 | login_password: "{{ GOGS_MYSQL_LOGIN_PASS }}" 24 | state: present 25 | become: no 26 | run_once: yes 27 | delegate_to: localhost 28 | 29 | - name: create gogs group 30 | group: 31 | name: git 32 | state: present 33 | become: yes 34 | 35 | - name: create gogs user 36 | user: 37 | name: git 38 | group: git 39 | shell: /bin/bash 40 | generate_ssh_key: yes 41 | become: yes 42 | 43 | - name: create gogs data directory 44 | file: 45 | path: "{{ gogs_data_dir }}/gogs/conf" 46 | state: "directory" 47 | owner: git 48 | group: root 49 | become: yes 50 | 51 | - name: generate gogs script 52 | template: 53 | src: gogs.j2 54 | dest: "/app/gogs/gogs" 55 | owner: root 56 | group: root 57 | mode: 0755 58 | become: yes 59 | 60 | - name: generate gogs config 61 | template: 62 | src: app.ini.j2 63 | dest: "{{ gogs_data_dir }}/gogs/conf/app.ini" 64 | owner: git 65 | group: git 66 | become: yes 67 | 68 | - name: start gogs server container 69 | docker_container: 70 | name: "{{ gogs_name }}" 71 | image: "{{ gogs_image }}:{{ gogs_version }}" 72 | ports: 73 | - "{{ gogs_web_port }}:3000" 74 | - "{{ gogs_ssh_port }}:22" 75 | volumes: 76 | - "{{ gogs_data_dir }}:/data" 77 | - "/etc/passwd:/etc/passwd" 78 | - "/home/git/.ssh:/home/git/.ssh" 79 | restart_policy: unless-stopped 80 | become: yes 81 | 82 | - name: read gogs user ssh public key 83 | slurp: 84 | src: "/home/git/.ssh/id_rsa.pub" 85 | register: authorized_key 86 | become: yes 87 | 88 | - name: set authorized key taken from file 89 | lineinfile: 90 | path: /home/git/.ssh/authorized_keys 91 | regexp: '^no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty' 92 | line: "no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty {{ authorized_key.content | b64decode }}" 93 | create: yes 94 | become: yes 95 |
-------------------------------------------------------------------------------- /provision/roles/gogs/templates/app.ini.j2: -------------------------------------------------------------------------------- 1 | APP_NAME = Gogs 2 | RUN_USER = git 3 | RUN_MODE = prod 4 | 5 | [database] 6 | DB_TYPE = mysql 7 | HOST = {{ GOGS_MYSQL_LOGIN_HOST }}:{{ GOGS_MYSQL_LOGIN_PORT }} 8 | NAME = {{ gogs_mysql_database }} 9 | USER = {{ gogs_mysql_user }} 10 | PASSWD = {{ gogs_mysql_pass }} 11 | SSL_MODE = disable 12 | PATH = data/gogs.db 13 | 14 | [repository] 15 | ROOT = /data/git/gogs-repositories 16 | 17 | [server] 18 | DOMAIN = {{ gogs_domain }} 19 | HTTP_PORT = 3000 20 | ROOT_URL = http://{{ gogs_domain }}/ 21 | DISABLE_SSH = false 22 | SSH_PORT = 22 23 | START_SSH_SERVER = false 24 |
OFFLINE_MODE = false 25 | 26 | [mailer] 27 | ENABLED = false 28 | 29 | [service] 30 | REGISTER_EMAIL_CONFIRM = false 31 | ENABLE_NOTIFY_MAIL = false 32 | DISABLE_REGISTRATION = false 33 | ENABLE_CAPTCHA = true 34 | REQUIRE_SIGNIN_VIEW = false 35 | 36 | [picture] 37 | DISABLE_GRAVATAR = false 38 | ENABLE_FEDERATED_AVATAR = true 39 | 40 | [session] 41 | PROVIDER = file 42 | 43 | [log] 44 | MODE = file 45 | LEVEL = Info 46 | ROOT_PATH = /data/gogs/log 47 | 48 | [security] 49 | INSTALL_LOCK = true 50 | SECRET_KEY = XXLrdHaYnNjVfX9 51 | -------------------------------------------------------------------------------- /provision/roles/gogs/templates/gogs.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | ssh -p {{ gogs_ssh_port }} -o StrictHostKeyChecking=no git@127.0.0.1 \ 3 | "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@" 4 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kube_certs_ca_subject: "/CN=kubernetes" 3 | 4 | 5 | kube_token: 6 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/tasks/certs.yml: -------------------------------------------------------------------------------- 1 | - name: create kubernetes config directory 2 | file: 3 | path: "/etc/kubernetes" 4 | state: directory 5 | owner: root 6 | group: root 7 | become: yes 8 | 9 | - name: create kubernetes certs directory 10 | file: 11 | path: "/etc/kubernetes/pki" 12 | state: directory 13 | owner: root 14 | group: root 15 | become: yes 16 | 17 | - name: create kubernetes script directory 18 | file: 19 | path: "/etc/kubernetes/manifests" 20 | state: directory 21 | owner: root 22 | group: root 23 | become: yes 24 | 25 | - name: create root ca key 26 | command: openssl genrsa -out {{ inventory_dir }}/files/ca.key 2048 creates="{{ inventory_dir }}/files/ca.key" 27 | become: no 28 | run_once: yes 29 | delegate_to: localhost 30 | 31 | - name: create root ca certificate 32 | command: openssl req -x509 -new -nodes -subj "/CN=kubernetes" -days 3650 -key {{ inventory_dir }}/files/ca.key -out {{ inventory_dir }}/files/ca.crt creates="{{ inventory_dir }}/files/ca.crt" 33 | become: no 34 | run_once: yes 35 | delegate_to: localhost 36 | 37 | - set_fact: 38 | apiserver_opnssl: "{{ lookup('template', 'ssl/apiserver.cnf') }}" 39 | 40 | - name: create apiserver key 41 | shell: | 42 | openssl genrsa -out {{ inventory_dir }}/files/apiserver.key 2048 43 | args: 44 | creates: "{{ inventory_dir }}/files/apiserver.key" 45 | executable: /bin/bash 46 | become: no 47 | run_once: yes 48 | delegate_to: localhost 49 | 50 | - name: create apiserver csr 51 | shell: | 52 | openssl req -new 
-subj "/CN=kube-apiserver" -key {{ inventory_dir }}/files/apiserver.key -out {{ inventory_dir }}/files/apiserver.csr -config <(echo "{{ apiserver_opnssl }}") 53 | args: 54 | creates: "{{ inventory_dir }}/files/apiserver.csr" 55 | executable: /bin/bash 56 | become: no 57 | run_once: yes 58 | delegate_to: localhost 59 | 60 | - name: create apiserver cert 61 | shell: | 62 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/apiserver.csr -out {{ inventory_dir }}/files/apiserver.crt -days 10000 -extensions v3_req -extfile <(echo "{{ apiserver_opnssl }}") 63 | args: 64 | creates: "{{ inventory_dir }}/files/apiserver.crt" 65 | executable: /bin/bash 66 | become: no 67 | run_once: yes 68 | delegate_to: localhost 69 | 70 | - set_fact: 71 | kube_openssl_config: "{{ lookup('template', 'ssl/apiserver-kubelet-client.cnf') }}" 72 | 73 | - name: create kubelet client key 74 | shell: | 75 | openssl genrsa -out {{ inventory_dir }}/files/apiserver-kubelet-client.key 2048 76 | args: 77 | creates: "{{ inventory_dir }}/files/apiserver-kubelet-client.key" 78 | executable: /bin/bash 79 | become: no 80 | run_once: yes 81 | delegate_to: localhost 82 | 83 | - name: create kubelet client csr 84 | shell: | 85 | openssl req -new -subj "/O=system:masters/CN=kube-apiserver-kubelet-client" -key {{ inventory_dir }}/files/apiserver-kubelet-client.key -out {{ inventory_dir }}/files/apiserver-kubelet-client.csr -config <(echo "{{ kube_openssl_config }}") 86 | args: 87 | creates: "{{ inventory_dir }}/files/apiserver-kubelet-client.csr" 88 | executable: /bin/bash 89 | become: no 90 | run_once: yes 91 | delegate_to: localhost 92 | 93 | - name: create kubelet client cert 94 | shell: | 95 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/apiserver-kubelet-client.csr -out {{ inventory_dir }}/files/apiserver-kubelet-client.crt -days 10000 -extensions v3_req -extfile <(echo "{{ kube_openssl_config }}") 96 | args: 97 | creates: "{{ inventory_dir }}/files/apiserver-kubelet-client.crt" 98 | executable: /bin/bash 99 | become: no 100 | run_once: yes 101 | delegate_to: localhost 102 | 103 | 104 | 105 | 106 | 107 | 108 | - set_fact: 109 | kube_openssl_config: "{{ lookup('template', 'ssl/front-proxy-ca.cnf') }}" 110 | 111 | - name: create front-proxy-ca key 112 | shell: | 113 | openssl genrsa -out {{ inventory_dir }}/files/front-proxy-ca.key 2048 114 | args: 115 | creates: "{{ inventory_dir }}/files/front-proxy-ca.key" 116 | executable: /bin/bash 117 | become: no 118 | run_once: yes 119 | delegate_to: localhost 120 | 121 | - name: create front-proxy-ca csr 122 | shell: | 123 | openssl req -new -subj "/CN=kubernetes" -key {{ inventory_dir }}/files/front-proxy-ca.key -out {{ inventory_dir }}/files/front-proxy-ca.csr -config <(echo "{{ kube_openssl_config }}") 124 | args: 125 | creates: "{{ inventory_dir }}/files/front-proxy-ca.csr" 126 | executable: /bin/bash 127 | become: no 128 | run_once: yes 129 | delegate_to: localhost 130 | 131 | - name: create front-proxy-ca cert 132 | shell: | 133 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/front-proxy-ca.csr -out {{ inventory_dir }}/files/front-proxy-ca.crt -days 10000 -extensions v3_req -extfile <(echo "{{ kube_openssl_config }}") 134 | args: 135 | creates: "{{ inventory_dir }}/files/front-proxy-ca.crt" 136 | executable: 
/bin/bash 137 | become: no 138 | run_once: yes 139 | delegate_to: localhost 140 | 141 | - set_fact: 142 | kube_openssl_config: "{{ lookup('template', 'ssl/front-proxy-client.cnf') }}" 143 | 144 | - name: create front-proxy-client key 145 | shell: | 146 | openssl genrsa -out {{ inventory_dir }}/files/front-proxy-client.key 2048 147 | args: 148 | creates: "{{ inventory_dir }}/files/front-proxy-client.key" 149 | executable: /bin/bash 150 | become: no 151 | run_once: yes 152 | delegate_to: localhost 153 | 154 | - name: create front-proxy-client csr 155 | shell: | 156 | openssl req -new -subj "/CN=front-proxy-client" -key {{ inventory_dir }}/files/front-proxy-client.key -out {{ inventory_dir }}/files/front-proxy-client.csr -config <(echo "{{ kube_openssl_config }}") 157 | args: 158 | creates: "{{ inventory_dir }}/files/front-proxy-client.csr" 159 | executable: /bin/bash 160 | become: no 161 | run_once: yes 162 | delegate_to: localhost 163 | 164 | - name: create front-proxy-client cert 165 | shell: | 166 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/front-proxy-client.csr -out {{ inventory_dir }}/files/front-proxy-client.crt -days 10000 -extensions v3_req -extfile <(echo "{{ kube_openssl_config }}") 167 | args: 168 | creates: "{{ inventory_dir }}/files/front-proxy-client.crt" 169 | executable: /bin/bash 170 | become: no 171 | run_once: yes 172 | delegate_to: localhost 173 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add kubernetes apt key 3 | apt_key: 4 | id: A7317B0F 5 | keyserver: keyserver.ubuntu.com 6 | state: present 7 | become: yes 8 | 9 | - name: add kubernetes apt repository 10 | apt_repository: 11 | repo: "deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }} main" 12 | filename: "kubernetes" 13 | state: present 14 | become: yes 15 | 16 | - name: install kubernetes package 17 | apt: 18 | name: "{{ item }}" 19 | state: present 20 | with_items: 21 | - keepalived 22 | - kubeadm 23 | - kubelet 24 | - kubernetes-cni 25 | become: yes 26 | 27 | - name: join kubernetes agent 28 | shell: | 29 | kubeadm join --token=0fa86a.87b5dc45a3f42032 10.202.128.107:6443 30 | args: 31 | creates: "/etc/kubernetes/pki/ca.crt" 32 | executable: /bin/bash 33 | become: yes 34 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | tags: 4 | - development 5 | - rancher 6 | - install 7 | 8 | - include: config.yml 9 | tags: 10 | - development 11 | - rancher 12 | - config 13 | 14 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/templates/kubeadm-init.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kubeadm.k8s.io/v1alpha1 2 | kind: MasterConfiguration 3 | kubernetesVersion: v1.7.5 4 | token: 0fa86a.87b5dc45a3f42032 5 | api: 6 | advertiseAddress: 10.202.128.107 7 | bindPort: 6443 
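# Editor's sketch (not part of the original template): the token above is a
# static example value and the advertise address is environment-specific.
# With kubeadm of this vintage (kubeadm.k8s.io/v1alpha1), a fresh token for
# the join command in install.yml can be produced and inspected like so:
#
#   kubeadm token generate        # prints a new token in the aabbcc.xxxxxx format
#   kubeadm token list            # on the first master, after kubeadm init
#
# Any name or IP that clients use to reach the apiserver, including the
# load balancer address, must appear in apiServerCertSANs below.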
8 | apiServerCertSANs: 9 | - toc-master01 10 | - toc-master02 11 | - toc-master03 12 | - 10.202.128.107 13 | - 10.202.128.109 14 | - 10.202.128.110 15 | - 10.202.128.111 16 | networking: 17 | podSubnet: 192.168.0.0/16 18 | serviceSubnet: 10.96.0.0/12 19 | etcd: 20 | endpoints: 21 | - http://10.202.128.109:2379 22 | - http://10.202.128.110:2379 23 | - http://10.202.128.111:2379 24 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/templates/ssl/apiserver-kubelet-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | keyUsage = digitalSignature, keyEncipherment 7 | extendedKeyUsage = clientAuth 8 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/templates/ssl/apiserver.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | keyUsage = digitalSignature, keyEncipherment 7 | extendedKeyUsage = serverAuth 8 | subjectAltName = @alt_names 9 | [alt_names] 10 | DNS.1 = kubernetes 11 | DNS.2 = kubernetes.default 12 | DNS.3 = kubernetes.default.svc 13 | DNS.4 = kubernetes.default.svc.cluster.local 14 | DNS.5 = toc-master01 15 | IP.1 = 10.202.128.107 16 | IP.2 = 10.202.128.109 17 | IP.3 = 10.202.128.110 18 | IP.4 = 10.202.128.111 19 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/templates/ssl/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "8760h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth"], 13 | "expiry": "8760h" 14 | } 15 | } } 16 | } 17 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/templates/ssl/front-proxy-ca.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | basicConstraints = CA:TRUE 7 | keyUsage = digitalSignature, keyEncipherment 8 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-agent/templates/ssl/front-proxy-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | keyUsage = digitalSignature, keyEncipherment 7 | extendedKeyUsage = clientAuth 8 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-balancing/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | 5 | 6 | # configs 7 | nginx_worker_processes: "{{ ansible_processor_vcpus | default(ansible_processor_count) }}" 8 | nginx_worker_connections: "1024" 9 | 10 | nginx_tcp_nopush: "on" 11 | nginx_tcp_nodelay: "on" 12 | 13 | nginx_log_format: '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent 
"$http_referer" "$http_user_agent" "$http_x_forwarded_for"' 14 | 15 | nginx_vhosts: 16 | - listen: "80" 17 | server_name: "localhost" 18 | root: "/usr/share/nginx/html" 19 | index: "index.html index.htm" 20 | 21 | nginx_upstreams: [] 22 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-balancing/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-balancing/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-balancing/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure kube-balancing directory 3 | file: 4 | path: /etc/kubernetes 5 | state: directory 6 | owner: root 7 | group: root 8 | become: yes 9 | 10 | - name: generate kube-balancing proxy 11 | template: 12 | src: kube-balancing.cfg 13 | dest: /etc/kubernetes/kube-balancing.cfg 14 | become: yes 15 | 16 | - name: start kube-balancing container 17 | docker_container: 18 | name: "kube-balancing" 19 | image: "haproxy" 20 | ports: 21 | - "6443:6443" 22 | volumes: 23 | - "/etc/kubernetes/kube-balancing.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro" 24 | restart_policy: unless-stopped 25 | become: yes 26 | -------------------------------------------------------------------------------- /provision/roles/kubernetes-balancing/templates/kube-balancing.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local0 notice 3 | maxconn 2000 4 | tune.ssl.default-dh-param 2048 5 | 6 | defaults 7 | log global 8 | mode tcp 9 | option tcplog 10 | timeout connect 5000ms 11 | timeout client 50000ms 12 | timeout server 50000ms 13 | 14 | listen kube-apiserver 15 | bind *:6443 # must match the 6443:6443 mapping published by the docker_container task 16 | mode tcp 17 | server toc-master01 10.202.128.109:6443 18 | server toc-master02 10.202.128.110:6443 19 | server toc-master03 10.202.128.111:6443 20 | balance leastconn 21 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kube_certs_ca_subject: "/CN=kubernetes" 3 | 4 | 5 | kube_token: 6 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 
5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/tasks/certs.yml: -------------------------------------------------------------------------------- 1 | - name: create kubernetes config directory 2 | file: 3 | path: "/etc/kubernetes" 4 | state: directory 5 | owner: root 6 | group: root 7 | become: yes 8 | 9 | - name: create kubernetes certs directory 10 | file: 11 | path: "/etc/kubernetes/pki" 12 | state: directory 13 | owner: root 14 | group: root 15 | become: yes 16 | 17 | - name: create kubernetes script directory 18 | file: 19 | path: "/etc/kubernetes/manifests" 20 | state: directory 21 | owner: root 22 | group: root 23 | become: yes 24 | 25 | - name: create root ca key 26 | command: openssl genrsa -out {{ inventory_dir }}/files/ca.key 2048 creates="{{ inventory_dir }}/files/ca.key" 27 | become: no 28 | run_once: yes 29 | delegate_to: localhost 30 | 31 | - name: create root ca certificate 32 | command: openssl req -x509 -new -nodes -subj "/CN=kubernetes" -days 3650 -key {{ inventory_dir }}/files/ca.key -out {{ inventory_dir }}/files/ca.crt creates="{{ inventory_dir }}/files/ca.crt" 33 | become: no 34 | run_once: yes 35 | delegate_to: localhost 36 | 37 | - set_fact: 38 | apiserver_opnssl: "{{ lookup('template', 'ssl/apiserver.cnf') }}" 39 | 40 | - name: create apiserver key 41 | shell: | 42 | openssl genrsa -out {{ inventory_dir }}/files/apiserver.key 2048 43 | args: 44 | creates: "{{ inventory_dir }}/files/apiserver.key" 45 | executable: /bin/bash 46 | become: no 47 | run_once: yes 48 | delegate_to: localhost 49 | 50 | - name: create apiserver csr 51 | shell: | 52 | openssl req -new -subj "/CN=kube-apiserver" -key {{ inventory_dir }}/files/apiserver.key -out {{ inventory_dir }}/files/apiserver.csr -config <(echo "{{ apiserver_opnssl }}") 53 | args: 54 | creates: "{{ inventory_dir }}/files/apiserver.csr" 55 | executable: /bin/bash 56 | become: no 57 | run_once: yes 58 | delegate_to: localhost 59 | 60 | - name: create apiserver cert 61 | shell: | 62 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/apiserver.csr -out {{ inventory_dir }}/files/apiserver.crt -days 10000 -extensions v3_req -extfile <(echo "{{ apiserver_opnssl }}") 63 | args: 64 | creates: "{{ inventory_dir }}/files/apiserver.crt" 65 | executable: /bin/bash 66 | become: no 67 | run_once: yes 68 | delegate_to: localhost 69 | 70 | - set_fact: 71 | kube_openssl_config: "{{ lookup('template', 'ssl/apiserver-kubelet-client.cnf') }}" 72 | 73 | - name: create kubelet client key 74 | shell: | 75 | openssl genrsa -out {{ inventory_dir }}/files/apiserver-kubelet-client.key 2048 76 | args: 77 | creates: "{{ inventory_dir }}/files/apiserver-kubelet-client.key" 78 | executable: /bin/bash 79 | become: no 80 | run_once: yes 81 | delegate_to: localhost 82 | 83 | - name: create kubelet client csr 84 | shell: | 85 | openssl req -new -subj "/O=system:masters/CN=kube-apiserver-kubelet-client" -key {{ inventory_dir }}/files/apiserver-kubelet-client.key -out {{ inventory_dir }}/files/apiserver-kubelet-client.csr -config <(echo "{{ kube_openssl_config }}") 86 | args: 87 | creates: "{{ inventory_dir }}/files/apiserver-kubelet-client.csr" 88 | executable: /bin/bash 89 | become: no 90 | run_once: yes 91 | delegate_to: localhost 92 | 93 | - 
name: create kubelet client cert 94 | shell: | 95 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/apiserver-kubelet-client.csr -out {{ inventory_dir }}/files/apiserver-kubelet-client.crt -days 10000 -extensions v3_req -extfile <(echo "{{ kube_openssl_config }}") 96 | args: 97 | creates: "{{ inventory_dir }}/files/apiserver-kubelet-client.crt" 98 | executable: /bin/bash 99 | become: no 100 | run_once: yes 101 | delegate_to: localhost 102 | 103 | 104 | 105 | 106 | 107 | 108 | - set_fact: 109 | kube_openssl_config: "{{ lookup('template', 'ssl/front-proxy-ca.cnf') }}" 110 | 111 | - name: create front-proxy-ca key 112 | shell: | 113 | openssl genrsa -out {{ inventory_dir }}/files/front-proxy-ca.key 2048 114 | args: 115 | creates: "{{ inventory_dir }}/files/front-proxy-ca.key" 116 | executable: /bin/bash 117 | become: no 118 | run_once: yes 119 | delegate_to: localhost 120 | 121 | - name: create front-proxy-ca csr 122 | shell: | 123 | openssl req -new -subj "/CN=kubernetes" -key {{ inventory_dir }}/files/front-proxy-ca.key -out {{ inventory_dir }}/files/front-proxy-ca.csr -config <(echo "{{ kube_openssl_config }}") 124 | args: 125 | creates: "{{ inventory_dir }}/files/front-proxy-ca.csr" 126 | executable: /bin/bash 127 | become: no 128 | run_once: yes 129 | delegate_to: localhost 130 | 131 | - name: create front-proxy-ca cert 132 | shell: | 133 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/front-proxy-ca.csr -out {{ inventory_dir }}/files/front-proxy-ca.crt -days 10000 -extensions v3_req -extfile <(echo "{{ kube_openssl_config }}") 134 | args: 135 | creates: "{{ inventory_dir }}/files/front-proxy-ca.crt" 136 | executable: /bin/bash 137 | become: no 138 | run_once: yes 139 | delegate_to: localhost 140 | 141 | - set_fact: 142 | kube_openssl_config: "{{ lookup('template', 'ssl/front-proxy-client.cnf') }}" 143 | 144 | - name: create front-proxy-client key 145 | shell: | 146 | openssl genrsa -out {{ inventory_dir }}/files/front-proxy-client.key 2048 147 | args: 148 | creates: "{{ inventory_dir }}/files/front-proxy-client.key" 149 | executable: /bin/bash 150 | become: no 151 | run_once: yes 152 | delegate_to: localhost 153 | 154 | - name: create front-proxy-client csr 155 | shell: | 156 | openssl req -new -subj "/CN=front-proxy-client" -key {{ inventory_dir }}/files/front-proxy-client.key -out {{ inventory_dir }}/files/front-proxy-client.csr -config <(echo "{{ kube_openssl_config }}") 157 | args: 158 | creates: "{{ inventory_dir }}/files/front-proxy-client.csr" 159 | executable: /bin/bash 160 | become: no 161 | run_once: yes 162 | delegate_to: localhost 163 | 164 | - name: create front-proxy-client cert 165 | shell: | 166 | openssl x509 -req -CAcreateserial -CAkey {{ inventory_dir }}/files/ca.key -CA {{ inventory_dir }}/files/ca.crt -in {{ inventory_dir }}/files/front-proxy-client.csr -out {{ inventory_dir }}/files/front-proxy-client.crt -days 10000 -extensions v3_req -extfile <(echo "{{ kube_openssl_config }}") 167 | args: 168 | creates: "{{ inventory_dir }}/files/front-proxy-client.crt" 169 | executable: /bin/bash 170 | become: no 171 | run_once: yes 172 | delegate_to: localhost 173 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 
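Editor's sketch: the certs.yml tasks above shell out to openssl on the control
machine and are guarded by creates:, so re-runs are cheap. A quick sanity check
of the generated material (file names follow the creates: arguments; run from
{{ inventory_dir }}/files; note that front-proxy-client.crt is signed here by
the root ca.key, so it verifies against ca.crt):

    # every leaf certificate must chain back to the generated root CA
    openssl verify -CAfile ca.crt apiserver.crt apiserver-kubelet-client.crt front-proxy-client.crt
    # the SANs baked into the apiserver certificate should match apiserver.cnf
    openssl x509 -in apiserver.crt -noout -text | grep -A1 'Subject Alternative Name'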
-------------------------------------------------------------------------------- /provision/roles/kubernetes/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add kubernetes apt key 3 | apt_key: 4 | id: A7317B0F 5 | keyserver: keyserver.ubuntu.com 6 | state: present 7 | become: yes 8 | 9 | - name: add kubernetes apt repository 10 | apt_repository: 11 | repo: "deb http://apt.kubernetes.io/ kubernetes-{{ ansible_distribution_release }} main" 12 | filename: "kubernetes" 13 | state: present 14 | become: yes 15 | 16 | - name: install kubernetes package 17 | apt: 18 | name: "{{ item }}" 19 | state: present 20 | with_items: 21 | - kubeadm 22 | - kubelet 23 | - kubernetes-cni 24 | become: yes 25 | 26 | - name: ensure kubernetes directory 27 | file: 28 | path: "{{ item }}" 29 | state: directory 30 | owner: root 31 | group: root 32 | with_items: 33 | - /root/.kube 34 | - /etc/kubernetes 35 | become: yes 36 | 37 | - name: generate kubernetes token 38 | command: /usr/bin/kubeadm token generate 39 | register: kube_token 40 | 41 | - name: generate kubernetes init 42 | template: 43 | src: kube-init.yml 44 | dest: /etc/kubernetes/kube-init.yml 45 | become: yes 46 | 47 | - name: generate kubernetes configuration 48 | command: kubeadm init --config=/etc/kubernetes/kube-init.yml 49 | args: 50 | creates: "/etc/kubernetes/admin.conf" 51 | become: yes 52 | run_once: yes 53 | delegate_to: "{{ groups.masters | first }}" 54 | 55 | - name: distribute kubernetes configuration 56 | synchronize: 57 | src: "/etc/kubernetes/pki" 58 | dest: /etc/kubernetes 59 | delete: yes 60 | recursive: yes 61 | become: yes 62 | when: inventory_hostname in groups['masters'] and inventory_hostname != groups['masters'][0] 63 | delegate_to: "{{ groups.masters | first }}" 64 | 65 | - name: install kubernetes 66 | command: kubeadm init --config=/etc/kubernetes/kube-init.yml 67 | args: 68 | creates: "/etc/kubernetes/admin.conf" 69 | become: yes 70 | 71 | - name: create kubeconfig 72 | copy: 73 | src: /etc/kubernetes/admin.conf 74 | dest: /root/.kube/config 75 | remote_src: true 76 | owner: root 77 | group: root 78 | mode: 0755 79 | become: yes 80 | 81 | - name: install kubernetes network 82 | command: /usr/bin/kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml 83 | args: 84 | creates: "/etc/cni/net.d/10-kuberouter.conf" 85 | environment: 86 | KUBECONFIG: /etc/kubernetes/admin.conf 87 | become: yes 88 | run_once: true 89 | delegate_to: "{{ groups.masters | first }}" 90 | 91 | - name: config kubernetes dns 92 | command: kubectl scale --replicas=3 -n kube-system deployment/kube-dns 93 | environment: 94 | KUBECONFIG: /etc/kubernetes/admin.conf 95 | become: yes 96 | run_once: true 97 | delegate_to: "{{ groups.masters | first }}" 98 | 99 | - name: check kubernetes dashboard 100 | shell: "kubectl get pods --all-namespaces=true | grep kubernetes-dashboard" 101 | register: check_dashboard 102 | ignore_errors: true 103 | become: yes 104 | run_once: true 105 | delegate_to: "{{ groups.masters | first }}" 106 | 107 | - name: install kubernetes dashboard 108 | command: /usr/bin/kubectl create -f https://git.io/kube-dashboard 109 | when: check_dashboard|failed 110 | environment: 111 | KUBECONFIG: /etc/kubernetes/admin.conf 112 | become: yes 113 | run_once: true 114 | delegate_to: "{{ groups.masters | first }}" 115 | 116 | - name: scale kubernetes dashboard 117 | command: kubectl scale --replicas=3 -n kube-system 
deployment/kubernetes-dashboard 118 | environment: 119 | KUBECONFIG: /etc/kubernetes/admin.conf 120 | become: yes 121 | run_once: true 122 | delegate_to: "{{ groups.masters | first }}" 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | - name: check kubernetes monitoring 131 | shell: "kubectl get pods --all-namespaces=true | grep heapster" 132 | register: check_monitoring 133 | ignore_errors: true 134 | become: yes 135 | run_once: true 136 | delegate_to: "{{ groups.masters | first }}" 137 | 138 | - name: install kubernetes monitoring 139 | command: /usr/bin/kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml 140 | when: check_monitoring|failed 141 | environment: 142 | KUBECONFIG: /etc/kubernetes/admin.conf 143 | become: yes 144 | run_once: true 145 | delegate_to: "{{ groups.masters | first }}" 146 | 147 | - name: scale kubernetes monitoring 148 | command: kubectl scale --replicas=3 -n kube-system deployment/heapster 149 | environment: 150 | KUBECONFIG: /etc/kubernetes/admin.conf 151 | become: yes 152 | run_once: true 153 | delegate_to: "{{ groups.masters | first }}" 154 | 155 | 156 | - name: check kubernetes influxdb 157 | shell: "kubectl get pods --all-namespaces=true | grep monitoring-influxdb" 158 | register: check_influxdb 159 | ignore_errors: true 160 | become: yes 161 | run_once: true 162 | delegate_to: "{{ groups.masters | first }}" 163 | 164 | - name: install kubernetes influxdb 165 | command: /usr/bin/kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml 166 | when: check_influxdb|failed 167 | environment: 168 | KUBECONFIG: /etc/kubernetes/admin.conf 169 | become: yes 170 | run_once: true 171 | delegate_to: "{{ groups.masters | first }}" 172 | 173 | - name: scale kubernetes influxdb 174 | command: kubectl scale --replicas=3 -n kube-system deployment/monitoring-influxdb 175 | environment: 176 | KUBECONFIG: /etc/kubernetes/admin.conf 177 | become: yes 178 | run_once: true 179 | delegate_to: "{{ groups.masters | first }}" 180 | 181 | 182 | - name: check kubernetes grafana 183 | shell: "kubectl get pods --all-namespaces=true | grep monitoring-grafana" 184 | register: check_grafana 185 | ignore_errors: true 186 | become: yes 187 | run_once: true 188 | delegate_to: "{{ groups.masters | first }}" 189 | 190 | - name: install kubernetes grafana 191 | command: /usr/bin/kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/grafana.yaml 192 | when: check_grafana|failed 193 | environment: 194 | KUBECONFIG: /etc/kubernetes/admin.conf 195 | become: yes 196 | run_once: true 197 | delegate_to: "{{ groups.masters | first }}" 198 | 199 | - name: scale kubernetes grafana 200 | command: kubectl scale --replicas=3 -n kube-system deployment/monitoring-grafana 201 | environment: 202 | KUBECONFIG: /etc/kubernetes/admin.conf 203 | become: yes 204 | run_once: true 205 | delegate_to: "{{ groups.masters | first }}" 206 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | tags: 4 | - development 5 | - rancher 6 | - install 7 | 8 | - include: config.yml 9 | tags: 10 | - development 11 | - rancher 12 | - config 13 | 14 | -------------------------------------------------------------------------------- 
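Editor's sketch: main.yml only wires install.yml and config.yml together under
tags, so the role can be applied piecemeal. An illustrative invocation, assuming
the role is applied by the configure_masters.yml playbook at the repo root:

    # run only the install steps of the kubernetes role
    ansible-playbook configure_masters.yml --tags install
    # re-apply configuration without reinstalling packages
    ansible-playbook configure_masters.yml --tags config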
/provision/roles/kubernetes/templates/kube-haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local0 notice 3 | maxconn 2000 4 | tune.ssl.default-dh-param 2048 5 | 6 | defaults 7 | log global 8 | mode tcp 9 | option tcplog 10 | timeout connect 5000ms 11 | timeout client 50000ms 12 | timeout server 50000ms 13 | 14 | listen kube-apiserver 15 | bind *:8443 16 | mode tcp 17 | server toc-master01 10.202.128.109:6443 18 | server toc-master02 10.202.128.110:6443 19 | server toc-master03 10.202.128.111:6443 20 | balance leastconn 21 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/templates/kube-init.yml: -------------------------------------------------------------------------------- 1 | apiVersion: kubeadm.k8s.io/v1alpha1 2 | kind: MasterConfiguration 3 | kubernetesVersion: v1.7.5 4 | token: 0fa86a.87b5dc45a3f42032 5 | api: 6 | advertiseAddress: 10.202.128.107 7 | bindPort: 6443 8 | apiServerCertSANs: 9 | - toc-master01 10 | - toc-master02 11 | - toc-master03 12 | - 10.202.128.107 13 | - 10.202.128.109 14 | - 10.202.128.110 15 | - 10.202.128.111 16 | networking: 17 | podSubnet: 192.168.0.0/16 18 | serviceSubnet: 10.96.0.0/12 19 | etcd: 20 | endpoints: 21 | - http://10.202.128.109:2379 22 | - http://10.202.128.110:2379 23 | - http://10.202.128.111:2379 24 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/templates/ssl/apiserver-kubelet-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | keyUsage = digitalSignature, keyEncipherment 7 | extendedKeyUsage = clientAuth 8 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/templates/ssl/apiserver.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | keyUsage = digitalSignature, keyEncipherment 7 | extendedKeyUsage = serverAuth 8 | subjectAltName = @alt_names 9 | [alt_names] 10 | DNS.1 = kubernetes 11 | DNS.2 = kubernetes.default 12 | DNS.3 = kubernetes.default.svc 13 | DNS.4 = kubernetes.default.svc.cluster.local 14 | DNS.5 = toc-master01 15 | IP.1 = 10.202.128.107 16 | IP.2 = 10.202.128.109 17 | IP.3 = 10.202.128.110 18 | IP.4 = 10.202.128.111 19 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/templates/ssl/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "8760h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth"], 13 | "expiry": "8760h" 14 | } 15 | } } 16 | } 17 | -------------------------------------------------------------------------------- /provision/roles/kubernetes/templates/ssl/front-proxy-ca.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | basicConstraints = CA:TRUE 7 | keyUsage = digitalSignature, keyEncipherment 8 | 
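# Editor's note (sketch, not in the original file): basicConstraints = CA:TRUE
# marks this as a signing CA; certs.yml copies it onto the certificate via
# -extensions v3_req -extfile. Whether it took effect can be checked with:
#
#   openssl x509 -in front-proxy-ca.crt -noout -text | grep -A1 'Basic Constraints'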
-------------------------------------------------------------------------------- /provision/roles/kubernetes/templates/ssl/front-proxy-client.cnf: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_req 4 | [req_distinguished_name] 5 | [ v3_req ] 6 | keyUsage = digitalSignature, keyEncipherment 7 | extendedKeyUsage = clientAuth 8 | -------------------------------------------------------------------------------- /provision/roles/ldap/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | ldap_name: ldap-server 4 | ldap_image: osixia/openldap 5 | ldap_version: 1.1.7 6 | 7 | # network 8 | ldap_port: 389 9 | 10 | ldap_data_dir: "/var/lib/ldap" 11 | -------------------------------------------------------------------------------- /provision/roles/ldap/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/ldap/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/ldap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: start ldap server container 3 | docker_container: 4 | name: "{{ ldap_name }}" 5 | image: "{{ ldap_image }}:{{ ldap_version }}" 6 | ports: 7 | - "389:389" 8 | - "636:389" 9 | volumes: 10 | - "{{ ldap_data_dir }}:/var/lib/ldap" 11 | env: 12 | LDAP_ORGANISATION: "thoughtworks" 13 | LDAP_DOMAIN: "thoughtworks.ga" 14 | LDAP_ADMIN_PASSWORD: "12345678" 15 | restart_policy: always 16 | become: yes 17 | -------------------------------------------------------------------------------- /provision/roles/mysql/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | mysql_name: mysql-server 4 | mysql_image: mysql 5 | mysql_version: 5.7 6 | 7 | # network 8 | mysql_port: 3306 9 | 10 | # config 11 | mysql_user: cattle 12 | mysql_pass: fsBLqWQFk7yJ 13 | mysql_database: cattle 14 | mysql_rootpass: "P@ss123456" 15 | 16 | # storage 17 | mysql_data_dir: "/var/lib/mysql" 18 | -------------------------------------------------------------------------------- /provision/roles/mysql/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/mysql/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 
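# Editor's note (sketch): docker_container expects env: to be a YAML mapping
# (KEY: "value"), as in the ldap role above; the mysql tasks below use the same
# form. Once the container is up, the credentials from defaults/main.yml can be
# smoke-tested from the docker host (hypothetical check, mysql client assumed):
#
#   mysql -h 127.0.0.1 -P 3306 -u cattle -p cattle    # prompts for mysql_pass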
5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/mysql/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: start mysql server container 3 | docker_container: 4 | name: "{{ mysql_name }}" 5 | image: "{{ mysql_image }}:{{ mysql_version }}" 6 | ports: 7 | - "{{ mysql_port }}:3306" 8 | volumes: 9 | - "{{ mysql_data_dir }}:/var/lib/mysql" 10 | env: 11 | MYSQL_USER: "{{ mysql_user }}" 12 | MYSQL_PASSWORD: "{{ mysql_pass }}" 13 | MYSQL_DATABASE: "{{ mysql_database }}" 14 | MYSQL_ROOT_PASSWORD: "{{ mysql_rootpass }}" 15 | restart_policy: unless-stopped 16 | become: yes 17 | -------------------------------------------------------------------------------- /provision/roles/nexus/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | nexus_name: nexus-server 4 | nexus_image: baselibrary/nexus 5 | nexus_version: 3.6 6 | 7 | # network 8 | nexus_port: 8081 9 | nexus_domain: repo.thoughtworks.ga 10 | # data 11 | nexus_data_dir: "/var/lib/nexus" 12 | 13 | # security 14 | nexus_admin_user: admin 15 | nexus_admin_pass: admin123 16 | 17 | nexus_privileges: 18 | - name: all-repos-read # used as key to update a privilege 19 | description: 'Read & Browse access to all repos' 20 | type: application 21 | repository: '*' 22 | actions: 23 | - read 24 | - browse 25 | 26 | nexus_roles: 27 | - id: Developpers # can map to a LDAP group id, also used as a key to update a role 28 | name: nx-developer 29 | description: Developer Role 30 | privileges: 31 | - nx-search-read 32 | - all-repos-read 33 | roles: [] 34 | 35 | nexus_local_users: [] 36 | # example user item : 37 | # - username: jenkins # used as key to update 38 | # first_name: Jenkins 39 | # last_name: CI 40 | # email: support@company.com 41 | # password: "s3cr3t" 42 | # roles: 43 | # - developers 44 | 45 | # nexus dns 46 | nexus_dns: dnspod 47 | 48 | # branding 49 | nexus_branding_header: "" 50 | nexus_branding_footer: "Last provisioned {{ ansible_date_time.iso8601 }}" 51 | 52 | # repos 53 | nexus_repositories: 54 | - name: maven-releases 55 | type: maven2 56 | recipe: hosted 57 | write_policy: allow_once 58 | version_policy: release 59 | - name: maven-snapshot 60 | type: maven2 61 | recipe: hosted 62 | write_policy: allow 63 | version_policy: snapshot 64 | - name: maven-central 65 | type: maven2 66 | recipe: proxy 67 | remote_url: 'https://jcenter.bintray.com/' 68 | - name: maven-clojars 69 | type: maven2 70 | recipe: proxy 71 | remote_url: 'http://clojars.org/repo/' 72 | - name: maven-public 73 | type: maven2 74 | recipe: group 75 | members: 76 | - maven-releases 77 | - maven-snapshot 78 | - maven-central 79 | - maven-clojars 80 | - name: docker-releases 81 | type: docker 82 | recipe: hosted 83 | http_port: 5001 84 | write_policy: allow 85 | version_policy: release 86 | - name: docker-snapshot 87 | type: docker 88 | recipe: hosted 89 | http_port: 5002 90 | write_policy: allow 91 | version_policy: snapshot 92 | - name: docker-central 93 | type: docker 94 | recipe: proxy 95 | remote_url: 'https://registry-1.docker.io/' 96 | index_type: registry 97 | - name: docker-public 98 | type: docker 99 | recipe: group 100 | http_port: 5000 101 | members: 102 | - docker-releases 103 | - docker-snapshot 104 | - docker-central 105 | - 
name: npm-releases 106 | type: npm 107 | recipe: hosted 108 | write_policy: allow_once 109 | version_policy: release 110 | - name: npm-snapshot 111 | type: npm 112 | recipe: hosted 113 | write_policy: allow 114 | version_policy: snapshot 115 | - name: npm-central 116 | type: npm 117 | recipe: proxy 118 | remote_url: 'https://registry.npm.taobao.org/' 119 | - name: npm-public 120 | type: npm 121 | recipe: group 122 | members: 123 | - npm-releases 124 | - npm-snapshot 125 | - npm-central 126 | - name: bower-releases 127 | type: bower 128 | recipe: hosted 129 | write_policy: allow_once 130 | version_policy: release 131 | - name: bower-snapshot 132 | type: bower 133 | recipe: hosted 134 | write_policy: allow 135 | version_policy: snapshot 136 | - name: bower-central 137 | type: bower 138 | recipe: proxy 139 | remote_url: 'http://bower.herokuapp.com/' 140 | - name: bower-public 141 | type: bower 142 | recipe: group 143 | members: 144 | - bower-releases 145 | - bower-snapshot 146 | - bower-central 147 | - name: nuget-releases 148 | type: nuget 149 | recipe: hosted 150 | write_policy: allow_once 151 | version_policy: release 152 | - name: nuget-snapshot 153 | type: nuget 154 | recipe: hosted 155 | write_policy: allow 156 | version_policy: snapshot 157 | - name: nuget-central 158 | type: nuget 159 | recipe: proxy 160 | remote_url: 'https://www.nuget.org/api/v2/' 161 | - name: nuget-public 162 | type: nuget 163 | recipe: group 164 | members: 165 | - nuget-releases 166 | - nuget-snapshot 167 | - nuget-central 168 | - name: pypi-releases 169 | type: pypi 170 | recipe: hosted 171 | write_policy: allow_once 172 | version_policy: release 173 | - name: pypi-snapshot 174 | type: pypi 175 | recipe: hosted 176 | write_policy: allow 177 | version_policy: snapshot 178 | - name: pypi-central 179 | type: pypi 180 | recipe: proxy 181 | remote_url: 'http://mirrors.aliyun.com/pypi/' 182 | - name: pypi-public 183 | type: pypi 184 | recipe: group 185 | members: 186 | - pypi-releases 187 | - pypi-snapshot 188 | - pypi-central 189 | - name: rubygems-releases 190 | type: rubygems 191 | recipe: hosted 192 | write_policy: allow_once 193 | version_policy: release 194 | - name: rubygems-snapshot 195 | type: rubygems 196 | recipe: hosted 197 | write_policy: allow 198 | version_policy: snapshot 199 | - name: rubygems-central 200 | type: rubygems 201 | recipe: proxy 202 | remote_url: 'http://mirrors.aliyun.com/rubygems/' 203 | - name: rubygems-public 204 | type: rubygems 205 | recipe: group 206 | members: 207 | - rubygems-releases 208 | - rubygems-snapshot 209 | - rubygems-central 210 | 211 | 212 | nexus_repository_defaults: 213 | blob_store: default # Note : cannot be updated once the repo has been created 214 | strict_content_validation: true 215 | version_policy: release # release, snapshot or mixed 216 | write_policy: allow_once # allow_once or allow 217 | layout_policy: strict # strict or permissive 218 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/admin.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | 3 | parsed_args = new JsonSlurper().parseText(args) 4 | 5 | security.securitySystem.changePassword('admin', parsed_args.new_password) 6 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/initialize.groovy: -------------------------------------------------------------------------------- 1 | import 
groovy.json.JsonSlurper 2 | 3 | // cleanup 4 | if (repository.getRepositoryManager().get('maven-public') != null) { 5 | repository.getRepositoryManager().delete('maven-public') 6 | } 7 | if (repository.getRepositoryManager().get('maven-releases') != null) { 8 | repository.getRepositoryManager().delete('maven-releases') 9 | } 10 | if (repository.getRepositoryManager().get('maven-snapshots') != null) { 11 | repository.getRepositoryManager().delete('maven-snapshots') 12 | } 13 | if (repository.getRepositoryManager().get('maven-central') != null) { 14 | repository.getRepositoryManager().delete('maven-central') 15 | } 16 | if (repository.getRepositoryManager().get('nuget-hosted') != null) { 17 | repository.getRepositoryManager().delete('nuget-hosted') 18 | } 19 | if (repository.getRepositoryManager().get('nuget.org-proxy') != null) { 20 | repository.getRepositoryManager().delete('nuget.org-proxy') 21 | } 22 | if (repository.getRepositoryManager().get('nuget-group') != null) { 23 | repository.getRepositoryManager().delete('nuget-group') 24 | } 25 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/privileges.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | import org.sonatype.nexus.security.privilege.NoSuchPrivilegeException 3 | import org.sonatype.nexus.security.user.UserManager 4 | import org.sonatype.nexus.security.privilege.Privilege 5 | 6 | parsed_args = new JsonSlurper().parseText(args) 7 | 8 | authManager = security.getSecuritySystem().getAuthorizationManager(UserManager.DEFAULT_SOURCE) 9 | 10 | def privilege 11 | boolean update = true 12 | 13 | try { 14 | privilege = authManager.getPrivilege(parsed_args.name) 15 | } catch (NoSuchPrivilegeException ignored) { 16 | // could not find any existing privilege 17 | update = false 18 | privilege = new Privilege( 19 | 'id': parsed_args.name, 20 | 'name': parsed_args.name 21 | ) 22 | } 23 | 24 | privilege.setDescription(parsed_args.description) 25 | privilege.setType(parsed_args.type) 26 | privilege.setProperties([ 27 | 'format': parsed_args.format, 28 | 'repository': parsed_args.repository, 29 | 'actions': parsed_args.actions.join(',') 30 | ] as Map) 31 | 32 | if (update) { 33 | authManager.updatePrivilege(privilege) 34 | } else { 35 | authManager.addPrivilege(privilege) 36 | } 37 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/repositories.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | import org.sonatype.nexus.repository.config.Configuration 3 | import org.sonatype.nexus.repository.types.GroupType 4 | import org.sonatype.nexus.repository.types.HostedType 5 | import org.sonatype.nexus.repository.types.ProxyType 6 | 7 | parsed_args = new JsonSlurper().parseText(args) 8 | 9 | def existingRepository = repository.getRepositoryManager().get(parsed_args.name) 10 | 11 | configuration = new Configuration() 12 | 13 | if (parsed_args.recipe == ProxyType.NAME) { 14 | configuration.repositoryName = parsed_args.name 15 | configuration.online = true 16 | configuration.recipeName = parsed_args.type + "-proxy" 17 | configuration.attributes = [ 18 | proxy : [ 19 | remoteUrl : parsed_args.remote_url, 20 | contentMaxAge : 1440, 21 | metadataMaxAge: 1440 22 | ], 23 | httpclient : [ 24 | connection : [ 25 | blocked : false, 26 | autoBlock: true, 27 | ], 28 | authentication: 
parsed_args.remote_username == null ? null : [ 29 | type : 'username', 30 | username: parsed_args.remote_username, 31 | password: parsed_args.remote_password 32 | ] 33 | ], 34 | storage : [ 35 | blobStoreName : parsed_args.blob_store, 36 | strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) 37 | ], 38 | negativeCache: [ 39 | enabled : true, 40 | timeToLive: 1440 41 | ] 42 | ] 43 | } else if (parsed_args.recipe == HostedType.NAME) { 44 | configuration.repositoryName = parsed_args.name 45 | configuration.online = true 46 | configuration.recipeName = parsed_args.type + "-hosted" 47 | configuration.attributes = [ 48 | storage: [ 49 | blobStoreName : parsed_args.blob_store, 50 | writePolicy : parsed_args.write_policy.toUpperCase(), 51 | strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) 52 | ] 53 | ] 54 | } else if (parsed_args.recipe == GroupType.NAME) { 55 | configuration.repositoryName = parsed_args.name 56 | configuration.online = true 57 | configuration.recipeName = parsed_args.type + "-group" 58 | configuration.attributes = [ 59 | group : [ 60 | memberNames: parsed_args.members 61 | ], 62 | storage: [ 63 | blobStoreName : parsed_args.blob_store, 64 | strictContentTypeValidation: Boolean.valueOf(parsed_args.strict_content_validation) 65 | ] 66 | ] 67 | } 68 | 69 | if (parsed_args.type == "maven2") { 70 | configuration.attributes.maven = [ 71 | layoutPolicy : parsed_args.layout_policy.toUpperCase(), 72 | versionPolicy: parsed_args.version_policy.toUpperCase() 73 | ] 74 | } else if (parsed_args.type == "docker") { 75 | configuration.attributes.docker = [ 76 | v1Enabled: true 77 | ] 78 | if (parsed_args.http_port) { 79 | configuration.attributes.docker.httpPort = Integer.valueOf(parsed_args.http_port) 80 | } 81 | if (parsed_args.https_port) { 82 | configuration.attributes.docker.httpsPort = Integer.valueOf(parsed_args.https_port) 83 | } 84 | if (parsed_args.recipe == ProxyType.NAME) { 85 | configuration.attributes.dockerProxy = [ 86 | indexUrl : parsed_args.index_url == null ? '' : parsed_args.index_url, 87 | indexType: parsed_args.index_type.toUpperCase() 88 | ] 89 | configuration.attributes.httpclient.connection.useTrustStore = true 90 | } 91 | } 92 | 93 | if (existingRepository != null) { 94 | existingRepository.stop() 95 | configuration.attributes['storage']['blobStoreName'] = existingRepository.configuration.attributes['storage']['blobStoreName'] 96 | existingRepository.update(configuration) 97 | existingRepository.start() 98 | } else { 99 | repository.getRepositoryManager().create(configuration) 100 | } 101 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/roles.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | import org.sonatype.nexus.security.user.UserManager 3 | import org.sonatype.nexus.security.role.NoSuchRoleException 4 | 5 | parsed_args = new JsonSlurper().parseText(args) 6 | 7 | authManager = security.getSecuritySystem().getAuthorizationManager(UserManager.DEFAULT_SOURCE) 8 | 9 | def existingRole = null 10 | 11 | try { 12 | existingRole = authManager.getRole(parsed_args.id) 13 | } catch (NoSuchRoleException ignored) { 14 | // could not find role 15 | } 16 | 17 | privileges = (parsed_args.privileges == null ? new HashSet() : parsed_args.privileges.toSet()) 18 | roles = (parsed_args.roles == null ? 
new HashSet() : parsed_args.roles.toSet()) 19 | 20 | if (existingRole != null) { 21 | existingRole.setName(parsed_args.name) 22 | existingRole.setDescription(parsed_args.description) 23 | existingRole.setPrivileges(privileges) 24 | existingRole.setRoles(roles) 25 | authManager.updateRole(existingRole) 26 | } else { 27 | security.addRole(parsed_args.id, parsed_args.name, parsed_args.description, privileges.toList(), roles.toList()) 28 | } 29 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/setup_anonymous.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | 3 | parsed_args = new JsonSlurper().parseText(args) 4 | 5 | security.setAnonymousAccess(Boolean.valueOf(parsed_args.anonymous_access)) 6 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/setup_base_url.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | 3 | parsed_args = new JsonSlurper().parseText(args) 4 | 5 | core.baseUrl(parsed_args.base_url) 6 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/setup_capability.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | import org.sonatype.nexus.capability.CapabilityReference 3 | import org.sonatype.nexus.capability.CapabilityType 4 | import org.sonatype.nexus.internal.capability.DefaultCapabilityReference 5 | import org.sonatype.nexus.internal.capability.DefaultCapabilityRegistry 6 | 7 | parsed_args = new JsonSlurper().parseText(args) 8 | 9 | parsed_args.capability_properties['headerEnabled'] = parsed_args.capability_properties['headerEnabled'].toString() 10 | parsed_args.capability_properties['footerEnabled'] = parsed_args.capability_properties['footerEnabled'].toString() 11 | 12 | def capabilityRegistry = container.lookup(DefaultCapabilityRegistry.class.getName()) 13 | def capabilityType = CapabilityType.capabilityType(parsed_args.capability_typeId) 14 | 15 | DefaultCapabilityReference existing = capabilityRegistry.all.find { CapabilityReference capabilityReference -> 16 | capabilityReference.context().descriptor().type() == capabilityType 17 | } 18 | 19 | if (existing) { 20 | log.info(parsed_args.typeId + ' capability updated to: {}', 21 | capabilityRegistry.update(existing.id(), existing.active, existing.notes(), parsed_args.capability_properties).toString() 22 | ) 23 | } 24 | else { 25 | log.info(parsed_args.typeId + ' capability created as: {}', capabilityRegistry. 
26 | add(capabilityType, true, 'configured through api', parsed_args.capability_properties).toString() 27 | ) 28 | } 29 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/setup_ldap.groovy: -------------------------------------------------------------------------------- 1 | import org.sonatype.nexus.ldap.persist.LdapConfigurationManager 2 | import org.sonatype.nexus.ldap.persist.entity.LdapConfiguration 3 | import org.sonatype.nexus.ldap.persist.entity.Connection 4 | import org.sonatype.nexus.ldap.persist.entity.Mapping 5 | import groovy.json.JsonSlurper 6 | 7 | parsed_args = new JsonSlurper().parseText(args) 8 | 9 | 10 | def ldapConfigMgr = container.lookup(LdapConfigurationManager.class.getName()); 11 | 12 | def ldapConfig = new LdapConfiguration() 13 | boolean update = false; 14 | 15 | // Look for existing config to update 16 | ldapConfigMgr.listLdapServerConfigurations().each { 17 | if (it.name == parsed_args.name) { 18 | ldapConfig = it 19 | update = true 20 | } 21 | } 22 | 23 | ldapConfig.setName(parsed_args.name) 24 | 25 | // Connection 26 | connection = new Connection() 27 | connection.setHost(new Connection.Host(Connection.Protocol.valueOf(parsed_args.protocol), parsed_args.hostname, Integer.valueOf(parsed_args.port))) 28 | connection.setAuthScheme("none") 29 | connection.setSearchBase(parsed_args.search_base) 30 | connection.setConnectionTimeout(30) 31 | connection.setConnectionRetryDelay(300) 32 | connection.setMaxIncidentsCount(3) 33 | ldapConfig.setConnection(connection) 34 | 35 | 36 | // Mapping 37 | mapping = new Mapping() 38 | mapping.setUserBaseDn(parsed_args.user_base_dn) 39 | mapping.setUserObjectClass(parsed_args.user_object_class) 40 | mapping.setUserIdAttribute(parsed_args.user_id_attribute) 41 | mapping.setUserRealNameAttribute(parsed_args.user_real_name_attribute) 42 | mapping.setEmailAddressAttribute(parsed_args.user_email_attribute) 43 | 44 | mapping.setLdapGroupsAsRoles(true) 45 | mapping.setGroupBaseDn(parsed_args.group_base_dn) 46 | mapping.setGroupObjectClass(parsed_args.group_object_class) 47 | mapping.setGroupIdAttribute(parsed_args.group_id_attribute) 48 | mapping.setGroupMemberAttribute(parsed_args.group_member_attribute) 49 | mapping.setGroupMemberFormat(parsed_args.group_member_format) 50 | 51 | ldapConfig.setMapping(mapping) 52 | 53 | 54 | if (update) { 55 | ldapConfigMgr.updateLdapServerConfiguration(ldapConfig) 56 | } else { 57 | ldapConfigMgr.addLdapServerConfiguration(ldapConfig) 58 | } 59 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/groovy/users.groovy: -------------------------------------------------------------------------------- 1 | import groovy.json.JsonSlurper 2 | import org.sonatype.nexus.security.user.UserNotFoundException 3 | 4 | parsed_args = new JsonSlurper().parseText(args) 5 | 6 | try { 7 | // update an existing user 8 | user = security.securitySystem.getUser(parsed_args.username) 9 | user.setFirstName(parsed_args.first_name) 10 | user.setLastName(parsed_args.last_name) 11 | user.setEmailAddress(parsed_args.email) 12 | security.securitySystem.updateUser(user) 13 | security.setUserRoles(parsed_args.username, parsed_args.roles) 14 | security.securitySystem.changePassword(parsed_args.username, parsed_args.password) 15 | } catch(UserNotFoundException ignored) { 16 | // create the new user 17 | security.addUser(parsed_args.username, parsed_args.first_name, parsed_args.last_name, parsed_args.email, true, 
parsed_args.password, parsed_args.roles) 18 | } 19 | -------------------------------------------------------------------------------- /provision/roles/nexus/files/provision.groovy: -------------------------------------------------------------------------------- 1 | import org.sonatype.nexus.blobstore.api.BlobStoreManager 2 | import org.sonatype.nexus.repository.maven.LayoutPolicy 3 | import org.sonatype.nexus.repository.maven.VersionPolicy 4 | import org.sonatype.nexus.repository.storage.WritePolicy 5 | 6 | // cleanup 7 | if (repository.getRepositoryManager().get('maven-public') != null) { 8 | repository.getRepositoryManager().delete('maven-public') 9 | } 10 | if (repository.getRepositoryManager().get('maven-releases') != null) { 11 | repository.getRepositoryManager().delete('maven-releases') 12 | } 13 | if (repository.getRepositoryManager().get('maven-snapshots') != null) { 14 | repository.getRepositoryManager().delete('maven-snapshots') 15 | } 16 | if (repository.getRepositoryManager().get('maven-central') != null) { 17 | repository.getRepositoryManager().delete('maven-central') 18 | } 19 | if (repository.getRepositoryManager().get('nuget-hosted') != null) { 20 | repository.getRepositoryManager().delete('nuget-hosted') 21 | } 22 | if (repository.getRepositoryManager().get('nuget.org-proxy') != null) { 23 | repository.getRepositoryManager().delete('nuget.org-proxy') 24 | } 25 | if (repository.getRepositoryManager().get('nuget-group') != null) { 26 | repository.getRepositoryManager().delete('nuget-group') 27 | } 28 | 29 | // maven 30 | repository.createMavenHosted('maven-releases', BlobStoreManager.DEFAULT_BLOBSTORE_NAME, true, VersionPolicy.RELEASE, WritePolicy.ALLOW_ONCE, LayoutPolicy.STRICT) 31 | repository.createMavenHosted('maven-snapshot', BlobStoreManager.DEFAULT_BLOBSTORE_NAME, true, VersionPolicy.SNAPSHOT, WritePolicy.ALLOW, LayoutPolicy.STRICT) 32 | repository.createMavenProxy('maven-central', 'https://jcenter.bintray.com/', BlobStoreManager.DEFAULT_BLOBSTORE_NAME, true, VersionPolicy.RELEASE, LayoutPolicy.STRICT) 33 | repository.createMavenGroup('maven-public', ['maven-releases', 'maven-snapshot', 'maven-central']) 34 | 35 | // docker 36 | repository.createDockerHosted('docker-releases', 5001, 0) 37 | repository.createDockerHosted('docker-snapshot', 5002, 0) 38 | repository.createDockerProxy('docker-central', 'https://registry-1.docker.io/', 'REGISTRY', '', 0, 0) 39 | repository.createDockerGroup('docker-public', 5000, 0, ['docker-releases','docker-snapshot','docker-central']) 40 | 41 | // npm 42 | repository.createNpmHosted('npm-releases') 43 | repository.createNpmHosted('npm-snapshot') 44 | repository.createNpmProxy('npm-central', 'https://registry.npm.taobao.org/') 45 | repository.createNpmGroup('npm-public', ['npm-releases', 'npm-snapshot', 'npm-central']) 46 | 47 | // bower 48 | repository.createBowerHosted('bower-releases') 49 | repository.createBowerHosted('bower-snapshot') 50 | repository.createBowerProxy('bower-central', 'http://bower.herokuapp.com') 51 | repository.createBowerGroup('bower-public', ['bower-releases','bower-snapshot','bower-central']) 52 | 53 | // nuget 54 | repository.createNugetHosted('nuget-releases') 55 | repository.createNugetHosted('nuget-snapshot') 56 | repository.createNugetProxy('nuget-central', 'https://www.nuget.org/api/v2/') 57 | repository.createNugetGroup('nuget-public', ['nuget-releases','nuget-snapshot','nuget-central']) 58 | 59 | // pypi 60 | repository.createRepository(repository.createHosted('pypi-releases', 'pypi-hosted')) 61 | 
repository.createRepository(repository.createHosted('pypi-snapshot', 'pypi-hosted')) 62 | repository.createRepository(repository.createProxy('pypi-central', 'pypi-proxy', 'http://mirrors.aliyun.com/pypi/')) 63 | repository.createRepository(repository.createGroup('pypi-public', 'pypi-group', BlobStoreManager.DEFAULT_BLOBSTORE_NAME, 'pypi-releases', 'pypi-snapshot', 'pypi-central')) 64 | 65 | // rubygems 66 | repository.createRepository(repository.createHosted('rubygems-releases', 'rubygems-hosted')) 67 | repository.createRepository(repository.createHosted('rubygems-snapshot', 'rubygems-hosted')) 68 | repository.createRepository(repository.createProxy('rubygems-central', 'rubygems-proxy', 'http://mirrors.aliyun.com/rubygems/')) 69 | repository.createRepository(repository.createGroup('rubygems-public', 'rubygems-group', BlobStoreManager.DEFAULT_BLOBSTORE_NAME, 'rubygems-releases', 'rubygems-snapshot', 'rubygems-central')) 70 | -------------------------------------------------------------------------------- /provision/roles/nexus/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/nexus/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: config/privileges.yml 3 | 4 | - include: config/roles.yml 5 | 6 | - include: config/users.yml 7 | 8 | - include: config/repositories.yml 9 | 10 | 11 | #- name: execute nexus server groovy script 12 | # uri: 13 | # url: "http://localhost:8081/service/siesta/rest/v1/script/provision/run" 14 | # method: POST 15 | # user: "{{ nexus_admin_user }}" 16 | # password: "{{ nexus_admin_pass }}" 17 | # force_basic_auth: yes 18 | # headers: 19 | # Content-Type: "text/plain" 20 | # status_code: 200 21 | # when: nexus_data_dir_contents.stdout == "" 22 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config/api.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: call nexus groovy script {{ script_name }} 3 | uri: 4 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/{{ script_name }}/run" 5 | method: POST 6 | user: "{{ nexus_admin_user }}" 7 | password: "{{ nexus_admin_pass }}" 8 | force_basic_auth: yes 9 | headers: 10 | Content-Type: "text/plain" 11 | body: "{{ args | to_json }}" 12 | status_code: 200,204 13 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config/initialize.yml: -------------------------------------------------------------------------------- 1 | - name: initialize nexus environment 2 | uri: 3 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/initialize/run" 4 | method: POST 5 | user: "{{ nexus_admin_user }}" 6 | password: "{{ nexus_admin_pass }}" 7 | force_basic_auth: yes 8 | headers: 9 | Content-Type: "text/plain" 10 | status_code: 200,204 11 | when: 
nexus_data_dir_contents.stdout == "" 12 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config/privileges.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: config nexus privileges 3 | uri: 4 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/privileges/run" 5 | method: POST 6 | user: "{{ nexus_admin_user }}" 7 | password: "{{ nexus_admin_pass }}" 8 | force_basic_auth: yes 9 | headers: 10 | Content-Type: "text/plain" 11 | body: "{{ item | to_json }}" 12 | status_code: 200,204 13 | with_items: "{{ nexus_privileges }}" 14 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config/repositories.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: config nexus repositories 3 | uri: 4 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/repositories/run" 5 | method: POST 6 | user: "{{ nexus_admin_user }}" 7 | password: "{{ nexus_admin_pass }}" 8 | force_basic_auth: yes 9 | headers: 10 | Content-Type: "text/plain" 11 | body: "{{ ( nexus_repository_defaults|combine(item) ) | to_json }}" 12 | status_code: 200,204 13 | with_items: "{{ nexus_repositories }}" 14 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config/roles.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: config nexus roles 3 | uri: 4 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/roles/run" 5 | method: POST 6 | user: "{{ nexus_admin_user }}" 7 | password: "{{ nexus_admin_pass }}" 8 | force_basic_auth: yes 9 | headers: 10 | Content-Type: "text/plain" 11 | body: "{{ item | to_json }}" 12 | status_code: 200,204 13 | with_items: "{{ nexus_roles }}" 14 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/config/users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: config nexus users 3 | uri: 4 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/users/run" 5 | method: POST 6 | user: "{{ nexus_admin_user }}" 7 | password: "{{ nexus_admin_pass }}" 8 | force_basic_auth: yes 9 | headers: 10 | Content-Type: "text/plain" 11 | body: "{{ item | to_json }}" 12 | status_code: 200,204 13 | with_items: "{{ nexus_local_users }}" 14 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/domain.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update nexus domain to dnspod 3 | dnspod: 4 | sub_domain: "repo" 5 | base_domain: "thoughtworks.ga" 6 | record_type: A 7 | value: "{{ private_ipv4 }}" 8 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create nexus data directory 3 | file: 4 | path: "{{ nexus_data_dir }}" 5 | state: "directory" 6 | owner: 200 7 | group: root 8 | become: yes 9 | 10 | - name: check nexus data directory 11 | command: "ls {{ nexus_data_dir }}" 12 | register: nexus_data_dir_contents 13 | become: yes 14 | 15 | - name: start nexus server container 16 | docker_container: 17 | name: "{{ nexus_name }}" 18 | image: 
"{{ nexus_image }}:{{ nexus_version }}" 19 | ports: 20 | - "{{ nexus_port }}:8081" 21 | - 5000:5000 22 | - 5001:5001 23 | - 5002:5002 24 | volumes: 25 | - "{{ nexus_data_dir }}:/nexus-data" 26 | restart_policy: always 27 | become: yes 28 | 29 | - name: wait for nexus server to be ready... 30 | wait_for: 31 | port: "{{ nexus_port }}" 32 | delay: 20 33 | 34 | - name: remove nexus server groovy script 35 | uri: 36 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/{{ item }}" 37 | method: DELETE 38 | user: "{{ nexus_admin_user }}" 39 | password: "{{ nexus_admin_pass }}" 40 | force_basic_auth: yes 41 | status_code: 204,404 42 | with_items: 43 | - initialize 44 | - privileges 45 | - roles 46 | - users 47 | - repositories 48 | 49 | - name: publish nexus server groovy script 50 | uri: 51 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script" 52 | method: POST 53 | user: "{{ nexus_admin_user }}" 54 | password: "{{ nexus_admin_pass }}" 55 | force_basic_auth: yes 56 | body: 57 | name: "{{ item }}" 58 | type: "groovy" 59 | content: "{{ lookup('file', 'groovy/' + item + '.groovy') }}" 60 | body_format: json 61 | status_code: 204 62 | with_items: 63 | - initialize 64 | - privileges 65 | - roles 66 | - users 67 | - repositories 68 | 69 | - name: initialize nexus environment 70 | uri: 71 | url: "http://localhost:{{ nexus_port }}/service/siesta/rest/v1/script/initialize/run" 72 | method: POST 73 | user: "{{ nexus_admin_user }}" 74 | password: "{{ nexus_admin_pass }}" 75 | force_basic_auth: yes 76 | headers: 77 | Content-Type: "text/plain" 78 | status_code: 200,204 79 | when: nexus_data_dir_contents.stdout == "" 80 | -------------------------------------------------------------------------------- /provision/roles/nexus/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | 4 | - include: config.yml 5 | -------------------------------------------------------------------------------- /provision/roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | 5 | 6 | # configs 7 | nginx_worker_processes: "{{ ansible_processor_vcpus | default(ansible_processor_count) }}" 8 | nginx_worker_connections: "1024" 9 | 10 | nginx_tcp_nopush: "on" 11 | nginx_tcp_nodelay: "on" 12 | 13 | nginx_log_format: '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"' 14 | 15 | nginx_vhosts: 16 | - listen: "80" 17 | server_name: "localhost" 18 | root: "/usr/share/nginx/html" 19 | index: "index.html index.htm" 20 | 21 | nginx_upstreams: [] 22 | -------------------------------------------------------------------------------- /provision/roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/nginx/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 
5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: clean nginx configuration 3 | file: 4 | path: /etc/nginx/conf 5 | state: absent 6 | become: yes 7 | 8 | - name: ensure nginx directory 9 | file: 10 | path: "{{ item }}" 11 | state: directory 12 | owner: root 13 | group: root 14 | with_items: 15 | - /etc/nginx/ssl 16 | - /etc/nginx/conf 17 | become: yes 18 | 19 | - name: generate nginx configuration 20 | template: 21 | src: nginx.conf.j2 22 | dest: "/etc/nginx/nginx.conf" 23 | become: yes 24 | 25 | - name: generate vhost configuration 26 | template: 27 | src: vhost.conf.j2 28 | dest: "/etc/nginx/conf/{{ item.server_name.split(' ')[0] }}_{{ item.listen }}.conf" 29 | with_items: "{{ nginx_vhosts }}" 30 | become: yes 31 | 32 | - name: start nginx container 33 | docker_container: 34 | name: "nginx" 35 | image: "nginx" 36 | network_mode: host 37 | volumes: 38 | - "/etc/nginx/ssl:/etc/nginx/ssl:ro" 39 | - "/etc/nginx/conf:/etc/nginx/conf:ro" 40 | - "/etc/nginx/nginx.conf:/etc/nginx/nginx.conf:ro" 41 | restart_policy: unless-stopped 42 | become: yes 43 | -------------------------------------------------------------------------------- /provision/roles/nginx/templates/nginx.conf.j2: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes {{ nginx_worker_processes }}; 3 | 4 | error_log /var/log/nginx/error.log warn; 5 | pid /var/run/nginx.pid; 6 | 7 | events { 8 | worker_connections {{ nginx_worker_connections }}; 9 | } 10 | 11 | http { 12 | include /etc/nginx/mime.types; 13 | default_type application/octet-stream; 14 | 15 | log_format main {{ nginx_log_format|indent(23) }}; 16 | access_log /var/log/nginx/access.log main; 17 | 18 | sendfile on; 19 | tcp_nopush {{ nginx_tcp_nopush }}; 20 | tcp_nodelay {{ nginx_tcp_nodelay }}; 21 | 22 | keepalive_timeout 65; 23 | 24 | #gzip on; 25 | 26 | {% for upstream in nginx_upstreams %} 27 | upstream {{ upstream.name }} { 28 | {% if upstream.strategy is defined %} 29 | {{ upstream.strategy }}; 30 | {% endif %} 31 | {% for server in upstream.servers %} 32 | server {{ server }}; 33 | {% endfor %} 34 | {% if upstream.keepalive is defined %} 35 | keepalive {{ upstream.keepalive }}; 36 | {% endif %} 37 | } 38 | {% endfor %} 39 | 40 | include /etc/nginx/conf/*.conf; 41 | } 42 | -------------------------------------------------------------------------------- /provision/roles/nginx/templates/vhost.conf.j2: -------------------------------------------------------------------------------- 1 | server { 2 | listen {{ item.listen | default('80') }} {% if item.ssl is defined and item.ssl %}ssl{% endif %}; 3 | 4 | {% if item.server_name is defined %} 5 | server_name {{ item.server_name }}; 6 | {% endif %} 7 | 8 | {% if item.ssl_certificate is defined %} 9 | ssl_certificate {{ item.ssl_certificate }}; 10 | {% endif %} 11 | {% if item.ssl_certificate_key is defined %} 12 | ssl_certificate_key {{ item.ssl_certificate_key }}; 13 | {% endif %} 14 | 15 | {% if item.root is defined %} 16 | root {{ item.root }}; 17 | {% endif %} 18 | 19 | index {{ item.index | default('index.html index.htm') }}; 20 | 21 | {% if item.error_page is defined %} 22 | error_page {{ item.error_page }}; 23 | {% endif %} 24 | 25 | {% if 
item.access_log is defined %} 26 | access_log {{ item.access_log }}; 27 | {% endif %} 28 | {% if item.error_log is defined %} 29 | error_log {{ item.error_log }} error; 30 | {% endif %} 31 | 32 | {% if item.extra_parameters is defined %} 33 | {{ item.extra_parameters|indent(4) }} 34 | {% endif %} 35 | } 36 | -------------------------------------------------------------------------------- /provision/roles/rancher-agent/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | rancher_agent_name: rancher-agent 4 | rancher_agent_image: rancher/agent 5 | rancher_agent_version: v1.2.5 6 | # network 7 | rancher_agent_port: 8080 8 | # storage 9 | rancher_agent_data_dir: "/var/lib/rancher" 10 | -------------------------------------------------------------------------------- /provision/roles/rancher-agent/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | service: name=docker state=restarted -------------------------------------------------------------------------------- /provision/roles/rancher-agent/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: check whether the project exists 3 | uri: 4 | method: GET 5 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/projects?name={{ RANCHER_PROJECT_NAME }}" 6 | user: "{{ RANCHER_APIKEY_PUBLIC }}" 7 | password: "{{ RANCHER_APIKEY_SECRET }}" 8 | status_code: 200 9 | become: no 10 | run_once: yes 11 | delegate_to: localhost 12 | register: project_result 13 | 14 | - name: create project 15 | uri: 16 | method: POST 17 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/projects" 18 | user: "{{ RANCHER_APIKEY_PUBLIC }}" 19 | password: "{{ RANCHER_APIKEY_SECRET }}" 20 | body: "{{ lookup('template', 'project.json.j2') }}" 21 | body_format: json 22 | status_code: 201 23 | become: no 24 | run_once: yes 25 | delegate_to: localhost 26 | register: project_response 27 | when: project_result.json['data']|length == 0 28 | 29 | - name: register project id 30 | set_fact: 31 | RANCHER_PROJECT_ID: "{{ project_result.json['data'][0]['id'] }}" 32 | become: no 33 | run_once: yes 34 | delegate_to: localhost 35 | when: project_result.json['data']|length > 0 36 | 37 | - name: register project id 38 | set_fact: 39 | RANCHER_PROJECT_ID: "{{ project_response.json['id'] }}" 40 | become: no 41 | run_once: yes 42 | delegate_to: localhost 43 | when: project_result.json['data']|length == 0 44 | 45 | - name: create a registration token for the project 46 | uri: 47 | method: POST 48 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/registrationtokens?projectId={{ RANCHER_PROJECT_ID }}" 49 | user: "{{ RANCHER_APIKEY_PUBLIC }}" 50 | password: "{{ RANCHER_APIKEY_SECRET }}" 51 | return_content: yes 52 | status_code: 201 53 | become: no 54 | run_once: yes 55 | delegate_to: localhost 56 | register: rancher_token_url 57 | 58 | - name: return the registration url of rancher server 59 | uri: 60 | method: GET 61 | url: "{{ rancher_token_url.json['links']['self'] }}" 62 | user: "{{ RANCHER_APIKEY_PUBLIC }}" 63 | password: "{{ RANCHER_APIKEY_SECRET }}" 64 | become: no 65 | run_once: yes 66 | delegate_to: localhost 67 | register: rancher_token 68 | 69 | - name: register the host machine with the rancher server 70 | docker: 71 | image: "{{ rancher_agent_image }}:{{ rancher_agent_version }}" 72 | privileged: yes 73 | volumes: 74 | - 
/data/lib/rancher:/var/lib/rancher 75 | - /var/run/docker.sock:/var/run/docker.sock 76 | env: 77 | CATTLE_HOST_LABELS="project={{ RANCHER_PROJECT_NAME }}&orchestration=true&etcd=true" 78 | command: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/scripts/{{ rancher_token.json['token'] }}" 79 | become: yes 80 | when: inventory_hostname == groups[RANCHER_PROJECT_NAME][0] 81 | 82 | - name: register the host machine with the rancher server 83 | docker: 84 | image: "{{ rancher_agent_image }}:{{ rancher_agent_version }}" 85 | privileged: yes 86 | volumes: 87 | - /data/lib/rancher:/var/lib/rancher 88 | - /var/run/docker.sock:/var/run/docker.sock 89 | env: 90 | CATTLE_HOST_LABELS="project={{ RANCHER_PROJECT_NAME }}&compute=true&etcd=true" 91 | command: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/scripts/{{ rancher_token.json['token'] }}" 92 | become: yes 93 | when: inventory_hostname != groups[RANCHER_PROJECT_NAME][0] 94 | -------------------------------------------------------------------------------- /provision/roles/rancher-agent/templates/default/docker.j2: -------------------------------------------------------------------------------- 1 | # Docker Upstart and SysVinit configuration file 2 | 3 | # 4 | # THIS FILE DOES NOT APPLY TO SYSTEMD 5 | # 6 | # Please see the documentation for "systemd drop-ins": 7 | # https://docs.docker.com/engine/articles/systemd/ 8 | # 9 | 10 | # Customize location of Docker binary (especially for development testing). 11 | #DOCKERD="/usr/local/bin/dockerd" 12 | 13 | # Use DOCKER_OPTS to modify the daemon startup options. 14 | DOCKER_OPTS="-H unix:///var/run/docker.sock -H tcp://0.0.0.0:2375 {{ docker_opts }}" 15 | 16 | # If you need Docker to use an HTTP proxy, it can also be specified here. 17 | #export http_proxy="http://127.0.0.1:3128/" 18 | 19 | # This is also a handy place to tweak where Docker's temporary files go. 20 | #export TMPDIR="/mnt/bigdrive/docker-tmp" -------------------------------------------------------------------------------- /provision/roles/rancher-agent/templates/init/docker.conf.j2: -------------------------------------------------------------------------------- 1 | description "Docker daemon" 2 | 3 | start on (filesystem and net-device-up IFACE!=lo) 4 | stop on runlevel [!2345] 5 | limit nofile 524288 1048576 6 | limit nproc 524288 1048576 7 | 8 | respawn 9 | 10 | kill timeout 20 11 | 12 | pre-start script 13 | # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount 14 | if grep -v '^#' /etc/fstab | grep -q cgroup \ 15 | || [ ! -e /proc/cgroups ] \ 16 | || [ ! -d /sys/fs/cgroup ]; then 17 | exit 0 18 | fi 19 | if ! mountpoint -q /sys/fs/cgroup; then 20 | mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup 21 | fi 22 | ( 23 | cd /sys/fs/cgroup 24 | for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do 25 | mkdir -p $sys 26 | if ! mountpoint -q $sys; then 27 | if ! mount -n -t cgroup -o $sys cgroup $sys; then 28 | rmdir $sys || true 29 | fi 30 | fi 31 | done 32 | ) 33 | end script 34 | 35 | script 36 | # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) 37 | DOCKERD=/usr/bin/docker 38 | DOCKER_OPTS= 39 | if [ -f /etc/default/$UPSTART_JOB ]; then 40 | . /etc/default/$UPSTART_JOB 41 | fi 42 | exec "$DOCKERD" daemon $DOCKER_OPTS 43 | end script 44 | 45 | # Don't emit "started" event until docker.sock is ready. 
46 | # See https://github.com/docker/docker/issues/6647 47 | post-start script 48 | DOCKER_OPTS= 49 | DOCKER_SOCKET= 50 | if [ -f /etc/default/$UPSTART_JOB ]; then 51 | . /etc/default/$UPSTART_JOB 52 | fi 53 | 54 | if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then 55 | DOCKER_SOCKET=/var/run/docker.sock 56 | else 57 | DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)') 58 | fi 59 | 60 | if [ -n "$DOCKER_SOCKET" ]; then 61 | while ! [ -e "$DOCKER_SOCKET" ]; do 62 | initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 63 | echo "Waiting for $DOCKER_SOCKET" 64 | sleep 0.1 65 | done 66 | echo "$DOCKER_SOCKET is up" 67 | fi 68 | end script -------------------------------------------------------------------------------- /provision/roles/rancher-agent/templates/portworx.j2: -------------------------------------------------------------------------------- 1 | VOLUME_DRIVER_NAME=pxd 2 | CLUSTER_ID={{ RANCHER_PROJECT_NAME }} 3 | KVDB="{% for host in groups[etcd_cluster_group] -%}etcd://{{ hostvars[host]['ansible_host'] }}:2379{% if not loop.last %},{% endif %}{% endfor %}" 4 | USE_DISKS='-s /dev/sdb' 5 | HEADER_DIR=/usr/src 6 | -------------------------------------------------------------------------------- /provision/roles/rancher-agent/templates/project.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name":"{{ RANCHER_PROJECT_NAME }}", 3 | "description":"{{ RANCHER_PROJECT_NAME }}", 4 | "allowSystemRole":false, 5 | "members":[], 6 | "swarm":false, 7 | "kubernetes":false, 8 | "mesos":false, 9 | "virtualMachine":false, 10 | "publicDns":false, 11 | "servicesPortRange":null 12 | } -------------------------------------------------------------------------------- /provision/roles/rancher-balancing/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | 5 | 6 | # configs 7 | nginx_worker_processes: "{{ ansible_processor_vcpus | default(ansible_processor_count) }}" 8 | nginx_worker_connections: "1024" 9 | 10 | nginx_tcp_nopush: "on" 11 | nginx_tcp_nodelay: "on" 12 | 13 | nginx_log_format: '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"' 14 | 15 | nginx_vhosts: 16 | - listen: "80" 17 | server_name: "localhost" 18 | root: "/usr/share/nginx/html" 19 | index: "index.html index.htm" 20 | 21 | nginx_upstreams: [] 22 | -------------------------------------------------------------------------------- /provision/roles/rancher-balancing/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/rancher-balancing/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 
5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/rancher-balancing/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ensure rancher-balancing directory 3 | file: 4 | path: /etc/rancher 5 | state: directory 6 | owner: root 7 | group: root 8 | become: yes 9 | 10 | - name: generate rancher-balancing proxy configuration 11 | template: 12 | src: kube-balancing.cfg 13 | dest: /etc/rancher/rancher-balancing.cfg 14 | become: yes 15 | 16 | - name: start rancher-balancing container 17 | docker_container: 18 | name: "kube-balancing" 19 | image: "haproxy" 20 | ports: 21 | - "6443:6443" 22 | volumes: 23 | - "/etc/rancher/rancher-balancing.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro" 24 | restart_policy: unless-stopped 25 | become: yes 26 | -------------------------------------------------------------------------------- /provision/roles/rancher-balancing/templates/kube-balancing.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local0 notice 3 | maxconn 2000 4 | tune.ssl.default-dh-param 2048 5 | 6 | defaults 7 | log global 8 | mode tcp 9 | option tcplog 10 | timeout connect 5000ms 11 | timeout client 50000ms 12 | timeout server 50000ms 13 | 14 | listen kube-apiserver 15 | bind *:6443 16 | mode tcp 17 | server toc-master01 10.202.128.109:6443 18 | server toc-master02 10.202.128.110:6443 19 | server toc-master03 10.202.128.111:6443 20 | balance leastconn 21 | -------------------------------------------------------------------------------- /provision/roles/rancher-stack/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | rancher_jenkins_plugins: 3 | - git 4 | - subversion 5 | - workflow-aggregator 6 | - dashboard-view 7 | - cloudbees-folder 8 | - token-macro 9 | - simple-theme 10 | - docker 11 | - ldap 12 | - locale 13 | - swarm 14 | rancher_jenkins_config: 15 | security: 16 | ldap: 17 | server: "ldap://{{ LDAP_HOST }}:{{ LDAP_PORT }}" 18 | rootDN: "{{ LDAP_BASE_DN }}" 19 | userSearchBase: "{{ LDAP_USER_SEARCH_BASE }}" 20 | userSearchFilter: "{{ LDAP_USER_SEARCH_FILTER }}" 21 | groupSearchBase: "{{ LDAP_GROUP_SEARCH_BASE }}" 22 | groupSearchFilter: "{{ LDAP_GROUP_SEARCH_FILTER }}" 23 | groupSearchAttr: memberOf 24 | managerDN: "{{ LDAP_BIND_DN }}" 25 | managerPassword: "{{ LDAP_BIND_ENCRYPT }}" 26 | credentials: 27 | - id: jenkins-slave-password 28 | description: Jenkins Slave with Password Configuration 29 | username: root 30 | password: jenkins 31 | cloud: 32 | docker: 33 | - name: toc-infra01 34 | serverUrl: tcp://10.202.128.112:2375 35 | containerCap: '50' 36 | connectTimeout: 5 37 | readTimeout: 15 38 | credentialsId: jenkins-slave-password 39 | templates: 40 | - image: baselibrary/jenkins-slave:1.12 41 | command: "/usr/sbin/sshd -D" 42 | volumes: "/var/run/docker.sock:/var/run/docker.sock\n /data/lib/jenkins/workspace:/data/lib/jenkins/workspace\n /tmp:/tmp /root/.m2:/root/.m2\n /root/.gradle:/root/.gradle\n /root/.npmrc:/root/.npmrc\n /root/.bowerrc:/root/.bowerrc" 43 | volumesFrom: '' 44 | environments: SSH_PASS=jenkins 45 | bindAllPorts: false 46 | privileged: false 47 | tty: false 48 | label: docker 49 | remoteFs: "/data/lib/jenkins" 50 | remoteFsMapping: "/data/lib/jenkins" 51 | instanceCap: '2' 52 | 
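# Note: in the Jenkins Docker plugin these two caps are easy to confuse:
# containerCap above limits containers across the whole cloud, while
# instanceCap limits containers created from this one template.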
numExecutors: 1 53 | removeVolume: false 54 | pullStrategy: Pull once and update latest 55 | - name: toc-infra02 56 | serverUrl: tcp://10.202.128.113:2375 57 | containerCap: '50' 58 | connectTimeout: 5 59 | readTimeout: 15 60 | credentialsId: jenkins-slave-password 61 | templates: 62 | - image: baselibrary/jenkins-slave:1.12 63 | command: "/usr/sbin/sshd -D" 64 | volumes: "/var/run/docker.sock:/var/run/docker.sock\n /data/lib/jenkins/workspace:/data/lib/jenkins/workspace\n /tmp:/tmp /root/.m2:/root/.m2\n /root/.gradle:/root/.gradle\n /root/.npmrc:/root/.npmrc\n /root/.bowerrc:/root/.bowerrc" 65 | volumesFrom: '' 66 | environments: SSH_PASS=jenkins 67 | bindAllPorts: false 68 | privileged: false 69 | tty: false 70 | label: docker 71 | remoteFs: "/data/lib/jenkins" 72 | remoteFsMapping: "/data/lib/jenkins" 73 | instanceCap: '2' 74 | numExecutors: 1 75 | removeVolume: false 76 | pullStrategy: Pull once and update latest 77 | - name: toc-infra03 78 | serverUrl: tcp://10.202.128.114:2375 79 | containerCap: '50' 80 | connectTimeout: 5 81 | readTimeout: 15 82 | credentialsId: jenkins-slave-password 83 | templates: 84 | - image: baselibrary/jenkins-slave:1.12 85 | command: "/usr/sbin/sshd -D" 86 | volumes: "/var/run/docker.sock:/var/run/docker.sock\n /data/lib/jenkins/workspace:/data/lib/jenkins/workspace\n /tmp:/tmp /root/.m2:/root/.m2\n /root/.gradle:/root/.gradle\n /root/.npmrc:/root/.npmrc\n /root/.bowerrc:/root/.bowerrc" 87 | volumesFrom: '' 88 | environments: SSH_PASS=jenkins 89 | bindAllPorts: false 90 | privileged: false 91 | tty: false 92 | label: docker 93 | remoteFs: "/data/lib/jenkins" 94 | remoteFsMapping: "/data/lib/jenkins" 95 | instanceCap: '2' 96 | numExecutors: 1 97 | removeVolume: false 98 | pullStrategy: Pull once and update latest 99 | -------------------------------------------------------------------------------- /provision/roles/rancher-stack/files/jenkins.txt: -------------------------------------------------------------------------------- 1 | PORT=8080 2 | volume_work=/var/lib/jenkins 3 | plugins=git subversion workflow-aggregator dashboard-view cloudbees-folder token-macro simple-theme docker ldap locale swarm 4 | config={"security":{"ldap":{"server":"ldap://ldap.thoughtworks.io","rootDN":"OU=Enterprise,OU=Principal,DC=corporate,DC=thoughtworks,DC=com","userSearchBase":"OU=Employees","userSearchFilter":"sAMAccountName={0}","groupSearchBase":"OU=Groups","groupSearchFilter":"(& (cn={0}) (objectclass=group) )","groupSearchAttr":"memberOf","managerDN":"CN=Qiang Shawn Ma,OU=Xian,OU=Employees,OU=Enterprise,OU=Principal,DC=corporate,DC=thoughtworks,DC=com","managerPassword":"Ad4CwVOAzj+I+kaJkpgXZlYBncPvZkbZd9ftSffH42E="}},"credentials":[{"id":"jenkins-slave-password","description":"Jenkis Slave with Password Configuration","username":"root","password":"jenkins"}],"cloud":{"docker":[{"name":"toc-infra01","serverUrl":"tcp://10.202.128.83:2375","containerCap":"50","connectTimeout":5,"readTimeout":15,"credentialsId":"jenkins-slave-password","templates":[{"image":"baselibrary/jenkins-slave:1.12","command":"/usr/sbin/sshd -D","volumes":"/var/run/docker.sock:/var/run/docker.sock\\n /data/lib/jenkins:/data/lib/jenkins\\n /tmp:/tmp","volumesFrom":"","environments":"SSH_PASS=jenkins\\n AUTHORIZED_KEYS=ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ340uv5ecAnQebmh5Rz0otp1KYhnt9czUhIiOPRPyzJAhm+rh8M6zl3hn3O9ysEhIVDYU1Dl0H6ssR26uZWAMaTsMeTAjCn4IThxWWO4T5utoMVCcYDtVgbYIIrF5chKX2C7MovEUlNdzjhacfocvzYc1AqTyuNiXM9KtyN4YrxnExX6Uqg/76mJMZ7BFBVpdI2rhqj1oCRyE7zalE76JtBTj4kW2l/5dsYFVlG5EEj6WCibppGOaDxCtw46Z8WmSROF/aV3/kWXW+CVPlCha2uqgkyL6n/4AJ1mtnfnXmJc5m5oLM4EkrCTbwTwOXnWjP6nDThSWn55P0TN3PRSB qsma@thoughtworks.com","bindAllPorts":false,"privileged":false,"tty":false,"label":"docker","remoteFs":"/data/lib/jenkins","remoteFsMapping":"/data/lib/jenkins","instanceCap":"2","numExecutors":1,"removeVolume":false,"pullStrategy":"Pull once and update latest"}]},{"name":"toc-infra02","serverUrl":"tcp://10.202.128.84:2375","containerCap":"50","connectTimeout":5,"readTimeout":15,"credentialsId":"jenkins-slave-password","templates":[{"image":"baselibrary/jenkins-slave:1.12","command":"/usr/sbin/sshd -D","volumes":"/var/run/docker.sock:/var/run/docker.sock\\n /data/lib/jenkins:/data/lib/jenkins\\n /tmp:/tmp","volumesFrom":"","environments":"SSH_PASS=jenkins\\n AUTHORIZED_KEYS=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ340uv5ecAnQebmh5Rz0otp1KYhnt9czUhIiOPRPyzJAhm+rh8M6zl3hn3O9ysEhIVDYU1Dl0H6ssR26uZWAMaTsMeTAjCn4IThxWWO4T5utoMVCcYDtVgbYIIrF5chKX2C7MovEUlNdzjhacfocvzYc1AqTyuNiXM9KtyN4YrxnExX6Uqg/76mJMZ7BFBVpdI2rhqj1oCRyE7zalE76JtBTj4kW2l/5dsYFVlG5EEj6WCibppGOaDxCtw46Z8WmSROF/aV3/kWXW+CVPlCha2uqgkyL6n/4AJ1mtnfnXmJc5m5oLM4EkrCTbwTwOXnWjP6nDThSWn55P0TN3PRSB qsma@thoughtworks.com","bindAllPorts":false,"privileged":false,"tty":false,"label":"docker","remoteFs":"/data/lib/jenkins","remoteFsMapping":"/data/lib/jenkins","instanceCap":"2","numExecutors":1,"removeVolume":false,"pullStrategy":"Pull once and update latest"}]},{"name":"toc-infra03","serverUrl":"tcp://10.202.128.85:2375","containerCap":"50","connectTimeout":5,"readTimeout":15,"credentialsId":"jenkins-slave-password","templates":[{"image":"baselibrary/jenkins-slave:1.12","command":"/usr/sbin/sshd -D","volumes":"/var/run/docker.sock:/var/run/docker.sock\\n /data/lib/jenkins:/data/lib/jenkins\\n /tmp:/tmp","volumesFrom":"","environments":"SSH_PASS=jenkins\\n AUTHORIZED_KEYS=ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ340uv5ecAnQebmh5Rz0otp1KYhnt9czUhIiOPRPyzJAhm+rh8M6zl3hn3O9ysEhIVDYU1Dl0H6ssR26uZWAMaTsMeTAjCn4IThxWWO4T5utoMVCcYDtVgbYIIrF5chKX2C7MovEUlNdzjhacfocvzYc1AqTyuNiXM9KtyN4YrxnExX6Uqg/76mJMZ7BFBVpdI2rhqj1oCRyE7zalE76JtBTj4kW2l/5dsYFVlG5EEj6WCibppGOaDxCtw46Z8WmSROF/aV3/kWXW+CVPlCha2uqgkyL6n/4AJ1mtnfnXmJc5m5oLM4EkrCTbwTwOXnWjP6nDThSWn55P0TN3PRSB qsma@thoughtworks.com","bindAllPorts":false,"privileged":false,"tty":false,"label":"docker","remoteFs":"/data/lib/jenkins","remoteFsMapping":"/data/lib/jenkins","instanceCap":"2","numExecutors":1,"removeVolume":false,"pullStrategy":"Pull once and update latest"}]},{"name":"toc-infra04","serverUrl":"tcp://10.202.128.86:2375","containerCap":"50","connectTimeout":5,"readTimeout":15,"credentialsId":"jenkins-slave-password","templates":[{"image":"baselibrary/jenkins-slave:1.12","command":"/usr/sbin/sshd -D","volumes":"/var/run/docker.sock:/var/run/docker.sock\\n /data/lib/jenkins:/data/lib/jenkins\\n /tmp:/tmp","volumesFrom":"","environments":"SSH_PASS=jenkins\\n AUTHORIZED_KEYS=ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDQ340uv5ecAnQebmh5Rz0otp1KYhnt9czUhIiOPRPyzJAhm+rh8M6zl3hn3O9ysEhIVDYU1Dl0H6ssR26uZWAMaTsMeTAjCn4IThxWWO4T5utoMVCcYDtVgbYIIrF5chKX2C7MovEUlNdzjhacfocvzYc1AqTyuNiXM9KtyN4YrxnExX6Uqg/76mJMZ7BFBVpdI2rhqj1oCRyE7zalE76JtBTj4kW2l/5dsYFVlG5EEj6WCibppGOaDxCtw46Z8WmSROF/aV3/kWXW+CVPlCha2uqgkyL6n/4AJ1mtnfnXmJc5m5oLM4EkrCTbwTwOXnWjP6nDThSWn55P0TN3PRSB qsma@thoughtworks.com","bindAllPorts":false,"privileged":false,"tty":false,"label":"docker","remoteFs":"/data/lib/jenkins","remoteFsMapping":"/data/lib/jenkins","instanceCap":"2","numExecutors":1,"removeVolume":false,"pullStrategy":"Pull once and update latest"}]}]}} 5 | -------------------------------------------------------------------------------- /provision/roles/rancher-stack/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | service: name=docker state=restarted -------------------------------------------------------------------------------- /provision/roles/rancher-stack/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: config rancher stack 4 | template: 5 | src: "{{ RANCHER_STACK_NAME }}.j2" 6 | dest: "/tmp/{{ RANCHER_STACK_NAME }}.txt" 7 | become: no 8 | run_once: yes 9 | delegate_to: localhost 10 | 11 | - name: check rancher stack 12 | shell: "rancher --url http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1 --access-key {{ RANCHER_APIKEY_PUBLIC }} --secret-key {{ RANCHER_APIKEY_SECRET }} --environment {{ RANCHER_PROJECT_NAME }} stacks | grep ThoughtWorks/{{ RANCHER_STACK_NAME }}" 13 | failed_when: false 14 | register: rancher_stack 15 | become: no 16 | run_once: yes 17 | delegate_to: localhost 18 | 19 | - name: install rancher stack 20 | command: "rancher --url http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1 --access-key {{ RANCHER_APIKEY_PUBLIC }} --secret-key {{ RANCHER_APIKEY_SECRET }} --environment {{ RANCHER_PROJECT_NAME }} catalog install --name {{ RANCHER_STACK_NAME }} --answers /tmp/{{ RANCHER_STACK_NAME }}.txt ThoughtWorks/{{ RANCHER_STACK_NAME }}" 21 | when: rancher_stack.stdout.find(RANCHER_STACK_NAME) == -1 22 | become: no 23 | run_once: yes 24 | delegate_to: localhost 25 | -------------------------------------------------------------------------------- /provision/roles/rancher-stack/templates/jenkins.j2: -------------------------------------------------------------------------------- 1 | URL=http://127.0.0.1 2 | PORT=8080 3 | volume_driver=local 4 | volume_work=/data/lib/jenkins 5 | plugins=git subversion workflow-aggregator dashboard-view cloudbees-folder token-macro simple-theme docker ldap locale swarm 6 | config={{ rancher_jenkins_config | to_json }} 7 | -------------------------------------------------------------------------------- /provision/roles/rancher-stack/templates/jenkins/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | jenkins: 4 | image: baselibrary/jenkins:2 5 | ports: 6 | - 8080:8080/tcp 7 | volumes_from: 8 | - jenkins-config 9 | - jenkins-volume 10 | labels: 11 | io.rancher.sidekicks: jenkins-config,jenkins-volume 12 | io.rancher.container.hostname_override: container_name 13 | entrypoint: 14 | - /usr/share/jenkins/rancher/jenkins.sh 15 | jenkins-config: 16 | image: baselibrary/jenkins-config:1.0 17 | jenkins-volume: 18 | image: busybox 19 | volumes: 20 | - /var/lib/jenkins:/var/lib/jenkins 21 | labels: 22 | 
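# The start_once label below is a Rancher convention: the sidekick runs to
# completion once (here it just chowns the Jenkins volume) instead of being
# restarted like a long-lived service.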
io.rancher.container.start_once: 'true' 23 | entrypoint: ["chown", "-R", "1000:1000", "/var/lib/jenkins"] 24 | -------------------------------------------------------------------------------- /provision/roles/rancher-stack/templates/portworx.j2: -------------------------------------------------------------------------------- 1 | VOLUME_DRIVER_NAME=pxd 2 | CLUSTER_ID={{ RANCHER_PROJECT_NAME }} 3 | KVDB="etcd://{% for host in groups[etcd_cluster_group] -%}{{ hostvars[host]['ansible_host'] }}:2379{% if not loop.last %},{% endif %}{% endfor %}" 4 | USE_DISKS='-s /dev/sdb' 5 | HEADER_DIR=/usr/src 6 | -------------------------------------------------------------------------------- /provision/roles/rancher/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | rancher_name: rancher-server 4 | rancher_image: rancher/server 5 | rancher_version: v1.6.14 6 | # network 7 | rancher_port: 8080 8 | # storage 9 | rancher_data_dir: "/var/lib/rancher" 10 | 11 | rancher_catalogs: 12 | catalogs: 13 | library: 14 | url: https://git.rancher.io/rancher-catalog.git 15 | branch: v1.6-release 16 | community: 17 | url: https://git.rancher.io/community-catalog.git 18 | branch: master 19 | 20 | # json body 21 | rancher_apikeys_body: 22 | name: apikey 23 | description: infrastructure apikey 24 | accountId: 1a1 25 | publicValue: "{{ RANCHER_APIKEY_PUBLIC }}" 26 | secretValue: "{{ RANCHER_APIKEY_SECRET }}" 27 | 28 | rancher_openldap_body: 29 | enabled: true 30 | accessMode: "unrestricted" 31 | server: "{{ LDAP_HOST }}" 32 | port: "{{ LDAP_PORT }}" 33 | tls: false 34 | serviceAccountUsername: "{{ LDAP_USER }}@{{ LDAP_DOMAIN }}" 35 | serviceAccountPassword: "{{ LDAP_BIND_PASS }}" 36 | domain: "{{ LDAP_BASE_DN }}" 37 | userLoginField: "sAMAccountName" 38 | userObjectClass: "person" 39 | userNameField: "name" 40 | userSearchField: "sAMAccountName" 41 | groupObjectClass: "group" 42 | groupNameField: "name" 43 | groupSearchField: "sAMAccountName" 44 | connectionTimeout: 1000 45 | 46 | rancher_admins_body: 47 | name: "{{ LDAP_USER }}" 48 | kind: "admin" 49 | externalId: "{{ LDAP_BIND_DN }}" 50 | externalIdType: "openldap_user" 51 | 52 | rancher_catalogs_body: 53 | id: catalog.url 54 | name: catalog.url 55 | source: "Default Environment Variables" 56 | value: "{{ rancher_catalogs | to_json }}" 57 | activeValue: "{{ rancher_catalogs | to_json }}" 58 | -------------------------------------------------------------------------------- /provision/roles/rancher/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/rancher/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 
5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/rancher/tasks/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: wait for the rancher server to start 3 | uri: 4 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1" 5 | status_code: 200,401 6 | become: no 7 | run_once: yes 8 | delegate_to: localhost 9 | register: rancher_auth 10 | until: rancher_auth.status == 200 or rancher_auth.status == 401 11 | retries: 5 12 | delay: 30 13 | 14 | - debug: msg="{{ RANCHER_APIKEY_PUBLIC }}" 15 | 16 | - debug: msg="{{ RANCHER_APIKEY_SECRET }}" 17 | 18 | - name: config apikey 19 | uri: 20 | method: POST 21 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/apikeys" 22 | body: "{{ rancher_apikeys_body | to_json }}" 23 | body_format: json 24 | status_code: 201 25 | become: no 26 | run_once: yes 27 | delegate_to: localhost 28 | ignore_errors: true 29 | when: rancher_auth.status == 200 30 | delay: 10 31 | 32 | - name: config ldap authentication 33 | uri: 34 | method: POST 35 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/openldapconfigs" 36 | user: "{{ RANCHER_APIKEY_PUBLIC }}" 37 | password: "{{ RANCHER_APIKEY_SECRET }}" 38 | body: "{{ rancher_openldap_body | to_json }}" 39 | body_format: json 40 | status_code: 201 41 | become: no 42 | run_once: yes 43 | delegate_to: localhost 44 | when: rancher_auth.status == 200 45 | delay: 10 46 | 47 | - name: config admin user 48 | uri: 49 | method: PUT 50 | url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/accounts/1a1" 51 | user: "{{ RANCHER_APIKEY_PUBLIC }}" 52 | password: "{{ RANCHER_APIKEY_SECRET }}" 53 | body: "{{ rancher_admins_body | to_json }}" 54 | body_format: json 55 | status_code: 200 56 | become: no 57 | run_once: yes 58 | delegate_to: localhost 59 | when: rancher_auth.status == 200 60 | delay: 10 61 | 62 | #- name: config catalogs 63 | # uri: 64 | # method: PUT 65 | # url: "http://{{ RANCHER_MASTER_HOST }}:{{ RANCHER_MASTER_PORT }}/v1/settings/catalog.url" 66 | # user: "{{ RANCHER_APIKEY_PUBLIC }}" 67 | # password: "{{ RANCHER_APIKEY_SECRET }}" 68 | # body: "{{ rancher_catalogs_body | to_json }}" 69 | # body_format: json 70 | # status_code: 200 71 | # become: no 72 | # run_once: yes 73 | # delegate_to: localhost 74 | # delay: 10 75 | -------------------------------------------------------------------------------- /provision/roles/rancher/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create mysql user 3 | mysql_user: 4 | name: "{{ RANCHER_MYSQL_USER }}" 5 | password: "{{ RANCHER_MYSQL_PASS }}" 6 | priv: "*.*:ALL" 7 | host_all: yes 8 | login_host: "{{ RANCHER_MYSQL_HOST }}" 9 | login_port: "{{ RANCHER_MYSQL_PORT }}" 10 | login_user: "{{ MYSQL_ROOT_USER }}" 11 | login_password: "{{ MYSQL_ROOT_PASS }}" 12 | state: present 13 | become: no 14 | run_once: yes 15 | delegate_to: localhost 16 | 17 | - name: create mysql database 18 | mysql_db: 19 | name: "{{ RANCHER_MYSQL_DATABASE }}" 20 | login_host: "{{ RANCHER_MYSQL_HOST }}" 21 | login_port: "{{ RANCHER_MYSQL_PORT }}" 22 | login_user: "{{ MYSQL_ROOT_USER }}" 23 | login_password: "{{ MYSQL_ROOT_PASS }}" 24 | state: present 25 | become: no 26 | run_once: yes 27 | delegate_to: localhost 28 | 29 | 
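# Note: 8080 serves the Rancher API/UI and 9345 is the HA cluster-communication
# port; --advertise-address below must be an address the other masters can
# reach, which is why it is set to ansible_host rather than localhost.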
- name: start rancher server container 30 | docker_container: 31 | name: "{{ rancher_name }}" 32 | image: "{{ rancher_image }}:{{ rancher_version }}" 33 | ports: 34 | - "8080:8080" 35 | - "9345:9345" 36 | command: "--db-host {{ RANCHER_MYSQL_HOST }} --db-port {{ RANCHER_MYSQL_PORT }} --db-user {{ RANCHER_MYSQL_USER }} --db-pass {{ RANCHER_MYSQL_PASS }} --db-name {{ RANCHER_MYSQL_DATABASE }} --advertise-address {{ ansible_host }}" 37 | restart_policy: unless-stopped 38 | become: yes 39 | 40 | #- name: wait for the rancher server to start 41 | # wait_for: 42 | # host: localhost 43 | # port: 8080 44 | # delay: 30 45 | -------------------------------------------------------------------------------- /provision/roles/rancher/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: install.yml 3 | tags: 4 | - development 5 | - rancher 6 | - install 7 | 8 | - include: config.yml 9 | tags: 10 | - development 11 | - rancher 12 | - config 13 | 14 | -------------------------------------------------------------------------------- /provision/roles/rancher/templates/apikey.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name": "apikey", 3 | "description": "infrastructure apikey", 4 | "accountId": "1a1", 5 | "publicValue": "{{ RANCHER_APIKEY_PUBLIC }}", 6 | "secretValue": "{{ RANCHER_APIKEY_SECRET }}" 7 | } -------------------------------------------------------------------------------- /provision/roles/rancher/templates/catalog.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "id": "catalog.url", 3 | "name": "catalog.url", 4 | "source": "Database", 5 | "value": "{{ rancher_catalogs | to_nice_json }}", 6 | "activeValue": "{{ rancher_catalogs | to_nice_json }}" 7 | } 8 | -------------------------------------------------------------------------------- /provision/roles/rancher/templates/openldap.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "accessMode": "unrestricted", 3 | "server": "{{ LDAP_HOST }}", 4 | "port": {{ LDAP_PORT }}, 5 | "tls": false, 6 | "enabled": true, 7 | "serviceAccountUsername": "{{ LDAP_USER }}@{{ LDAP_DOMAIN }}", 8 | "serviceAccountPassword": "{{ LDAP_BIND_PASS }}", 9 | "domain": "{{ LDAP_BASE_DN }}", 10 | "userLoginField": "sAMAccountName", 11 | "userObjectClass": "person", 12 | "userNameField": "name", 13 | "userSearchField": "sAMAccountName", 14 | "groupObjectClass": "group", 15 | "groupNameField": "name", 16 | "groupSearchField": "sAMAccountName", 17 | "connectionTimeout": 1000 18 | } -------------------------------------------------------------------------------- /provision/roles/rancher/templates/user.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name": "{{ LDAP_USER }}", 3 | "kind": "admin", 4 | "externalId": "{{ LDAP_BIND_DN }}", 5 | "externalIdType": "openldap_user" 6 | } -------------------------------------------------------------------------------- /provision/roles/vault/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # image 3 | vault_name: vault-server 4 | vault_image: vault 5 | vault_version: 0.6.3 6 | 7 | # network 8 | vault_port: 8200 9 | 10 | vault_data_dir: "/var/lib/vault" 11 | -------------------------------------------------------------------------------- /provision/roles/vault/files/vault.sh: 
-------------------------------------------------------------------------------- 1 | # Store Secrets using HashiCorp Vault 2 | 3 | # Learn how to store and manage secrets using HashiCorp Vault 4 | 5 | # Step 1 - Configuration 6 | 7 | cat vault.hcl 8 | backend "consul" { 9 | address = "consul:8500" 10 | advertise_addr = "consul:8300" 11 | scheme = "http" 12 | } 13 | listener "tcp" { 14 | address = "0.0.0.0:8200" 15 | tls_disable = 1 16 | } 17 | disable_mlock = true 18 | 19 | # Create Data Container 20 | # To store the configuration, we'll create a container. This will be used by Vault and Consul to read the required configuration files. 21 | 22 | docker create -v /config --name config busybox; docker cp vault.hcl config:/config/; 23 | 24 | # Step 2 - Launch 25 | # With the configuration data container created, we can launch the required processes to start Vault. 26 | # Launch Services 27 | 28 | docker run -d --name consul \ 29 | -p 8500:8500 \ 30 | consul:v0.6.4 \ 31 | agent -dev -client=0.0.0.0 32 | 33 | # Our Vault instance can now use Consul to store the data. All data stored within Consul will be encrypted. 34 | 35 | docker run -d --name vault-dev \ 36 | --link consul:consul \ 37 | -p 8200:8200 \ 38 | --volumes-from config \ 39 | cgswong/vault:latest server -config=/config/vault.hcl 40 | 41 | # Step 3 - Initialise 42 | # With a vault instance running, we can now configure our environment and initialise the Vault. 43 | # Configure Environment 44 | 45 | alias vault='docker exec -it vault-dev vault "$@"' 46 | export VAULT_ADDR=http://127.0.0.1:8200 47 | 48 | # Initialise Vault 49 | # With the alias in place, we can make calls to the CLI. The first step is to initialise the vault using the init command. 50 | 51 | vault init -address=${VAULT_ADDR} > keys.txt 52 | cat keys.txt 53 | 54 | # Step 4 - Unseal Vault 55 | 56 | vault unseal -address=${VAULT_ADDR} $(grep 'Key 1:' keys.txt | awk '{print $NF}') 57 | vault unseal -address=${VAULT_ADDR} $(grep 'Key 2:' keys.txt | awk '{print $NF}') 58 | vault unseal -address=${VAULT_ADDR} $(grep 'Key 3:' keys.txt | awk '{print $NF}') 59 | vault status -address=${VAULT_ADDR} 60 | 61 | # Step 5 - Vault Tokens 62 | # You can use this token to log in to Vault. 63 | 64 | export VAULT_TOKEN=$(grep 'Initial Root Token:' keys.txt | awk '{print substr($NF, 1, length($NF)-1)}') 65 | vault auth -address=${VAULT_ADDR} ${VAULT_TOKEN} 66 | 67 | # Step 6 - Read/Write Data 68 | # Save Data 69 | # To store data, we use the write CLI command. In this case, we have a key named secret/api-key with the value 12345678 70 | 71 | vault write -address=${VAULT_ADDR} \ 72 | secret/api-key value=12345678 73 | 74 | # Read Data 75 | # Reading the key will output the value, along with other information such as the lease duration. 76 | vault read -address=${VAULT_ADDR} \ 77 | secret/api-key 78 | 79 | # You can also use the -field flag to extract the value from the secret data. 80 | 81 | vault read -address=${VAULT_ADDR} \ 82 | -field=value secret/api-key 83 | 84 | # Step 7 - HTTP API 85 | # Using the command-line tool jq, we can parse the data and extract the value for our key. 86 | 87 | curl -H "X-Vault-Token:$VAULT_TOKEN" \ 88 | -XGET http://docker:8200/v1/secret/api-key 89 | 90 | curl -s -H "X-Vault-Token:$VAULT_TOKEN" \ 91 | -XGET http://docker:8200/v1/secret/api-key \ 92 | | jq -r .data.value 93 | 94 | # Step 8 - Consul Data 95 | # As Vault stores all the data as encrypted key/values in Consul, you can use the Consul UI to see the encrypted data. 
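# A write through the HTTP API mirrors the read above (a sketch; assumes the
# same VAULT_TOKEN and the default KV v1 backend mounted at secret/):
curl -s -H "X-Vault-Token:$VAULT_TOKEN" \
     -XPOST -d '{"value":"12345678"}' \
     http://docker:8200/v1/secret/api-key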
96 | -------------------------------------------------------------------------------- /provision/roles/vault/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /provision/roles/vault/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: "Shawn Ma" 4 | company: ThoughtWorks, Inc. 5 | license: BSD 6 | min_ansible_version: 2.0 7 | platforms: 8 | - name: Ubuntu 9 | versions: 10 | - trusty 11 | categories: 12 | - docker 13 | dependencies: [] 14 | -------------------------------------------------------------------------------- /provision/roles/vault/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: start vault server container 3 | docker_container: 4 | name: "{{ vault_name }}" 5 | image: "{{ vault_image }}:{{ vault_version }}" 6 | ports: 7 | - "{{ vault_port }}:8200" 8 | volumes: 9 | - "{{ vault_data_dir }}/conf:/vault/config" 10 | - "{{ vault_data_dir }}/logs:/vault/logs" 11 | - "{{ vault_data_dir }}/file:/vault/file" 12 | capabilities: 13 | - "IPC_LOCK" 14 | env: 15 | VAULT_LOCAL_CONFIG: '{"backend": {"file": {"path": "/vault/file"}},"listener": {"tcp":{"address": "0.0.0.0:8200","tls_disable":1}},"default_lease_ttl": "168h", "max_lease_ttl": "720h"}' 16 | command: "vault server -config=/vault/config" 17 | restart_policy: unless-stopped 18 | become: yes 19 | 20 | - name: wait for the vault server to start 21 | wait_for: 22 | port: "{{ vault_port }}" 23 | delay: 10 24 | 25 | - stat: 26 | path: "{{ vault_data_dir }}/keys" 27 | register: vault_keys 28 | become: yes 29 | 30 | - name: vault init 31 | shell: "docker exec {{ vault_name }} vault init -address=http://127.0.0.1:8200 > {{ vault_data_dir }}/keys" 32 | when: vault_keys.stat.exists == false 33 | become: yes 34 | 35 | - name: vault unseal 36 | shell: "docker exec {{ vault_name }} vault unseal -address=http://127.0.0.1:8200 $(grep 'Key {{ item }}:' {{ vault_data_dir }}/keys | awk '{print $NF}')" 37 | with_items: [1, 2, 3] 38 | when: vault_keys.stat.exists == false 39 | become: yes 40 | 41 | - name: vault root token 42 | shell: "grep 'Initial Root Token:' {{ vault_data_dir }}/keys | awk '{print $NF}'" 43 | register: vault_token 44 | become: yes 45 | 46 | - name: generate apikey configuration 47 | template: src=files/vault.j2 dest="{{ inventory_dir }}/group_vars/all/vault.yml" 48 | delegate_to: localhost 49 | -------------------------------------------------------------------------------- /provision/scaleworks: -------------------------------------------------------------------------------- 1 | [all] 2 | toc-common01 ansible_host=10.202.128.107 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 3 | toc-repo01 ansible_host=10.202.128.108 ansible_user=scaleworks ansible_ssh_private_key_file=~/.ssh/id_rsa 4 | toc-master01 ansible_host=10.202.128.109 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 5 | toc-master02 ansible_host=10.202.128.110 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 6 | toc-master03 ansible_host=10.202.128.111 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 7 | toc-infra01 ansible_host=10.202.128.112 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 8 | toc-infra02 ansible_host=10.202.128.113 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 9 | toc-infra03 
ansible_host=10.202.128.114 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 10 | toc-kube01 ansible_host=10.202.128.115 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 11 | toc-kube02 ansible_host=10.202.128.116 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 12 | toc-kube03 ansible_host=10.202.128.117 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 13 | toc-deliflow01 ansible_host=10.202.128.119 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 14 | toc-deliflow02 ansible_host=10.202.128.120 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 15 | toc-deliflow03 ansible_host=10.202.128.121 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 16 | toc-demo01 ansible_host=10.202.128.122 ansible_user=root ansible_ssh_private_key_file=~/.ssh/id_rsa 17 | 18 | [common] 19 | toc-common01 20 | 21 | [repository] 22 | toc-repo01 23 | 24 | [masters] 25 | toc-master01 26 | toc-master02 27 | toc-master03 28 | 29 | [infra] 30 | toc-infra01 31 | toc-infra02 32 | toc-infra03 33 | 34 | [kube] 35 | toc-kube01 36 | toc-kube02 37 | toc-kube03 38 | 39 | 40 | [deliflow] 41 | toc-deliflow01 42 | toc-deliflow02 43 | toc-deliflow03 44 | 45 | [demo] 46 | toc-demo01 47 | -------------------------------------------------------------------------------- /provision/scaleworks.retry: -------------------------------------------------------------------------------- 1 | toc-master01 2 | -------------------------------------------------------------------------------- /provision/test.yml: -------------------------------------------------------------------------------- 1 | # ansible-playbook -i provision/scaleworks provision/test.yml 2 | --- 3 | - hosts: localhost 4 | tasks: 5 | - rancher_apikey: 6 | url: 'http://10.202.128.79:8080/v1/' 7 | access_key: '2A984C1F5D0EE3D204BE' 8 | secret_key: 'TfAUTVH7xsrKioHkAokYvx1yAEk57Fyepa6d5G5r' 9 | name: aaaaa 10 | account: 1a1 11 | publicValue: bbbb 12 | secretValue: sadasd 13 | state: absent 14 | -------------------------------------------------------------------------------- /thoughtworks.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIC8DCCAdgCAQAwgZExCzAJBgNVBAYTAkNOMRAwDgYDVQQIEwdTSEFOTlhJMQ4w 3 | DAYDVQQHEwVYSSdBTjEVMBMGA1UEChMMVGhvdWdodFdvcmtzMQwwCgYDVQQLEwNU 4 | T0MxGDAWBgNVBAMTD3Rob3VnaHR3b3Jrcy5pbzEhMB8GCSqGSIb3DQEJARYSZmx5 5 | MndpbmRAZ21haWwuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA 6 | 0r2T4/BcqmepLczNlgFsbAX2juS+eZ4tdenyfF2s8QwrIipO9wrlLhGJCy7mRjIP 7 | yf1msPTWLpoy5WxPVY5IbI3Iz06AYNRaBejrdsWu7CbfbMbG6nQJIAXyr8iTzhMD 8 | TmaJvG3of8Tp8luw6SliFTnLs6YtUwe/AMmrIAqL5XhVSd6bZd7F121J3Q0t8RHm 9 | VlBtlgw2TQtRPVeY1t7jZ6x2NFxFNPlLVNv+UOByXSXPYAL+O/Dcb8e7jH4uEKas 10 | 0mg7+g8vb03vVlD85Crf3zuARC+mc9RrBqM9R8SDPmiKHCbgwBSTX1t+0dzxQRjB 11 | kdljL6Hzshw6AZRnamvCKQIDAQABoBkwFwYJKoZIhvcNAQkHMQoTCDEyMzQ1Njc4 12 | MA0GCSqGSIb3DQEBBQUAA4IBAQBRmaF42BT5LJ6WB4jh+IQTf4TxElc74Tunf9Cg 13 | 4cATtmJoJzuVBB6hKmQyjssgwDZ7d8byjwlr6QxU/cWPe4l0do1uG7W1fGRlaigm 14 | BZ8G/WngOkynVeHTbp/A+DzNIGN1l5pxN3fnEshwbNJt2nr9flHEwr5UjamRUrD6 15 | RkmypveWMOol12VPXNoEV2KDWN18SCSWoPtrf9HznsdAucbqG7NAxRqBh0csz8r0 16 | 3g11n0OkZHB3HiHCknTDObFpxy7UiIZpFpUHNbeh6RyNfUp/oTubLJ+0Z3ySsRHS 17 | JaXTmvGNUSgIWWLQ+NBb89+Y5n6LRO+Pz+s40gDXSkTHSPdA 18 | -----END CERTIFICATE REQUEST----- 19 | --------------------------------------------------------------------------------