├── .gitignore
├── Jenkinsfile
├── Jenkinsfile-onmerge
├── LICENSE
├── Makefile
├── README.md
├── Vagrantfile
├── ansible.cfg
├── aws.tf
├── cleanup.yml
├── fetch
└── .keep
├── group_vars
├── all
└── examples
│ └── volplugin
├── install_auth_proxy.yml
├── install_base.yml
├── install_contiv.yml
├── install_docker.yml
├── install_etcd.yml
├── install_scheduler.yml
├── library
└── ceph_facts
├── roles
├── ansible
│ └── tasks
│ │ └── main.yml
├── auth_proxy
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── auth-proxy.service
│ │ ├── contivRule.service
│ │ └── contivRule.sh
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ ├── templates
│ │ └── auth_proxy.j2
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
├── base
│ ├── files
│ │ ├── consul
│ │ ├── consul.service
│ │ └── volmaster.service
│ └── tasks
│ │ ├── main.yml
│ │ ├── redhat_tasks.yml
│ │ └── ubuntu_tasks.yml
├── bind
│ ├── files
│ │ └── resolv.conf
│ └── tasks
│ │ └── main.yml
├── ceph-common
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── cephdev.asc
│ │ ├── cephstable.asc
│ │ └── cephstableice.asc
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── checks
│ │ │ ├── check_mandatory_vars.yml
│ │ │ └── check_system.yml
│ │ ├── installs
│ │ │ ├── debian_ceph_repository.yml
│ │ │ ├── install_on_debian.yml
│ │ │ ├── install_on_redhat.yml
│ │ │ ├── install_rgw_on_debian.yml
│ │ │ ├── install_rgw_on_redhat.yml
│ │ │ └── redhat_ceph_repository.yml
│ │ ├── main.yml
│ │ ├── misc
│ │ │ └── system_tuning.yml
│ │ └── pre_requisites
│ │ │ ├── prerequisite_ice.yml
│ │ │ ├── prerequisite_rh_storage_cdn_install.yml
│ │ │ └── prerequisite_rh_storage_iso_install.yml
│ └── templates
│ │ ├── ceph-extra.repo
│ │ ├── ceph.conf.j2
│ │ ├── httpd.conf
│ │ ├── redhat_ice_repo.j2
│ │ ├── redhat_storage_repo.j2
│ │ ├── rgw.conf
│ │ └── s3gw.fcgi.j2
├── ceph-install
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── cephdev.asc
│ │ ├── cephstable.asc
│ │ └── cephstableice.asc
│ └── tasks
│ │ ├── installs
│ │ ├── debian_ceph_repository.yml
│ │ ├── install_on_debian.yml
│ │ ├── install_on_redhat.yml
│ │ ├── install_rgw_on_debian.yml
│ │ ├── install_rgw_on_redhat.yml
│ │ └── redhat_ceph_repository.yml
│ │ └── main.yml
├── ceph-mds
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── docker
│ │ ├── checks.yml
│ │ ├── fetch_configs.yml
│ │ ├── main.yml
│ │ ├── pre_requisite.yml
│ │ ├── selinux.yml
│ │ └── start_docker_mds.yml
│ │ ├── main.yml
│ │ └── pre_requisite.yml
├── ceph-mon
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── precise
│ │ │ └── 92-ceph
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── ceph_keys.yml
│ │ ├── create_mds_filesystems.yml
│ │ ├── deploy_monitors.yml
│ │ ├── docker
│ │ ├── checks.yml
│ │ ├── copy_configs.yml
│ │ ├── fetch_configs.yml
│ │ ├── main.yml
│ │ ├── pre_requisite.yml
│ │ ├── selinux.yml
│ │ └── start_docker_monitor.yml
│ │ ├── main.yml
│ │ ├── openstack_config.yml
│ │ ├── secure_cluster.yml
│ │ └── start_monitor.yml
├── ceph-osd
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── activate_osds.yml
│ │ ├── check_devices.yml
│ │ ├── docker
│ │ │ ├── checks.yml
│ │ │ ├── fetch_configs.yml
│ │ │ ├── main.yml
│ │ │ ├── pre_requisite.yml
│ │ │ ├── selinux.yml
│ │ │ └── start_docker_osd.yml
│ │ ├── main.yml
│ │ ├── osd_fragment.yml
│ │ ├── pre_requisite.yml
│ │ └── scenarios
│ │ │ ├── journal_collocation.yml
│ │ │ ├── osd_directory.yml
│ │ │ └── raw_multi_journal.yml
│ └── templates
│ │ └── osd.conf.j2
├── ceph-restapi
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── docker
│ │ ├── fetch_configs.yml
│ │ ├── main.yml
│ │ ├── pre_requisite.yml
│ │ └── start_docker_restapi.yml
│ │ ├── main.yml
│ │ ├── pre_requisite.yml
│ │ └── start_restapi.yml
├── ceph-rgw
│ ├── LICENSE
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── docker
│ │ │ ├── checks.yml
│ │ │ ├── fetch_configs.yml
│ │ │ ├── main.yml
│ │ │ ├── pre_requisite.yml
│ │ │ ├── selinux.yml
│ │ │ └── start_docker_rgw.yml
│ │ ├── main.yml
│ │ ├── openstack-keystone.yml
│ │ ├── pre_requisite.yml
│ │ └── start_radosgw.yml
│ └── templates
│ │ └── ceph.j2
├── consul
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── consul.service
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ └── consul.j2
├── contiv_cluster
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── clusterm.args
│ │ ├── clusterm.conf
│ │ └── collins.service
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ ├── clusterm.j2
│ │ └── collins.j2
├── contiv_network
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── aci-gw.service
│ │ ├── contiv_cni.conf
│ │ ├── netmaster.service
│ │ ├── netplugin.service
│ │ ├── v2plugin.service
│ │ └── v2plugin.sh
│ ├── tasks
│ │ ├── aci_tasks.yml
│ │ ├── cleanup.yml
│ │ ├── k8s_tasks.yml
│ │ ├── main.yml
│ │ ├── ovs.yml
│ │ ├── ovs_cleanup.yml
│ │ ├── services.yml
│ │ ├── v2plugin.yml
│ │ └── v2plugin_local_install.yml
│ └── templates
│ │ ├── aci_gw.j2
│ │ ├── netmaster.j2
│ │ ├── netplugin.j2
│ │ └── netplugin_k8s_config.j2
├── contiv_storage
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── volmaster
│ │ ├── volmaster.service
│ │ ├── volplugin.service
│ │ ├── volsupervisor
│ │ └── volsupervisor.service
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ └── volplugin.j2
├── dev
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ ├── os_agnostic_tasks.yml
│ │ ├── redhat_tasks.yml
│ │ └── ubuntu_tasks.yml
├── docker
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── create_docker_device.yml
│ │ ├── main.yml
│ │ ├── redhat_install_tasks.yml
│ │ └── ubuntu_install_tasks.yml
│ ├── templates
│ │ ├── docker-svc.j2
│ │ └── env.conf.j2
│ └── vars
│ │ └── main.yml
├── etcd
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── molecule-requirements.txt
│ ├── molecule
│ │ └── default
│ │ │ ├── create.yml
│ │ │ ├── destroy.yml
│ │ │ ├── molecule.yml
│ │ │ ├── playbook.yml
│ │ │ └── prepare.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ ├── etcd.service.j2
│ │ └── etcd_env_file.j2
├── gluster
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── install_gluster.yml
│ │ └── main.yml
│ └── templates
│ │ └── peers.sh.j2
├── haproxy
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── precise
│ │ │ └── haproxy
│ ├── handlers
│ │ ├── main.yml
│ │ └── precise.yml
│ ├── tasks
│ │ ├── main.yml
│ │ └── precise.yml
│ └── templates
│ │ └── precise
│ │ └── haproxy.cfg
├── kubernetes
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── certs
│ │ │ ├── basic_auth.csv
│ │ │ ├── ca.crt
│ │ │ ├── known_tokens.csv
│ │ │ ├── kubecfg.crt
│ │ │ ├── kubecfg.key
│ │ │ ├── server.cert
│ │ │ └── server.key
│ │ └── kubernetes.service
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ ├── kubernetes.j2
│ │ ├── master_manifest.j2
│ │ └── worker_manifest.j2
├── nfs
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── cleanup.yml
│ │ ├── client.yml
│ │ ├── main.yml
│ │ └── server.yml
├── scheduler_stack
│ ├── defaults
│ │ └── main.yml
│ └── meta
│ │ └── main.yml
├── serf
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── serf.service
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── serf.j2
├── swarm
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── swarm.service
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ └── swarm.j2
├── test
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── os_agnostic_tasks.yml
│ │ ├── redhat_tasks.yml
│ │ └── ubuntu_tasks.yml
│ └── templates
│ │ └── vbox.service.j2
├── ucarp
│ ├── files
│ │ ├── ucarp.service
│ │ └── ucarp
│ │ │ ├── vip_down.sh
│ │ │ └── vip_up.sh
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── install_ucarp.yml
│ │ └── main.yml
│ └── templates
│ │ └── ucarp.sh.j2
├── ucp
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── ucp.service
│ ├── tasks
│ │ ├── cleanup.yml
│ │ └── main.yml
│ └── templates
│ │ └── ucp.j2
└── vagrant
│ ├── tasks
│ └── main.yml
│ └── templates
│ ├── bash_profile.j2
│ └── bashrc.j2
├── site.yml
├── test
└── files
│ └── insecure_private_key
├── uninstall_auth_proxy.yml
├── uninstall_contiv.yml
├── uninstall_docker.yml
├── uninstall_etcd.yml
├── uninstall_scheduler.yml
├── uninstall_v2plugin.yml
└── vcenter.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | #ide files
2 | *.swp
3 |
4 | # ansible fetch dir
5 | fetch/*
6 |
7 | #vagrant temp files
8 | .vagrant*
9 |
10 | # Molecule runtime data
11 | .molecule
12 |
13 | # Any virtual environments
14 | venv
15 |
16 | terraform.tfstate*
17 | terraform.tfvars
18 |
--------------------------------------------------------------------------------
/Jenkinsfile:
--------------------------------------------------------------------------------
1 | #!groovy
2 | pipeline {
3 | agent { label 'public' }
4 | options {
5 | timeout(time: 30, unit: 'MINUTES')
6 | }
7 | stages {
8 | stage('Test first time config') {
9 | steps {
10 | sh '''
11 | set -euo pipefail
12 | make test-up
13 | '''
14 | }
15 | }
16 | stage('Test second time provisioning') {
17 | steps {
18 | sh '''
19 | set -euo pipefail
20 | make test-provision
21 | '''
22 | }
23 | }
24 | stage('Test cleanup') {
25 | steps {
26 | sh '''
27 | set -euo pipefail
28 | make test-cleanup
29 | '''
30 | }
31 | }
32 | }
33 | post {
34 | always {
35 | sh '''
36 | set -euo pipefail
37 | vagrant destroy -f
38 | '''
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
/Jenkinsfile-onmerge:
--------------------------------------------------------------------------------
1 | Jenkinsfile
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2017 Cisco Systems Inc. All rights reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | http://www.apache.org/licenses/LICENSE-2.0
7 |
8 | Unless required by applicable law or agreed to in writing, software
9 | distributed under the License is distributed on an "AS IS" BASIS,
10 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 | See the License for the specific language governing permissions and
12 | limitations under the License.
13 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: test-up test-provision test-cleanup test-test test-etcd
2 |
3 | test-up:
4 | vagrant up
5 |
6 | test-provision:
7 | vagrant provision
8 |
9 | test-cleanup:
10 | CONTIV_ANSIBLE_PLAYBOOK="./cleanup.yml" CONTIV_ANSIBLE_TAGS="all" vagrant provision
11 |
12 | test-test:
13 | CONTIV_ANSIBLE_TAGS="prebake-for-test" vagrant provision
14 |
15 | test-etcd:
16 | cd roles/etcd && virtualenv venv && . venv/bin/activate \
17 | && pip install --upgrade pip \
18 | && pip install -r molecule-requirements.txt \
19 | && molecule converge && molecule destroy \
20 | || (molecule destroy && exit 1)
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [Build Status](http://contiv.ngrok.io/view/Ansible%20CI/job/Ansible%20Push%20Build%20Master/)
2 |
3 | # Ansible Playbooks
4 |
5 | These are the Ansible playbooks we use for configuration management in a Contiv service cluster.
6 |
7 | This project is consumed by vendoring it into other repositories; a stand-alone usage sketch follows the list below.
8 |
9 | The following projects use this work:
10 |
11 | - **[contiv/build](https://github.com/contiv/build)**: uses it to generate Vagrant boxes with Packer
12 | - **[contiv/lab](https://github.com/contiv/lab)**: uses it to configure dev and test host environments
13 | - **[contiv/volplugin](https://github.com/contiv/volplugin)**: uses it to provision test VM environments
14 | - **[contiv/cluster](https://github.com/contiv/cluster)**: uses it to manage the node commission/decommission workflow in a Contiv cluster
15 |
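If you run the playbooks stand-alone rather than through one of the projects above, the plays in `install_*.yml` target inventory groups named `netplugin-master`, `netplugin-worker`, and `netplugin-node`. A minimal inventory sketch; the hostnames, interface name, and addresses below are illustrative assumptions, not shipped defaults:

```yaml
# hypothetical inventory.yml -- adjust the placeholder values for your environment
all:
  vars:
    env: {}                        # passed to plays via `environment: '{{ env }}'`
    control_interface: eth1
    netmaster_ip: 192.168.2.10
  children:
    netplugin-master:
      hosts:
        node1:
    netplugin-worker:
      hosts:
        node2:
    netplugin-node:                # all nodes, masters and workers alike
      children:
        netplugin-master:
        netplugin-worker:
```

It can then be run with, for example, `ansible-playbook -i inventory.yml site.yml`.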
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | callback_whitelist = profile_tasks
3 |
--------------------------------------------------------------------------------
/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs service cleanup for contiv services.
3 | #
4 | # Note: cleanup is best effort and should not abort the run on failure, so we set ignore_errors to yes here
5 |
6 | - hosts: all
7 | become: true
8 | ignore_errors: yes
9 | tasks:
10 | - include_vars: roles/{{ item }}/defaults/main.yml
11 | with_items:
12 | - "contiv_network"
13 | - "contiv_storage"
14 | - "swarm"
15 | - "kubernetes"
16 | - "ucp"
17 | - "docker"
18 | - "etcd"
19 | - include: roles/{{ item }}/tasks/cleanup.yml
20 | with_items:
21 | - ucarp
22 | - contiv_network
23 | - contiv_storage
24 | - swarm
25 | - kubernetes
26 | - ucp
27 | - etcd
28 | - nfs
29 | - docker
30 | static: no
31 | ignore_errors: yes
32 |
--------------------------------------------------------------------------------
/fetch/.keep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/contiv/ansible/5c5b9a0caa5691da4a1f1e463bde553ecf890ade/fetch/.keep
--------------------------------------------------------------------------------
/group_vars/all:
--------------------------------------------------------------------------------
1 | ---
2 | # Variables here are applicable to all host groups
3 |
4 | # ceph specific variables
5 | ceph_stable: true # use ceph stable branch
6 | cephx: true
7 | cephx_require_signatures: false # Kernel RBD does NOT support signatures!
8 | cephx_cluster_require_signatures: true
9 | cephx_service_require_signatures: false
10 | crush_location: false
11 | osd_crush_location: "'root={{ ceph_crush_root }} rack={{ ceph_crush_rack }} host={{ ansible_hostname }}'"
12 | monitor_interface: "{{ control_interface }}"
13 | ceph_stable_ice_temp_path: /opt/ICE/ceph-repo/
14 | ceph_stable_ice_kmod: ""
15 |
16 | #host variables
17 | node_name: "{{ inventory_hostname }}"
18 | node_addr: "{{ hostvars[inventory_hostname]['ansible_' + control_interface]['ipv4']['address'] }}"
19 | validate_certs: "yes"
20 | service_vip: "{{ netmaster_ip }}"
21 |
22 | # The following variables are used in one or more roles, but have no good default value to pick from.
23 | # They are left commented out so that playbooks fail early with a "variable not defined" error.
24 |
25 | # env:
26 | # control_interface:
27 |
28 | ## the comparison this way allows us to override ucarp when netmaster_ip is
29 | ## non-nil. If it is nil, service_ip should be set to something unique.
30 | # netmaster_ip:
31 |
32 | host_capability: "can-run-user-containers, storage"
33 |
34 | etcd_peer_advertise_interface: "{{ control_interface }}"
35 |
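# A minimal sketch of how a deployment might supply the variables left undefined
# above (all values here are illustrative assumptions), for example in an
# inventory group_vars file or via --extra-vars:
#
#   env: {}
#   control_interface: eth1
#   netmaster_ip: 192.168.2.10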
--------------------------------------------------------------------------------
/install_auth_proxy.yml:
--------------------------------------------------------------------------------
1 | # netplugin-master hosts set up the proxy
2 | - name: Install auth proxy
3 | hosts: netplugin-master
4 | become: true
5 | roles:
6 | - { role: auth_proxy }
7 |
--------------------------------------------------------------------------------
/install_base.yml:
--------------------------------------------------------------------------------
1 | # netplugin-node hosts set up netmaster/netplugin in a cluster
2 | - name: Install Base
3 | hosts: netplugin-node
4 | become: true
5 | environment: '{{ env }}'
6 | roles:
7 | - { role: base }
8 |
--------------------------------------------------------------------------------
/install_contiv.yml:
--------------------------------------------------------------------------------
1 | # netplugin-master hosts set up netmaster; the remaining hosts run netplugin
2 | - name: Install Contiv to Master
3 | hosts: netplugin-master
4 | become: true
5 | environment: '{{ env }}'
6 | roles:
7 | - { role: contiv_network, scheduler_provider: native-swarm, run_as: master }
8 | - name: Install Contiv to Workers
9 | hosts: netplugin-worker
10 | become: true
11 | environment: '{{ env }}'
12 | roles:
13 | - { role: contiv_network, scheduler_provider: native-swarm, run_as: worker }
14 |
--------------------------------------------------------------------------------
/install_docker.yml:
--------------------------------------------------------------------------------
1 | # netplugin-node hosts set up netmaster/netplugin in a cluster
2 | - name: Install docker
3 | hosts: netplugin-node
4 | become: true
5 | environment: '{{ env }}'
6 | roles:
7 | - { role: docker, etcd_client_port1: 2379 }
8 |
--------------------------------------------------------------------------------
/install_etcd.yml:
--------------------------------------------------------------------------------
1 | # netplugin-master hosts run etcd as masters; netplugin-worker hosts join as workers
2 | - name: Install etcd to master
3 | hosts: netplugin-master
4 | become: true
5 | environment: '{{ env }}'
6 | roles:
7 | - { role: etcd, run_as: master }
8 |
9 | - name: Install etcd to worker
10 | hosts: netplugin-worker
11 | become: true
12 | environment: '{{ env }}'
13 | roles:
14 | - { role: etcd, run_as: worker }
15 |
--------------------------------------------------------------------------------
/install_scheduler.yml:
--------------------------------------------------------------------------------
1 | # Install the scheduler stack: managers on the master nodes, the remaining hosts as workers
2 | - name: Install scheduler to master
3 | hosts: netplugin-master
4 | become: true
5 | environment: '{{ env }}'
6 | roles:
7 | - { role: scheduler_stack, docker_api_port: 2385, run_as: master }
8 |
9 | - name: Install scheduler to workers
10 | hosts: netplugin-worker
11 | become: true
12 | environment: '{{ env }}'
13 | roles:
14 | - { role: scheduler_stack, docker_api_port: 2385, run_as: worker }
15 |
--------------------------------------------------------------------------------
/roles/ansible/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for installing ansible
3 |
4 | - name: install ansible (redhat)
5 | yum:
6 | name: ansible
7 | state: latest
8 | when: ansible_os_family == "RedHat"
9 |
10 | - name: add ansible apt repository (debian)
11 | apt_repository:
12 | repo: ppa:ansible/ansible
13 | state: present
14 | validate_certs: "{{ validate_certs }}"
15 | when: ansible_os_family == "Debian"
16 |
17 | - name: install ansible (debian)
18 | apt: name=ansible state=latest
19 | when: ansible_os_family == "Debian"
20 |
--------------------------------------------------------------------------------
/roles/auth_proxy/README.md:
--------------------------------------------------------------------------------
1 | auth_proxy
2 | =========
3 |
4 | Role to install Contiv API Proxy and UI
5 |
6 | Requirements
7 | ------------
8 |
9 | Docker needs to be installed to run the auth proxy container.
10 |
11 | Role Variables
12 | --------------
13 |
14 | * `auth_proxy_image`: the image (with version tag) used to spin up the auth proxy container.
15 | * `auth_proxy_cert`, `auth_proxy_key`: files to use for the proxy server certificates.
16 | * `auth_proxy_port`: the host port; `auth_proxy_datastore`: the cluster data store address.
17 |
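For example, a minimal vars-file sketch overriding some of them; the datastore address matches the example playbook below, and the remaining values are this role's defaults, shown here for illustration:

```yaml
auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
auth_proxy_port: 10000
auth_proxy_datastore: "etcd://netmaster:2379"
auth_proxy_cert: "/var/contiv/certs/auth_proxy_cert.pem"
auth_proxy_key: "/var/contiv/certs/auth_proxy_key.pem"
```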
18 | Dependencies
19 | ------------
20 |
21 | docker
22 |
23 | Example Playbook
24 | ----------------
25 |
26 | - hosts: netplugin-node
27 | become: true
28 | roles:
29 | - { role: auth_proxy, auth_proxy_port: 10000, auth_proxy_datastore: etcd://netmaster:2379 }
30 |
--------------------------------------------------------------------------------
/roles/auth_proxy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | auth_proxy_image: "contiv/auth_proxy:1.0.0-beta.2"
2 | auth_proxy_port: 10000
3 | contiv_certs: "/var/contiv/certs"
4 | auth_proxy_cert: "{{ contiv_certs }}/auth_proxy_cert.pem"
5 | auth_proxy_key: "{{ contiv_certs }}/auth_proxy_key.pem"
6 | auth_proxy_datastore: "{{ cluster_store }}"
7 | auth_proxy_binaries: "/var/contiv_cache"
8 | auth_proxy_local_install: False
9 | auth_proxy_rule_comment: "contiv_auth proxy service"
10 | auth_proxy_netmaster: "localhost:9999"
11 |
--------------------------------------------------------------------------------
/roles/auth_proxy/files/auth-proxy.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Contiv Proxy and UI
3 | After=auditd.service systemd-user-sessions.service time-sync.target docker.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/auth_proxy.sh start
7 | ExecStop=/usr/bin/auth_proxy.sh stop
8 | KillMode=control-group
9 | Restart=on-failure
10 | RestartSec=10
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/auth_proxy/files/contivRule.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=run contiv rule service
3 |
4 | [Service]
5 | Type=oneshot
6 | # firewalld acquires the lock within ~3s but may not be finished yet,
7 | # and contivRule.sh waits for the lock to be released
8 | ExecStartPre=/bin/sleep 3
9 | ExecStart=/usr/bin/contivRule.sh
10 | StandardOutput=journal
11 |
12 | [Install]
13 | WantedBy=multi-user.target firewalld.service
14 |
--------------------------------------------------------------------------------
/roles/auth_proxy/files/contivRule.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 | IFS="
4 | "
5 | rules_file="/etc/contiv/rules.conf"
6 | if [ -f "$rules_file" ]; then
7 | while read line; do
8 | eval iptables -w 10 $line
9 | done < $rules_file
10 | else
11 | mkdir -p "/etc/contiv"
12 | touch $rules_file
13 | iptables -S | sed '/contiv/!d;s/^-A/-I/' > $rules_file
14 | fi
15 |
--------------------------------------------------------------------------------
/roles/auth_proxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for auth_proxy
3 |
--------------------------------------------------------------------------------
/roles/auth_proxy/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: stop auth-proxy container
4 | service: name=auth-proxy state=stopped
5 |
6 | - name: cleanup iptables for auth proxy
7 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
8 | become: true
9 | with_items:
10 | - "{{ auth_proxy_port }}"
11 |
--------------------------------------------------------------------------------
/roles/auth_proxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for auth_proxy
3 | - name: setup iptables for auth proxy
4 | shell: >
5 | ( iptables -L INPUT | grep "{{ auth_proxy_rule_comment }} ({{ item }})" ) || \
6 | iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ auth_proxy_rule_comment }} ({{ item }})"
7 | become: true
8 | with_items:
9 | - "{{ auth_proxy_port }}"
10 |
11 | # Load the auth-proxy image from a local tar. Ignore any errors to handle the
12 | # case where the image has not been built/bundled locally
13 | - name: copy auth-proxy image
14 | copy: src={{ auth_proxy_binaries }}/auth-proxy-image.tar dest=/tmp/auth-proxy-image.tar
15 | when: auth_proxy_local_install == True
16 |
17 | - name: load auth-proxy image
18 | shell: docker load -i /tmp/auth-proxy-image.tar
19 | when: auth_proxy_local_install == True
20 |
21 | - name: create cert folder for proxy
22 | file: path=/var/contiv/certs state=directory
23 |
24 | - name: copy shell script for starting auth-proxy
25 | template: src=auth_proxy.j2 dest=/usr/bin/auth_proxy.sh mode=u=rwx,g=rx,o=rx
26 |
27 | - name: copy cert for starting auth-proxy
28 | copy: src=cert.pem dest=/var/contiv/certs/auth_proxy_cert.pem mode=u=rw,g=r,o=r
29 |
30 | - name: copy key for starting auth-proxy
31 | copy: src=key.pem dest=/var/contiv/certs/auth_proxy_key.pem mode=u=rw,g=r,o=r
32 |
33 | - name: copy systemd units for auth-proxy
34 | copy: src=auth-proxy.service dest=/etc/systemd/system/auth-proxy.service
35 |
36 | - name: start auth-proxy container
37 | systemd: name=auth-proxy daemon_reload=yes state=started enabled=yes
38 |
39 | - name: copy contivRule.sh file
40 | copy: src=contivRule.sh dest=/usr/bin/contivRule.sh mode=u=rwx,g=rx,o=rx
41 |
42 | - name: copy systemd units for contiv rules
43 | copy: src=contivRule.service dest=/etc/systemd/system/contivRule.service
44 |
45 | - name: start contivRule service
46 | systemd: name=contivRule daemon_reload=yes state=started enabled=yes
47 |
--------------------------------------------------------------------------------
/roles/auth_proxy/templates/auth_proxy.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 start/stop"
4 | if [ $# -ne 1 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | case $1 in
10 | start)
11 | set -e
12 |
13 | /usr/bin/docker run --rm \
14 | -p 10000:{{ auth_proxy_port }} \
15 | --net=host --name=auth-proxy \
16 | -e NO_NETMASTER_STARTUP_CHECK=1 \
17 | -v /var/contiv:/var/contiv \
18 | {{ auth_proxy_image }} \
19 | --tls-key-file={{ auth_proxy_key }} \
20 | --tls-certificate={{ auth_proxy_cert }} \
21 | --data-store-address={{ auth_proxy_datastore }} \
22 | --netmaster-address={{ auth_proxy_netmaster }} \
23 | --listen-address=:10000
24 | ;;
25 |
26 | stop)
27 | # don't stop on error
28 | /usr/bin/docker stop auth-proxy
29 | /usr/bin/docker rm -f -v auth-proxy
30 | ;;
31 |
32 | *)
33 | echo USAGE: $usage
34 | exit 1
35 | ;;
36 | esac
37 |
--------------------------------------------------------------------------------
/roles/auth_proxy/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
--------------------------------------------------------------------------------
/roles/auth_proxy/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - auth_proxy
6 |
--------------------------------------------------------------------------------
/roles/auth_proxy/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for auth_proxy
3 |
--------------------------------------------------------------------------------
/roles/base/files/consul:
--------------------------------------------------------------------------------
1 | CONSUL_OPTS=""
2 |
--------------------------------------------------------------------------------
/roles/base/files/consul.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Consul
3 | After=auditd.service systemd-user-sessions.service time-sync.target
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/consul
7 | ExecStart=/usr/bin/consul agent -server -data-dir /var/lib/consul $CONSUL_OPTS
8 | KillMode=process
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
--------------------------------------------------------------------------------
/roles/base/files/volmaster.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Volume Master
3 | After=auditd.service systemd-user-sessions.service time-sync.target
4 |
5 | [Service]
6 | ExecStart=/opt/golang/bin/volmaster /etc/volmaster.json
7 | KillMode=process
8 |
--------------------------------------------------------------------------------
/roles/base/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for installing base packages
3 |
4 | - include: ubuntu_tasks.yml
5 | when: ansible_os_family == "Debian"
6 | tags:
7 | - prebake-for-dev
8 |
9 | - include: redhat_tasks.yml
10 | when: ansible_os_family == "RedHat"
11 | tags:
12 | - prebake-for-dev
13 |
--------------------------------------------------------------------------------
/roles/base/tasks/redhat_tasks.yml:
--------------------------------------------------------------------------------
1 | # install epel-release first to ensure the extra packages can be installed later
2 | - name: install epel release package (centos)
3 | yum:
4 | name: epel-release
5 | state: latest
6 | when: ansible_distribution != "RedHat"
7 |
8 | - name: install/upgrade base packages (redhat)
9 | yum:
10 | name: "{{ item }}"
11 | update_cache: true
12 | state: latest
13 | with_items:
14 | - yum-utils
15 | - ntp
16 | - unzip
17 | - bzip2
18 | - curl
19 | - python-requests # XXX required by ceph repo, but it has a bad package on it
20 | - bash-completion
21 | - libselinux-python
22 | - e2fsprogs
23 | - openssh-server
24 |
25 | - name: install and start ntp
26 | service: name=ntpd state=started enabled=yes
27 |
--------------------------------------------------------------------------------
/roles/base/tasks/ubuntu_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: upgrade system (debian)
2 | apt:
3 | update_cache: true
4 | upgrade: dist
5 |
6 | - name: install base packages (debian)
7 | apt:
8 | name: "{{ item }}"
9 | state: latest
10 | with_items:
11 | - ntp
12 | - unzip
13 | - bzip2
14 | - curl
15 | - python-software-properties
16 | - bash-completion
17 | - python-selinux
18 | - e2fsprogs
19 | - openssh-server
20 |
--------------------------------------------------------------------------------
/roles/bind/files/resolv.conf:
--------------------------------------------------------------------------------
1 | nameserver 127.0.0.1
2 |
--------------------------------------------------------------------------------
/roles/bind/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: install bind (redhat)
2 | yum:
3 | name: bind
4 | state: present
5 | when: ansible_os_family == "RedHat"
6 |
7 | - name: install bind (debian)
8 | apt:
9 | name: bind9
10 | state: present
11 | when: ansible_os_family == "Debian"
12 |
13 | - name: start/enable bind (redhat)
14 | service:
15 | name: named
16 | enabled: true
17 | state: started
18 | when: ansible_os_family == "RedHat"
19 |
20 | - name: start/enable bind (debian)
21 | service:
22 | name: bind9
23 | enabled: true
24 | state: started
25 | when: ansible_os_family == "Debian"
26 |
27 | - name: rewrite resolv.conf
28 | copy:
29 | src: resolv.conf
30 | dest: /etc/resolv.conf
31 |
32 |
--------------------------------------------------------------------------------
/roles/ceph-common/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: Ceph Common
2 |
3 | This role does several things prior to bootstrapping your Ceph cluster:
4 |
5 | * Checks the system and validates that Ceph can be installed
6 | * Tunes the operating system if the node is an OSD server
7 | * Installs Ceph
8 | * Generates `ceph.conf`
9 |
10 | # Requirements
11 |
12 | Nothing, it runs out of the box.
13 |
14 | # Role variables
15 |
16 | Have a look at `defaults/main.yml`.
17 |
18 | ## Mandatory variables
19 |
20 | * Install source, choose one of these:
21 | * `ceph_stable`
22 | * `ceph_dev`
23 | * `ceph_stable_ice`
24 | * `ceph_stable_rh_storage`
25 | * `journal_size`
26 | * `monitor_interface`
27 | * `public_network`
28 | * `cluster_network`
29 |
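For example, a minimal `group_vars` sketch of these mandatory settings (all values are illustrative assumptions, not recommendations):

```
ceph_stable: true                  # pick exactly one install source
journal_size: 1024                 # in MB
monitor_interface: eth1
public_network: 192.168.42.0/24
cluster_network: 192.168.43.0/24
```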
30 | ## Handlers
31 |
32 | * update apt cache
33 | * restart ceph-mon
34 | * restart ceph-osd
35 | * restart ceph-mds
36 | * restart ceph-rgw
37 | * restart ceph-restapi
38 | * restart apache2
39 |
40 | # Dependencies
41 |
42 | None
43 |
44 | # Example Playbook
45 |
46 | ```
47 | - hosts: servers
48 | remote_user: ubuntu
49 | roles:
50 | - { role: leseb.ceph-common }
51 | ```
52 |
53 | # Misc
54 |
55 | This role is a **mandatory** dependency for the following roles:
56 |
57 | * ceph-mon
58 | * ceph-osd
59 | * ceph-mds
60 | * ceph-rgw
61 | * ceph-restapi
62 |
63 | # Contribution
64 |
65 | **THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**.
66 | **PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**.
67 |
68 | # License
69 |
70 | Apache
71 |
72 | # Author Information
73 |
74 | This role was created by [Sébastien Han](http://sebastien-han.fr/).
75 |
--------------------------------------------------------------------------------
/roles/ceph-common/files/cephdev.asc:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 | Version: GnuPG v1.4.10 (GNU/Linux)
3 |
4 | mQGiBE1Rr28RBADCxdpLV3ea9ocpS/1+UCvHqD5xjmlw/9dmji4qrUX0+IhPMNuA
5 | GBBt2CRaR7ygMF5S0NFXooegph0/+NT0KisLIuhUI3gde4SWb5jsb8hpGUse9MC5
6 | DN39P46zZSpepIMlQuQUkge8W/H2qBu10RcwQhs7o2fZ1zK9F3MmRCkBqwCggpap
7 | GsOgE2IlWjcztmE6xcPO0wED/R4BxTaQM+jxIjylnHgn9PYy6795yIc/ZoYjNnIh
8 | QyjqbLWnyzeTmjPBwcXNljKqzEoA/Cjb2gClxHXrYAw7bGu7wKbnqhzdghSx7ab+
9 | HwIoy/v6IQqv+EXZgYHonqQwqtgfAHp5ON2gWu03cHoGkXfmA4qZIoowqMolZhGo
10 | cF30A/9GotDdnMlqh8bFBOCMuxfRow7H8RpfL0fX7VHA0knAZEDk2rNFeebL5QKH
11 | GNJm9Wa6JSVj1NUIaz4LHyravqXi4MXzlUqauhLHw1iG+qwZlPM04z+1Dj6A+2Hr
12 | b5UxI/I+EzmO5OYa38YWOqybNVBH0wO+sMCpdBq0LABa8X29LbRPQ2VwaCBhdXRv
13 | bWF0ZWQgcGFja2FnZSBidWlsZCAoQ2VwaCBhdXRvbWF0ZWQgcGFja2FnZSBidWls
14 | ZCkgPHNhZ2VAbmV3ZHJlYW0ubmV0PohgBBMRAgAgAhsDBgsJCAcDAgQVAggDBBYC
15 | AwECHgECF4AFAlEUm1YACgkQbq6uIgPDlRqTUACeMqJ+vwatwb+y/KWeNfmgtQ8+
16 | kDwAn0MHwY42Wmb7FA891j88enooCdxRuQQNBE1Rr28QEACKG04kxGY1cwGoInHV
17 | P6z1+8oqGiaiYWFflYRtSiwoUVtl30T1sMOSzoEvmauc+rmBBfsyaBb8DLDUIgGK
18 | v1FCOY/tfqnOyQXotPjgaLeCtK5A5Z5D212wbskf5fRHAxiychwKURiEeesRa7EW
19 | rF6ohFxOTy9NOlFi7ctusShw6Q2kUtN7bQCX9hJdYs7PYQXvCXvW8DNt7IitF7Mp
20 | gMHNcj0wik6p38I4s7pqK6mqP4AXVVSWbJKr/LSz8bI8KhWRAT7erVAZf6FElR2x
21 | ZVr3c4zsE2HFpnZTsM5y/nj8fUkgKGl8OfBuUoh+MCVfnPmE6sgWfDTKkwWtUcmL
22 | 6V9UQ1INUJ3sk+XBY9SMNbOn04su9FjQyNEMI/3VK7yuyKBRAN7IIVgP2ch499m6
23 | +YFV9ZkG3JSTovNiqSpQouW7YPkS+8mxlPo03LQcU5bHeacBl0T8Xjlvqu6q279E
24 | liHul4huKL0+myPN4DtmOTh/kwgSy3BGCBdS+wfAJSZcuKI7pk7pHGCdUjNMHQZm
25 | PFbwzp33bVLd16gnAx0OW5DOn6l0VfgIQNSJ2rn7WZ5jdyg/Flp2VlWVtAHFLzkC
26 | a+LvQ5twSuzrV/VipSr3xz3pTDLY+ZxDztvrgA6AST8+sdq6uQTYjwUQV0wzanvp
27 | 9hkC5eqRY6YlzcgMkWFv8DCIEwADBQ//ZQaeVmG6T5vyfXf2JrCipmI4MAdO+ezE
28 | tWE82wgixlCvvm26UmUejCYgtD6DmwY/7/bIjvJDhUwP0+hAHHOpR62gncoMtbMr
29 | yHpm3FvYH58JNk5gx8ZA322WEc2GCRCQzrMQoMKBcpZY/703GpQ4l3RZ7/25gq7A
30 | NohV5zeddFQftc05PMBBJLU3U+lrnahJS1WaOXNQzS6oVj9jNda1jkgcQni6QssS
31 | IMT6rAPsVbGJhe9mxr2VWdQ90QlubpszIeSJuqqJxLwqH8XHXZmQOYxmyVP9a3pF
32 | qWDmsNxDA8ttYnMIc+nUAgCDJ84ScwQ1GvoCUD1b1cFNzvvhEHsNb4D/XbdrFcFG
33 | wEkeyivUsojdq2YnGjYSgauqyNWbeEgBrWzUe5USYysmziL/KAubcUjIbeRGxyPS
34 | 6iQ2kbvfEJJPgocWTfLs5j61FObO+MVlj+PEmxWbcsIRv/pnG2V2FPJ8evhzgvp7
35 | cG9imZPM6dWHzc/ZFdi3Bcs51RtStsvPqXv4icKIi+01h1MLHNBqwuUkIiiK7ooM
36 | lvnp+DiEsVSuYYKBdGTi+4+nduuYL2g8CTNJKZuC46dY7EcE3lRYZlxl7dwN3jfL
37 | PRlnNscs34dwhZa+b70Flia0U1DNF4jrIFFBSHD3TqMg0Z6kxp1TfxpeGOLOqnBW
38 | rr0GKehu9CGISQQYEQIACQIbDAUCURSbegAKCRBurq4iA8OVGv9TAJ9EeXVrRS3p
39 | PZkT1R21FszUc9LvmgCeMduh5IPGFWSx9MjUc7/j1QKYm7g=
40 | =per8
41 | -----END PGP PUBLIC KEY BLOCK-----
42 |
--------------------------------------------------------------------------------
/roles/ceph-common/files/cephstable.asc:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 | Version: GnuPG v1
3 |
4 | mQINBFX4hgkBEADLqn6O+UFp+ZuwccNldwvh5PzEwKUPlXKPLjQfXlQRig1flpCH
5 | E0HJ5wgGlCtYd3Ol9f9+qU24kDNzfbs5bud58BeE7zFaZ4s0JMOMuVm7p8JhsvkU
6 | C/Lo/7NFh25e4kgJpjvnwua7c2YrA44ggRb1QT19ueOZLK5wCQ1mR+0GdrcHRCLr
7 | 7Sdw1d7aLxMT+5nvqfzsmbDullsWOD6RnMdcqhOxZZvpay8OeuK+yb8FVQ4sOIzB
8 | FiNi5cNOFFHg+8dZQoDrK3BpwNxYdGHsYIwU9u6DWWqXybBnB9jd2pve9PlzQUbO
9 | eHEa4Z+jPqxY829f4ldaql7ig8e6BaInTfs2wPnHJ+606g2UH86QUmrVAjVzlLCm
10 | nqoGymoAPGA4ObHu9X3kO8viMBId9FzooVqR8a9En7ZE0Dm9O7puzXR7A1f5sHoz
11 | JdYHnr32I+B8iOixhDUtxIY4GA8biGATNaPd8XR2Ca1hPuZRVuIiGG9HDqUEtXhV
12 | fY5qjTjaThIVKtYgEkWMT+Wet3DPPiWT3ftNOE907e6EWEBCHgsEuuZnAbku1GgD
13 | LBH4/a/yo9bNvGZKRaTUM/1TXhM5XgVKjd07B4cChgKypAVHvef3HKfCG2U/DkyA
14 | LjteHt/V807MtSlQyYaXUTGtDCrQPSlMK5TjmqUnDwy6Qdq8dtWN3DtBWQARAQAB
15 | tCpDZXBoLmNvbSAocmVsZWFzZSBrZXkpIDxzZWN1cml0eUBjZXBoLmNvbT6JAjgE
16 | EwECACIFAlX4hgkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOhKwsBG
17 | DzmUXdIQAI8YPcZMBWdv489q8CzxlfRIRZ3Gv/G/8CH+EOExcmkVZ89mVHngCdAP
18 | DOYCl8twWXC1lwJuLDBtkUOHXNuR5+Jcl5zFOUyldq1Hv8u03vjnGT7lLJkJoqpG
19 | l9QD8nBqRvBU7EM+CU7kP8+09b+088pULil+8x46PwgXkvOQwfVKSOr740Q4J4nm
20 | /nUOyTNtToYntmt2fAVWDTIuyPpAqA6jcqSOC7Xoz9cYxkVWnYMLBUySXmSS0uxl
21 | 3p+wK0lMG0my/gb+alke5PAQjcE5dtXYzCn+8Lj0uSfCk8Gy0ZOK2oiUjaCGYN6D
22 | u72qDRFBnR3jaoFqi03bGBIMnglGuAPyBZiI7LJgzuT9xumjKTJW3kN4YJxMNYu1
23 | FzmIyFZpyvZ7930vB2UpCOiIaRdZiX4Z6ZN2frD3a/vBxBNqiNh/BO+Dex+PDfI4
24 | TqwF8zlcjt4XZ2teQ8nNMR/D8oiYTUW8hwR4laEmDy7ASxe0p5aijmUApWq5UTsF
25 | +s/QbwugccU0iR5orksM5u9MZH4J/mFGKzOltfGXNLYI6D5Mtwrnyi0BsF5eY0u6
26 | vkdivtdqrq2DXY+ftuqLOQ7b+t1RctbcMHGPptlxFuN9ufP5TiTWSpfqDwmHCLsT
27 | k2vFiMwcHdLpQ1IH8ORVRgPPsiBnBOJ/kIiXG2SxPUTjjEGOVgeA
28 | =/Tod
29 | -----END PGP PUBLIC KEY BLOCK-----
30 |
--------------------------------------------------------------------------------
/roles/ceph-common/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Sébastien Han
4 | description: Installs Ceph
5 | license: Apache
6 | min_ansible_version: 1.7
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - trusty
11 | categories:
12 | - system
13 | dependencies: []
14 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/checks/check_system.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: fail on unsupported system
3 | fail:
4 | msg: "System not supported {{ ansible_system }}"
5 | when: "ansible_system not in ['Linux']"
6 |
7 | - name: fail on unsupported architecture
8 | fail:
9 | msg: "Architecture not supported {{ ansible_architecture }}"
10 | when: "ansible_architecture not in ['x86_64']"
11 |
12 | - name: fail on unsupported distribution
13 | fail:
14 | msg: "Distribution not supported {{ ansible_os_family }}"
15 | when: "ansible_os_family not in ['Debian', 'RedHat']"
16 |
17 | - name: fail on unsupported distribution for red hat storage
18 | fail:
19 | msg: "Distribution not supported {{ ansible_distribution_version }} by Red Hat Storage, only RHEL 7.1"
20 | when:
21 | ceph_stable_rh_storage and
22 | {{ ansible_distribution_version | version_compare('7.1', '<') }}
23 |
24 | - name: check ansible version
25 | local_action: shell ansible --version | awk '/[0-9].[0-9].[0-9]/ {print $2}'
26 | changed_when: false
27 | sudo: false
28 | register: ansible_version
29 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/debian_ceph_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install the ceph repository stable key
4 | apt_key:
5 | data: "{{ lookup('file', '../../files/cephstable.asc') }}"
6 | state: present
7 | when: ceph_stable
8 |
9 | - name: install the ceph development repository key
10 | apt_key:
11 | data: "{{ lookup('file', '../../files/cephdev.asc') }}"
12 | state: present
13 | when: ceph_dev
14 |
15 | - name: install inktank ceph enterprise repository key
16 | apt_key:
17 | data: "{{ lookup('file', '../../files/cephstableice.asc') }}"
18 | state: present
19 | when: ceph_stable_ice
20 |
21 | - name: add ceph stable repository
22 | apt_repository:
23 | repo: "deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
24 | state: present
25 | changed_when: false
26 | when: ceph_stable
27 |
28 | - name: add ceph development repository
29 | apt_repository:
30 | repo: "deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
31 | state: present
32 | changed_when: false
33 | when: ceph_dev
34 |
35 | - name: add inktank ceph enterprise repository
36 | apt_repository:
37 | repo: "deb file://{{ ceph_stable_ice_temp_path }} {{ ansible_lsb.codename }} main"
38 | state: present
39 | changed_when: false
40 | when: ceph_stable_ice
41 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_on_debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install dependencies
3 | apt:
4 | pkg: "{{ item }}"
5 | state: present
6 | update_cache: yes
7 | cache_valid_time: 3600
8 | with_items: debian_package_dependencies
9 |
10 | - name: configure ceph apt repository
11 | include: debian_ceph_repository.yml
12 | when: ceph_origin == 'upstream'
13 |
14 | - name: install ceph
15 | apt:
16 | pkg: "{{ item }}"
17 | state: latest
18 | default_release: "{{ ansible_distribution_release }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
19 | with_items:
20 | - ceph
21 | - ceph-common #|
22 | - ceph-fs-common #|--> yes, they are all already dependencies of 'ceph';
23 | - ceph-fuse #|--> however, during rolling upgrades of the 'ceph' package
24 | - ceph-mds #|--> they don't get updated, so we need to force them
25 | - libcephfs1 #|
26 |
27 | - name: install rados gateway
28 | apt:
29 | pkg: radosgw
30 | state: latest
31 | update_cache: yes
32 | when:
33 | rgw_group_name in group_names
34 |
35 | - name: configure rbd clients directories
36 | file:
37 | path: "{{ item }}"
38 | state: directory
39 | owner: libvirt-qemu
40 | group: kvm
41 | mode: 0755
42 | with_items:
43 | - "{{ rbd_client_log_path }}"
44 | - "{{ rbd_client_admin_socket_path }}"
45 | when: rbd_client_directories
46 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_on_redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install dependencies
3 | yum:
4 | name: "{{ item }}"
5 | state: present
6 | with_items: redhat_package_dependencies
7 |
8 | - name: configure ceph yum repository
9 | include: redhat_ceph_repository.yml
10 | when: ceph_origin == 'upstream'
11 |
12 | - name: install ceph
13 | yum:
14 | name: ceph
15 | state: latest
16 | when: not ceph_stable_rh_storage
17 |
18 | - name: install red hat storage ceph mon
19 | yum:
20 | name: "{{ item }}"
21 | state: latest
22 | with_items:
23 | - ceph
24 | - ceph-mon
25 | when:
26 | ceph_stable_rh_storage and
27 | mon_group_name in group_names
28 |
29 | - name: install red hat storage ceph osd
30 | yum:
31 | name: "{{ item }}"
32 | state: latest
33 | with_items:
34 | - ceph
35 | - ceph-osd
36 | when:
37 | ceph_stable_rh_storage and
38 | osd_group_name in group_names
39 |
40 | - name: install Inktank Ceph Enterprise RBD Kernel modules
41 | yum:
42 | name: "{{ item }}"
43 | with_items:
44 | - "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
45 | - "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
46 | when: ceph_stable_ice
47 |
48 | - name: install rados gateway
49 | yum:
50 | name: ceph-radosgw
51 | state: latest
52 | when:
53 | rgw_group_name in group_names
54 |
55 | - name: configure rbd clients directories
56 | file:
57 | path: "{{ item }}"
58 | state: directory
59 | owner: qemu
60 | group: libvirtd
61 | mode: 0755
62 | with_items:
63 | - "{{ rbd_client_log_path }}"
64 | - "{{ rbd_client_admin_socket_path }}"
65 | when: rbd_client_directories
66 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/install_rgw_on_redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add ceph extra
3 | template:
4 | src: ../../templates/ceph-extra.repo
5 | dest: /etc/yum.repos.d
6 | owner: root
7 | group: root
8 |
9 | - name: add special fastcgi repository key
10 | rpm_key:
11 | key: http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
12 | validate_certs: "{{ validate_certs }}"
13 |
14 | - name: add special fastcgi repository
15 | command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
16 | changed_when: false
17 |
18 | - name: install apache and fastcgi
19 | yum:
20 | name: "{{ item }}"
21 | state: present
22 | with_items:
23 | - httpd
24 | - mod_fastcgi
25 | - mod_fcgid
26 |
27 | - name: install rados gateway vhost
28 | template:
29 | src: ../../templates/rgw.conf
30 | dest: /etc/httpd/conf.d/rgw.conf
31 | owner: root
32 | group: root
33 |
34 | - name: install s3gw.fcgi script
35 | template:
36 | src: ../../templates/s3gw.fcgi.j2
37 | dest: /var/www/s3gw.fcgi
38 | mode: 0555
39 | owner: root
40 | group: root
41 |
42 | - name: disable default site
43 | shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
44 | changed_when: false
45 | notify:
46 | - restart apache2
47 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/installs/redhat_ceph_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install the ceph stable repository key
3 | rpm_key:
4 | key: "{{ ceph_stable_key }}"
5 | state: present
6 | validate_certs: "{{ validate_certs }}"
7 | when: ceph_stable
8 |
9 | - name: install the ceph development repository key
10 | rpm_key:
11 | key: "{{ ceph_dev_key }}"
12 | state: present
13 | validate_certs: "{{ validate_certs }}"
14 | when: ceph_dev
15 |
16 | - name: install inktank ceph enterprise repository key
17 | rpm_key:
18 | key: "{{ ceph_stable_ice_temp_path }}/release.asc"
19 | state: present
20 | validate_certs: "{{ validate_certs }}"
21 | when: ceph_stable_ice
22 |
23 | - name: install red hat storage repository key
24 | rpm_key:
25 | key: "{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release"
26 | state: present
27 | validate_certs: "{{ validate_certs }}"
28 | when:
29 | ceph_stable_rh_storage and
30 | ceph_stable_rh_storage_iso_install
31 |
32 | - name: add ceph stable repository
33 | yum:
34 | name: http://ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
35 | changed_when: false
36 | when: ceph_stable
37 |
38 | - name: add ceph development repository
39 | yum:
40 | name: http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
41 | changed_when: false
42 | when: ceph_dev
43 |
44 | - name: add inktank ceph enterprise repository
45 | template:
46 | src: redhat_ice_repo.j2
47 | dest: /etc/yum.repos.d/ice.repo
48 | owner: root
49 | group: root
50 | mode: 0644
51 | when: ceph_stable_ice
52 |
53 | - name: add red hat storage repository
54 | template:
55 | src: redhat_storage_repo.j2
56 | dest: /etc/yum.repos.d/rh_storage.repo
57 | owner: root
58 | group: root
59 | mode: 0644
60 | when:
61 | ceph_stable_rh_storage and
62 | ceph_stable_rh_storage_iso_install
63 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/misc/system_tuning.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: disable osd directory parsing by updatedb
3 | command: updatedb -e /var/lib/ceph
4 | changed_when: false
5 | failed_when: false
6 |
7 | - name: disable transparent hugepage
8 | command: "echo never > /sys/kernel/mm/transparent_hugepage/enabled"
9 | changed_when: false
10 | failed_when: false
11 | when: disable_transparent_hugepage
12 |
13 | - name: disable swap
14 | command: swapoff -a
15 | changed_when: false
16 | failed_when: false
17 | when: disable_swap
18 |
19 | - name: apply operating system tuning
20 | sysctl:
21 | name: "{{ item.name }}"
22 | value: "{{ item.value }}"
23 | state: present
24 | sysctl_file: /etc/sysctl.conf
25 | ignoreerrors: yes
26 | with_items: os_tuning_params
27 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/pre_requisites/prerequisite_ice.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create ice package directory
3 | file:
4 | path: "{{ ceph_stable_ice_temp_path }}"
5 | state: directory
6 | owner: root
7 | group: root
8 | mode: 0644
9 | when: ceph_stable_ice
10 |
11 | - name: get ice packages
12 | get_url:
13 | url_username: "{{ ceph_stable_ice_user }}"
14 | url_password: "{{ ceph_stable_ice_password }}"
15 | url: "{{ ceph_stable_ice_url }}/{{ ceph_stable_ice_version }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz"
16 | dest: "{{ ceph_stable_ice_temp_path }}/ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz"
17 | when: ceph_stable_ice
18 |
19 | - name: get ice Kernel Modules
20 | get_url:
21 | url_username: "{{ ceph_stable_ice_user }}"
22 | url_password: "{{ ceph_stable_ice_password }}"
23 | url: "{{ ceph_stable_ice_url }}/{{ ceph_stable_ice_kmod_version }}/{{ item }}"
24 | dest: "{{ ceph_stable_ice_temp_path }}"
25 | with_items:
26 | - kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm
27 | - kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm
28 | when:
29 | ceph_stable_ice and
30 | ansible_os_family == 'RedHat'
31 |
32 | - name: stat extracted ice repo files
33 | stat:
34 | path: "{{ ceph_stable_ice_temp_path }}/ice_setup.py"
35 | register: repo_exist
36 | when: ceph_stable_ice
37 |
38 | - name: extract ice packages
39 | shell: tar -xzf ICE-{{ ceph_stable_ice_version }}-{{ ceph_stable_ice_distro }}.tar.gz
40 | args:
41 | chdir: "{{ ceph_stable_ice_temp_path }}"
42 | changed_when: false
43 | when:
44 | ceph_stable_ice and
45 | repo_exist.stat.exists == False
46 |
47 | - name: move ice extracted packages
48 | shell: "mv {{ ceph_stable_ice_temp_path }}/ceph/*/* {{ ceph_stable_ice_temp_path }}"
49 | changed_when: false
50 | when:
51 | ceph_stable_ice and
52 | repo_exist.stat.exists == False
53 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_cdn_install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: determine if node is registered with subscription-manager.
3 | command: subscription-manager identity
4 | register: subscription
5 | changed_when: false
6 |
7 | - name: check if the red hat optional repo is present
8 | shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-optional-rpms
9 | changed_when: false
10 | failed_when: false
11 | register: rh_optional_repo
12 |
13 | - name: enable red hat optional repository
14 | command: subscription-manager repos --enable rhel-7-server-optional-rpms
15 | changed_when: false
16 | when: rh_optional_repo.rc != 0
17 |
18 | - name: check if the red hat storage monitor repo is already present
19 | shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-mon-rpms
20 | changed_when: false
21 | failed_when: false
22 | register: rh_storage_mon_repo
23 | when: mon_group_name in group_names
24 |
25 | - name: enable red hat storage monitor repository
26 | command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-mon-rpms
27 | changed_when: false
28 | when:
29 | mon_group_name in group_names and
30 | rh_storage_mon_repo.rc != 0
31 |
32 | - name: check if the red hat storage osd repo is already present
33 | shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-osd-rpms
34 | changed_when: false
35 | failed_when: false
36 | register: rh_storage_osd_repo
37 | when: osd_group_name in group_names
38 |
39 | - name: enable red hat storage osd repository
40 | command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-osd-rpms
41 | changed_when: false
42 | when:
43 | osd_group_name in group_names and
44 | rh_storage_osd_repo.rc != 0
45 |
46 | - name: check if the red hat storage rados gateway repo is already present
47 | shell: yum --noplugins --cacheonly repolist | grep -sq rhel-7-server-rhceph-1.3-tools-rpms
48 | changed_when: false
49 | failed_when: false
50 | register: rh_storage_rgw_repo
51 | when: rgw_group_name in group_names
52 |
53 | - name: enable red hat storage rados gateway repository
54 | command: subscription-manager repos --enable rhel-7-server-rhceph-1.3-tools-rpms
55 | changed_when: false
56 | when:
57 | rgw_group_name in group_names and
58 | rh_storage_rgw_repo.rc != 0
59 |
--------------------------------------------------------------------------------
/roles/ceph-common/tasks/pre_requisites/prerequisite_rh_storage_iso_install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create red hat storage package directories
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | with_items:
7 | - "{{ ceph_stable_rh_storage_mount_path }}"
8 | - "{{ ceph_stable_rh_storage_repository_path }}"
9 |
10 | - name: fetch the red hat storage iso from the ansible server
11 | copy:
12 | src: "{{ ceph_stable_rh_storage_iso_path }}"
13 | dest: "{{ ceph_stable_rh_storage_iso_path }}"
14 |
15 | - name: mount red hat storage iso file
16 | mount:
17 | name: "{{ ceph_stable_rh_storage_mount_path }}"
18 | src: "{{ ceph_stable_rh_storage_iso_path }}"
19 | fstype: iso9660
20 | state: mounted
21 |
22 | - name: copy red hat storage iso content
23 | shell: cp -r {{ ceph_stable_rh_storage_mount_path }}/* {{ ceph_stable_rh_storage_repository_path }}
24 | args:
25 | creates: "{{ ceph_stable_rh_storage_repository_path }}/README"
26 |
27 | - name: mount red hat storage iso file
28 | mount:
29 | name: "{{ ceph_stable_rh_storage_mount_path }}"
30 | src: "{{ ceph_stable_rh_storage_iso_path }}"
31 | fstype: iso9660
32 | state: unmounted
33 |
--------------------------------------------------------------------------------
/roles/ceph-common/templates/ceph-extra.repo:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | [ceph-extras]
4 | name=Ceph Extras Packages
5 | baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/$basearch
6 | enabled=1
7 | priority=2
8 | gpgcheck=1
9 | type=rpm-md
10 | gpgkey=https://download.ceph.com/keys/release.asc
11 |
12 | {% if (redhat_distro_ceph_extra != "centos6.4" and redhat_distro_ceph_extra != "rhel6.4" and redhat_distro_ceph_extra != "rhel6.5") %}
13 | [ceph-extras-noarch]
14 | name=Ceph Extras noarch
15 | baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/noarch
16 | enabled=1
17 | priority=2
18 | gpgcheck=1
19 | type=rpm-md
20 | gpgkey=https://download.ceph.com/keys/release.asc
21 | {% endif %}
22 |
23 | [ceph-extras-source]
24 | name=Ceph Extras Sources
25 | baseurl=http://ceph.com/packages/ceph-extras/rpm/{{ redhat_distro_ceph_extra }}/SRPMS
26 | enabled=1
27 | priority=2
28 | gpgcheck=1
29 | type=rpm-md
30 | gpgkey=https://download.ceph.com/keys/release.asc
31 |
--------------------------------------------------------------------------------
/roles/ceph-common/templates/httpd.conf:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | ServerName {{ ansible_hostname }}
4 |
--------------------------------------------------------------------------------
/roles/ceph-common/templates/redhat_ice_repo.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | [ice]
3 | name=Inktank Ceph Enterprise - local packages for Ceph
4 | baseurl=file://{{ ceph_stable_ice_temp_path }}
5 | enabled=1
6 | gpgcheck=1
7 | type=rpm-md
8 | priority=1
9 | gpgkey=file://{{ ceph_stable_ice_temp_path }}/release.asc
10 |
--------------------------------------------------------------------------------
/roles/ceph-common/templates/redhat_storage_repo.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | [rh_storage_mon]
3 | name=Red Hat Storage Ceph - local packages for Ceph
4 | baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/MON
5 | enabled=1
6 | gpgcheck=1
7 | type=rpm-md
8 | priority=1
9 | gpgkey=file://{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release
10 |
11 | [rh_storage_osd]
12 | name=Red Hat Storage Ceph - local packages for Ceph
13 | baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/OSD
14 | enabled=1
15 | gpgcheck=1
16 | type=rpm-md
17 | priority=1
18 | gpgkey=file://{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release
19 |
20 | [rh_storage_calamari]
21 | name=Red Hat Storage Ceph - local packages for Ceph
22 | baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/Calamari
23 | enabled=1
24 | gpgcheck=1
25 | type=rpm-md
26 | priority=1
27 | gpgkey=file://{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release
28 |
29 | [rh_storage_installer]
30 | name=Red Hat Storage Ceph - local packages for Ceph
31 | baseurl=file://{{ ceph_stable_rh_storage_repository_path }}/Installer
32 | enabled=1
33 | gpgcheck=1
34 | type=rpm-md
35 | priority=1
36 | gpgkey=file://{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release
37 |
--------------------------------------------------------------------------------
/roles/ceph-common/templates/rgw.conf:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | FastCgiExternalServer /var/www/s3gw.fcgi -socket /tmp/radosgw-{{ ansible_hostname }}.sock
4 |
5 | <VirtualHost *:80>
6 | ServerName {{ ansible_hostname }}
7 | ServerAdmin {{ email_address }}@{{ ansible_fqdn }}
8 | DocumentRoot /var/www
9 |
10 | <IfModule mod_fastcgi.c>
11 | <Directory /var/www>
12 | Options +ExecCGI
13 | AllowOverride All
14 | SetHandler fastcgi-script
15 | Order allow,deny
16 | Allow from all
17 | AuthBasicAuthoritative Off
18 | </Directory>
19 | </IfModule>
20 |
21 | <IfModule mod_rewrite.c>
22 | RewriteEngine On
23 | RewriteRule ^/([a-zA-Z0-9-_.]*)([/]?.*) /s3gw.fcgi?page=$1&params=$2&%{QUERY_STRING} [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
24 | </IfModule>
25 |
26 | </VirtualHost>
27 |
--------------------------------------------------------------------------------
/roles/ceph-common/templates/s3gw.fcgi.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # {{ ansible_managed }}
3 | exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.{{ ansible_hostname }}
4 |
--------------------------------------------------------------------------------
/roles/ceph-install/files/cephstable.asc:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 | Version: GnuPG v1
3 |
4 | mQINBFX4hgkBEADLqn6O+UFp+ZuwccNldwvh5PzEwKUPlXKPLjQfXlQRig1flpCH
5 | E0HJ5wgGlCtYd3Ol9f9+qU24kDNzfbs5bud58BeE7zFaZ4s0JMOMuVm7p8JhsvkU
6 | C/Lo/7NFh25e4kgJpjvnwua7c2YrA44ggRb1QT19ueOZLK5wCQ1mR+0GdrcHRCLr
7 | 7Sdw1d7aLxMT+5nvqfzsmbDullsWOD6RnMdcqhOxZZvpay8OeuK+yb8FVQ4sOIzB
8 | FiNi5cNOFFHg+8dZQoDrK3BpwNxYdGHsYIwU9u6DWWqXybBnB9jd2pve9PlzQUbO
9 | eHEa4Z+jPqxY829f4ldaql7ig8e6BaInTfs2wPnHJ+606g2UH86QUmrVAjVzlLCm
10 | nqoGymoAPGA4ObHu9X3kO8viMBId9FzooVqR8a9En7ZE0Dm9O7puzXR7A1f5sHoz
11 | JdYHnr32I+B8iOixhDUtxIY4GA8biGATNaPd8XR2Ca1hPuZRVuIiGG9HDqUEtXhV
12 | fY5qjTjaThIVKtYgEkWMT+Wet3DPPiWT3ftNOE907e6EWEBCHgsEuuZnAbku1GgD
13 | LBH4/a/yo9bNvGZKRaTUM/1TXhM5XgVKjd07B4cChgKypAVHvef3HKfCG2U/DkyA
14 | LjteHt/V807MtSlQyYaXUTGtDCrQPSlMK5TjmqUnDwy6Qdq8dtWN3DtBWQARAQAB
15 | tCpDZXBoLmNvbSAocmVsZWFzZSBrZXkpIDxzZWN1cml0eUBjZXBoLmNvbT6JAjgE
16 | EwECACIFAlX4hgkCGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4BAheAAAoJEOhKwsBG
17 | DzmUXdIQAI8YPcZMBWdv489q8CzxlfRIRZ3Gv/G/8CH+EOExcmkVZ89mVHngCdAP
18 | DOYCl8twWXC1lwJuLDBtkUOHXNuR5+Jcl5zFOUyldq1Hv8u03vjnGT7lLJkJoqpG
19 | l9QD8nBqRvBU7EM+CU7kP8+09b+088pULil+8x46PwgXkvOQwfVKSOr740Q4J4nm
20 | /nUOyTNtToYntmt2fAVWDTIuyPpAqA6jcqSOC7Xoz9cYxkVWnYMLBUySXmSS0uxl
21 | 3p+wK0lMG0my/gb+alke5PAQjcE5dtXYzCn+8Lj0uSfCk8Gy0ZOK2oiUjaCGYN6D
22 | u72qDRFBnR3jaoFqi03bGBIMnglGuAPyBZiI7LJgzuT9xumjKTJW3kN4YJxMNYu1
23 | FzmIyFZpyvZ7930vB2UpCOiIaRdZiX4Z6ZN2frD3a/vBxBNqiNh/BO+Dex+PDfI4
24 | TqwF8zlcjt4XZ2teQ8nNMR/D8oiYTUW8hwR4laEmDy7ASxe0p5aijmUApWq5UTsF
25 | +s/QbwugccU0iR5orksM5u9MZH4J/mFGKzOltfGXNLYI6D5Mtwrnyi0BsF5eY0u6
26 | vkdivtdqrq2DXY+ftuqLOQ7b+t1RctbcMHGPptlxFuN9ufP5TiTWSpfqDwmHCLsT
27 | k2vFiMwcHdLpQ1IH8ORVRgPPsiBnBOJ/kIiXG2SxPUTjjEGOVgeA
28 | =/Tod
29 | -----END PGP PUBLIC KEY BLOCK-----
30 |
--------------------------------------------------------------------------------
/roles/ceph-install/tasks/installs/debian_ceph_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install the ceph repository stable key
4 | apt_key:
5 | data: "{{ lookup('file', '../../files/cephstable.asc') }}"
6 | state: present
7 | when: ceph_stable
8 |
9 | - name: install the ceph development repository key
10 | apt_key:
11 | data: "{{ lookup('file', '../../files/cephdev.asc') }}"
12 | state: present
13 | when: ceph_dev
14 |
15 | - name: install inktank ceph enterprise repository key
16 | apt_key:
17 | data: "{{ lookup('file', '../../files/cephstableice.asc') }}"
18 | state: present
19 | when: ceph_stable_ice
20 |
21 | - name: add ceph stable repository
22 | apt_repository:
23 | repo: "deb http://ceph.com/debian-{{ ceph_stable_release }}/ {{ ceph_stable_distro_source | default(ansible_lsb.codename) }} main"
24 | state: present
25 | changed_when: false
26 | when: ceph_stable
27 |
28 | - name: add ceph development repository
29 | apt_repository:
30 | repo: "deb http://gitbuilder.ceph.com/ceph-deb-{{ ansible_lsb.codename }}-x86_64-basic/ref/{{ ceph_dev_branch }} {{ ansible_lsb.codename }} main"
31 | state: present
32 | changed_when: false
33 | when: ceph_dev
34 |
35 | - name: add inktank ceph enterprise repository
36 | apt_repository:
37 | repo: "deb file://{{ ceph_stable_ice_temp_path }} {{ ansible_lsb.codename }} main"
38 | state: present
39 | changed_when: false
40 | when: ceph_stable_ice
41 |
--------------------------------------------------------------------------------
/roles/ceph-install/tasks/installs/install_on_debian.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install dependencies
3 | apt:
4 | pkg: "{{ item }}"
5 | state: present
6 | update_cache: yes
7 | cache_valid_time: 3600
8 | with_items: debian_package_dependencies
9 |
10 | - name: configure ceph apt repository
11 | include: debian_ceph_repository.yml
12 | when: ceph_origin == 'upstream'
13 |
14 | - name: install ceph
15 | apt:
16 | pkg: "{{ item }}"
17 | state: latest
18 | default_release: "{{ ansible_distribution_release }}{{ '-backports' if ceph_origin == 'distro' and ceph_use_distro_backports else ''}}"
19 | with_items:
20 | - ceph
21 | - ceph-common #|
22 | - ceph-fs-common #|--> yes, these are all already dependencies of 'ceph'
23 | - ceph-fuse #|--> however, during rolling upgrades, upgrading the 'ceph' package
24 | - ceph-mds #|--> alone does not update them, so we need to force them here
25 | - libcephfs1 #|
26 |
27 | - name: install rados gateway
28 | apt:
29 | pkg: radosgw
30 | state: latest
31 | update_cache: yes
32 | when:
33 | rgw_group_name in group_names
34 |
35 | - name: configure rbd clients directories
36 | file:
37 | path: "{{ item }}"
38 | state: directory
39 | owner: libvirt-qemu
40 | group: kvm
41 | mode: 0755
42 | with_items:
43 | - rbd_client_log_path
44 | - rbd_client_admin_socket_path
45 | when: rbd_client_directories
46 |
--------------------------------------------------------------------------------
/roles/ceph-install/tasks/installs/install_on_redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install dependencies
3 | yum:
4 | name: "{{ item }}"
5 | state: present
6 | with_items: redhat_package_dependencies
7 |
8 | - name: configure ceph yum repository
9 | include: redhat_ceph_repository.yml
10 | when: ceph_origin == 'upstream'
11 |
12 | - name: install ceph
13 | yum:
14 | name: ceph
15 | state: latest
16 | when: not ceph_stable_rh_storage
17 |
18 | - name: install red hat storage ceph mon
19 | yum:
20 | name: "{{ item }}"
21 | state: latest
22 | with_items:
23 | - ceph
24 | - ceph-mon
25 | when:
26 | ceph_stable_rh_storage and
27 | mon_group_name in group_names
28 |
29 | - name: install red hat storage ceph osd
30 | yum:
31 | name: "{{ item }}"
32 | state: latest
33 | with_items:
34 | - ceph
35 | - ceph-osd
36 | when:
37 | ceph_stable_rh_storage and
38 | osd_group_name in group_names
39 |
40 | - name: install Inktank Ceph Enterprise RBD Kernel modules
41 | yum:
42 | name: "{{ item }}"
43 | with_items:
44 | - "{{ ceph_stable_ice_temp_path }}/kmod-libceph-{{ ceph_stable_ice_kmod }}.rpm"
45 | - "{{ ceph_stable_ice_temp_path }}/kmod-rbd-{{ ceph_stable_ice_kmod }}.rpm"
46 | when: ceph_stable_ice
47 |
48 | - name: install rados gateway
49 | yum:
50 | name: ceph-radosgw
51 | state: latest
52 | when:
53 | rgw_group_name in group_names
54 |
55 | - name: configure rbd clients directories
56 | file:
57 | path: "{{ item }}"
58 | state: directory
59 | owner: qemu
60 | group: libvirtd
61 | mode: 0755
62 | with_items:
63 | - rbd_client_log_path
64 | - rbd_client_admin_socket_path
65 | when: rbd_client_directories
66 |
--------------------------------------------------------------------------------
/roles/ceph-install/tasks/installs/install_rgw_on_redhat.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: add ceph extra
3 | template:
4 | src: ../../templates/ceph-extra.repo
5 | dest: /etc/yum.repos.d
6 | owner: root
7 | group: root
8 |
9 | - name: add special fastcgi repository key
10 | rpm_key:
11 | validate_certs: "{{ validate_certs }}"
12 | key: http://dag.wieers.com/rpm/packages/RPM-GPG-KEY.dag.txt
13 |
14 | - name: add special fastcgi repository
15 | command: rpm -ivh http://pkgs.repoforge.org/rpmforge-release/rpmforge-release-0.5.3-1.el6.rf.x86_64.rpm
16 | changed_when: false
17 |
18 | - name: install apache and fastcgi
19 | yum:
20 | name: "{{ item }}"
21 | state: present
22 | with_items:
23 | - httpd
24 | - mod_fastcgi
25 | - mod_fcgid
26 |
27 | - name: install rados gateway vhost
28 | template:
29 | src: ../../templates/rgw.conf
30 | dest: /etc/httpd/conf.d/rgw.conf
31 | owner: root
32 | group: root
33 |
34 | - name: install s3gw.fcgi script
35 | template:
36 | src: ../../templates/s3gw.fcgi.j2
37 | dest: /var/www/s3gw.fcgi
38 | mode: 0555
39 | owner: root
40 | group: root
41 |
42 | - name: disable default site
43 | shell: sed -i "s/^[^+#]/#/g" /etc/httpd/conf.d/welcome.conf
44 | changed_when: false
45 | notify:
46 | - restart apache2
47 |
--------------------------------------------------------------------------------
/roles/ceph-install/tasks/installs/redhat_ceph_repository.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install the ceph stable repository key
3 | rpm_key:
4 | key: "{{ ceph_stable_key }}"
5 | state: present
6 | validate_certs: "{{ validate_certs }}"
7 | when: ceph_stable
8 |
9 | - name: install the ceph development repository key
10 | rpm_key:
11 | validate_certs: "{{ validate_certs }}"
12 | key: "{{ ceph_dev_key }}"
13 | state: present
14 | when: ceph_dev
15 |
16 | - name: install inktank ceph enterprise repository key
17 | rpm_key:
18 | validate_certs: "{{ validate_certs }}"
19 | key: "{{ ceph_stable_ice_temp_path }}/release.asc"
20 | state: present
21 | when: ceph_stable_ice
22 |
23 | - name: install red hat storage repository key
24 | rpm_key:
25 | validate_certs: "{{ validate_certs }}"
26 | key: "{{ ceph_stable_rh_storage_repository_path }}/RPM-GPG-KEY-redhat-release"
27 | state: present
28 | when:
29 | ceph_stable_rh_storage and
30 | ceph_stable_rh_storage_iso_install
31 |
32 | - name: add ceph stable repository
33 | yum:
34 | name: http://ceph.com/rpm-{{ ceph_stable_release }}/{{ ceph_stable_redhat_distro }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro|replace('rhel', 'el') }}.noarch.rpm
35 | changed_when: false
36 | when: ceph_stable
37 |
38 | - name: add ceph development repository
39 | yum:
40 | name: http://gitbuilder.ceph.com/ceph-rpm-{{ ceph_dev_redhat_distro }}-x86_64-basic/ref/{{ ceph_dev_branch }}/noarch/ceph-release-1-0.{{ ceph_stable_redhat_distro }}.noarch.rpm
41 | changed_when: false
42 | when: ceph_dev
43 |
44 | - name: add inktank ceph enterprise repository
45 | template:
46 | src: redhat_ice_repo.j2
47 | dest: /etc/yum.repos.d/ice.repo
48 | owner: root
49 | group: root
50 | mode: 0644
51 | when: ceph_stable_ice
52 |
53 | - name: add red hat storage repository
54 | template:
55 | src: redhat_storage_repo.j2
56 | dest: /etc/yum.repos.d/rh_storage.repo
57 | owner: root
58 | group: root
59 | mode: 0644
60 | when:
61 | ceph_stable_rh_storage and
62 | ceph_stable_rh_storage_iso_install
63 |
--------------------------------------------------------------------------------
/roles/ceph-install/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: ./installs/install_on_redhat.yml
3 | when: ansible_os_family == 'RedHat'
4 |
--------------------------------------------------------------------------------
/roles/ceph-mds/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: Ceph Metadata
2 |
3 | This role bootstraps Ceph metadata server(s) (MDS).
4 | It can also bootstrap dockerized Ceph MDS(s).
5 |
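A minimal sketch of the dockerized flavor, assuming you set the role's own switches from `defaults/main.yml` in `group_vars` for your MDS hosts (the `ceph/daemon` image coordinates shown are simply the role defaults):

```
mds_containerized_deployment: true
ceph_mds_docker_username: ceph
ceph_mds_docker_imagename: daemon
ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}"   # comma separated variables
```
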
6 | # Requirements
7 |
8 | Nothing, it runs out of the box.
9 |
10 | # Role variables
11 |
12 | Have a look at: `defaults/main.yml`.
13 |
14 | ## Mandatory variables
15 |
16 | None.
17 |
18 | # Dependencies
19 |
20 | The role `leseb.ceph-common` must be installed.
21 |
22 | # Example Playbook
23 |
24 | ```
25 | - hosts: servers
26 | remote_user: ubuntu
27 | roles:
28 | - { role: leseb.ceph-mds }
29 | ```
30 |
31 | # Contribution
32 |
33 | **THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
34 | **PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
35 |
36 | # License
37 |
38 | Apache
39 |
40 | # Author Information
41 |
42 | This role was created by [Sébastien Han](http://sebastien-han.fr/).
43 |
--------------------------------------------------------------------------------
/roles/ceph-mds/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # You can override vars by using host or group vars
3 |
4 | ###########
5 | # GENERAL #
6 | ###########
7 |
8 | fetch_directory: fetch/
9 |
10 | cephx: true
11 |
12 |
13 | ##########
14 | # DOCKER #
15 | ##########
16 |
17 | mds_containerized_deployment: false
18 | ceph_mds_docker_username: ceph
19 | ceph_mds_docker_imagename: daemon
20 | ceph_mds_docker_extra_env: "MDS_NAME={{ ansible_hostname }}" # comma separated variables
21 |
--------------------------------------------------------------------------------
/roles/ceph-mds/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Sébastien Han
4 | description: Installs Ceph Metadata
5 | license: Apache
6 | min_ansible_version: 1.7
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - trusty
11 | categories:
12 | - system
13 | dependencies:
14 | - { role: ceph-common, when: not mds_containerized_deployment }
15 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/docker/checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.client.admin.keyring
6 | - /etc/ceph/ceph.conf
7 | - /etc/ceph/monmap
8 | - /etc/ceph/ceph.mon.keyring
9 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
10 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
11 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
12 |
13 | - name: stat for ceph config and keys
14 | stat:
15 | path: "{{ item }}"
16 | with_items: ceph_config_keys
17 | changed_when: false
18 | failed_when: false
19 | register: statleftover
20 |
21 | - name: fail if we find existing cluster files
22 | fail:
23 | msg: "looks like no cluster is running but ceph files are present, please remove them"
24 | with_together:
25 | - ceph_config_keys
26 | - statleftover.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/docker/fetch_configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE (leseb): the mds container needs the admin key
3 | # so it can create the mds pools for cephfs
4 | - name: set config and keys paths
5 | set_fact:
6 | ceph_config_keys:
7 | - /etc/ceph/ceph.conf
8 | - /etc/ceph/ceph.client.admin.keyring
9 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
10 |
11 | - name: stat for ceph config and keys
12 | local_action: stat path={{ item }}
13 | with_items: ceph_config_keys
14 | changed_when: false
15 | sudo: false
16 | failed_when: false
17 | register: statconfig
18 |
19 | - name: try to fetch ceph config and keys
20 | copy:
21 | src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
22 | dest: "{{ item.0 }}"
23 | owner: root
24 | group: root
25 | mode: 0644
26 | changed_when: false
27 | with_together:
28 | - ceph_config_keys
29 | - statconfig.results
30 | when: item.1.stat.exists == true
31 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/docker/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if a cluster is already running
3 | shell: "docker ps | grep -sq 'ceph/daemon'"
4 | register: ceph_health
5 | changed_when: false
6 | failed_when: false
7 |
8 | - include: checks.yml
9 | when: ceph_health.rc != 0
10 |
11 | - include: pre_requisite.yml
12 | - include: selinux.yml
13 | when: ansible_os_family == 'RedHat'
14 |
15 | - include: fetch_configs.yml
16 | - include: start_docker_mds.yml
17 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/docker/pre_requisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create mds bootstrap directory
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | with_items:
7 | - /etc/ceph/
8 | - /var/lib/ceph/bootstrap-mds
9 |
10 | - name: install pip on debian
11 | apt:
12 | name: pip
13 | state: present
14 | when: ansible_os_family == 'Debian'
15 |
16 | - name: install pip on redhat
17 | yum:
18 | name: python-pip
19 | state: present
20 | when: ansible_os_family == 'RedHat'
21 |
22 | # NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
23 | - name: install docker-py
24 | pip:
25 | name: docker-py
26 | version: 1.1.0
27 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/docker/selinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if selinux is enabled
3 | command: getenforce
4 | register: sestatus
5 | changed_when: false
6 |
7 | - name: set selinux permissions
8 | shell: chcon -Rt svirt_sandbox_file_t {{ item }}
9 | with_items:
10 | - /etc/ceph
11 | - /var/lib/ceph
12 | changed_when: false
13 | when: sestatus.stdout != 'Disabled'
14 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/docker/start_docker_mds.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: run the ceph metadata docker image
3 | docker:
4 | image: "{{ ceph_mds_docker_username }}/{{ ceph_mds_docker_imagename }}"
5 | name: ceph-{{ ansible_hostname }}-mds
6 | net: host
7 | state: running
8 | env: "CEPH_DAEMON=MDS,CEPHFS_CREATE=1,{{ ceph_mds_docker_extra_env }}"
9 | volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
10 |
--------------------------------------------------------------------------------
/roles/ceph-mds/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: pre_requisite.yml
3 | when: not mds_containerized_deployment
4 |
5 | - include: ./docker/main.yml
6 | when: mds_containerized_deployment
7 |
--------------------------------------------------------------------------------
/roles/ceph-mon/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: Ceph Monitor
2 |
3 | This role mainly bootstraps Ceph monitor(s) but also has several capabilities:
4 |
5 | * Deploys Ceph monitor(s)
6 | * Manages Ceph keys
7 | * Can create OpenStack pools, users and keys
8 | * Secures a cluster (protect pools)
9 | * Bootstraps dockerized Ceph monitors
10 |
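The OpenStack and pool-protection capabilities above are disabled by default and are driven purely by variables from `defaults/main.yml`; a minimal `group_vars` sketch enabling them (the pool names and flags shown are just the role defaults):

```
openstack_config: true        # create the images/volumes/vms/backups pools and their client keys
secure_cluster: true          # apply the flags below to every pool found on the cluster
secure_cluster_flags:
  - nopgchange
  - nodelete
  - nosizechange
```
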
11 | # Requirements
12 |
13 | Nothing, it runs out of the box.
14 |
15 | # Role variables
16 |
17 | Have a look at: `defaults/main.yml`.
18 |
19 | ## Mandatory variables
20 |
21 | None.
22 |
23 | # Dependencies
24 |
25 | The role `leseb.ceph-common` must be installed.
26 |
27 | # Example Playbook
28 |
29 | ```
30 | - hosts: servers
31 | remote_user: ubuntu
32 | roles:
33 | - { role: leseb.ceph-mon }
34 | ```
35 |
36 | # Contribution
37 |
38 | **THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
39 | **PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
40 |
41 | # License
42 |
43 | Apache
44 |
45 | # Author Information
46 |
47 | This role was created by [Sébastien Han](http://sebastien-han.fr/).
48 |
--------------------------------------------------------------------------------
/roles/ceph-mon/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # You can override vars by using host or group vars
3 |
4 | ###########
5 | # GENERAL #
6 | ###########
7 |
8 | fetch_directory: fetch/
9 |
10 | mon_group_name: mons
11 |
12 | # ACTIVATE BOTH FSID AND MONITOR_SECRET VARIABLES FOR NON-VAGRANT DEPLOYMENT
13 | fsid: "{{ cluster_uuid.stdout }}"
14 | monitor_secret: "{{ monitor_keyring.stdout }}"
15 | cephx: true
16 |
17 | # CephFS
18 | pool_default_pg_num: 128
19 | cephfs_data: cephfs_data
20 | cephfs_metadata: cephfs_metadata
21 | cephfs: cephfs
22 |
23 | # Secure your cluster
24 | # This will set the following flags on all the pools:
25 | # * nosizechange
26 | # * nopgchange
27 | # * nodelete
28 |
29 | secure_cluster: false
30 | secure_cluster_flags:
31 | - nopgchange
32 | - nodelete
33 | - nosizechange
34 |
35 |
36 | #############
37 | # OPENSTACK #
38 | #############
39 |
40 | openstack_config: false
41 | openstack_glance_pool:
42 | name: images
43 | pg_num: "{{ pool_default_pg_num }}"
44 | openstack_cinder_pool:
45 | name: volumes
46 | pg_num: "{{ pool_default_pg_num }}"
47 | openstack_nova_pool:
48 | name: vms
49 | pg_num: "{{ pool_default_pg_num }}"
50 | openstack_cinder_backup_pool:
51 | name: backups
52 | pg_num: "{{ pool_default_pg_num }}"
53 |
54 | openstack_keys:
55 | - { name: client.glance, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_glance_pool.name }}'" }
56 | - { name: client.cinder, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_pool.name }}, allow rwx pool={{ openstack_nova_pool.name }}, allow rx pool={{ openstack_glance_pool.name }}'" }
57 | - { name: client.cinder-backup, value: "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool={{ openstack_cinder_backup_pool.name }}'" }
58 |
59 | ##########
60 | # DOCKER #
61 | ##########
62 |
63 | mon_containerized_deployment: false
64 | ceph_mon_docker_interface: eth0
65 | #ceph_mon_docker_subnet: # subnet of the ceph_mon_docker_interface
66 | ceph_mon_docker_username: ceph
67 | ceph_mon_docker_imagename: daemon
68 | ceph_mon_extra_envs: "MON_NAME={{ ansible_hostname }}" # comma separated variables
69 |
--------------------------------------------------------------------------------
/roles/ceph-mon/files/precise/92-ceph:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo -n "Ceph state is: "
4 | /usr/bin/ceph health
5 | echo ""
6 |
--------------------------------------------------------------------------------
/roles/ceph-mon/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Sébastien Han
4 | description: Installs Ceph Monitor
5 | license: Apache
6 | min_ansible_version: 1.7
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - trusty
11 | categories:
12 | - system
13 | dependencies:
14 | - { role: ceph-common, when: not mon_containerized_deployment }
15 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/ceph_keys.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE (leseb): wait for mon discovery and quorum resolution
3 | # the admin key is not created instantly, so we have to wait a bit
4 | - name: wait for the client.admin key to exist
5 | wait_for:
6 | path: /etc/ceph/ceph.client.admin.keyring
7 | run_once: true
8 |
9 | - name: create ceph rest api keyring
10 | command: ceph auth get-or-create client.restapi osd 'allow *' mon 'allow *' -o /etc/ceph/ceph.client.restapi.keyring
11 | args:
12 | creates: /etc/ceph/ceph.client.restapi.keyring
13 | changed_when: false
14 | when:
15 | cephx and
16 | groups[restapi_group_name] is defined
17 | run_once: true
18 |
19 | - include: openstack_config.yml
20 | when:
21 | openstack_config and
22 | cephx
23 | run_once: true
24 |
25 | - name: find ceph keys
26 | shell: ls -1 /etc/ceph/*.keyring
27 | changed_when: false
28 | register: ceph_keys
29 | when: cephx
30 | run_once: true
31 |
32 | - name: set keys permissions
33 | file:
34 | path: "{{ item }}"
35 | mode: 0600
36 | owner: root
37 | group: root
38 | with_items:
39 | - "{{ ceph_keys.stdout_lines }}"
40 | run_once: true
41 |
42 | - name: copy keys to the ansible server
43 | fetch:
44 | src: "{{ item }}"
45 | dest: "{{ fetch_directory }}/{{ fsid }}/{{ item }}"
46 | flat: yes
47 | with_items:
48 | - "{{ ceph_keys.stdout_lines }}"
49 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
50 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
51 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
52 | when: cephx
53 | run_once: true
54 |
55 | - name: drop in a motd script to report status when logging in
56 | copy:
57 | src: precise/92-ceph
58 | dest: /etc/update-motd.d/92-ceph
59 | owner: root
60 | group: root
61 | mode: 0755
62 | when: ansible_distribution_release == 'precise'
63 | run_once: true
64 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/create_mds_filesystems.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE (leseb): in this playbook the conditional is set on each task.
3 | # We don't do this in main.yml because of the 'docker' variable: when it is set to true,
4 | # the role 'ceph-common' is not inherited, so the condition cannot be evaluated
5 | # since those checks are performed by the ceph-common role.
6 | - name: create filesystem pools
7 | command: ceph osd pool create {{ item }} {{ pool_default_pg_num }}
8 | with_items:
9 | - cephfs_data
10 | - cephfs_metadata
11 | changed_when: false
12 | when: ceph_version.stdout | version_compare('0.84', '>=')
13 |
14 | - name: create ceph filesystem
15 | command: ceph fs new {{ cephfs }} {{ cephfs_metadata }} {{ cephfs_data }}
16 | changed_when: false
17 | when: ceph_version.stdout | version_compare('0.84', '>=')
18 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.client.admin.keyring
6 | - /etc/ceph/ceph.conf
7 | - /etc/ceph/monmap
8 | - /etc/ceph/ceph.mon.keyring
9 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
10 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
11 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
12 |
13 | - name: stat for ceph config and keys
14 | stat:
15 | path: "{{ item }}"
16 | with_items: ceph_config_keys
17 | changed_when: false
18 | failed_when: false
19 | register: statleftover
20 |
21 | - name: fail if we find existing cluster files
22 | fail:
23 | msg: "looks like no cluster is running but ceph files are present, please remove them"
24 | with_together:
25 | - ceph_config_keys
26 | - statleftover.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/copy_configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: push ceph files to the ansible server
3 | fetch:
4 | src: "{{ item.0 }}"
5 | dest: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
6 | flat: yes
7 | with_together:
8 | - ceph_config_keys
9 | - statconfig.results
10 | when: item.1.stat.exists == false
11 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/fetch_configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.client.admin.keyring
6 | - /etc/ceph/ceph.conf
7 | - /etc/ceph/monmap
8 | - /etc/ceph/ceph.mon.keyring
9 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
10 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
11 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
12 |
13 | - name: stat for ceph config and keys
14 | local_action: stat path={{ item }}
15 | with_items: ceph_config_keys
16 | changed_when: false
17 | sudo: false
18 | failed_when: false
19 | register: statconfig
20 |
21 | - name: try to fetch ceph config and keys
22 | copy:
23 | src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
24 | dest: "{{ item.0 }}"
25 | owner: root
26 | group: root
27 | mode: 0644
28 | changed_when: false
29 | with_together:
30 | - ceph_config_keys
31 | - statconfig.results
32 | when: item.1.stat.exists == true
33 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if a cluster is already running
3 | shell: "docker ps | grep -sq 'ceph/daemon'"
4 | register: ceph_health
5 | changed_when: false
6 | failed_when: false
7 |
8 | - include: checks.yml
9 | when: ceph_health.rc != 0
10 |
11 | - include: pre_requisite.yml
12 | - include: selinux.yml
13 | when: ansible_os_family == 'RedHat'
14 |
15 | - include: fetch_configs.yml
16 | - include: start_docker_monitor.yml
17 | - include: copy_configs.yml
18 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/pre_requisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create bootstrap directories
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | with_items:
7 | - /etc/ceph/
8 | - /var/lib/ceph/bootstrap-osd
9 | - /var/lib/ceph/bootstrap-mds
10 | - /var/lib/ceph/bootstrap-rgw
11 |
12 | - name: install pip on debian
13 | apt:
14 | name: pip
15 | state: present
16 | when: ansible_os_family == 'Debian'
17 |
18 | - name: install pip on redhat
19 | yum:
20 | name: python-pip
21 | state: present
22 | when: ansible_os_family == 'RedHat'
23 |
24 | # NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
25 | - name: install docker-py
26 | pip:
27 | name: docker-py
28 | version: 1.1.0
29 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/selinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if selinux is enabled
3 | command: getenforce
4 | register: sestatus
5 | changed_when: false
6 |
7 | - name: set selinux permissions
8 | shell: chcon -Rt svirt_sandbox_file_t {{ item }}
9 | with_items:
10 | - /etc/ceph
11 | - /var/lib/ceph
12 | changed_when: false
13 | when: sestatus.stdout != 'Disabled'
14 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/docker/start_docker_monitor.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: run the ceph monitor docker image
3 | docker:
4 | image: "{{ ceph_mon_docker_username }}/{{ ceph_mon_docker_imagename }}"
5 | name: "{{ ansible_hostname }}"
6 | net: "host"
7 | state: "running"
8 | env: "MON_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_mon_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=MON,CEPH_PUBLIC_NETWORK={{ ceph_mon_docker_subnet }},{{ ceph_mon_extra_envs }}"
9 | volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
10 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: deploy_monitors.yml
3 | when: not mon_containerized_deployment
4 |
5 | - include: start_monitor.yml
6 | when: not mon_containerized_deployment
7 |
8 | - include: ceph_keys.yml
9 | when: not mon_containerized_deployment
10 |
11 | - include: create_mds_filesystems.yml
12 | when:
13 | not mon_containerized_deployment and
14 | groups[mds_group_name] is defined
15 |
16 | - include: secure_cluster.yml
17 | when:
18 | secure_cluster and
19 | not mon_containerized_deployment
20 |
21 | - include: ./docker/main.yml
22 | when: mon_containerized_deployment
23 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/openstack_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create openstack pool
3 | command: ceph osd pool create {{ item.name }} {{ item.pg_num }}
4 | with_items:
5 | - "{{ openstack_glance_pool }}"
6 | - "{{ openstack_cinder_pool }}"
7 | - "{{ openstack_nova_pool }}"
8 | - "{{ openstack_cinder_backup_pool }}"
9 | changed_when: false
10 | failed_when: false
11 |
12 | - name: create openstack keys
13 | command: ceph auth get-or-create {{ item.name }} {{ item.value }} -o /etc/ceph/ceph.{{ item.name }}.keyring
14 | args:
15 | creates: /etc/ceph/ceph.{{ item.name }}.keyring
16 | with_items: openstack_keys
17 | changed_when: false
18 |
--------------------------------------------------------------------------------
/roles/ceph-mon/tasks/secure_cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: collect all the pools
3 | command: rados lspools
4 | register: ceph_pools
5 | when: ceph_version.stdout | version_compare('0.94', '>=')
6 |
7 | - name: secure the cluster
8 | command: ceph osd pool set {{ item[0] }} {{ item[1] }} true
9 | with_nested:
10 | - "{{ ceph_pools.stdout_lines }}"
11 | - secure_cluster_flags
12 | when: ceph_version.stdout | version_compare('0.94', '>=')
13 |
--------------------------------------------------------------------------------
/roles/ceph-osd/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: Ceph OSD
2 |
3 | This role bootstraps Ceph OSD(s).
4 | It can bootstrap dockerized Ceph OSD(s).
5 |
6 | # Requirements
7 |
8 | Nothing, it runs out of the box.
9 |
10 | # Role variables
11 |
12 | Have a look at: `defaults/main.yml`.
13 |
14 | ## Mandatory variables
15 |
16 | Choose one of the following scenarios to configure your OSDs (**choose only one**; see the sketch after these lists):
17 |
18 | * `journal_collocation`
19 | * `raw_multi_journal`
20 | * `osd_directory`
21 |
22 | Then:
23 |
24 | * `devices`
25 | * `raw_journal_devices` (**only if** you activated `raw_multi_journal`)
26 | * `osd_directories` (**only if** you activated `osd_directory`)
27 |
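For example, a minimal sketch of the two disk-based scenarios (the `/dev/sd*` paths are placeholders for your own devices); set one of them in `group_vars` or host vars, never both:

```
# Scenario: journal and data collocated on each device
journal_collocation: true
devices:
  - /dev/sdb
  - /dev/sdc

# Scenario: dedicated journal devices, paired one-to-one with `devices`
# (a fast journal device may be repeated to serve several OSD disks)
#raw_multi_journal: true
#raw_journal_devices:
#  - /dev/sdd
#  - /dev/sdd
```
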
28 | # Dependencies
29 |
30 | The role `leseb.ceph-common` must be installed.
31 |
32 | # Example Playbook
33 |
34 | ```
35 | - hosts: servers
36 | remote_user: ubuntu
37 | roles:
38 | - { role: leseb.ceph-osd }
39 | ```
40 |
41 | # Contribution
42 |
43 | **THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
44 | **PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
45 |
46 | # License
47 |
48 | Apache
49 |
50 | # Author Information
51 |
52 | This role was created by [Sébastien Han](http://sebastien-han.fr/).
53 |
--------------------------------------------------------------------------------
/roles/ceph-osd/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Sébastien Han
4 | description: Installs Ceph Object Storage Daemon
5 | license: Apache
6 | min_ansible_version: 1.7
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - trusty
11 | categories:
12 | - system
13 | dependencies:
14 | - { role: ceph-common, when: not osd_containerized_deployment }
15 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/activate_osds.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE (leseb) : this task is for disk devices only because of the explicit use of the first
3 | # partition.
4 | - name: activate osd(s) when device is a disk
5 | command: ceph-disk activate {{ item.1 | regex_replace('^(\/dev\/cciss\/c[0-9]{1}d[0-9]{1})$', '\\1p') }}1
6 | with_together:
7 | - ispartition.results
8 | - devices
9 | changed_when: false
10 | failed_when: false
11 | when: item.0.rc != 0
12 |
13 | # NOTE (leseb): this task is for devices that are already partitions, so we do not append a partition number.
14 | - name: activate osd(s) when device is a partition
15 | command: "ceph-disk activate {{ item.1 }}"
16 | with_together:
17 | - ispartition.results
18 | - devices
19 | changed_when: false
20 | failed_when: false
21 | when: item.0.rc == 0
22 |
23 | - include: osd_fragment.yml
24 | when: crush_location
25 |
26 | - name: start the osd service(s) and add them to the init sequence (before infernalis)
27 | service:
28 | name: ceph
29 | state: started
30 | enabled: yes
31 | when:
32 | ansible_distribution == "Ubuntu" or
33 | (ceph_stable_release == 'dumpling' or
34 | ceph_stable_release == 'emperor' or
35 | ceph_stable_release == 'firefly' or
36 | ceph_stable_release == 'giant' or
37 | ceph_stable_release == 'hammer')
38 |
39 | - name: start the osd service(s) and add them to the init sequence (on or after infernalis)
40 | service:
41 | name: ceph.target
42 | state: started
43 | enabled: yes
44 | when:
45 | ansible_distribution != "Ubuntu" and not
46 | (ceph_stable_release == 'dumpling' or
47 | ceph_stable_release == 'emperor' or
48 | ceph_stable_release == 'firefly' or
49 | ceph_stable_release == 'giant' or
50 | ceph_stable_release == 'hammer')
51 |
52 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/check_devices.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # NOTE (leseb): the current behavior of ceph-disk is to fail when the device is mounted: "stderr: ceph-disk: Error: Device is mounted: /dev/sdb1".
3 | # The return code is 1, which makes sense. Ideally, though, when ceph-disk detects a ceph partition
4 | # it should exit with rc=0 and do nothing unless something like --force is passed.
5 | # As a final word, I prefer to keep the partition check instead of running ceph-disk prepare with "failed_when: false";
6 | # I believe it's safer.
7 | #
8 | # regex syntax uses (pat1|pat2|...|patN) for different families of device
9 | # names, but has a common expression for partition number at the end.
10 | # allow 2-digit partition numbers so fast SSDs can be shared by > 9 disks
11 | # for SSD journals.
12 |
13 | - name: check if the device is a partition
14 | shell: "echo '{{ item }}' | egrep '/dev/(sd[a-z]{1,2}|hd[a-z]{1,2}|cciss/c[0-9]d[0-9]p|nvme[0-9]n[0-9]p)[0-9]{1,2}$'"
15 | with_items: devices
16 | changed_when: false
17 | failed_when: false
18 | register: ispartition
19 |
20 | - name: check the partition status of the osd disks
21 | shell: "parted --script {{ item }} print > /dev/null 2>&1"
22 | with_items: devices
23 | changed_when: false
24 | failed_when: false
25 | register: osd_partition_status
26 | when:
27 | journal_collocation or
28 | raw_multi_journal
29 |
30 | - name: check the partition status of the journal devices
31 | shell: "parted --script {{ item }} print > /dev/null 2>&1"
32 | with_items: raw_journal_devices
33 | changed_when: false
34 | failed_when: false
35 | register: journal_partition_status
36 | when: raw_multi_journal
37 |
38 | - name: fix the gpt header or labels of the osd disks
39 | shell: sgdisk --zap-all --clear --mbrtogpt -g -- {{ item.1 }}
40 | with_together:
41 | - osd_partition_status.results
42 | - devices
43 | changed_when: false
44 | when: (journal_collocation or raw_multi_journal) and item.0.rc != 0
45 |
46 | - name: fix the gpt header or labels of the journal devices
47 | shell: sgdisk --zap-all --clear --mbrtogpt -g -- {{ item.1 }}
48 | with_together:
49 | - journal_partition_status.results
50 | - raw_journal_devices
51 | changed_when: false
52 | when: raw_multi_journal and item.0.rc != 0
53 |
54 | - name: check if a partition named 'ceph' exists
55 | shell: "parted --script {{ item }} print | egrep -sq '^ 1.*ceph'"
56 | with_items: devices
57 | changed_when: false
58 | failed_when: false
59 | register: parted
60 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/docker/checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.client.admin.keyring
6 | - /etc/ceph/ceph.conf
7 | - /etc/ceph/monmap
8 | - /etc/ceph/ceph.mon.keyring
9 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
10 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
11 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
12 |
13 | - name: stat for ceph config and keys
14 | stat:
15 | path: "{{ item }}"
16 | with_items: ceph_config_keys
17 | changed_when: false
18 | failed_when: false
19 | register: statleftover
20 |
21 | - name: fail if we find existing cluster files
22 | fail:
23 | msg: "looks like no cluster is running but ceph files are present, please remove them"
24 | with_together:
25 | - ceph_config_keys
26 | - statleftover.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/docker/fetch_configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.conf
6 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
7 |
8 | - name: stat for ceph config and keys
9 | local_action: stat path={{ item }}
10 | with_items: ceph_config_keys
11 | changed_when: false
12 | sudo: false
13 | failed_when: false
14 | register: statconfig
15 |
16 | - name: try to fetch ceph config and keys
17 | copy:
18 | src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
19 | dest: "{{ item.0 }}"
20 | owner: root
21 | group: root
22 | mode: 0644
23 | changed_when: false
24 | with_together:
25 | - ceph_config_keys
26 | - statconfig.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/docker/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if a cluster is already running
3 | shell: "docker ps | grep -sq 'ceph/daemon'"
4 | register: ceph_health
5 | changed_when: false
6 | failed_when: false
7 |
8 | - include: checks.yml
9 | when: ceph_health.rc != 0
10 |
11 | - include: pre_requisite.yml
12 | - include: selinux.yml
13 | when: ansible_os_family == 'RedHat'
14 |
15 | - include: fetch_configs.yml
16 | - include: start_docker_osd.yml
17 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/docker/pre_requisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create osd bootstrap directory
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | with_items:
7 | - /etc/ceph/
8 | - /var/lib/ceph/bootstrap-osd
9 |
10 | - name: install pip on debian
11 | apt:
12 | name: pip
13 | state: present
14 | when: ansible_os_family == 'Debian'
15 |
16 | - name: install pip on redhat
17 | yum:
18 | name: python-pip
19 | state: present
20 | when: ansible_os_family == 'RedHat'
21 |
22 | # NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
23 | - name: install docker-py
24 | pip:
25 | name: docker-py
26 | version: 1.1.0
27 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/docker/selinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if selinux is enabled
3 | command: getenforce
4 | register: sestatus
5 | changed_when: false
6 |
7 | - name: set selinux permissions
8 | shell: chcon -Rt svirt_sandbox_file_t {{ item }}
9 | with_items:
10 | - /etc/ceph
11 | - /var/lib/ceph
12 | changed_when: false
13 | when: sestatus.stdout != 'Disabled'
14 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/docker/start_docker_osd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: run the ceph osd docker image
3 | docker:
4 | image: "{{ ceph_osd_docker_username }}/{{ ceph_osd_docker_imagename }}"
5 | name: "{{ ansible_hostname }}-osd-{{ item | regex_replace('/', '') }}"
6 | net: host
7 | pid: host
8 | state: running
9 | privileged: yes
10 | env: "OSD_DEVICE={{ item }},{{ ceph_osd_docker_extra_env }}"
11 | volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph,/dev/:/dev/"
12 | with_items: ceph_osd_docker_devices
13 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: pre_requisite.yml
4 | when: not osd_containerized_deployment
5 |
6 | - include: ./scenarios/journal_collocation.yml
7 | when: journal_collocation and not osd_containerized_deployment
8 |
9 | - include: ./scenarios/raw_multi_journal.yml
10 | when: raw_multi_journal and not osd_containerized_deployment
11 |
12 | - include: ./scenarios/osd_directory.yml
13 | when: osd_directory and not osd_containerized_deployment
14 |
15 | - include: ./docker/main.yml
16 | when: osd_containerized_deployment
17 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/osd_fragment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: get osd path
3 | shell: "df | grep {{ item }} | awk '{print $6}'"
4 | with_items: devices
5 | changed_when: false
6 | failed_when: false
7 | register: osd_path
8 |
9 | - name: get osd id
10 | command: cat {{ item.stdout }}/whoami
11 | with_items: osd_path.results
12 | changed_when: false
13 | failed_when: false
14 | register: osd_id
15 |
16 | - name: create the ceph fragment and assembly directories
17 | file:
18 | path: "{{ item }}"
19 | state: directory
20 | owner: root
21 | group: root
22 | mode: 0644
23 | with_items:
24 | - /etc/ceph/ceph.d/
25 | - /etc/ceph/ceph.d/osd_fragments
26 |
27 | - name: create the osd fragment
28 | template:
29 | src: osd.conf.j2
30 | dest: /etc/ceph/ceph.d/osd_fragments/osd.{{ item.stdout }}.conf
31 | with_items: osd_id.results
32 |
33 | - name: copy ceph.conf for assembling
34 | command: cp /etc/ceph/ceph.conf /etc/ceph/ceph.d/
35 | changed_when: false
36 |
37 | - name: assemble osd sections
38 | assemble:
39 | src: /etc/ceph/ceph.d/osd_fragments/
40 | dest: /etc/ceph/ceph.d/osd.conf
41 | owner: root
42 | group: root
43 | mode: 0644
44 |
45 | - name: assemble ceph conf and osd fragments
46 | assemble:
47 | src: /etc/ceph/ceph.d/
48 | dest: /etc/ceph/ceph.conf
49 | owner: root
50 | group: root
51 | mode: 0644
52 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/pre_requisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install dependencies
3 | apt:
4 | pkg: parted
5 | state: present
6 | when: ansible_os_family == 'Debian'
7 |
8 | - name: install dependencies
9 | yum:
10 | name: parted
11 | state: present
12 | when: ansible_os_family == 'RedHat'
13 |
14 | - name: create bootstrap-osd directory (on or after infernalis release)
15 | file:
16 | path: /var/lib/ceph/bootstrap-osd/
17 | state: directory
18 | owner: ceph
19 | group: ceph
20 | mode: 0755
21 | when:
22 | cephx and
23 | (ceph_stable_release != 'dumpling' and
24 | ceph_stable_release != 'emperor' and
25 | ceph_stable_release != 'firefly' and
26 | ceph_stable_release != 'giant' and
27 | ceph_stable_release != 'hammer')
28 |
29 | - name: create bootstrap-osd directory (before infernalis release)
30 | file:
31 | path: /var/lib/ceph/bootstrap-osd/
32 | state: directory
33 | owner: root
34 | group: root
35 | mode: 0755
36 | when:
37 | cephx and
38 | (ceph_stable_release == 'dumpling' or
39 | ceph_stable_release == 'emperor' or
40 | ceph_stable_release == 'firefly' or
41 | ceph_stable_release == 'giant' or
42 | ceph_stable_release == 'hammer')
43 |
44 | - name: copy osd bootstrap key (on or after infernalis release)
45 | copy:
46 | src: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring"
47 | dest: /var/lib/ceph/bootstrap-osd/ceph.keyring
48 | owner: ceph
49 | group: ceph
50 | mode: 0600
51 | when:
52 | cephx and
53 | (ceph_stable_release != 'dumpling' and
54 | ceph_stable_release != 'emperor' and
55 | ceph_stable_release != 'firefly' and
56 | ceph_stable_release != 'giant' and
57 | ceph_stable_release != 'hammer')
58 |
59 | - name: copy osd bootstrap key (before infernalis release)
60 | copy:
61 | src: "{{ fetch_directory }}/{{ fsid }}/var/lib/ceph/bootstrap-osd/ceph.keyring"
62 | dest: /var/lib/ceph/bootstrap-osd/ceph.keyring
63 | owner: root
64 | group: root
65 | mode: 0600
66 | when:
67 | cephx and
68 | (ceph_stable_release == 'dumpling' or
69 | ceph_stable_release == 'emperor' or
70 | ceph_stable_release == 'firefly' or
71 | ceph_stable_release == 'giant' or
72 | ceph_stable_release == 'hammer')
73 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/scenarios/journal_collocation.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## SCENARIO 1: JOURNAL AND OSD_DATA ON THE SAME DEVICE
3 |
4 | - include: ../check_devices.yml
5 |
6 | # NOTE (leseb): the prepare process must be parallelized somehow...
7 | # if you have 64 disks with 4TB each, this will take a while
8 | # since Ansible will process the loop sequentially
9 |
10 | # NOTE (alahouze): if the device is a partition, the parted command (from check_devices.yml) will have failed,
11 | # which is why we also check whether the device is a partition.
12 | - name: automatic prepare osd disk(s) without partitions
13 | command: ceph-disk prepare "/dev/{{ item.key }}"
14 | ignore_errors: true
15 | register: prepared_osds
16 | with_dict: ansible_devices
17 | when:
18 | ansible_devices is defined and
19 | item.value.removable == "0" and
20 | item.value.partitions|count == 0 and
21 | journal_collocation and
22 | osd_auto_discovery
23 |
24 | - name: manually prepare osd disk(s)
25 | command: "ceph-disk prepare {{ item.2 }}"
26 | ignore_errors: true
27 | with_together:
28 | - parted.results
29 | - ispartition.results
30 | - devices
31 | when:
32 | item.0.rc != 0 and
33 | item.1.rc != 0 and
34 | journal_collocation and not
35 | osd_auto_discovery
36 |
37 | - include: ../activate_osds.yml
38 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/scenarios/osd_directory.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## SCENARIO 4: USE A DIRECTORY INSTEAD OF A DISK FOR OSD
3 |
4 | # NOTE (leseb): we do not check the filesystem underneath the directory
5 | # so it is really up to you to configure this properly.
6 | # Declaring more than one directory on the same filesystem will confuse Ceph.
7 | - name: create osd directories
8 | file:
9 | path: "{{ item }}"
10 | state: directory
11 | owner: root
12 | group: root
13 | with_items: osd_directories
14 |
15 | # NOTE (leseb): the prepare process must be parallelized somehow...
16 | # if you have 64 disks with 4TB each, this will take a while
17 | # since Ansible will process the loop sequentially
18 | - name: prepare the OSD directory(ies)
19 | command: "ceph-disk prepare {{ item }}"
20 | with_items: osd_directories
21 | changed_when: false
22 | when: osd_directory
23 |
24 | - name: activate OSD(s)
25 | command: "ceph-disk activate {{ item }}"
26 | with_items: osd_directories
27 | changed_when: false
28 |
29 | - name: start the OSD service and add it to the init sequence
30 | service:
31 | name: ceph
32 | state: started
33 | enabled: yes
34 |
--------------------------------------------------------------------------------
/roles/ceph-osd/tasks/scenarios/raw_multi_journal.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## SCENARIO 3: N JOURNAL DEVICES FOR N OSDS
3 |
4 | - include: ../check_devices.yml
5 |
6 | # NOTE (leseb): the prepare process must be parallelized somehow...
7 | # if you have 64 disks with 4TB each, this will take a while
8 | # since Ansible will process the loop sequentially
9 |
10 | # NOTE (alahouze): if the device is a partition, the parted command (from check_devices.yml) will have failed,
11 | # which is why we also check whether the device is a partition.
12 | - name: prepare osd disk(s)
13 | command: "ceph-disk prepare {{ item.2 }} {{ item.3 }}"
14 | with_together:
15 | - parted.results
16 | - ispartition.results
17 | - devices
18 | - raw_journal_devices
19 | changed_when: false
20 | ignore_errors: true
21 | when:
22 | item.0.rc != 0 and
23 | item.1.rc != 0 and
24 | raw_multi_journal
25 |
26 | - include: ../activate_osds.yml
27 |
--------------------------------------------------------------------------------
/roles/ceph-osd/templates/osd.conf.j2:
--------------------------------------------------------------------------------
1 | [osd.{{ item.stdout }}]
2 | osd crush location = {{ osd_crush_location }}
3 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: Ceph REST API
2 |
3 | This role bootstraps Ceph REST API(s).
4 | It can bootstrap dockerized Ceph REST API(s).
5 |
6 | # Requirements
7 |
8 | Nothing, it runs out of the box.
9 |
10 | # Role variables
11 |
12 | Have a look at: `defaults/main.yml`.
13 |
14 | ## Mandatory variables
15 |
16 | None.
17 |
18 | # Dependencies
19 |
20 | The role `leseb.ceph-common` must be installed.
21 |
22 | # Example Playbook
23 |
24 | ```
25 | - hosts: servers
26 | remote_user: ubuntu
27 | roles:
28 | - { role: leseb.ceph-restapi }
29 | ```
30 |
31 | # Contribution
32 |
33 | **THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
34 | **PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
35 |
36 | # License
37 |
38 | Apache
39 |
40 | # Author Information
41 |
42 | This role was created by [Sébastien Han](http://sebastien-han.fr/).
43 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ###########
4 | # GENERAL #
5 | ###########
6 |
7 | fetch_directory: fetch/
8 |
9 | ##########
10 | # DOCKER #
11 | ##########
12 |
13 | restapi_containerized_deployment: false
14 | ceph_restapi_docker_interface: eth0
15 | ceph_restapi_port: 5000
16 | ceph_restapi_docker_username: ceph
17 | ceph_restapi_docker_imagename: daemon
18 | ceph_restapi_docker_extra_env: "RESTAPI_IP=0.0.0.0" # comma separated variables
19 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Sébastien Han
4 | description: Installs Ceph REST API
5 | license: Apache
6 | min_ansible_version: 1.7
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - trusty
11 | categories:
12 | - system
13 | dependencies:
14 | - { role: ceph-common, when: not restapi_containerized_deployment }
15 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/tasks/docker/fetch_configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.conf
6 | - /etc/ceph/ceph.client.admin.keyring
7 |
8 | - name: stat for ceph config and keys
9 | local_action: stat path={{ item }}
10 | with_items: ceph_config_keys
11 | changed_when: false
12 | sudo: false
13 | ignore_errors: true
14 | register: statconfig
15 |
16 | - name: try to fetch ceph config and keys
17 | copy:
18 | src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
19 | dest: "{{ item.0 }}"
20 | owner: root
21 | group: root
22 | mode: 644
23 | changed_when: false
24 | with_together:
25 | - ceph_config_keys
26 | - statconfig.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/tasks/docker/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: pre_requisite.yml
3 | - include: fetch_configs.yml
4 | - include: start_docker_restapi.yml
5 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/tasks/docker/pre_requisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install pip on debian
3 | apt:
4 | name: python-pip
5 | state: present
6 | when: ansible_os_family == 'Debian'
7 |
8 | - name: install pip on redhat
9 | yum:
10 | name: python-pip
11 | state: present
12 | when: ansible_os_family == 'RedHat'
13 |
14 | # NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
15 | - name: install docker-py
16 | pip:
17 | name: docker-py
18 | version: 1.1.0
19 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/tasks/docker/start_docker_restapi.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: run the ceph rest api docker image
3 | docker:
4 | image: "{{ ceph_restapi_docker_username }}/{{ ceph_restapi_docker_imagename }}"
5 | name: "{{ ansible_hostname }}-ceph-restapi"
6 | net: host
7 | expose: "{{ ceph_restapi_port }}"
8 | state: running
9 | env: "RESTAPI_IP={{ hostvars[inventory_hostname]['ansible_' + ceph_restapi_docker_interface]['ipv4']['address'] }},CEPH_DAEMON=RESTAPI,{{ ceph_restapi_docker_extra_env }}"
10 | volumes: "/etc/ceph:/etc/ceph"
11 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: pre_requisite.yml
3 | when: not restapi_containerized_deployment
4 |
5 | - include: start_restapi.yml
6 | when: not restapi_containerized_deployment
7 |
8 | - include: ./docker/main.yml
9 | when: restapi_containerized_deployment
10 |
--------------------------------------------------------------------------------
/roles/ceph-restapi/tasks/start_restapi.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if ceph rest api is already started
3 | shell: "pgrep ceph-rest-api"
4 | changed_when: false
5 | failed_when: false
6 | register: restapi_status
7 |
8 | - name: start ceph rest api
9 | shell: "nohup ceph-rest-api &"
10 | changed_when: false
11 | when: restapi_status.rc != 0
12 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/README.md:
--------------------------------------------------------------------------------
1 | # Ansible role: Ceph Rados Gateway
2 |
3 | This role bootstraps Ceph Rados Gateway(s).
4 | It can also bootstrap dockerized Ceph Rados Gateway(s).
5 |
6 | It supports two methods to configure Rados Gateway:
7 |
8 | * with civetweb (default and preferred)
9 | * with Apache and CGI
10 |
11 | It can be configured to support a connection with OpenStack Keystone.
12 |
13 | # Requirements
14 |
15 | Nothing, it runs out of the box.
16 |
17 | # Role variables
18 |
19 | Have a look at: `defaults/main.yml`.
20 |
21 | ## Mandatory variables
22 |
23 | None.
24 |
25 | # Dependencies
26 |
27 | The role `leseb.ceph-common` must be installed.
28 |
29 | # Example Playbook
30 |
31 | ```
32 | - hosts: servers
33 | remote_user: ubuntu
34 | roles:
35 | - { role: leseb.ceph-rgw }
36 | ```
37 |
38 | # Contribution
39 |
40 | **THIS REPOSITORY DOES NOT ACCEPT PULL REQUESTS**
41 | **PULL REQUESTS MUST GO THROUGH [CEPH-ANSIBLE](https://github.com/ceph/ceph-ansible)**
42 |
43 | # License
44 |
45 | Apache
46 |
47 | # Author Information
48 |
49 | This role was created by [Sébastien Han](http://sebastien-han.fr/).
50 |
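As with the other roles, the defaults can be overridden per play. A sketch that moves civetweb to another port, assuming a hypothetical `rgws` host group (values are illustrative; the variable names come from `defaults/main.yml`):

```
- hosts: rgws
  remote_user: ubuntu
  roles:
    - { role: leseb.ceph-rgw }
  vars:
    ceph_rgw_civetweb_port: 8080
```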
--------------------------------------------------------------------------------
/roles/ceph-rgw/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # You can override vars by using host or group vars
3 |
4 | ###########
5 | # GENERAL #
6 | ###########
7 |
8 | fetch_directory: fetch/
9 |
10 | ## Ceph options
11 | #
12 | cephx: true
13 |
14 | # Used for the sudo exception while starting the radosgw process
15 | # a new entry /etc/sudoers.d/ceph will be created
16 | # allowing root to not require tty
17 | radosgw_user: root
18 |
19 | ##########
20 | # DOCKER #
21 | ##########
22 |
23 | rgw_containerized_deployment: false
24 | ceph_rgw_civetweb_port: 80
25 | ceph_rgw_docker_username: ceph
26 | ceph_rgw_docker_imagename: daemon
27 | ceph_rgw_docker_extra_env: "RGW_CIVETWEB_PORT={{ ceph_rgw_civetweb_port }}" # comma separated variables
28 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Sébastien Han
4 | description: Installs Ceph Rados Gateway
5 | license: Apache
6 | min_ansible_version: 1.7
7 | platforms:
8 | - name: Ubuntu
9 | versions:
10 | - trusty
11 | categories:
12 | - system
13 | dependencies:
14 | - { role: ceph-common, when: not rgw_containerized_deployment }
15 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/docker/checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.client.admin.keyring
6 | - /etc/ceph/ceph.conf
7 | - /etc/ceph/monmap
8 | - /etc/ceph/ceph.mon.keyring
9 | - /var/lib/ceph/bootstrap-osd/ceph.keyring
10 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
11 | - /var/lib/ceph/bootstrap-mds/ceph.keyring
12 |
13 | - name: stat for ceph config and keys
14 | stat:
15 | path: "{{ item }}"
16 | with_items: ceph_config_keys
17 | changed_when: false
18 | failed_when: false
19 | register: statleftover
20 |
21 | - name: fail if we find existing cluster files
22 | fail:
23 | msg: "looks like no cluster is running but ceph files are present, please remove them"
24 | with_together:
25 | - ceph_config_keys
26 | - statleftover.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/docker/fetch_configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: set config and keys paths
3 | set_fact:
4 | ceph_config_keys:
5 | - /etc/ceph/ceph.conf
6 | - /var/lib/ceph/bootstrap-rgw/ceph.keyring
7 |
8 | - name: stat for ceph config and keys
9 | local_action: stat path={{ item }}
10 | with_items: ceph_config_keys
11 | changed_when: false
12 | sudo: false
13 | ignore_errors: true
14 | register: statconfig
15 |
16 | - name: try to fetch ceph config and keys
17 | copy:
18 | src: "{{ fetch_directory }}/docker_mon_files/{{ item.0 }}"
19 | dest: "{{ item.0 }}"
20 | owner: root
21 | group: root
22 | mode: 644
23 | changed_when: false
24 | with_together:
25 | - ceph_config_keys
26 | - statconfig.results
27 | when: item.1.stat.exists == true
28 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/docker/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if a cluster is already running
3 | shell: "docker ps | grep -sq 'ceph/daemon'"
4 | register: ceph_health
5 | changed_when: false
6 | failed_when: false
7 |
8 | - include: checks.yml
9 | when: ceph_health.rc != 0
10 |
11 | - include: pre_requisite.yml
12 | - include: selinux.yml
13 | when: ansible_os_family == 'RedHat'
14 |
15 | - include: fetch_configs.yml
16 | - include: start_docker_rgw.yml
17 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/docker/pre_requisite.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create rgw bootstrap directory
3 | file:
4 | path: "{{ item }}"
5 | state: directory
6 | with_items:
7 | - /etc/ceph/
8 | - /var/lib/ceph/bootstrap-rgw
9 |
10 | - name: install pip on debian
11 | apt:
12 | name: python-pip
13 | state: present
14 | when: ansible_os_family == 'Debian'
15 |
16 | - name: install pip on redhat
17 | yum:
18 | name: python-pip
19 | state: present
20 | when: ansible_os_family == 'RedHat'
21 |
22 | # NOTE (leseb): for version 1.1.0 because https://github.com/ansible/ansible-modules-core/issues/1227
23 | - name: install docker-py
24 | pip:
25 | name: docker-py
26 | version: 1.1.0
27 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/docker/selinux.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if selinux is enabled
3 | command: getenforce
4 | register: sestatus
5 | changed_when: false
6 |
7 | - name: set selinux permissions
8 | shell: chcon -Rt svirt_sandbox_file_t {{ item }}
9 | with_items:
10 | - /etc/ceph
11 | - /var/lib/ceph
12 | changed_when: false
13 | when: sestatus.stdout != 'Disabled'
14 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/docker/start_docker_rgw.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: run the rados gateway docker image
3 | docker:
4 | image: "{{ ceph_rgw_docker_username }}/{{ ceph_rgw_docker_imagename }}"
5 | name: ceph-{{ ansible_hostname }}-rgw
6 | expose: "{{ ceph_rgw_civetweb_port }}"
7 | ports: "{{ ceph_rgw_civetweb_port }}:{{ ceph_rgw_civetweb_port }}"
8 | state: running
9 | env: "CEPH_DAEMON=RGW,{{ ceph_rgw_docker_extra_env }}"
10 | volumes: "/var/lib/ceph:/var/lib/ceph,/etc/ceph:/etc/ceph"
11 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: pre_requisite.yml
3 | when: not rgw_containerized_deployment
4 |
5 | - include: openstack-keystone.yml
6 | when: radosgw_keystone
7 |
8 | - include: start_radosgw.yml
9 | when: not rgw_containerized_deployment
10 |
11 | - include: ./docker/main.yml
12 | when: rgw_containerized_deployment
13 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/openstack-keystone.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: create nss directory for keystone certificates
3 | file:
4 | path: "{{ radosgw_nss_db_path }}"
5 | state: directory
6 | owner: root
7 | group: root
8 | mode: 0644
9 |
10 | - name: create nss entries for keystone certificates
11 | shell: "{{ item }}"
12 | with_items:
13 | - "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey |certutil -d {{ radosgw_nss_db_path }} -A -n ca -t 'TCu,Cu,Tuw'"
14 | - "openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | certutil -A -d {{ radosgw_nss_db_path }} -n signing_cert -t 'P,P,P'"
15 |
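This file is only included when `radosgw_keystone` is true (see `tasks/main.yml`) and assumes the Keystone PKI certificates already exist under `/etc/keystone/ssl/certs/` on the gateway host. A minimal group_vars sketch, with a hypothetical NSS database path:

```
radosgw_keystone: true
radosgw_nss_db_path: /var/lib/ceph/nss   # hypothetical path
```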
--------------------------------------------------------------------------------
/roles/ceph-rgw/tasks/start_radosgw.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check if rgw is started
3 | command: /etc/init.d/radosgw status
4 | register: rgwstatus
5 | changed_when: false
6 | failed_when: false
7 |
8 | - name: start rgw
9 | command: /etc/init.d/radosgw start
10 | when:
11 | rgwstatus.rc != 0 and
12 | ansible_distribution != "Ubuntu" and
13 | ansible_os_family != 'RedHat'
14 |
15 | - name: activate rgw on ubuntu
16 | command: initctl emit radosgw cluster=ceph id=rgw.{{ ansible_hostname }}
17 | changed_when: false
18 | when: ansible_distribution == 'Ubuntu'
19 |
20 | - name: start rgw on ubuntu
21 | service:
22 | name: radosgw
23 | args: id=rgw.{{ ansible_hostname }}
24 | state: started
25 | when: ansible_distribution == 'Ubuntu'
26 |
27 | - name: start rgw on red hat (before or on infernalis)
28 | service:
29 | name: ceph-radosgw
30 | state: started
31 | enabled: yes
32 | when: ansible_os_family == 'RedHat' and
33 | (ceph_stable_release == 'dumpling' or
34 | ceph_stable_release == 'emperor' or
35 | ceph_stable_release == 'firefly' or
36 | ceph_stable_release == 'giant' or
37 | ceph_stable_release == 'hammer')
38 |
39 | - name: start rgw on red hat (after infernalis)
40 | service:
41 | name: ceph-radosgw@{{ ansible_hostname }}
42 | state: started
43 | enabled: yes
44 | when: ansible_os_family == 'RedHat' and not
45 | (ceph_stable_release == 'dumpling' or
46 | ceph_stable_release == 'emperor' or
47 | ceph_stable_release == 'firefly' or
48 | ceph_stable_release == 'giant' or
49 | ceph_stable_release == 'hammer')
50 |
--------------------------------------------------------------------------------
/roles/ceph-rgw/templates/ceph.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | Defaults:{{ radosgw_user }} !requiretty
3 |
--------------------------------------------------------------------------------
/roles/consul/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_version: "0.6.4"
3 |
--------------------------------------------------------------------------------
/roles/consul/files/consul.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Consul
3 | After=auditd.service systemd-user-sessions.service time-sync.target docker.service
4 |
5 | [Service]
6 | Restart=on-failure
7 | RestartSec=10s
8 | ExecStart=/usr/bin/consul.sh start
9 | ExecStop=/usr/bin/consul.sh stop
10 | KillMode=control-group
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/consul/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: stop consul
4 | service: name=consul state=stopped
5 |
6 | - name: disable consul
7 | service: name=consul enabled=no
8 |
9 | - name: remove systemd unit
10 | file:
11 | state: absent
12 | path: /etc/systemd/system/consul.service
13 |
14 | - name: cleanup iptables (udp)
15 | shell: iptables -D INPUT -p udp --dport {{ item }} -j ACCEPT -m comment --comment "contiv_consul udp ({{ item }})"
16 | become: true
17 | with_items:
18 | - 8300
19 | - 8301
20 | - 8500
21 |
22 | - name: cleanup iptables (tcp)
23 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv_consul tcp ({{ item }})"
24 | become: true
25 | with_items:
26 | - 8400
27 | - 8300
28 | - 8301
29 | - 8500
30 |
--------------------------------------------------------------------------------
/roles/consul/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: download consul binary {{ consul_version }}
2 | get_url:
3 | validate_certs: "{{ validate_certs }}"
4 | url: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_linux_amd64.zip"
5 | dest: "/tmp/consul_{{ consul_version }}_linux_amd64.zip"
6 | tags:
7 | - prebake-for-dev
8 |
9 | - name: install consul {{ consul_version }}
10 | unarchive:
11 | copy: no
12 | src: "/tmp/consul_{{ consul_version }}_linux_amd64.zip"
13 | dest: /usr/bin
14 | tags:
15 | - prebake-for-dev
16 |
17 | - name: install consul run script
18 | template:
19 | src: "consul.j2"
20 | dest: "/usr/bin/consul.sh"
21 | mode: 0755
22 | owner: root
23 |
24 | - name: install consul service
25 | copy: src=consul.service dest=/etc/systemd/system/consul.service
26 |
27 | - name: start consul
28 | service: name=consul state=started
29 |
30 | - name: setup UDP iptables for consul
31 | shell: >
32 | ( iptables -L INPUT | grep -q 'contiv_consul udp ({{ item }})' ) || \
33 | iptables -I INPUT 1 -p udp --dport {{ item }} -j ACCEPT -m comment --comment "contiv_consul udp ({{ item }})"
34 | become: true
35 | with_items:
36 | - 8300
37 | - 8301
38 | - 8500
39 |
40 | - name: setup TCP iptables for consul
41 | shell: >
42 | ( iptables -L INPUT | grep -q 'contiv_consul tcp ({{ item }})' ) || \
43 | iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "contiv_consul tcp ({{ item }})"
44 | become: true
45 | with_items:
46 | - 8400
47 | - 8300
48 | - 8301
49 | - 8500
50 |
--------------------------------------------------------------------------------
/roles/consul/templates/consul.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | extra_args=""
4 |
5 | if [ "{{ node_addr }}" = "{{ consul_leader }}" ]
6 | then
7 | extra_args=" -bootstrap"
8 | else
9 | extra_args=" -join {{ consul_leader }}"
10 | fi
11 |
12 | case "$1" in
13 | start)
14 | docker run --name consul --rm -p 8400:8400 -p 8300:8300/udp -p 8301:8301/udp -p 8500:8500/udp -p 8300:8300 -p 8301:8301 -p 8500:8500 consul:v{{ consul_version }} agent ${extra_args} -server -node=$(hostname) -client=0.0.0.0 -advertise {{ node_addr }}
15 | ;;
16 | stop)
17 | docker rm -f consul
18 | ;;
19 | esac
20 |
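The template chooses `-bootstrap` versus `-join` by comparing `node_addr` with `consul_leader`, so both must be defined for every host. A sketch of the expected inventory variables (addresses are hypothetical):

```
# group_vars
consul_leader: 192.168.2.10
# host_vars (one per node)
node_addr: 192.168.2.11
```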
--------------------------------------------------------------------------------
/roles/contiv_cluster/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # role variable for the cluster manager service
3 |
4 | collins_image: contiv/collins
5 | collins_image_version: "02_25_2016"
6 | collins_host_port: 9000
7 | collins_guest_port: 9000
8 | clusterm_rule_comment: "contiv_clusterm traffic"
9 | clusterm_client_port: 9007
10 | clusterm_args_file: "clusterm.args"
11 | clusterm_conf_file: "clusterm.conf"
12 |
13 | contiv_cluster_version: "v0.1-09-15-2016.10-33-27.UTC"
14 | contiv_cluster_tar_file: "cluster-{{ contiv_cluster_version }}.tar.bz2"
15 | contiv_cluster_src_file: "https://github.com/contiv/cluster/releases/download/{{ contiv_cluster_version }}/{{ contiv_cluster_tar_file }}"
16 | contiv_cluster_dest_file: "/tmp/{{ contiv_cluster_tar_file }}"
17 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/files/clusterm.args:
--------------------------------------------------------------------------------
1 | CLUSTERM_ARGS="--config=/etc/default/clusterm/clusterm.conf"
2 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/files/clusterm.conf:
--------------------------------------------------------------------------------
1 | {
2 | "comment" : "empty JSON loads a default clusterm configuration. Add configuration here and restart clusterm service to load non-default configuration"
3 | }
4 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/files/collins.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Collins
3 | After=auditd.service systemd-user-sessions.service time-sync.target docker.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/collins.sh start
7 | ExecStop=/usr/bin/collins.sh stop
8 | KillMode=control-group
9 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # The dependencies for cluster-mgr
3 |
4 | dependencies:
5 | - { role: ansible }
6 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up contiv_cluster services
3 |
4 | - name: cleanup iptables for clusterm
5 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ clusterm_rule_comment }} ({{ item }})"
6 | with_items:
7 | - "{{ clusterm_client_port }}"
8 |
9 | - name: stop clusterm
10 | service: name=clusterm state=stopped
11 |
12 | - name: stop collins
13 | service: name=collins state=stopped
14 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting clusterm service
3 |
4 | - name: copy shell script for starting collins
5 | template: src=collins.j2 dest=/usr/bin/collins.sh mode=u=rwx,g=rx,o=rx
6 | tags:
7 | - prebake-for-dev
8 |
9 | - name: copy systemd units for collins
10 | copy: src=collins.service dest=/etc/systemd/system/collins.service
11 | tags:
12 | - prebake-for-dev
13 |
14 | - name: check for collins image
15 | shell: "docker images | grep {{ collins_image }} | grep -q {{ collins_image_version }}"
16 | ignore_errors: true
17 | register: collins_exists
18 | tags:
19 | - prebake-for-dev
20 |
21 | - name: pull collins container image
22 | shell: "docker pull {{ collins_image }}:{{ collins_image_version }}"
23 | when: not collins_exists|success
24 | tags:
25 | - prebake-for-dev
26 |
27 | - name: start collins
28 | service: name=collins state=started
29 |
30 | - name: download clusterm
31 | get_url:
32 | validate_certs: "{{ validate_certs }}"
33 | url: "{{ contiv_cluster_src_file }}"
34 | dest: "{{ contiv_cluster_dest_file }}"
35 |
36 | - name: install clusterm
37 | shell: tar vxjf {{ contiv_cluster_dest_file }}
38 | args:
39 | chdir: /usr/bin/
40 |
41 | - name: create conf dir for clusterm
42 | file:
43 | name: /etc/default/clusterm/
44 | state: directory
45 |
46 | - name: copy conf files for clusterm
47 | copy:
48 | src: "{{ item.file }}"
49 | dest: "/etc/default/clusterm/{{ item.file }}"
50 | force: "{{ item.force }}"
51 | with_items:
52 | - { file: "{{ clusterm_args_file }}", force: "yes" }
53 | - { file: "{{ clusterm_conf_file }}", force: "no" }
54 |
55 | - name: copy systemd units for clusterm
56 | template: src=clusterm.j2 dest=/etc/systemd/system/clusterm.service
57 |
58 | - name: setup iptables for clusterm
59 | shell: >
60 | ( iptables -L INPUT | grep "{{ clusterm_rule_comment }} ({{ item }})" ) || \
61 | iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ clusterm_rule_comment }} ({{ item }})"
62 | with_items:
63 | - "{{ clusterm_client_port }}"
64 |
65 | - name: start clusterm
66 | service: name=clusterm state=started
67 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/templates/clusterm.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Clusterm
3 | After=auditd.service systemd-user-sessions.service time-sync.target serf.service collins.service
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/clusterm/{{ clusterm_args_file }}
7 | ExecStart=/usr/bin/clusterm $CLUSTERM_ARGS
8 | Restart=on-failure
9 | RestartSec=10
10 | KillMode=control-group
11 |
--------------------------------------------------------------------------------
/roles/contiv_cluster/templates/collins.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 start"
4 | if [ $# -ne 1 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | case $1 in
10 | start)
11 | set -e
12 |
13 | /usr/bin/docker run -t -p {{ collins_host_port }}:{{ collins_guest_port }} \
14 | --name collins {{ collins_image }}:{{ collins_image_version }}
15 | ;;
16 |
17 | stop)
18 | # skipping `set -e` as we shouldn't stop on error
19 | /usr/bin/docker stop collins
20 | /usr/bin/docker rm collins
21 | ;;
22 |
23 | *)
24 | echo USAGE: $usage
25 | exit 1
26 | ;;
27 | esac
28 |
--------------------------------------------------------------------------------
/roles/contiv_network/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Role defaults for contiv_network & v2plugin
4 | # NOTE: Role defaults have a lower priority than inventory vars.
5 | # Include variables which need to be overridden by inventory vars here.
6 |
7 | contiv_v2plugin_image: "contiv/v2plugin:{{ contiv_network_version }}"
8 | contiv_v2plugin_tar_filename: "v2plugin-{{ contiv_network_version }}.tar.gz"
9 |
10 | contiv_network_mode: "standalone" # Accepted values: standalone, aci
11 | netplugin_mode: "docker" # Accepted values: docker, kubernetes
12 | fwd_mode: "bridge" # Accepted values: bridge, routing
13 | listen_url: ":9999"
14 | control_url: ":9999"
15 | netctl_url: "http://netmaster:9999"
16 | cluster_store: "etcd://127.0.0.1:2379"
17 | contiv_standalone_binaries: "/var/contiv_cache"
18 | ofnet_master_port: 9001
19 | ofnet_agent_port1: 9002
20 | ofnet_agent_port2: 9003
21 | netmaster_port: 9999
22 | gobgp_grpc_port: 8080
23 | bgp_port: 179
24 | # Since there is a conflict on vxlan port used by Docker overlay (ingress)
25 | # contiv needs to use a different port than the standard 4789 for v2plugin
26 | # vxlan_port: 8472
27 | vxlan_port: 4789
28 | netplugin_rule_comment: "contiv_network traffic"
29 |
30 | aci_gw_image: "contiv/aci-gw"
31 | # contiv tar files to get netplugin binary
32 | contiv_network_version: "1.1.7"
33 | contiv_network_tar_file: "netplugin-{{ contiv_network_version }}.tar.bz2"
34 | contiv_network_src_tar_file: "{{ contiv_standalone_binaries }}/{{ contiv_network_tar_file }}"
35 | contiv_network_src_file: "https://github.com/contiv/netplugin/releases/download/{{ contiv_network_version }}/{{ contiv_network_tar_file }}"
36 | contiv_network_dest_file: "/tmp/{{ contiv_network_tar_file }}"
37 |
38 | apic_epg_bridge_domain: "not_specified"
39 | apic_contracts_unrestricted_mode: "no"
40 |
41 | contiv_network_k8s_config_dir: "/opt/contiv/config/"
42 | contiv_network_local_install: False
43 |
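As the note at the top says, these defaults yield to inventory vars, so overrides belong in inventory or group_vars. A sketch (hypothetical values) that switches to routing mode and moves VXLAN off 4789 for a v2plugin install, as the port-conflict comment above suggests:

```
fwd_mode: "routing"
vxlan_port: 8472
netplugin_if: eth2   # data-plane interface consumed by netplugin.j2 and the v2plugin tasks; value is illustrative
```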
--------------------------------------------------------------------------------
/roles/contiv_network/files/aci-gw.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Contiv ACI gw
3 | After=auditd.service systemd-user-sessions.service time-sync.target docker.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/aci_gw.sh start
7 | ExecStop=/usr/bin/aci_gw.sh stop
8 | KillMode=control-group
9 | Restart=on-failure
10 | RestartSec=10
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/contiv_network/files/contiv_cni.conf:
--------------------------------------------------------------------------------
1 | {
2 | "cniVersion": "0.1.0",
3 | "name": "contiv-netplugin",
4 | "type": "contivk8s"
5 | }
6 |
--------------------------------------------------------------------------------
/roles/contiv_network/files/netmaster.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Netmaster
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/netmaster
7 | ExecStart=/usr/bin/netmaster $NETMASTER_ARGS
8 | KillMode=control-group
9 | Restart=on-failure
10 | RestartSec=10
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/contiv_network/files/netplugin.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Netplugin
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/netplugin
7 | ExecStart=/usr/bin/netplugin $NETPLUGIN_ARGS
8 | ExecStopPost=/usr/bin/rm -f /run/docker/plugins/netplugin.sock
9 | KillMode=control-group
10 | Restart=on-failure
11 | RestartSec=10
12 |
13 | [Install]
14 | WantedBy=multi-user.target
15 |
--------------------------------------------------------------------------------
/roles/contiv_network/files/v2plugin.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=enable v2plugin after etcd
3 | After=docker.service etcd.service
4 |
5 | [Service]
6 | Type=oneshot
7 | ExecStart=/usr/bin/v2plugin.sh
8 | StandardOutput=journal
9 |
10 | [Install]
11 | WantedBy=multi-user.target etcd.service
12 |
--------------------------------------------------------------------------------
/roles/contiv_network/files/v2plugin.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Keep trying forever to "docker plugin enable" the contiv plugin
4 |
5 | set -euxo pipefail
6 | while [ true ]
7 | do
8 | ID="$(docker plugin ls | awk '/contiv/ {print $2}')"
9 | STATUS="$(docker plugin ls | awk '/contiv/ {print $NF}')"  # ENABLED column of the contiv plugin row
10 | if [ "$STATUS" != "true" ]; then
11 | docker plugin enable "$ID" || sleep 1
12 | else
13 | break
14 | fi
15 | done
16 |
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/aci_tasks.yml:
--------------------------------------------------------------------------------
1 | # These tasks are run when contiv_network_mode is set to aci
2 |
3 | # Load the aci-gw image from the local tar. Ignore any errors to handle the
4 | # case where the image is not bundled in the local cache.
5 | - name: copy aci-gw image
6 | copy: src={{ contiv_standalone_binaries }}/aci-gw-image.tar dest=/tmp/aci-gw-image.tar
7 | when: contiv_network_local_install == True
8 |
9 | - name: load aci-gw image
10 | shell: docker load -i /tmp/aci-gw-image.tar
11 | when: contiv_network_local_install == True
12 |
13 | # For non-local installs pull the aci-gw image. Ignore errors if the image pull times out.
14 | # It will be loaded on docker run
15 | - name: pull aci-gw image
16 | shell: docker pull {{ aci_gw_image }}
17 | when: contiv_network_local_install == False
18 |
19 | - name: copy shell script for starting aci-gw
20 | template: src=aci_gw.j2 dest=/usr/bin/aci_gw.sh mode=u=rwx,g=rx,o=rx
21 |
22 | - name: copy systemd units for aci-gw
23 | copy: src=aci-gw.service dest=/etc/systemd/system/aci-gw.service
24 |
25 | - name: start aci-gw container
26 | systemd: name=aci-gw daemon_reload=yes state=started enabled=yes
27 |
28 | - name: set aci mode
29 | shell: netctl --netmaster {{ netctl_url }} global set --fabric-mode aci
30 | run_once: true
31 |
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up contiv_network & v2plugin services
3 |
4 | - name: stop netmaster
5 | service: name=netmaster state=stopped
6 |
7 | - name: stop aci-gw container
8 | service: name=aci-gw state=stopped
9 |
10 | - name: stop netplugin
11 | service: name=netplugin state=stopped
12 |
13 | - name: uninstall v2plugin on nodes
14 | shell: >
15 | (/usr/bin/docker plugin disable {{ contiv_v2plugin_image }}) && /usr/bin/docker plugin rm -f {{ contiv_v2plugin_image }}
16 |
17 |
18 | - name: cleanup netmaster host alias
19 | lineinfile:
20 | dest: /etc/hosts
21 | regexp: " netmaster$"
22 | state: absent
23 | become: true
24 |
25 | - name: cleanup iptables for contiv network control plane
26 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ netplugin_rule_comment }} ({{ item }})"
27 | become: true
28 | with_items:
29 | - "{{ ofnet_master_port }}"
30 | - "{{ ofnet_agent_port1 }}"
31 | - "{{ ofnet_agent_port2 }}"
32 | - "{{ netmaster_port }}"
33 | - "{{ gobgp_grpc_port }}"
34 | - "{{ bgp_port }}"
35 | - "{{ vxlan_port }}"
36 |
37 | - include: ovs_cleanup.yml
38 | static: no
39 | ignore_errors: yes
40 |
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/k8s_tasks.yml:
--------------------------------------------------------------------------------
1 | # These tasks are run when scheduler is kubernetes
2 |
3 | - name: include kubernetes specific variables
4 | include_vars: roles/kubernetes/{{ item }}/main.yml
5 | with_items:
6 | - defaults
7 |
8 | - name: copy contiv cni bin to k8s plugin dir
9 | copy:
10 | src: "/usr/bin/contiv/netplugin/contivk8s"
11 | dest: "{{ k8s_net_plugin_bin_dir }}/contivk8s"
12 | mode: u=rwx,g=rx,o=rx
13 | remote_src: yes
14 | force: yes
15 | #notify: restart kubernetes
16 |
17 | - name: copy contiv k8s plugin config file
18 | copy:
19 | src: contiv_cni.conf
20 | dest: "{{ k8s_net_plugin_config_dir }}/contiv_cni.conf"
21 | #notify: restart kubernetes
22 |
23 | - name: ensure contiv plugin config dir exists
24 | file:
25 | path: "{{ contiv_network_k8s_config_dir }}"
26 | recurse: yes
27 | state: directory
28 |
29 | - name: setup contiv.json config for the cni plugin
30 | template:
31 | src: netplugin_k8s_config.j2
32 | dest: "{{ contiv_network_k8s_config_dir }}/contiv.json"
33 |
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/ovs_cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up contiv_network services
3 |
4 | - name: cleanup ovs vlan state
5 | shell: ovs-vsctl del-br contivVlanBridge
6 |
7 | - name: cleanup ovs vxlan state
8 | shell: ovs-vsctl del-br contivVxlanBridge
9 |
10 | - name: cleanup ports
11 | shell: >
12 | set -x; for p in $(ifconfig | grep vport | awk '{print $1}'); do
13 | ip link delete $p type veth;
14 | done
15 | args:
16 | executable: /bin/bash
17 | register: ports
18 |
19 | - debug: var=ports
20 |
21 | - name: deny openvswitch_t type in selinux
22 | shell: >
23 | semanage permissive -d openvswitch_t
24 | become: true
25 |
26 | - name: cleanup iptables for vxlan vtep port
27 | shell: iptables -D INPUT -p udp --dport {{ item }} -j ACCEPT -m comment --comment "{{ netplugin_rule_comment }} ({{ item }})"
28 | become: true
29 | with_items:
30 | - "{{ vxlan_port }}"
31 |
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/services.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting netmaster and netplugin service
3 |
4 | # Install ovs, needed for our netplugin deployments.
5 | - include: ovs.yml
6 |
7 | - name: for legacy install, extract all binaries
8 | shell: tar vxjf {{ contiv_network_dest_file }}
9 | args:
10 | chdir: /usr/bin/contiv/netplugin
11 |
12 | - name: create links for netplugin binaries
13 | file: src=/usr/bin/contiv/netplugin/{{ item }} dest=/usr/bin/{{ item }} state=link force=yes
14 | with_items:
15 | - netmaster
16 | - netplugin
17 | - contivk8s
18 |
19 | - name: copy environment file for netplugin
20 | template: src=netplugin.j2 dest=/etc/default/netplugin
21 |
22 | - name: copy systemd units for netplugin
23 | copy: src=netplugin.service dest=/etc/systemd/system/netplugin.service
24 |
25 | - name: start netplugin
26 | systemd: name=netplugin daemon_reload=yes state=started enabled=yes
27 |
28 | # XXX: remove this task once the following is resolved: https://github.com/contiv/netplugin/issues/275
29 | - name: setup hostname alias
30 | lineinfile:
31 | dest: /etc/hosts
32 | line: "{{ item.line }}"
33 | regexp: "{{ item.regexp }}"
34 | state: present
35 | with_items:
36 | - { line: '127.0.0.1 localhost', regexp: '^127\.0\.0\.1 .*localhost$' }
37 | - { line: '{{ node_addr }} {{ ansible_hostname }}', regexp: ' {{ ansible_hostname }}$' }
38 |
39 | - name: copy environment file for netmaster
40 | template: src=netmaster.j2 dest=/etc/default/netmaster
41 | when: run_as == "master"
42 |
43 | - name: copy systemd units for netmaster
44 | copy: src=netmaster.service dest=/etc/systemd/system/netmaster.service
45 | when: run_as == "master"
46 |
47 | - name: start netmaster
48 | systemd: name=netmaster daemon_reload=yes state=started enabled=yes
49 | when: run_as == "master"
50 |
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/v2plugin.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for starting docker plugin service
3 |
4 | - name: for v2 plugin, extract netctl binaries
5 | shell: tar vxjf {{ contiv_network_dest_file }} netctl contrib/completion/bash/netctl
6 | args:
7 | chdir: /usr/bin/contiv/netplugin
8 |
9 | - name: create the directories needed for ovs
10 | file: state=directory path={{ item }}
11 | with_items:
12 | - "/etc/openvswitch"
13 | - "/var/log/openvswitch"
14 |
15 | - name: check if v2plugin has been installed on {{ run_as }} nodes
16 | shell: docker plugin ls | grep -n {{ contiv_v2plugin_image }}
17 | register: v2plugin_installed
18 | changed_when: no
19 | ignore_errors: True
20 |
21 | - name: check if v2plugin archive is in the contiv_cache
22 | local_action: stat path=/var/contiv_cache/{{ contiv_v2plugin_tar_filename }} follow=yes
23 | register: v2plugin_archive_stat
24 |
25 | - name: install v2plugin on {{ run_as }} nodes from dockerhub
26 | shell: >
27 | /usr/bin/docker plugin install --grant-all-permissions {{contiv_v2plugin_image}} ctrl_ip={{node_addr}} control_url={{node_addr}}:{{netmaster_port}} vxlan_port={{vxlan_port}} iflist={{netplugin_if}} plugin_name={{contiv_v2plugin_image}} cluster_store={{cluster_store}} plugin_role={{run_as}} fwd_mode={{ fwd_mode }}
28 | retries: 5
29 | delay: 10
30 | when:
31 | - v2plugin_installed|failed
32 | - not v2plugin_archive_stat.stat.exists
33 |
34 | - name: copy v2plugin.sh file
35 | copy: src=v2plugin.sh dest=/usr/bin/v2plugin.sh mode=u=rwx,g=rx,o=rx
36 |
37 | - name: copy systemd units for v2plugin
38 | copy: src=v2plugin.service dest=/etc/systemd/system/v2plugin.service
39 |
40 | - name: enable v2plugin
41 | systemd: name=v2plugin enabled=yes
42 |
43 | - name: include v2plugin install from contiv_cache
44 | include: v2plugin_local_install.yml
45 | when:
46 | - v2plugin_installed|failed
47 | - v2plugin_archive_stat.stat.exists
48 | static: no
49 |
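The dockerhub install task interpolates several variables that are not defined in this role's defaults (`node_addr`, `netplugin_if`, `run_as`); they are expected from the inventory. A sketch of the host vars it assumes (values hypothetical):

```
node_addr: 192.168.2.11   # control/VTEP IP of this node
netplugin_if: eth2        # uplink interface handed to the plugin
run_as: master            # compared against "master" elsewhere in these roles
```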
--------------------------------------------------------------------------------
/roles/contiv_network/tasks/v2plugin_local_install.yml:
--------------------------------------------------------------------------------
1 | - name: Create temp directory for building v2plugin
2 | tempfile: state=directory suffix=-v2plugin
3 | register: v2plugin_temp_dir
4 |
5 | - block:
6 | - name: Allow Docker to read the tmpdir
7 | file: path={{ v2plugin_temp_dir.path }} group=docker mode=750
8 |
9 | - name: Create v2plugin rootfs directory
10 | file: state=directory path={{ v2plugin_temp_dir.path }}/rootfs
11 |
12 | - name: Unpack v2plugin from local archive
13 | unarchive:
14 | src: "/var/contiv_cache/{{ contiv_v2plugin_tar_filename }}"
15 | dest: "{{ v2plugin_temp_dir.path }}/rootfs/"
16 |
17 | - name: Copy config.json for plugin container
18 | copy: src=/var/contiv_cache/config.json dest={{ v2plugin_temp_dir.path }}
19 |
20 | - name: Create v2plugin from exploded archive
21 | shell: >
22 | docker plugin create {{ contiv_v2plugin_image }}
23 | {{ v2plugin_temp_dir.path }}
24 |
25 | - name: Set v2plugin settings
26 | shell: >
27 | docker plugin set {{ contiv_v2plugin_image }}
28 | ctrl_ip={{ node_addr }}
29 | control_url={{ node_addr }}:{{ netmaster_port }}
30 | vxlan_port={{ vxlan_port }}
31 | iflist={{ netplugin_if }}
32 | plugin_name={{ contiv_v2plugin_image }}
33 | cluster_store={{ cluster_store }}
34 | plugin_role={{ run_as }}
35 | fwd_mode={{ fwd_mode }}
36 |
37 | always:
38 | - name: Remove v2plugin rootfs
39 | file: state=absent path={{ v2plugin_temp_dir.path }}
40 |
41 | - name: start v2plugin
42 | systemd: name=v2plugin state=started
43 |
--------------------------------------------------------------------------------
/roles/contiv_network/templates/aci_gw.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 start"
4 | if [ $# -ne 1 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | case $1 in
10 | start)
11 | set -e
12 |
13 | /usr/bin/docker run -t --net=host \
14 | -e "APIC_URL={{ apic_url }}" \
15 | -e "APIC_USERNAME={{ apic_username }}" \
16 | -e "APIC_PASSWORD={{ apic_password }}" \
17 | -e "APIC_LEAF_NODE={{ apic_leaf_nodes }}" \
18 | -e "APIC_PHYS_DOMAIN={{ apic_phys_domain }}" \
19 | -e "APIC_EPG_BRIDGE_DOMAIN={{ apic_epg_bridge_domain }}" \
20 | -e "APIC_CONTRACTS_UNRESTRICTED_MODE={{ apic_contracts_unrestricted_mode }}" \
21 | --name=contiv-aci-gw \
22 | {{ aci_gw_image }}
23 | ;;
24 |
25 | stop)
26 | # don't stop on error
27 | /usr/bin/docker stop contiv-aci-gw
28 | /usr/bin/docker rm contiv-aci-gw
29 | ;;
30 |
31 | *)
32 | echo USAGE: $usage
33 | exit 1
34 | ;;
35 | esac
36 |
--------------------------------------------------------------------------------
/roles/contiv_network/templates/netmaster.j2:
--------------------------------------------------------------------------------
1 | NETMASTER_ARGS="--cluster-mode {{netplugin_mode}} -cluster-store {{cluster_store}} -listen-url {{ listen_url }} -control-url {{ control_url }}"
2 |
--------------------------------------------------------------------------------
/roles/contiv_network/templates/netplugin.j2:
--------------------------------------------------------------------------------
1 | NETPLUGIN_ARGS='-plugin-mode {{netplugin_mode}} -vlan-if {{netplugin_if}} -vtep-ip {{node_addr}} -ctrl-ip {{node_addr}} -cluster-store {{cluster_store}} '
2 |
--------------------------------------------------------------------------------
/roles/contiv_network/templates/netplugin_k8s_config.j2:
--------------------------------------------------------------------------------
1 | {% if k8s_auth_token != "" %}
2 | {
3 | "K8S_API_SERVER": "https://{{ service_vip }}:{{ k8s_api_secure_port }}",
4 | "K8S_CA": "{{ k8s_cert_dir }}/{{ k8s_ca_file }}",
5 | "K8S_KEY": "{{ k8s_cert_dir }}/{{ k8s_cert_key }}",
6 | "K8S_CERT": "{{ k8s_cert_dir }}/{{ k8s_cert_file }}",
7 | "K8S_TOKEN": "{{ k8s_auth_token }}"
8 | }
9 | {% else %}
10 | {
11 | "K8S_API_SERVER": "https://{{ service_vip }}:{{ k8s_api_secure_port }}",
12 | "K8S_CA": "{{ k8s_cert_dir }}/{{ k8s_ca_file }}",
13 | "K8S_KEY": "{{ k8s_cert_dir }}/{{ k8s_cert_key }}",
14 | "K8S_CERT": "{{ k8s_cert_dir }}/{{ k8s_cert_file }}"
15 | }
16 | {% endif %}
17 |
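For illustration, with a non-empty `k8s_auth_token` the template renders a `contiv.json` along these lines (all values hypothetical; the variable names come from the kubernetes role defaults pulled in by `k8s_tasks.yml`):

```
{
  "K8S_API_SERVER": "https://10.254.0.1:6443",
  "K8S_CA": "/etc/kubernetes/certs/ca.pem",
  "K8S_KEY": "/etc/kubernetes/certs/server-key.pem",
  "K8S_CERT": "/etc/kubernetes/certs/server.pem",
  "K8S_TOKEN": "abcd1234"
}
```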
--------------------------------------------------------------------------------
/roles/contiv_storage/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Role defaults for contiv_storage
4 |
5 | contiv_storage_supervisor_host: ""
6 | contiv_storage_version: "v0.2.0"
7 | contiv_storage_tar_file: "volplugin-{{ contiv_storage_version }}.tar.bz2"
8 | contiv_storage_src_file: "https://github.com/contiv/volplugin/releases/download/{{ contiv_storage_version }}/{{ contiv_storage_tar_file }}"
9 | contiv_storage_dest_file: "/tmp/{{ contiv_storage_tar_file }}"
10 |
--------------------------------------------------------------------------------
/roles/contiv_storage/files/volmaster:
--------------------------------------------------------------------------------
1 | VOLMASTER_ARGS=""
2 |
--------------------------------------------------------------------------------
/roles/contiv_storage/files/volmaster.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Volmaster
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/volmaster
7 | ExecStart=/usr/bin/volmaster $VOLMASTER_ARGS
8 | KillMode=control-group
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
--------------------------------------------------------------------------------
/roles/contiv_storage/files/volplugin.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Volplugin
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/volplugin
7 | ExecStart=/usr/bin/volplugin $VOLPLUGIN_ARGS
8 | ExecStopPost=/usr/bin/rm -f /run/docker/plugins/volplugin.sock
9 | KillMode=control-group
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/roles/contiv_storage/files/volsupervisor:
--------------------------------------------------------------------------------
1 | VOLSUPERVISOR_ARGS=""
2 |
--------------------------------------------------------------------------------
/roles/contiv_storage/files/volsupervisor.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Volsupervisor
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | EnvironmentFile=/etc/default/volsupervisor
7 | ExecStart=/usr/bin/volsupervisor $VOLSUPERVISOR_ARGS
8 | KillMode=control-group
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
--------------------------------------------------------------------------------
/roles/contiv_storage/meta/main.yml:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - { role: ceph-install, tags: prebake-for-dev }
3 |
--------------------------------------------------------------------------------
/roles/contiv_storage/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up contiv_storage services
3 |
4 | - name: stop volmaster
5 | service: name=volmaster state=stopped
6 |
7 | - name: stop volsupervisor
8 | service: name=volsupervisor state=stopped
9 |
10 | - name: stop volplugin
11 | service: name=volplugin state=stopped
12 |
--------------------------------------------------------------------------------
/roles/contiv_storage/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting the storage service
3 |
4 | - name: download storage service binaries
5 | get_url:
6 | validate_certs: "{{ validate_certs }}"
7 | url: "{{ contiv_storage_src_file }}"
8 | dest: "{{ contiv_storage_dest_file }}"
9 |
10 | - name: install storage service
11 | shell: tar vxjf {{ contiv_storage_dest_file }}
12 | args:
13 | chdir: /usr/bin/
14 |
15 | - name: copy environment file for volmaster
16 | copy: src=volmaster dest=/etc/default/volmaster
17 | when: run_as == "master"
18 |
19 | - name: copy systemd units for volmaster
20 | copy: src=volmaster.service dest=/etc/systemd/system/volmaster.service
21 | when: run_as == "master"
22 |
23 | - name: start volmaster
24 | service: name=volmaster state=restarted
25 | when: run_as == "master"
26 |
27 | - name: copy environment file for volsupervisor
28 | copy: src=volsupervisor dest=/etc/default/volsupervisor
29 | when: run_as == "master"
30 |
31 | - name: copy systemd units for volsupervisor
32 | copy: src=volsupervisor.service dest=/etc/systemd/system/volsupervisor.service
33 | when: run_as == "master"
34 |
35 | - name: start volsupervisor
36 | service: name=volsupervisor state=restarted
37 | when: run_as == "master" and contiv_storage_supervisor_host != ""
38 | run_once: true
39 | delegate_to: "{{ contiv_storage_supervisor_host }}"
40 |
41 | - name: copy environment file for volplugin
42 | template: src=volplugin.j2 dest=/etc/default/volplugin
43 |
44 | - name: copy systemd units for volplugin
45 | copy: src=volplugin.service dest=/etc/systemd/system/volplugin.service
46 |
47 | - name: start volplugin
48 | service: name=volplugin state=restarted
49 |
--------------------------------------------------------------------------------
/roles/contiv_storage/templates/volplugin.j2:
--------------------------------------------------------------------------------
1 | VOLPLUGIN_ARGS='--master {{ service_vip }}:9005'
2 |
--------------------------------------------------------------------------------
/roles/dev/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # The dependencies contain tasks for installing development packages
3 | # We also use this role to pre-bake packages in the OS image for our development
4 | # environment.
5 | #
6 | # Note:
7 | # - 'dev' role is run as part of devtest host-group which is invoked from 'contiv/build'
8 | # and 'contiv/lab'. See those repos for details on the ansible invocation but the
9 | # most noticeable point is that they run the tasks tagged as 'prebake-for-dev'
10 | # - Anything marked with tags 'prebake-for-dev' is installed when 'dev' role is run,
11 | # so all that is needed for pre-baking certain binaries is to mark them with
12 | # 'prebake-for-dev' tag in respective roles.
13 |
14 | dependencies:
15 | - { role: ansible, tags: 'prebake-for-dev' }
16 | - { role: docker }
17 | - { role: etcd }
18 | - { role: consul }
19 | - { role: swarm }
20 | - { role: ucp }
21 | - { role: contiv_network }
22 | - { role: v2plugin }
23 |
--------------------------------------------------------------------------------
/roles/dev/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for installing development packages
3 | # We also use this role to pre-bake packages in the OS image for our development
4 | # environment.
5 | #
6 | # Note:
7 | # - 'dev' role is run as part of devtest host-group which is invoked from 'contiv/build'
8 | # and 'contiv/lab'. See those repos for details on the ansible invocation but the
9 | # most noticeable point is that they run the tasks tagged as 'prebake-for-dev'
10 | # - Anything marked with tags 'prebake-for-dev' is installed when 'dev' role is run,
11 | # so all that is needed for pre-baking certain binaries is to mark them with
12 | # 'prebake-for-dev' tag in respective roles. For third-party roles like ceph we tag
13 | # them explicitly here
14 |
15 |
16 | - include: ubuntu_tasks.yml
17 | when: ansible_os_family == "Debian"
18 | tags:
19 | - prebake-for-dev
20 |
21 | - include: redhat_tasks.yml
22 | when: ansible_os_family == "RedHat"
23 | tags:
24 | - prebake-for-dev
25 |
26 | - include: os_agnostic_tasks.yml
27 | tags:
28 | - prebake-for-dev
29 |
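Per the note above, pre-baking a binary only requires tagging its tasks; the build/lab invocations then restrict the run to that tag (e.g. `--tags prebake-for-dev`). A hypothetical example of the pattern, following the style of the consul and contiv_cluster roles:

```
- name: download some-dev-tool         # hypothetical task shown only to illustrate the tag
  get_url:
    url: "https://example.com/some-dev-tool.tar.gz"
    dest: /tmp/some-dev-tool.tar.gz
  tags:
    - prebake-for-dev
```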
--------------------------------------------------------------------------------
/roles/dev/tasks/os_agnostic_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: Set Go version
2 | set_fact:
3 | go_version: "1.7.6"
4 |
5 | - name: "download Golang {{ go_version }}"
6 | get_url:
7 | validate_certs: "{{ validate_certs }}"
8 | url: "https://storage.googleapis.com/golang/go{{ go_version }}.linux-amd64.tar.gz"
9 | dest: "/tmp/go{{ go_version }}.linux-amd64.tar.gz"
10 |
11 | - name: "install Golang {{ go_version }}"
12 | shell: "rm -rf go/ && tar xfvz /tmp/go{{ go_version }}.linux-amd64.tar.gz"
13 | args:
14 | chdir: /usr/local/
15 |
16 | - name: setup golang environment
17 | copy:
18 | dest: /etc/profile.d/00golang.sh
19 | content: "export PATH=/opt/golang/bin:/usr/local/go/bin:$PATH; export GOPATH=/opt/golang"
20 |
--------------------------------------------------------------------------------
/roles/dev/tasks/redhat_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: install/upgrade base packages (redhat)
2 | yum:
3 | name: "{{ item }}"
4 | update_cache: true
5 | state: latest
6 | with_items:
7 | - vim
8 | - git
9 | - mercurial
10 | - gcc
11 | - perl
12 | - time
13 | - tcpdump
14 |
--------------------------------------------------------------------------------
/roles/dev/tasks/ubuntu_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: install base packages (debian)
2 | apt:
3 | name: "{{ item }}"
4 | state: latest
5 | with_items:
6 | - vim-nox
7 | - git
8 | - mercurial
9 | - build-essential
10 | - perl
11 | - time
12 | - tcpdump
13 |
--------------------------------------------------------------------------------
/roles/docker/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Default values for docker role
3 |
4 | docker_api_port: 2385
5 | docker_version: 1.12.6
6 | docker_compose_version: 1.10.0
7 | docker_rule_comment: "docker api"
8 | docker_device: ""
9 | docker_device_size: "10000MB"
10 | docker_device_metadata_size: "1000MB"
11 | docker_cs_engine_supported_versions_ubuntu:
12 | "wily": [ "1.10", "1.11", "1.12", "1.13" ]
13 | "xenial": [ "1.11", "1.12", "1.13" ]
14 | "Core": [""] # needed for redhat installations due to templating
15 | "Maipo": [""] # needed for redhat installations due to templating
16 | docker_mount_flags: "shared"
17 | docker_reset_container_state: false
18 | docker_reset_image_state: false
19 |
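`docker_device` is empty by default, which skips the thin-pool setup in `create_docker_device.yml` and the `dm.thinpooldev` option set in `docker-svc.j2`. A group_vars sketch that enables it (device path and sizes are hypothetical):

```
docker_device: /dev/sdc
docker_device_size: "50000MB"
docker_device_metadata_size: "2000MB"
```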
--------------------------------------------------------------------------------
/roles/docker/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up docker
3 |
4 | - name: remove all containers
5 | shell: unset DOCKER_HOST && docker rm -f -v $(docker ps -a -q)
6 | when: docker_reset_container_state == true
7 |
8 | - name: remove all images
9 | shell: unset DOCKER_HOST && docker rmi -f $(docker images -q)
10 | when: docker_reset_image_state == true
11 |
12 | - name: stop docker
13 | service: name=docker state=stopped
14 |
15 | - name: stop docker tcp socket
16 | service: name=docker-tcp.socket state=stopped
17 |
18 | - name: cleanup iptables for docker
19 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ docker_rule_comment }} ({{ item }})"
20 | become: true
21 | with_items:
22 | - "{{ docker_api_port }}"
23 |
--------------------------------------------------------------------------------
/roles/docker/tasks/create_docker_device.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: pvcreate check for {{ docker_device }}
4 | shell: "pvdisplay {{ docker_device }}"
5 | register: pvcreated
6 | ignore_errors: true
7 |
8 | - name: pvcreate {{ docker_device }}
9 | shell: "pvcreate {{ docker_device }}"
10 | when: pvcreated|failed
11 |
12 | - name: vgcreate check for {{ docker_device }}
13 | shell: "vgdisplay contiv"
14 | register: vgcreated
15 | ignore_errors: true
16 |
17 | - name: vgcreate contiv
18 | shell: "vgcreate contiv {{ docker_device }}"
19 | when: vgcreated|failed
20 |
21 | - name: lvcreate check for {{ docker_device }}
22 | shell: "lvdisplay contiv | grep -q dockerthin"
23 | register: lvcreated
24 | ignore_errors: true
25 |
26 | - name: lvcreate contiv-dockerthin
27 | shell: lvcreate -n dockerthin -T contiv --size {{ docker_device_size }} --poolmetadatasize {{ docker_device_metadata_size }}
28 | when: lvcreated|failed
29 | register: thin_provisioned
30 |
--------------------------------------------------------------------------------
/roles/docker/tasks/redhat_install_tasks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for installing docker service
3 | #
4 |
5 | - name: add docker's public key for CS-engine (redhat) (from sks-keyservers.net)
6 | rpm_key:
7 | key: "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e"
8 | state: present
9 | validate_certs: "{{ validate_certs }}"
10 | register: result
11 | ignore_errors: True
12 |
13 | - name: add docker's public key for CS-engine (redhat) (from pgp.mit.edu)
14 | rpm_key:
15 | key: "https://pgp.mit.edu/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e"
16 | state: present
17 | validate_certs: "{{ validate_certs }}"
18 | when: result|failed
19 |
20 | - name: add docker CS-engine repos (redhat)
21 | shell: yum-config-manager --add-repo https://yum.dockerproject.org/repo/main/centos/7/
22 | become: true
23 |
24 | - name: remove docker (redhat)
25 | yum: name=docker-engine state=absent
26 |
27 | - name: install docker (redhat)
28 | shell: curl https://get.docker.com | sed 's/docker-engine/docker-engine-{{ docker_version }}/' | bash
29 |
--------------------------------------------------------------------------------
/roles/docker/tasks/ubuntu_install_tasks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for installing docker service
3 | #
4 |
5 | - name: add docker's public key for CS-engine (debian)
6 | apt_key:
7 | url: "https://sks-keyservers.net/pks/lookup?op=get&search=0xee6d536cf7dc86e2d7d56f59a178ac6c6238f52e"
8 | state: present
9 | validate_certs: "{{ validate_certs }}"
10 |
11 | - name: add docker CS-engine repos (debian)
12 | apt_repository:
13 | repo: "deb https://packages.docker.com/{{ item }}/apt/repo ubuntu-{{ ansible_distribution_release }} main"
14 | state: present
15 | with_items: "{{ docker_cs_engine_supported_versions_ubuntu[ansible_distribution_release] }}"
16 |
17 | - name: install docker (debian)
18 | shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~{{ ansible_distribution_release }}/' | bash
19 | when: "{{ docker_version | version_compare('1.12', '<') }}"
20 |
21 | - name: install docker 1.12+ (debian)
22 | shell: curl https://get.docker.com | sed 's/docker-engine/--force-yes docker-engine={{ docker_version }}-0~ubuntu-{{ ansible_distribution_release }}/' | bash
23 | when: "{{ docker_version | version_compare('1.12', '>=') }}"
24 |
25 |
--------------------------------------------------------------------------------
/roles/docker/templates/docker-svc.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Docker Application Container Engine
3 | Documentation=https://docs.docker.com
4 | After=network.target
5 |
6 | [Service]
7 | TimeoutStartSec=5m
8 | Type=notify
9 | {% if docker_device != "" %}
10 | {% set docker_devmapper_opts = "--storage-opt dm.thinpooldev=/dev/mapper/contiv-dockerthin" %}
11 | {% else %}
12 | {% set docker_devmapper_opts = "" %}
13 | {% endif %}
14 |
15 | {% if docker_version | version_compare('1.12', '>=') %}
16 | {% set docker_daemon_startup = "dockerd" %}
17 | {% else %}
18 | {% set docker_daemon_startup = "docker daemon" %}
19 | {% endif %}
20 |
21 | {% set docker_sockets = "-H tcp://0.0.0.0:" + docker_api_port|string + " -H unix:///var/run/docker.sock" %}
22 | {% set cluster_store = "--cluster-store=etcd://localhost:" + etcd_client_port1|string %}
23 |
24 | ExecStart=/usr/bin/{{ docker_daemon_startup }} {{ docker_devmapper_opts }} {{ docker_sockets }} {{ cluster_store }}
25 | MountFlags={{ docker_mount_flags }}
26 | LimitNOFILE=1048576
27 | LimitNPROC=1048576
28 | LimitCORE=infinity
29 |
30 | [Install]
31 | WantedBy=multi-user.target
32 |
--------------------------------------------------------------------------------
/roles/docker/templates/env.conf.j2:
--------------------------------------------------------------------------------
1 | [Service]
2 | {% for key, value in env.items() %}
3 | Environment="{{ key }}={{ value }}"
4 | {% endfor %}
5 |
--------------------------------------------------------------------------------
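The `env` variable consumed by the template above is expected to be a dict of environment variable names to values, supplied from group or host vars. The proxy settings below are only an example of the kind of thing it might hold, not values defined anywhere in this repo:

```
# Illustrative only: a dict named `env` whose key/value pairs each become an
# Environment= line in the docker systemd drop-in rendered above.
env:
  HTTP_PROXY: "http://proxy.example.com:3128"
  NO_PROXY: "localhost,127.0.0.1"
```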
/roles/docker/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | thin_provisioned: false
3 |
--------------------------------------------------------------------------------
/roles/etcd/README.md:
--------------------------------------------------------------------------------
1 | ### Testing
2 |
3 | Right now only the install path is under test.
4 |
5 | First, set up molecule and bring up the scenario:
6 | ```
7 | virtualenv venv
8 | . venv/bin/activate
9 | pip install --upgrade pip
10 | pip install -r molecule-requirements.txt
11 | molecule converge
12 | ```
13 |
14 | To shut down the instances:
15 | ```
16 | molecule destroy
17 | ```
18 |
19 | To log in to a guest:
20 | ```
21 | molecule login --host etcd1   # or etcd2, etcd3, etcd-proxy
22 | ```
23 |
24 |
--------------------------------------------------------------------------------
/roles/etcd/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # role variable for the etcd service
3 |
4 | etcd_version: v2.3.8
5 | etcd_cluster_name: contiv
6 |
7 | etcd_proxy: "{{ etcd_proxies_group in groups
8 | and inventory_hostname in groups[etcd_proxies_group] }}"
9 | etcd_member: "{{ etcd_members_group in groups
10 | and inventory_hostname in groups[etcd_members_group] }}"
11 | etcd_service_name: "etcd{{ etcd_proxy|bool|ternary('-proxy','') }}-{{
12 | etcd_cluster_name}}"
13 | etcd_docker_name: "{{ etcd_service_name }}"
14 |
15 | etcd_node_name: "{{ inventory_hostname_short }}"
16 | etcd_client_port1: 2379
17 | etcd_client_port2: 4001
18 |
19 | etcd_peer_port1: 2380
20 |
21 | etcd_members_group: netplugin-master
22 | etcd_proxies_group: netplugin-worker
23 | etcd_initial_cluster_token: "etcd-{{ etcd_cluster_name }}"
24 |
25 | etcd_rule_comment: "{{ etcd_cluster_name }}_etcd traffic"
26 |
27 | etcd_heartbeat_interval: 1000
28 | etcd_election_timeout: 10000
29 |
30 | etcd_data_dir: /var/lib/etcd/data
31 | etcd_wal_dir: /var/lib/etcd/wal
32 |
33 | etcd_reset_state: false
34 |
35 | # Etcd has "advertise" urls for the other nodes to use if it's behind a proxy
36 | # Also, it needs to "listen" on IPs and ports for peers to talk to
37 | etcd_peer_advertise_interface: "{{ ansible_default_ipv4.interface }}"
38 | etcd_peer_advertise_address: "{{
39 | hostvars[inventory_hostname]['ansible_' + etcd_peer_advertise_interface]['ipv4']['address'] }}"
40 | etcd_peer_listen_address: "{{ etcd_peer_advertise_address }}"
41 | etcd_peer_advertise_urls:
42 | - "http://{{ etcd_peer_advertise_address }}:{{ etcd_peer_port1 }}"
43 | etcd_peer_listen_urls:
44 | - "http://{{ etcd_peer_listen_address }}:{{ etcd_peer_port1 }}"
45 |
46 | etcd_client_advertise_address: "{{ etcd_peer_advertise_address }}"
47 | etcd_client_listen_address: 0.0.0.0
48 | etcd_client_advertise_urls:
49 | - "http://{{ etcd_client_advertise_address }}:{{ etcd_client_port1 }}"
50 | - "http://{{ etcd_client_advertise_address }}:{{ etcd_client_port2 }}"
51 | etcd_client_listen_urls:
52 | - "http://{{ etcd_client_listen_address }}:{{ etcd_client_port1 }}"
53 | - "http://{{ etcd_client_listen_address }}:{{ etcd_client_port2 }}"
54 |
55 | etcd_systemd_restart_delay_sec: 2
56 | etcd_systemd_restart: always
57 |
--------------------------------------------------------------------------------
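The member/proxy split above is driven purely by group membership: hosts in `netplugin-master` become etcd members, hosts in `netplugin-worker` run an etcd proxy (the molecule scenario further down uses exactly those group names). If a deployment uses different group names or ports, only the relevant defaults need to be overridden; the values below are illustrative, not taken from the repo:

```
# Illustrative group_vars override: point the role at differently named
# inventory groups and move the primary client port; everything else keeps
# the defaults defined above.
etcd_members_group: etcd-servers
etcd_proxies_group: etcd-clients
etcd_client_port1: 12379
etcd_cluster_name: prod
```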
/roles/etcd/molecule-requirements.txt:
--------------------------------------------------------------------------------
1 | ansible==2.3
2 | python-vagrant==0.5.15
3 | molecule==2.3.1
4 |
--------------------------------------------------------------------------------
/roles/etcd/molecule/default/create.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create
3 | hosts: localhost
4 | connection: local
5 | gather_facts: False
6 | vars:
7 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}"
8 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}"
9 | molecule_yml: "{{ lookup('file', molecule_file) | molecule_from_yaml }}"
10 | tasks:
11 | - name: Create molecule instance(s)
12 | molecule_vagrant:
13 | instance_name: "{{ item.name }}"
14 | instance_interfaces: "{{ item.interfaces | default(omit) }}"
15 | instance_raw_config_args: "{{ item.instance_raw_config_args | default(omit) }}"
16 |
17 | platform_box: "{{ item.box }}"
18 | platform_box_version: "{{ item.box_version | default(omit) }}"
19 | platform_box_url: "{{ item.box_url | default(omit) }}"
20 |
21 | provider_name: "{{ molecule_yml.driver.provider.name }}"
22 | provider_memory: "{{ item.memory | default(omit) }}"
23 | provider_cpus: "{{ item.cpus | default(omit) }}"
24 | provider_raw_config_args: "{{ item.raw_config_args | default(omit) }}"
25 |
26 | state: up
27 | register: server
28 | with_items: "{{ molecule_yml.platforms }}"
29 |
30 | # Mandatory configuration for Molecule to function.
31 |
32 | - name: Populate instance config dict
33 | set_fact:
34 | instance_conf_dict: {
35 | 'instance': "{{ item.Host }}",
36 | 'address': "{{ item.HostName }}",
37 | 'user': "{{ item.User }}",
38 | 'port': "{{ item.Port }}",
39 | 'identity_file': "{{ item.IdentityFile }}", }
40 | with_items: "{{ server.results }}"
41 | register: instance_config_dict
42 | when: server.changed | bool
43 |
44 | - name: Convert instance config dict to a list
45 | set_fact:
46 | instance_conf: "{{ instance_config_dict.results | map(attribute='ansible_facts.instance_conf_dict') | list }}"
47 | when: server.changed | bool
48 |
49 | - name: Dump instance config
50 | copy:
51 | # NOTE(retr0h): Workaround for Ansible 2.2.
52 | # https://github.com/ansible/ansible/issues/20885
53 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}"
54 | dest: "{{ molecule_instance_config }}"
55 | when: server.changed | bool
56 |
--------------------------------------------------------------------------------
/roles/etcd/molecule/default/destroy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Destroy
3 | hosts: localhost
4 | connection: local
5 | gather_facts: False
6 | no_log: "{{ not lookup('env', 'MOLECULE_DEBUG') | bool }}"
7 | vars:
8 | molecule_file: "{{ lookup('env', 'MOLECULE_FILE') }}"
9 | molecule_instance_config: "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}"
10 | molecule_yml: "{{ lookup('file', molecule_file) | molecule_from_yaml }}"
11 | tasks:
12 | - name: Destroy molecule instance(s)
13 | molecule_vagrant:
14 | instance_name: "{{ item.name }}"
15 | platform_box: "{{ item.box }}"
16 | provider_name: "{{ molecule_yml.driver.provider.name }}"
17 | force_stop: "{{ item.force_stop | default(True) }}"
18 |
19 | state: destroy
20 | register: server
21 | with_items: "{{ molecule_yml.platforms }}"
22 |
23 | # Mandatory configuration for Molecule to function.
24 |
25 | - name: Populate instance config
26 | set_fact:
27 | instance_conf: {}
28 |
29 | - name: Dump instance config
30 | copy:
31 | # NOTE(retr0h): Workaround for Ansible 2.2.
32 | # https://github.com/ansible/ansible/issues/20885
33 | content: "{{ instance_conf | to_json | from_json | molecule_to_yaml | molecule_header }}"
34 | dest: "{{ molecule_instance_config }}"
35 | when: server.changed | bool
36 |
--------------------------------------------------------------------------------
/roles/etcd/molecule/default/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 | name: galaxy
4 | driver:
5 | name: vagrant
6 | provider:
7 | name: virtualbox
8 | platforms:
9 | - name: etcd1
10 | box: contiv/centos73
11 | box_version: 0.10.1
12 | provider_cpus: 2
13 | provider_memory: 512
14 | groups:
15 | - netplugin-master
16 | interfaces:
17 | - network_name: private_network
18 | type: static
19 | ip: 192.168.11.3
20 | auto_config: true
21 | - name: etcd2
22 | box: contiv/centos73
23 | box_version: 0.10.1
24 | provider_cpus: 2
25 | provider_memory: 512
26 | groups:
27 | - netplugin-master
28 | interfaces:
29 | - network_name: private_network
30 | type: static
31 | ip: 192.168.11.4
32 | auto_config: true
33 | - name: etcd3
34 | box: contiv/centos73
35 | box_version: 0.10.1
36 | provider_cpus: 2
37 | provider_memory: 512
38 | groups:
39 | - netplugin-master
40 | interfaces:
41 | - network_name: private_network
42 | type: static
43 | ip: 192.168.11.5
44 | auto_config: true
45 | - name: etcd-proxy
46 | box: contiv/centos73
47 | box_version: 0.10.1
48 | provider_cpus: 2
49 | provider_memory: 512
50 | groups:
51 | - netplugin-worker
52 | interfaces:
53 | - network_name: private_network
54 | type: static
55 | ip: 192.168.11.6
56 | auto_config: true
57 | provisioner:
58 | name: ansible
59 | playbooks:
60 | create: create.yml
61 | destroy: destroy.yml
62 |
--------------------------------------------------------------------------------
/roles/etcd/molecule/default/playbook.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | become: yes
3 | vars:
4 | validate_certs: no
5 | etcd_peer_advertise_interface: enp0s8
6 | roles:
7 | - role: etcd
8 |
--------------------------------------------------------------------------------
/roles/etcd/molecule/default/prepare.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Prepare
3 | hosts: all
4 | gather_facts: False
5 | tasks:
6 | - name: Install python for Ansible
7 | raw: test -e /usr/bin/python || (apt -y update && apt install -y python-minimal)
8 | become: True
9 | changed_when: False
10 |
--------------------------------------------------------------------------------
/roles/etcd/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up etcd
3 |
4 | - name: stop etcd
5 | service: name=etcd state=stopped
6 |
7 | - name: cleanup etcd state
8 | file: state=absent path={{ etcd_data_dir }}
9 | when: etcd_reset_state == true
10 |
11 | - name: cleanup iptables for etcd
12 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ etcd_rule_comment }} ({{ item }})"
13 | become: true
14 | with_items:
15 | - "{{ etcd_client_port1 }}"
16 | - "{{ etcd_client_port2 }}"
17 | - "{{ etcd_peer_port1 }}"
18 | - "{{ etcd_peer_port2 }}"
19 |
--------------------------------------------------------------------------------
/roles/etcd/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting etcd service
3 |
4 | - block:
5 | - name: download etcdctl {{ etcd_version }}
6 | get_url:
7 | validate_certs: "{{ validate_certs }}"
8 | url: https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz
9 | dest: /tmp/etcd-{{ etcd_version }}-linux-amd64.tar.gz
10 | tags:
11 | - prebake-for-dev
12 |
13 | - name: install etcdctl binaries
14 | shell: >
15 | tar vxzf /tmp/etcd-{{ etcd_version }}-linux-amd64.tar.gz &&
16 | mv etcd-{{ etcd_version }}-linux-amd64/etcd* /usr/bin
17 | tags:
18 | - prebake-for-dev
19 |
20 | - name: pull etcd container {{ etcd_version }}
21 | shell: docker pull quay.io/coreos/etcd:{{ etcd_version }}
22 | retries: 5
23 | delay: 10
24 | tags:
25 | - prebake-for-dev
26 |
27 | - name: setup iptables for etcd
28 | shell: >
29 | ( iptables -L INPUT | grep "{{ etcd_rule_comment }} ({{ item }})" ) || \
30 | iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ etcd_rule_comment }} ({{ item }})"
31 | become: true
32 | with_items:
33 | - "{{ etcd_client_port1 }}"
34 | - "{{ etcd_client_port2 }}"
35 | - "{{ etcd_peer_port1 }}"
36 |
37 | - name: Set facts from role defaults for sharing with other hosts
38 | set_fact:
39 | etcd_node_name: "{{ etcd_node_name }}"
40 | etcd_peer_advertise_urls: "{{ etcd_peer_advertise_urls }}"
41 |
42 | - name: template etcd docker environment file
43 | template:
44 | src: etcd_env_file.j2
45 | dest: "/etc/{{ etcd_service_name }}.env"
46 |
47 | - name: template systemd units for etcd
48 | template:
49 | src: etcd.service.j2
50 | dest: "/etc/systemd/system/{{ etcd_service_name }}.service"
51 |
52 | - name: start etcd server or proxy
53 | systemd:
54 | name: "{{ etcd_service_name }}"
55 | daemon_reload: yes
56 | state: started
57 | enabled: yes
58 | when: etcd_member|bool or etcd_proxy|bool
59 |
--------------------------------------------------------------------------------
/roles/etcd/templates/etcd.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Etcd for {{ etcd_cluster_name }}
3 | BindsTo=docker.service
4 | After=auditd.service systemd-user-sessions.service time-sync.target docker.service
5 |
6 | [Service]
7 |
8 | ExecStart=/usr/bin/docker run \
9 | -t --rm --net=host --env-file /etc/{{ etcd_service_name }}.env \
10 | {% if etcd_member %}
11 | -v {{ etcd_data_dir }}:{{ etcd_data_dir }} -v {{ etcd_wal_dir }}:{{ etcd_wal_dir }} \
12 | {% endif %}
13 | --name {{ etcd_docker_name }} \
14 | quay.io/coreos/etcd:{{ etcd_version }}
15 |
16 | ExecStop=/usr/bin/docker stop {{ etcd_docker_name }}
17 | ExecStop=/usr/bin/docker rm {{ etcd_docker_name }}
18 |
19 | Restart={{ etcd_systemd_restart }}
20 | RestartSec={{ etcd_systemd_restart_delay_sec }}
21 |
22 | [Install]
23 | WantedBy=multi-user.target docker.service
24 | Alias=etcd.service
25 |
--------------------------------------------------------------------------------
/roles/etcd/templates/etcd_env_file.j2:
--------------------------------------------------------------------------------
1 | {% if etcd_proxy %}
2 | ETCD_PROXY=on
3 | {% elif etcd_member %}
4 | ETCD_NAME={{ etcd_node_name }}
5 | ETCD_DATA_DIR={{ etcd_data_dir }}
6 | ETCD_WAL_DIR={{ etcd_wal_dir }}
7 | ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
8 | ETCD_LISTEN_CLIENT_URLS={{ etcd_client_listen_urls|join(",") }}
9 | ETCD_ADVERTISE_CLIENT_URLS={{ etcd_client_advertise_urls|join(",") }}
10 | ETCD_INITIAL_ADVERTISE_PEER_URLS={{ etcd_peer_advertise_urls|join(",") }}
11 | ETCD_LISTEN_PEER_URLS={{ etcd_peer_listen_urls|join(",") }}
12 | ETCD_HEARTBEAT_INTERVAL={{ etcd_heartbeat_interval }}
13 | ETCD_ELECTION_TIMEOUT={{ etcd_election_timeout }}
14 | ETCD_INITIAL_CLUSTER_STATE=new
15 | {% endif %}
16 | ETCD_INITIAL_CLUSTER=
17 | {%- for host in groups[etcd_members_group] %}
18 | {{- hostvars[host]['etcd_node_name'] ~ '=' ~ hostvars[host]['etcd_peer_advertise_urls'][0] -}}
19 | {% if not loop.last -%},{%- endif %}
20 | {% endfor %}
21 |
--------------------------------------------------------------------------------
/roles/gluster/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | gluster_peers_group:
3 | gluster_interface:
4 | install_gluster: false
5 | gluster_device: ""
6 |
--------------------------------------------------------------------------------
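As shipped, this role is a no-op (`install_gluster: false`), and the empty defaults above all need values before the install tasks can run. A hedged sketch of the overrides, where the group name, interface and device are placeholders rather than repo-defined values:

```
# Illustrative overrides: gluster only runs when install_gluster is true, and
# the peer-probe loop needs a group of hosts plus the interface whose IPv4
# address each peer advertises.
install_gluster: true
gluster_peers_group: netplugin-node   # placeholder group name
gluster_interface: eth1               # interface used to look up peer addresses
gluster_device: /dev/sdc              # formatted xfs and mounted at /data/brick1
```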
/roles/gluster/tasks/install_gluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: install glusterfs repository
4 | yum: name=centos-release-gluster state=present
5 |
6 | - name: install glusterfs
7 | yum: name=glusterfs-server state=present
8 |
9 | - name: configure gluster brick device
10 | filesystem:
11 | dev: "{{ gluster_device }}"
12 | fstype: xfs
13 | opts: "-i size=512"
14 |
15 | - name: make mountpoint for gluster brick
16 | file:
17 | name: /data/brick1
18 | state: directory
19 |
20 | - name: mount gluster brick
21 | mount:
22 | fstype: xfs
23 | src: "{{ gluster_device }}"
24 | name: /data/brick1
25 | state: mounted
26 |
27 | ## NOTE: fstab configuration is separate from mounting, according to ansible
28 | ## docs for the mount module.
29 | - name: configure fstab for gluster brick
30 | mount:
31 | fstype: xfs
32 | src: "{{ gluster_device }}"
33 | name: /data/brick1
34 | state: present
35 |
36 | - name: start glusterd
37 | service:
38 | name: glusterd
39 | enabled: true
40 | state: started
41 |
42 | - name: "compute peers for each host"
43 | set_fact:
44 | hosts: |
45 | {%- set peers=[] -%}
46 | {%- for host in groups[gluster_peers_group] -%}
47 | {%- if host != ansible_hostname -%}
48 | {%- if peers.append(hostvars[host]['ansible_' + hostvars[host]['gluster_interface']]['ipv4']['address']) -%}
49 | {%- endif -%}
50 | {%- endif -%}
51 | {%- endfor -%}
52 | {{ peers }}
53 |
54 | - name: "probe peers"
55 | command: "gluster peer probe {{ item }}"
56 | with_items: "{{ hosts }}"
57 | when: install_gluster
58 | register: command_result
59 | failed_when: "command_result.rc != 0 and command_result.rc != 1"
60 |
--------------------------------------------------------------------------------
/roles/gluster/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - include: install_gluster.yml
4 | when: install_gluster
5 |
--------------------------------------------------------------------------------
/roles/gluster/templates/peers.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | {%- set peers=[] -%}
4 | {%- for host in groups[etcd_peers_group] -%}
5 | {%- if host != node_name -%}
6 | {%- if peers.append(host) -%}
7 | {%- endif -%}
8 | {%- endif -%}
9 | {%- endfor -%}
10 | PEERS='
11 | {%- if peers -%}
12 | {#- print the peer addr -#}
13 | {{- hostvars[peers[0]]['ansible_' + hostvars[peers[0]]['control_interface']]['ipv4']['address'] -}}
14 | {%- else -%}
15 | {#- print nothing -#}
16 | {%- endif -%}
17 | '
18 |
--------------------------------------------------------------------------------
/roles/haproxy/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # You can override vars by using host or group vars
3 |
4 | rgw_group_name: rgws
5 |
6 | # Rados Gateway options
7 | radosgw_interface: eth1 # the public interface which the radosgw talks to the world with, this variable is used in the haproxy role, this does not need to be set if haproxy is not used.
8 |
--------------------------------------------------------------------------------
/roles/haproxy/files/precise/haproxy:
--------------------------------------------------------------------------------
1 | # Set ENABLED to 1 if you want the init script to start haproxy.
2 | ENABLED=1
3 | # Add extra flags here.
4 | #EXTRAOPTS="-de -m 16"
5 |
--------------------------------------------------------------------------------
/roles/haproxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Check distribution release
3 | #
4 |
5 | - include: precise.yml
6 | when: ansible_distribution_release == 'precise'
7 |
--------------------------------------------------------------------------------
/roles/haproxy/handlers/precise.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart haproxy
3 | service: >
4 | name=haproxy
5 | state=restarted
6 |
--------------------------------------------------------------------------------
/roles/haproxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Check distribution release
3 | #
4 |
5 | - include: precise.yml
6 | when: ansible_distribution_release == 'precise'
7 |
--------------------------------------------------------------------------------
/roles/haproxy/tasks/precise.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add repository
3 | apt_repository: >
4 | repo=ppa:vbernat/haproxy-1.5
5 | state=present
6 |
7 | - name: Install haproxy
8 | apt: >
9 | name={{ item }}
10 | state=present
11 | with_items:
12 | - haproxy
13 | - socat
14 |
15 | - name: Copy default configuration
16 | copy: >
17 | src=precise/haproxy
18 | dest=/etc/default/haproxy
19 | notify: restart haproxy
20 |
21 | - name: Create configuration
22 | template: >
23 | src=precise/haproxy.cfg
24 | dest=/etc/haproxy/haproxy.cfg
25 | backup=yes
26 | notify: restart haproxy
27 |
28 | - name: Start and enable haproxy
29 | service: >
30 | name=haproxy
31 | state=started
32 | enabled=yes
33 |
--------------------------------------------------------------------------------
/roles/haproxy/templates/precise/haproxy.cfg:
--------------------------------------------------------------------------------
1 | #
2 | # {{ ansible_managed }}
3 | #
4 | global
5 | log /dev/log local0
6 | log /dev/log local1 notice
7 | chroot /var/lib/haproxy
8 | user haproxy
9 | group haproxy
10 | daemon
11 | stats socket /var/lib/haproxy/stats level admin
12 |
13 | defaults
14 | log global
15 | mode http
16 | option httplog
17 | option dontlognull
18 | timeout connect 5000
19 | timeout client 50000
20 | timeout server 50000
21 | errorfile 400 /etc/haproxy/errors/400.http
22 | errorfile 403 /etc/haproxy/errors/403.http
23 | errorfile 408 /etc/haproxy/errors/408.http
24 | errorfile 500 /etc/haproxy/errors/500.http
25 | errorfile 502 /etc/haproxy/errors/502.http
26 | errorfile 503 /etc/haproxy/errors/503.http
27 | errorfile 504 /etc/haproxy/errors/504.http
28 |
29 | frontend http_frontend
30 | bind *:80
31 | mode http
32 | option httpclose
33 | option forwardfor
34 | default_backend rgw
35 |
36 | frontend https_frontend
37 | bind *:443 ssl crt /etc/ceph/radosgw-key-cert.pem
38 | mode http
39 | option httpclose
40 | option forwardfor
41 | reqadd X-Forwarded-Proto:\ https
42 | default_backend rgw
43 |
44 | backend rgw
45 | mode http
46 | balance roundrobin
47 | cookie RADOSGWLB insert indirect nocache
48 | {% for host in groups[rgw_group_name] %}
49 | server {{ hostvars[host].ansible_hostname }} {{ hostvars[host]['ansible_' + radosgw_interface ].ipv4.address }}:80 check cookie {{ hostvars[host].ansible_hostname }}
50 | {% endfor %}
51 |
52 | listen stats :8080
53 | mode http
54 | stats enable
55 | stats hide-version
56 | stats realm Haproxy\ Statistics
57 | stats uri /
58 | #stats auth Username:Password
59 |
--------------------------------------------------------------------------------
/roles/kubernetes/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # role variable for the kubernetes role
3 | #
4 | k8s_api_insecure_port: 8080
5 | k8s_api_secure_port: 6443
6 | k8s_image: "gcr.io/google_containers/hyperkube"
7 | k8s_version: "v1.3.4"
8 | k8s_rule_comment: "contiv_k8s traffic"
9 | k8s_net_plugin_mode: "cni"
10 | k8s_net_plugin_bin_dir: "/opt/cni/bin"
11 | k8s_config_dir: "/etc/kubernetes"
12 | k8s_cert_dir: "{{ k8s_config_dir }}/certs"
13 | k8s_cert_file: "kubecfg.crt"
14 | k8s_cert_key: "kubecfg.key"
15 | k8s_ca_file: "ca.crt"
16 | k8s_manifests_dir: "{{ k8s_config_dir }}/manifests"
17 | k8s_net_plugin_config_dir: "{{ k8s_config_dir }}/cni/net.d"
18 | k8s_local_cert_dir: "./certs"
19 | k8s_auth_token: ""
20 |
--------------------------------------------------------------------------------
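Version and port choices above are all surfaced as variables, so tracking a different hyperkube release or moving the insecure API port is just an override of those keys. The values below are illustrative only:

```
# Illustrative overrides for the kubernetes role defaults above.
k8s_version: "v1.4.7"          # hyperkube image tag to pull
k8s_api_insecure_port: 8085    # local, unauthenticated API port
k8s_api_secure_port: 6443      # TLS API port, unchanged here and shown for contrast
```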
/roles/kubernetes/files/certs/basic_auth.csv:
--------------------------------------------------------------------------------
1 | admin,admin,admin
2 |
--------------------------------------------------------------------------------
/roles/kubernetes/files/certs/ca.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDUDCCAjigAwIBAgIJAMobcK5f+44qMA0GCSqGSIb3DQEBCwUAMB8xHTAbBgNV
3 | BAMMFDEwLjAuMi4xNUAxNDcwNzc4Njk4MB4XDTE2MDgwOTIxMzgxOFoXDTI2MDgw
4 | NzIxMzgxOFowHzEdMBsGA1UEAwwUMTAuMC4yLjE1QDE0NzA3Nzg2OTgwggEiMA0G
5 | CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYerrSeYaVTeoBJ8M5XEgO1pmAuFTG
6 | Xdnp9uClqH055PQzBCIz566a9hItyKSkPdsRVGMS8zJVePMEuiYnJWUjD1nF2O1f
7 | XXhwKIF8wrk8gYjxcUXTRuQPjbmCzhd2JpyxLZRM0xOMsKzKKTQLDHIl6dC2U/+1
8 | OA/XdrJOi62iU9gloPKFpGvMTu/+GxXpwKhXQzEZNx81LWsPZXosl+EiGTDG6s3w
9 | rKS0NCK9t1v//CPg70uzg7UbAmHTC1z/ZgPaC9JfjN7j9RcicT9X7lBn+N5O2Q0T
10 | vxLrA8bpTHgaX5QELWZMTAbE9D2SUz5At1UXGOUqdI4hSp9yekRAdVV9AgMBAAGj
11 | gY4wgYswHQYDVR0OBBYEFIy7JSMFwrQnuXAZetCJ5qiquQIgME8GA1UdIwRIMEaA
12 | FIy7JSMFwrQnuXAZetCJ5qiquQIgoSOkITAfMR0wGwYDVQQDDBQxMC4wLjIuMTVA
13 | MTQ3MDc3ODY5OIIJAMobcK5f+44qMAwGA1UdEwQFMAMBAf8wCwYDVR0PBAQDAgEG
14 | MA0GCSqGSIb3DQEBCwUAA4IBAQBYPHj6WGG4THhXsnHQScRRT8RmX71h2aNYHphf
15 | ORw2rr8Lvd2/vt5ae1EwyqWcFazd8wN2+WALEX/rs19PE2c0A1bgpKGQXKDRw8TF
16 | 5aC3fuuo7o4arI8DG9b9cj5IZdXGunvCOjEoZw43a4KWLYFoepAulcLcx3BEnLtI
17 | /rpoocD7Ko7OGPrhNpbBUlg4KQ3lhvOLkYylCPCqB2OgS5rAKkuQCUk+XFa+xPSL
18 | 912w3qY0+fV8RBGyuFmhy0nI4eMus8vAZOlMLd9lvl3eaUzfwDHspVdPH36ceEai
19 | WMESxBh3MDkrHyRa2CDC3tBLCLwrQrwzo5Tlwa5kpHM9C6nO
20 | -----END CERTIFICATE-----
21 |
--------------------------------------------------------------------------------
/roles/kubernetes/files/certs/known_tokens.csv:
--------------------------------------------------------------------------------
1 | XZ1vYosZm3QVI8qxTHDhDxvPrJSQ9LOf,admin,admin
2 | LNCGKd2V1J9rfKV2Rvjh4KROJyil4w0h,kubelet,kubelet
3 | QJK1gy7UWGGRRenOotvxDauzMZINaZ2K,kube_proxy,kube_proxy
4 |
--------------------------------------------------------------------------------
/roles/kubernetes/files/certs/kubecfg.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDGId3VTK1RIT4D
3 | o3gJHrlmEjE4djIlzsfHzXtF+QgCGkuvoC8bgTmfwdwZ6xEtvD0HGNOo9W4fsu+z
4 | HvrKbNamj/GVWBbOa4kb0pWZ4xd5iLDufre/HptcbPwRnpktAEt/CBe3n/zZIBKB
5 | UWoCiU/OoYatQi6sA5a2ZI/NiXBAmgEJm05KECbxDBF9RVvKA0+3xsw7PoHzSs/k
6 | KYliyZ+J94y+fn/3A7o/I7ucHt47qVylQTS6Fi1jwvkogW5aJMlwH3wXnhhlMUYt
7 | N7ka30E7L/4hTdH9fU1hER5v9i4IDn8UH2xXUSyMrhDOhSYddnultWwCwvjt7/VK
8 | MNFV7rflAgMBAAECggEAaPT1y5Fz2p9ud4T4CHTDJ8QJWOycAhU5FanPXLDgQiTl
9 | 5SJI8hwRh4StcPlqPz0RYxoOvdlNvfMPMWb+2Y6wYsmr4z4nzq9+kUOLxlpbTCFS
10 | APUVp2OyT1LYcQkAAau/UorZ7YcN7YMk6mZTbwaIRWChtFRz3uGDzBkXaH0e/sjn
11 | lkjduI2aupQf9CQ6ZYL+Mji/v93pdrMkAR6aLXAfW+dmp7s6RQwBEW9kmATLwgEV
12 | 4Iq1LxFt3qFiqrd1tgDJBggNyPQv+ujsYxHvdbx/9Mu6lP3/3/v8tgKG8TjUG4LB
13 | /oXbcaXGPvC1ba96fQNTMeOazHfOMQR/Wse3IwAQ4QKBgQDjhm6qV/oXl7KRVf6Q
14 | OA+/tJq6o1j8Yo1bJweHYi6Elr3zZoem4Q9VV8NfGxkHl0vQpDEQKbCstI4ZnUfX
15 | hqKuO1Rif4oEKpnTj6JLhU/H/TNq9wUjSQT/Te2upFctlsuHR3pIu6qhB931+igT
16 | Kzw/jHnjTtw5nsvGGP9h4mkjXQKBgQDe7bs8radlysEIopixd+lJFMgMmqKBVeva
17 | 1gXOzDKCA9j5e9Eq1LVTAu/KgUNlwaBRigdzdYsngP/vCSIuZLvUO9PW474Iil7h
18 | WX9+gJyI+dMUP1yUBroXAaVQT4b7g5+lm5SGoslyTVjBX631BtEaeNJvm9m/5n49
19 | OQBmQ/VmKQKBgC4YBQFzk1IGRIHXFxxmjyI6V4JqJ1PyL0sBLDBMPrpTw6W05tRz
20 | EeUrQ4RX/AkiqJRbPylUGNvUNFmSGITcXMbJdD4EMpTHgKRwm+OK3W/ZpvoLkznj
21 | yCRkyDsyRgsRWqhMXcSwaE/5RQlZJZdEkg8ZyXO4fTsbJZM7azmMYdptAoGAfcCr
22 | E0J+IGysUVuvOMUEosYHRxzfuIeoYSzD3/DOOQtElQ9kYX3Vvzrz207pnvgHoJd1
23 | NotC8QjS1ptXXcZTMhiBmkGkDBtKBGLBCbIxGw+aI1zOs23MAXhbMZ967FxGbVwC
24 | /6PKcSDyhTqc3ZB32i/3vR+0SI0T/yMtd4K0ZmkCgYA7AWw9Z6y87j13DLn/OwY3
25 | mrOEIpjCE9LmoaVcaoM63deSIEdI8kE3sWOXueLJDeSGigEZPiia7eloIaTpF4Ht
26 | V85AsXGCnTPk1bl5+qG8JFBL0vaZdwvY3h48MlDc04SokkZeWITij4IYyETngEWf
27 | NLUK025oxOl5wYFyyOyZXA==
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/roles/kubernetes/files/certs/server.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDqgWNkcPx4G1H5
3 | A2AmaQHPfnly6e4K6UXuYMwVpArLBpj4RiHgt3XOG3thf1xh18o1oeLD++f2+LXH
4 | N+WGHQgdhl+SaKR3QDx2PyXonmcDveaMlzV70p72RrvvJYFE9Oy5FAonwzv5xlFS
5 | 80R7PJpohSYihb7rSb6hlVzUXQ+Imbpwh3qbhQX2Ieuo82/ymUq3cXzgYeAjVpSM
6 | W4HTV3sah/M1j4to59XbxFs73BZo5q4/g8K41z8Kn0Vq+5Swy7ReEzvJV6D3bqCO
7 | q+QCAdoGl6zd+PYQRLG5mX6LBfeT0oXwFjSSlaKFmq8dcFAXfNtOe9JxLCh/AWJI
8 | PjHtdXeLAgMBAAECggEAB542bV/iQ8CXdCBw8i59GZGBPoNs8TvMwsawOo++rgPD
9 | 2mTk2tj64bww0OUDSkZpUj61ECrRyUshFJyzPx0YU2U3G5l8pkcW77xzbdAfhpaX
10 | s7+BxEj+Uv/PQSCVx3ixa/3DqrED9MRFhg1TDzHTzkttBrgTkK8vmy4v+tIfHA/a
11 | emB7HelZ/Q9LLRYniko/onnbiMYf+1lqccVCwbz50r9xmxf87MD322BA+NEz3Vkl
12 | sGrw0bf2bloVpWLoVLWNqa5pq7JTqGe2C2IJtn+uf+L/rCL6DRbYnzSyuWSx9Mlk
13 | nakiM31TPtvhuZm8dXDUcyvrvwowalRe2ITAZC26qQKBgQD8uJgTTvH92ifCPe5y
14 | 5JHiEkqy3a9wHEWbBvEgBdkdPG1iSPbUXnf1yRbcoClguthePxxx4dUze7zPt+y+
15 | GOq62XYEjC3ySbnGdrO7Jz9MMLnUjjTG+y7ATJ6JOQXvcRaQlL8wwdWNPSHWBgHH
16 | ppBdk8g1/37PcDWkJmhkoFVhBwKBgQDtjEqcCgXgf93BezZ0KdSsvIo62A0hkkLm
17 | ntKSBVGwRKPXrvXIJYE8vWlGNIHtxIpHowSv37EaT3QyL5B+3G6T4iex3/lNMVRc
18 | quXe9cR6aGJoOkjpNmfdN+oDIyy4ed6KVTzRenjPZig20vUUQ46X8T93DYV+B9jy
19 | z/9MDA8IXQKBgQDaAQr+rP1HdGcTwoTti4rkr87MWTu7xEevId/jNehCthvNyw9R
20 | x2RIG/2iuG65SLTZKCwLeTqHPNh1fz20Bp6GLI0IAzynYz3knAUTDxolSWsklr+k
21 | Xo3HyNCBozUhhjZm5km/b5YeTnvfujLS3TZv4sm0+v+TRkKfpZosI7tDBQKBgDyv
22 | 9bOtn9i98gJnHv9IXIwQ0uO7pIcqHatbcgJaF4eH0a/dg+8xHbZN0aJc8cCz0HwC
23 | usTPyiglmlLj8e9nE1z2jxuTU9aTvrC5/GYctahsGI/2LABgpOJQUZmhl+A9Skxf
24 | AF2oA0tBFw8CkBgwNjK9MHCS5MLQ8G0AFIHyJrkFAoGARAiOhaY20lA0WBI1wHQ8
25 | mW1k0EiwuLggkzb9w8B200XGS4mkcIqgrySnTP8jwNH5vzh5AJRsCdNB089LT05B
26 | 50JLipAbSNhgBI7yEDWa262VBkXEihpEFaUj+mT+4/uy95mk/PmCLQrLREAxaReZ
27 | wkw4ZScbklzfhsLUTcMLwPI=
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/roles/kubernetes/files/kubernetes.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/kubernetes.sh start
7 | ExecStop=/usr/bin/kubernetes.sh stop
8 | KillMode=control-group
9 |
10 | [Install]
11 | WantedBy=multi-user.target
12 |
--------------------------------------------------------------------------------
/roles/kubernetes/handlers/main.yml:
--------------------------------------------------------------------------------
1 | # notify handlers for kubernetes
2 |
3 | - name: restart kubernetes
4 | service: name=kubernetes state=restarted
5 |
--------------------------------------------------------------------------------
/roles/kubernetes/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # These dependencies install the kubernetes scheduler stack
3 |
4 | dependencies:
5 | - { role: etcd }
6 |
--------------------------------------------------------------------------------
/roles/kubernetes/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up kubernetes
3 |
4 | - name: stop kubernetes
5 | service: name=kubernetes state=stopped
6 |
7 | - name: cleanup iptables for kubernetes
8 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ k8s_rule_comment }} ({{ item }})"
9 | with_items:
10 | - "{{ k8s_api_insecure_port }}"
11 | - "{{ k8s_api_secure_port }}"
12 |
--------------------------------------------------------------------------------
/roles/kubernetes/templates/worker_manifest.j2:
--------------------------------------------------------------------------------
1 | {
2 | "apiVersion": "v1",
3 | "kind": "Pod",
4 | "metadata": {
5 | "name": "k8s-worker",
6 | "namespace": "kube-system"
7 | },
8 | "spec":{
9 | "hostNetwork": true,
10 | "containers":[
11 | {
12 | "name": "kube-proxy",
13 | "image": "{{ k8s_image }}:{{ k8s_version }}",
14 | "command": [
15 | "/hyperkube",
16 | "proxy",
17 | "--master=http://127.0.0.1:{{ k8s_api_insecure_port }}",
18 | "--v=2",
19 | "--resource-container=\"\""
20 | ],
21 | "securityContext": {
22 | "privileged": true
23 | }
24 | }
25 | ],
26 | "volumes": [
27 | {
28 | "name": "data",
29 | "hostPath": { "path": "{{ k8s_cert_dir }}" }
30 | }
31 | ]
32 |
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/roles/nfs/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | use_nfs_server: false
3 |
--------------------------------------------------------------------------------
/roles/nfs/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: stop nfs services (redhat)
3 | service: "name={{item}} state=stopped"
4 | with_items:
5 | - rpcbind
6 | - nfs-server
7 | - rpc-statd
8 | - nfs-idmapd
9 |
10 | - name: stop nfs services (debian)
11 | service: "name={{item}} state=stopped"
12 | with_items:
13 | - nfs-kernel-server
14 | - nfs-common
15 |
--------------------------------------------------------------------------------
/roles/nfs/tasks/client.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install nfs client (redhat)
3 | yum: "update_cache=yes name={{ item }}"
4 | with_items:
5 | - nfs-utils
6 | - libnfsidmap
7 | when: ansible_os_family == "RedHat"
8 |
9 | - name: install nfs client (debian)
10 | apt: "update_cache=yes name={{ item }}"
11 | with_items:
12 | - nfs-common
13 | - rpcbind
14 | when: ansible_os_family == "Debian"
15 |
16 | - name: enable nfs client services (redhat)
17 | service: "name={{item}} enabled=yes state=started"
18 | when: ansible_os_family == "RedHat"
19 | with_items:
20 | - rpcbind
21 |
--------------------------------------------------------------------------------
/roles/nfs/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: ./server.yml
3 | when: use_nfs_server
4 |
5 | - include: ./client.yml
6 | when: not use_nfs_server
7 |
--------------------------------------------------------------------------------
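`use_nfs_server` is the only switch in the role above: exactly the hosts that should export NFS set it to true, and everyone else falls through to the client tasks via the role default of false. A sketch of the host-level wiring, with a placeholder hostname:

```
# Illustrative host_vars file (e.g. host_vars/storage-node.yml, name is a
# placeholder): mark a single host as the NFS server; all other hosts keep
# the default use_nfs_server: false and get the client packages instead.
use_nfs_server: true
```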
/roles/nfs/tasks/server.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install nfs server (redhat)
3 | yum: "update_cache=yes name={{ item }}"
4 | with_items:
5 | - nfs-utils
6 | - libnfsidmap
7 | when: ansible_os_family == "RedHat"
8 |
9 | - name: install nfs server (debian)
10 | apt: "update_cache=yes name={{ item }}"
11 | with_items:
12 | - nfs-kernel-server
13 | - rpcbind
14 | when: ansible_os_family == "Debian"
15 |
16 | - name: enable nfs server services (redhat)
17 | service: "name={{item}} enabled=yes"
18 | when: ansible_os_family == "RedHat"
19 | with_items:
20 | - rpcbind
21 | - nfs-server
22 |
23 | - name: start nfs server services (redhat)
24 | service: "name={{item}} state=started"
25 | when: ansible_os_family == "RedHat"
26 | with_items:
27 | - rpcbind
28 | - nfs-server
29 | - rpc-statd
30 | - nfs-idmapd
31 |
32 | - name: ensure nfs service is running (debian)
33 | service: "name={{item}} enabled=yes state=started"
34 | when: ansible_os_family == "Debian"
35 | with_items:
36 | - nfs-kernel-server
37 |
--------------------------------------------------------------------------------
/roles/scheduler_stack/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Role defaults for scheduler stack
3 |
4 | scheduler_provider: "native-swarm" # Accepted values: native-swarm, ucp-swarm, kubernetes
5 |
--------------------------------------------------------------------------------
/roles/scheduler_stack/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting the scheduler stacks
3 | # like native-swarm, ucp-swarm, k8s, mesos, etc.
4 |
5 | dependencies:
6 | - { role: swarm, when: scheduler_provider == "native-swarm" }
7 | - { role: kubernetes, when: scheduler_provider == "kubernetes" }
8 | - { role: ucp, when: scheduler_provider == "ucp-swarm" }
9 |
--------------------------------------------------------------------------------
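The scheduler_stack role above is only a switchboard: the `scheduler_provider` value decides which of the swarm, kubernetes or ucp roles is pulled in as a dependency. Selecting kubernetes, for example, is a one-line override; where the override lives (group_vars, extra vars, etc.) is up to the deployment:

```
# Illustrative override: pick the scheduler stack to deploy.
# Accepted values per the defaults: native-swarm, ucp-swarm, kubernetes.
scheduler_provider: kubernetes
```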
/roles/serf/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # role variable for the serf service
3 |
4 | serf_discovery_interface: "{{ control_interface }}"
5 | serf_cluster_name: "mycluster"
6 | serf_node_label: "{{ ansible_hostname }}"
7 |
--------------------------------------------------------------------------------
/roles/serf/files/serf.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Serf
3 | After=network.target auditd.service systemd-user-sessions.service time-sync.target
4 |
5 | [Service]
6 | ExecStart=/usr/bin/serf.sh start
7 | ExecStop=/usr/bin/serf.sh stop
8 | Restart=on-failure
9 | RestartSec=10
10 | KillMode=control-group
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/serf/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting serf service
3 |
4 | - name: install lshw (debian)
5 | apt:
6 | name: "{{ item }}"
7 | state: latest
8 | with_items:
9 | - lshw
10 | when: ansible_os_family == "Debian"
11 |
12 | - name: install/upgrade base packages (redhat)
13 | yum:
14 | name: "{{ item }}"
15 | update_cache: true
16 | state: latest
17 | with_items:
18 | - lshw
19 | when: ansible_os_family == "RedHat"
20 |
21 | - name: download serf binary
22 | get_url:
23 | validate_certs: "{{ validate_certs }}"
24 | url: https://releases.hashicorp.com/serf/0.6.4/serf_0.6.4_linux_amd64.zip
25 | dest: /tmp/serf_0.6.4_linux_amd64.zip
26 |
27 | - name: install serf
28 | unarchive:
29 | copy: no
30 | src: /tmp/serf_0.6.4_linux_amd64.zip
31 | dest: /usr/bin
32 |
33 | - name: copy the serf start/stop script
34 | template: src=serf.j2 dest=/usr/bin/serf.sh mode=u=rwx,g=rx,o=rx
35 |
36 | - name: copy systemd units for serf
37 | copy: src=serf.service dest=/etc/systemd/system/serf.service
38 |
39 | - name: enable serf to be started on boot-up and start it as well
40 | service: name=serf state=started enabled=yes
41 |
--------------------------------------------------------------------------------
/roles/serf/templates/serf.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 start"
4 | if [ $# -ne 1 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | {% set mdns_sport_comment="'contiv_serf discovery sport'" -%}
10 | {%- set mdns_sport_rule="-p udp --sport 5353 -i " +
11 | serf_discovery_interface +
12 | " -j ACCEPT -m comment --comment " +
13 | mdns_sport_comment -%}
14 | {%- set mdns_dport_comment="'contiv_serf discovery dport'" -%}
15 | {%- set mdns_dport_rule="-p udp --dport 5353 -i " +
16 | serf_discovery_interface +
17 | " -j ACCEPT -m comment --comment " +
18 | mdns_dport_comment -%}
19 | {%- set serf_tcp_comment="'contiv_serf control'" -%}
20 | {%- set serf_tcp_rule="-p tcp --dport 7946 -i " +
21 | serf_discovery_interface +
22 | " -j ACCEPT -m comment --comment " +
23 | serf_tcp_comment -%}
24 |
25 | case $1 in
26 | start)
27 | # fail on error
28 | set -e
29 |
30 | # install necessary iptables to let serf work
31 | echo setting up iptables for serf
32 | ( /sbin/iptables -L INPUT | grep {{ mdns_sport_comment }} || \
33 | /sbin/iptables -I INPUT 1 {{ mdns_sport_rule }} )
34 | ( /sbin/iptables -L INPUT | grep {{ mdns_dport_comment }} || \
35 | /sbin/iptables -I INPUT 1 {{ mdns_dport_rule }} )
36 | ( /sbin/iptables -L INPUT | grep {{ serf_tcp_comment }} || \
37 | /sbin/iptables -I INPUT 1 {{ serf_tcp_rule }} )
38 |
39 | echo starting serf
40 | label={{ serf_node_label }}
41 | serial=$(lshw -c system | grep serial | awk '{print $2}')
42 | addr=$(ip addr list dev {{ serf_discovery_interface }} | \
43 | grep inet | grep {{ serf_discovery_interface }} | \
44 | awk '{split ($2, a , "/"); print a[1]}')
45 | if [[ "$addr" == "" ]]; then
46 | echo {{ serf_discovery_interface }} is not assigned a valid addr: ***$addr***
47 | exit 1
48 | fi
49 |
50 | # start serf
51 | /usr/bin/serf agent -node="$label-$serial" -discover {{ serf_cluster_name }} -iface {{ serf_discovery_interface }} \
52 | -tag NodeLabel=$label \
53 | -tag NodeSerial=$serial \
54 | -tag NodeAddr=$addr
55 | ;;
56 |
57 | stop)
58 | # cleanup iptables
59 | /sbin/iptables -D INPUT {{ mdns_sport_rule }}
60 | /sbin/iptables -D INPUT {{ mdns_dport_rule }}
61 | /sbin/iptables -D INPUT {{ serf_tcp_rule }}
62 | ;;
63 |
64 | *)
65 | echo USAGE: $usage
66 | exit 1
67 | ;;
68 | esac
69 |
--------------------------------------------------------------------------------
/roles/swarm/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # role variable for the swarm service
3 | #
4 | swarm_api_port: 2375
5 | swarm_version: "1.2.5"
6 | swarm_rule_comment: "contiv_swarm traffic"
7 | swarm_strategy: spread
8 | swarm_etcd_url: "{{ node_addr if run_as == 'master' else '127.0.0.1' }}"
9 |
--------------------------------------------------------------------------------
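Besides its own defaults, the swarm role relies on variables it expects the inventory to provide: `run_as` (only the literal value `master` starts a swarm manager; anything else joins as an agent only), `node_name` and `node_addr`, plus `docker_api_port` and `etcd_client_port1` from the docker and etcd roles. A hedged sketch of per-host values, with placeholder names and addresses:

```
# Illustrative host_vars for a swarm manager node; agent-only nodes would set
# run_as to something other than "master" and use their own node_addr.
run_as: master
node_name: netplugin-node1
node_addr: 192.168.2.10
```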
/roles/swarm/files/swarm.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Swarm
3 | After=auditd.service systemd-user-sessions.service time-sync.target etcd.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/swarm.sh start
7 | ExecStop=/usr/bin/swarm.sh stop
8 | KillMode=control-group
9 | Restart=on-failure
10 | RestartSec=10
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/swarm/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up swarm
3 |
4 | - name: stop swarm
5 | service: name=swarm state=stopped
6 |
7 | - name: cleanup iptables for swarm
8 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ swarm_rule_comment }} ({{ item }})"
9 | become: true
10 | with_items:
11 | - "{{ swarm_api_port }}"
12 |
--------------------------------------------------------------------------------
/roles/swarm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting swarm service
3 | - name: check for swarm image
4 | shell: "docker images | grep swarm | grep -q {{ swarm_version }}"
5 | ignore_errors: true
6 | register: swarm_exists
7 | tags:
8 | - prebake-for-dev
9 |
10 | - name: download swarm container image
11 | shell: docker pull swarm:{{ swarm_version }}
12 | tags:
13 | - prebake-for-dev
14 | when: not swarm_exists|success
15 |
16 | - name: setup iptables for swarm
17 | shell: >
18 | ( iptables -L INPUT | grep "{{ swarm_rule_comment }} ({{ item }})" ) || \
19 | iptables -I INPUT 1 -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ swarm_rule_comment }} ({{ item }})"
20 | become: true
21 | with_items:
22 | - "{{ swarm_api_port }}"
23 |
24 | - name: copy the swarm start/stop script
25 | template: src=swarm.j2 dest=/usr/bin/swarm.sh mode=u=rwx,g=rx,o=rx
26 |
27 | - name: copy systemd units for swarm
28 | copy: src=swarm.service dest=/etc/systemd/system/swarm.service
29 |
30 | - name: start swarm
31 | systemd: name=swarm daemon_reload=yes state=started enabled=yes
32 |
--------------------------------------------------------------------------------
/roles/swarm/templates/swarm.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 start"
4 | if [ $# -ne 1 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | case $1 in
10 | start)
11 | echo starting swarm as {{ run_as }} on {{ node_name }}[{{ node_addr }}]
12 | {% if run_as == "master" -%}
13 | /usr/bin/docker run -t -d -p {{ swarm_api_port }}:{{ swarm_api_port }} \
14 | --net=host --name=swarm-manager \
15 | swarm:{{ swarm_version }} manage \
16 | -H :{{ swarm_api_port }} \
17 | --strategy {{ swarm_strategy }} \
18 | --replication --advertise={{ node_addr }}:{{ swarm_api_port }} \
19 | etcd://{{ node_addr }}:{{ etcd_client_port1 }}
20 | {% endif %}
21 | /usr/bin/docker run -t --net=host --name=swarm-agent \
22 | swarm:{{ swarm_version }} join \
23 | --advertise={{ node_addr }}:{{ docker_api_port }} \
24 | etcd://{{ swarm_etcd_url }}:{{ etcd_client_port1 }}
25 | ;;
26 |
27 | stop)
28 | # skipping `set -e` as we shouldn't stop on error
29 | /usr/bin/docker stop swarm-manager
30 | /usr/bin/docker rm swarm-manager
31 | /usr/bin/docker stop swarm-agent
32 | /usr/bin/docker rm swarm-agent
33 | ;;
34 |
35 | *)
36 | echo USAGE: $usage
37 | exit 1
38 | ;;
39 | esac
40 |
--------------------------------------------------------------------------------
/roles/test/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # role variable for the test environment packages
3 |
4 | vbox_package_version: "5.1"
5 | vbox_major_version: "{{ vbox_package_version }}.14"
6 | vbox_build_number: "112924"
7 | vbox_version_ubuntu: "{{ vbox_package_version }}_{{ vbox_major_version }}-{{ vbox_build_number }}"
8 | vbox_version_ubuntu_dist_version:
9 | vivid: "{{ vbox_version_ubuntu }}~Ubuntu~trusty"
10 | wily: "{{ vbox_version_ubuntu }}~Ubuntu~wily"
11 | xenial: "{{ vbox_version_ubuntu }}~Ubuntu~xenial"
12 | vbox_version_redhat: "{{ vbox_package_version }}-{{ vbox_major_version }}_{{ vbox_build_number }}"
13 | vagrant_version: "1.9.1"
14 | packer_version: "0.12.2"
15 |
--------------------------------------------------------------------------------
/roles/test/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for installing test environment packages
3 |
4 | - include: os_agnostic_tasks.yml
5 | tags:
6 | - prebake-for-test
7 |
8 | - include: ubuntu_tasks.yml
9 | when: ansible_os_family == "Debian"
10 | tags:
11 | - prebake-for-test
12 |
13 |
14 | - include: redhat_tasks.yml
15 | when: ansible_os_family == "RedHat"
16 | tags:
17 | - prebake-for-test
18 |
--------------------------------------------------------------------------------
/roles/test/tasks/os_agnostic_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: check packer's version
2 | shell: packer --version
3 | register: packer_installed_version
4 | ignore_errors: yes
5 |
6 | - name: download packer
7 | get_url:
8 | validate_certs: "{{ validate_certs }}"
9 | url: "https://releases.hashicorp.com/packer/{{ packer_version }}/packer_{{ packer_version }}_linux_amd64.zip"
10 | dest: "/tmp/packer_{{ packer_version }}_linux_amd64.zip"
11 | force: no
12 | when: packer_installed_version.stdout != packer_version
13 |
14 | - name: install packer
15 | shell: rm -f packer* && unzip /tmp/packer_{{ packer_version }}_linux_amd64.zip
16 | args:
17 | chdir: /usr/local/bin
18 | when: packer_installed_version.stdout != packer_version
19 |
--------------------------------------------------------------------------------
/roles/test/tasks/redhat_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: download VBox (redhat)
2 | get_url:
3 | validate_certs: "{{ validate_certs }}"
4 | url: http://download.virtualbox.org/virtualbox/{{ vbox_major_version }}/VirtualBox-{{ vbox_version_redhat }}_el7-1.x86_64.rpm
5 | dest: /tmp/VirtualBox-{{ vbox_major_version }}.rpm
6 | force: no
7 |
8 | - name: check for installed vbox (redhat)
9 | shell: yum list installed | grep -i virtualbox | awk '{print $1}'
10 | register: vbox_installed_version
11 |
12 | - name: stop vbox service if it is running
13 | service: name=vbox state=stopped
14 | when: not (vbox_installed_version.stdout | match('.*' ~ vbox_package_version ~ '.*'))
15 | ignore_errors: yes
16 |
17 | - name: uninstall VBox (redhat)
18 | yum: name="{{ vbox_installed_version.stdout }}" state=absent
19 | when: not (vbox_installed_version.stdout | match('.*' ~ vbox_package_version ~ '.*'))
20 |
21 | - name: install VBox (redhat)
22 | yum: name=/tmp/VirtualBox-{{ vbox_major_version }}.rpm state=present
23 |
24 | - name: install VBox dkms and dependencies (redhat)
25 | yum: name={{ item }} state=latest
26 | with_items:
27 | - binutils
28 | - gcc
29 | - make
30 | - patch
31 | - libgomp
32 | - glibc-headers
33 | - glibc-devel
34 | - kernel-headers
35 | - kernel-devel
36 | - dkms
37 |
38 | - name: copy systemd units for virtualbox-server
39 | template: src=vbox.service.j2 dest=/etc/systemd/system/vbox.service
40 |
41 | - name: enable vbox to be started on boot-up and start it as well
42 | service: name=vbox state=started enabled=yes
43 |
44 | - name: download vagrant (redhat)
45 | get_url:
46 | validate_certs: "{{ validate_certs }}"
47 | url: https://releases.hashicorp.com/vagrant/{{ vagrant_version }}/vagrant_{{ vagrant_version }}_x86_64.rpm
48 | dest: /tmp/vagrant_{{ vagrant_version }}.rpm
49 | force: no
50 |
51 | - name: install vagrant (redhat)
52 | yum: name=/tmp/vagrant_{{ vagrant_version }}.rpm state=present
53 |
--------------------------------------------------------------------------------
/roles/test/tasks/ubuntu_tasks.yml:
--------------------------------------------------------------------------------
1 | - name: download VBox (debian)
2 | get_url:
3 | validate_certs: "{{ validate_certs }}"
4 | url: http://download.virtualbox.org/virtualbox/{{ vbox_major_version }}/virtualbox-{{ vbox_version_ubuntu_dist_version[ansible_distribution_release] }}_amd64.deb
5 | dest: /tmp/virtualbox-{{ vbox_major_version }}.deb
6 | force: no
7 |
8 | - name: check for installed vbox (ubuntu)
9 | shell: VBoxManage --version
10 | register: vbox_installed_version
11 | ignore_errors: yes
12 |
13 | - name: stop vbox service if it is running (ubuntu)
14 | service: name=vbox state=stopped
15 | when: not (vbox_installed_version.stdout | match('.*' ~ vbox_package_version ~ '.*'))
16 | ignore_errors: yes
17 |
18 | - name: uninstall VBox (ubuntu)
19 | apt: name='virtualbox-*' state=absent
20 | when: not (vbox_installed_version.stdout | match('.*' ~ vbox_package_version ~ '.*'))
21 |
22 | - name: install VBox (debian)
23 | apt: deb=/tmp/virtualbox-{{ vbox_major_version }}.deb state=present
24 |
25 | - name: install VBox dkms (debian)
26 | apt: name=dkms state=latest
27 |
28 | - name: copy systemd units for virtualbox-server
29 | template: src=vbox.service.j2 dest=/etc/systemd/system/vbox.service
30 |
31 | - name: enable vbox to be started on boot-up and start it as well
32 | service: name=vbox state=started enabled=yes
33 |
34 | - name: download vagrant (debian)
35 | get_url:
36 | validate_certs: "{{ validate_certs }}"
37 | url: https://releases.hashicorp.com/vagrant/{{ vagrant_version }}/vagrant_{{ vagrant_version }}_x86_64.deb
38 | dest: /tmp/vagrant_{{ vagrant_version }}.deb
39 | force: no
40 |
41 | - name: install vagrant (debian)
42 | apt: deb=/tmp/vagrant_{{ vagrant_version }}.deb state=present
43 |
--------------------------------------------------------------------------------
/roles/test/templates/vbox.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Virtualbox Server
3 | After=network.target auditd.service systemd-user-sessions.service time-sync.target
4 |
5 | [Service]
6 | ExecStart=/usr/lib/virtualbox/VBoxSVC --pidfile VBoxSVC.pid
7 | Restart=on-failure
8 | RestartSec=10
9 | KillMode=control-group
10 | User={{ ansible_user }}
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/ucarp/files/ucarp.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Ucarp (Virtual IP service)
3 | After=auditd.service systemd-user-sessions.service time-sync.target
4 |
5 | [Service]
6 | ExecStart=/usr/bin/ucarp.sh start
7 | KillMode=control-group
8 |
--------------------------------------------------------------------------------
/roles/ucarp/files/ucarp/vip_down.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 "
4 | if [ $# -ne 2 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | set -x -e
10 |
11 | intf=$1
12 |
13 | /sbin/ip link del dev ${intf}_0
14 |
--------------------------------------------------------------------------------
/roles/ucarp/files/ucarp/vip_up.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 "
4 | if [ $# -ne 2 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | set -x -e
10 |
11 | intf=$1
12 | vip=$2
13 |
14 | /sbin/ip link add name ${intf}_0 type dummy
15 |
16 | /sbin/ip addr add ${vip} dev ${intf}_0
17 |
18 | /sbin/ip link set dev ${intf}_0 up
19 |
--------------------------------------------------------------------------------
/roles/ucarp/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up ucarp
3 |
4 | - name: stop ucarp
5 | service: name=ucarp state=stopped
6 |
--------------------------------------------------------------------------------
/roles/ucarp/tasks/install_ucarp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This role contains tasks for configuring and starting ucarp service
3 |
4 | - name: download and install ucarp service (Redhat)
5 | yum: name=ucarp state=present
6 | when: ansible_os_family == "RedHat"
7 |
8 | - name: download and install ucarp service (Ubuntu)
9 | apt: name=ucarp state=present
10 | when: ansible_os_family == "Debian"
11 |
12 | - name: copy the ucarp start/stop script
13 | template: src=ucarp.sh.j2 dest=/usr/bin/ucarp.sh mode=u=rwx,g=rx,o=rx
14 |
15 | - name: copy the vip up and down scripts used by ucarp
16 | copy: src=ucarp/ dest=/usr/bin/ucarp/ mode=u=rwx,g=rx,o=rx
17 |
18 | - name: copy systemd units for ucarp
19 | copy: src=ucarp.service dest=/etc/systemd/system/ucarp.service
20 |
21 | - name: start ucarp
22 | service: name=ucarp state=started
23 |
--------------------------------------------------------------------------------
/roles/ucarp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | ## This check lets netmaster_ip override ucarp: when netmaster_ip is set (non-nil),
4 | ## ucarp is skipped. When it is unset, service_vip should be set to something unique.
5 | - name: install ucarp
6 | include: install_ucarp.yml
7 | when: netmaster_ip is undefined
8 |
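
As a concrete illustration of this toggle, a minimal group_vars sketch (hypothetical values): set service_vip and control_interface when ucarp should provide the virtual IP, or define netmaster_ip to skip ucarp entirely:

    # Hypothetical example values, illustration only
    control_interface: eth1        # interface ucarp binds to and derives its --srcip from
    service_vip: 192.168.2.252     # virtual IP advertised by ucarp on the master node
    # netmaster_ip: 192.168.2.10   # defining this skips the ucarp install above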
--------------------------------------------------------------------------------
/roles/ucarp/templates/ucarp.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | usage="$0 <start>"
4 | if [ $# -ne 1 ]; then
5 | echo USAGE: $usage
6 | exit 1
7 | fi
8 |
9 | set -x -e
10 |
11 | case $1 in
12 | start)
13 | /sbin/ucarp --shutdown --interface={{ control_interface }} \
14 | --srcip={{ hostvars[inventory_hostname]['ansible_' + control_interface]['ipv4']['address'] }} \
15 | --vhid=1 --pass=cluster_secret --addr={{ service_vip }} \
16 | --upscript="/usr/bin/ucarp/vip_up.sh" --downscript="/usr/bin/ucarp/vip_down.sh"
17 | ;;
18 |
19 | *)
20 | echo USAGE: $usage
21 | exit 1
22 | esac
23 |
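
The hostvars lookup in the start branch resolves to the IPv4 address of control_interface on the current host, which becomes ucarp's --srcip. A quick way to inspect that value is a standalone debug task (illustrative only; assumes facts are gathered and control_interface is defined):

    - name: show the address ucarp would use as --srcip (illustrative only)
      debug:
        msg: "{{ hostvars[inventory_hostname]['ansible_' + control_interface]['ipv4']['address'] }}"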
--------------------------------------------------------------------------------
/roles/ucp/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Role defaults for ucp
3 |
4 | ucp_version: "1.1.0"
5 | ucp_local_dir: "fetch/ucp"
6 | ucp_remote_dir: "/tmp"
7 | ucp_instance_id_file: "ucp-instance-id"
8 | ucp_fingerprint_file: "ucp-fingerprint"
9 | ucp_certificate_file: "ucp-certificate-backup.tar"
10 | ucp_fifo_file: "ucp-fifo"
11 | ucp_bootstrap_node_name: ""
12 | ucp_admin_user: "admin"
13 | ucp_admin_password: "orca"
14 | ucp_controller_replica: "--replica"
15 | ucp_rule_comment: "contiv_ucp traffic"
16 | ucp_port1: "12376"
17 | ucp_port2: "12379"
18 | ucp_port3: "12380"
19 | ucp_port4: "12381"
20 | ucp_port5: "12382"
21 | ucp_port6: "12383"
22 | ucp_port7: "12384"
23 | ucp_port8: "12385"
24 | ucp_port9: "12386"
25 | ucp_hb_port: "2375"
26 | ucp_swarm_port: "2376"
27 | ucp_controller_port: "443"
28 | ucp_swarm_strategy: "spread"
29 | ucp_license_remote: "/tmp/docker_subscription.lic"
30 | ucp_cert_secret: "secret"
31 |
--------------------------------------------------------------------------------
/roles/ucp/files/ucp.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Docker UCP (Universal Control Plane)
3 | After=auditd.service systemd-user-sessions.service time-sync.target docker.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/ucp.sh start
7 | ExecStop=/usr/bin/ucp.sh stop
8 | KillMode=control-group
9 |
--------------------------------------------------------------------------------
/roles/ucp/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This play contains tasks for cleaning up ucp
3 |
4 | - name: stop ucp
5 | service: name=ucp state=stopped
6 |
7 | - name: cleanup ucp files from remote
8 | file: name="{{ ucp_remote_dir }}/{{ item }}" state=absent
9 | with_items:
10 | - "{{ ucp_fingerprint_file }}"
11 | - "{{ ucp_instance_id_file }}"
12 | - "{{ ucp_certificate_file }}"
13 |
14 | # XXX: temporary fix for an issue with ucp 1.1.0 where it doesn't clean up this file;
15 | # remove this once it is fixed. Target fix version is 1.1.2.
16 | - name: cleanup ucp generated docker config file
17 | file: name=/etc/docker/daemon.json state=absent
18 | become: true
19 |
20 | - name: cleanup iptables for ucp
21 | shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ ucp_rule_comment }} ({{ item }})"
22 | become: true
23 | with_items:
24 | - "{{ ucp_port1 }}"
25 | - "{{ ucp_port2 }}"
26 | - "{{ ucp_port3 }}"
27 | - "{{ ucp_port4 }}"
28 | - "{{ ucp_port5 }}"
29 | - "{{ ucp_port6 }}"
30 | - "{{ ucp_port7 }}"
31 | - "{{ ucp_port8 }}"
32 | - "{{ ucp_port9 }}"
33 | - "{{ ucp_hb_port }}"
34 | - "{{ ucp_swarm_port }}"
35 | - "{{ ucp_controller_port }}"
36 |
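
Note that iptables -D only succeeds when it repeats the exact original rule, comment included, and it fails once the rule is already gone; the uninstall playbooks tolerate that by running these includes with ignore_errors. A per-task alternative, shown purely as a sketch and not what this role does, would be to mark the deletion as never failing:

    # Sketch only - not the approach used above, which relies on ignore_errors at the include level.
    - name: cleanup iptables for ucp (tolerant variant)
      shell: iptables -D INPUT -p tcp --dport {{ item }} -j ACCEPT -m comment --comment "{{ ucp_rule_comment }} ({{ item }})"
      become: true
      failed_when: false
      with_items:
        - "{{ ucp_swarm_port }}"
        - "{{ ucp_controller_port }}"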
--------------------------------------------------------------------------------
/roles/vagrant/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: check for vagrant user
3 | shell: id vagrant
4 | register: vagrant_exist
5 | ignore_errors: yes
6 |
7 | - name: install bashrc with proxy configs
8 | template:
9 | src: bashrc.j2
10 | dest: /home/vagrant/.bashrc
11 | owner: vagrant
12 | group: vagrant
13 | mode: 0755
14 | when: vagrant_exist.rc == 0
15 |
16 | - name: install bash_profile with swarm configs
17 | template:
18 | src: bash_profile.j2
19 | dest: /home/vagrant/.bash_profile
20 | owner: vagrant
21 | group: vagrant
22 | mode: 0755
23 | when: vagrant_exist.rc == 0
24 |
25 | - name: add vagrant user to docker group
26 | command: gpasswd -a vagrant docker
27 | when: vagrant_exist.rc == 0
28 |
--------------------------------------------------------------------------------
/roles/vagrant/templates/bash_profile.j2:
--------------------------------------------------------------------------------
1 | # .bash_profile
2 |
3 | # Get the aliases and functions
4 | if [ -f ~/.bashrc ]; then
5 | . ~/.bashrc
6 | fi
7 |
8 | # User specific environment and startup programs
9 |
10 | export PATH=$PATH:$HOME/.local/bin:$HOME/bin
11 |
12 | use_swarm() {
13 | export DOCKER_HOST=tcp://{{ node_addr }}:2375
14 | }
15 |
--------------------------------------------------------------------------------
/roles/vagrant/templates/bashrc.j2:
--------------------------------------------------------------------------------
1 | if [ -f /etc/bashrc ]; then
2 | . /etc/bashrc
3 | fi
4 |
5 | export HTTP_PROXY={{ lookup('env', 'HTTP_PROXY') }}
6 | export http_proxy={{ lookup('env', 'http_proxy') }}
7 | export HTTPS_PROXY={{ lookup('env', 'HTTPS_PROXY') }}
8 | export https_proxy={{ lookup('env', 'https_proxy') }}
9 |
--------------------------------------------------------------------------------
/test/files/insecure_private_key:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIIEogIBAAKCAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzI
3 | w+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoP
4 | kcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2
5 | hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NO
6 | Td0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcW
7 | yLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQIBIwKCAQEA4iqWPJXtzZA68mKd
8 | ELs4jJsdyky+ewdZeNds5tjcnHU5zUYE25K+ffJED9qUWICcLZDc81TGWjHyAqD1
9 | Bw7XpgUwFgeUJwUlzQurAv+/ySnxiwuaGJfhFM1CaQHzfXphgVml+fZUvnJUTvzf
10 | TK2Lg6EdbUE9TarUlBf/xPfuEhMSlIE5keb/Zz3/LUlRg8yDqz5w+QWVJ4utnKnK
11 | iqwZN0mwpwU7YSyJhlT4YV1F3n4YjLswM5wJs2oqm0jssQu/BT0tyEXNDYBLEF4A
12 | sClaWuSJ2kjq7KhrrYXzagqhnSei9ODYFShJu8UWVec3Ihb5ZXlzO6vdNQ1J9Xsf
13 | 4m+2ywKBgQD6qFxx/Rv9CNN96l/4rb14HKirC2o/orApiHmHDsURs5rUKDx0f9iP
14 | cXN7S1uePXuJRK/5hsubaOCx3Owd2u9gD6Oq0CsMkE4CUSiJcYrMANtx54cGH7Rk
15 | EjFZxK8xAv1ldELEyxrFqkbE4BKd8QOt414qjvTGyAK+OLD3M2QdCQKBgQDtx8pN
16 | CAxR7yhHbIWT1AH66+XWN8bXq7l3RO/ukeaci98JfkbkxURZhtxV/HHuvUhnPLdX
17 | 3TwygPBYZFNo4pzVEhzWoTtnEtrFueKxyc3+LjZpuo+mBlQ6ORtfgkr9gBVphXZG
18 | YEzkCD3lVdl8L4cw9BVpKrJCs1c5taGjDgdInQKBgHm/fVvv96bJxc9x1tffXAcj
19 | 3OVdUN0UgXNCSaf/3A/phbeBQe9xS+3mpc4r6qvx+iy69mNBeNZ0xOitIjpjBo2+
20 | dBEjSBwLk5q5tJqHmy/jKMJL4n9ROlx93XS+njxgibTvU6Fp9w+NOFD/HvxB3Tcz
21 | 6+jJF85D5BNAG3DBMKBjAoGBAOAxZvgsKN+JuENXsST7F89Tck2iTcQIT8g5rwWC
22 | P9Vt74yboe2kDT531w8+egz7nAmRBKNM751U/95P9t88EDacDI/Z2OwnuFQHCPDF
23 | llYOUI+SpLJ6/vURRbHSnnn8a/XG+nzedGH5JGqEJNQsz+xT2axM0/W/CRknmGaJ
24 | kda/AoGANWrLCz708y7VYgAtW2Uf1DPOIYMdvo6fxIB5i9ZfISgcJ/bbCUkFrhoH
25 | +vq/5CIWxCPp0f85R4qxxQ5ihxJ0YDQT9Jpx4TMss4PSavPaBH3RXow5Ohe+bYoQ
26 | NE5OgEXk2wVfZczCZpigBKbKZHNYcelXtTt/nP3rsCuGcM4h53s=
27 | -----END RSA PRIVATE KEY-----
28 |
--------------------------------------------------------------------------------
/uninstall_auth_proxy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs service cleanup for auth_proxy.
3 | #
4 | # Note: a cleanup failure should not abort the play, so we set ignore_errors to yes here
5 |
6 | - hosts: netplugin-master
7 | become: true
8 | tasks:
9 | - include_vars: roles/{{ item }}/defaults/main.yml
10 | with_items:
11 | - "auth_proxy"
12 | - include: roles/{{ item }}/tasks/cleanup.yml
13 | with_items:
14 | - auth_proxy
15 | static: no
16 | ignore_errors: yes
17 |
--------------------------------------------------------------------------------
/uninstall_contiv.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs service cleanup for contiv network.
3 | #
4 | # Note: a cleanup failure should not abort the play, so we set ignore_errors to yes here
5 |
6 | - hosts: all
7 | become: true
8 | tasks:
9 | - include_vars: roles/{{ item }}/defaults/main.yml
10 | with_items:
11 | - "contiv_network"
12 | - "swarm"
13 | - "kubernetes"
14 | - "docker"
15 | - "etcd"
16 | - include: roles/{{ item }}/tasks/cleanup.yml
17 | with_items:
18 | - contiv_network
19 | static: no
20 | ignore_errors: yes
21 |
--------------------------------------------------------------------------------
/uninstall_docker.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs service cleanup for docker.
3 | #
4 | # Note: a cleanup failure should not abort the play, so we set ignore_errors to yes here
5 |
6 | - hosts: all
7 | become: true
8 | tasks:
9 | - include_vars: roles/{{ item }}/defaults/main.yml
10 | with_items:
11 | - "contiv_network"
12 | - "swarm"
13 | - "kubernetes"
14 | - "docker"
15 | - "etcd"
16 | - include: roles/{{ item }}/tasks/cleanup.yml
17 | with_items:
18 | - docker
19 | static: no
20 | ignore_errors: yes
21 |
--------------------------------------------------------------------------------
/uninstall_etcd.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs service cleanup for etcd.
3 | #
4 | # Note: a cleanup failure should not abort the play, so we set ignore_errors to yes here
5 |
6 | - hosts: all
7 | become: true
8 | tasks:
9 | - include_vars: roles/{{ item }}/defaults/main.yml
10 | with_items:
11 | - "contiv_network"
12 | - "swarm"
13 | - "kubernetes"
14 | - "docker"
15 | - "etcd"
16 | - include: roles/{{ item }}/tasks/cleanup.yml
17 | with_items:
18 | - etcd
19 | static: no
20 | ignore_errors: yes
21 |
--------------------------------------------------------------------------------
/uninstall_scheduler.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs service cleanup for swarm.
3 | #
4 | # Note: a cleanup failure should not abort the play, so we set ignore_errors to yes here
5 |
6 | - hosts: all
7 | become: true
8 | tasks:
9 | - include_vars: roles/{{ item }}/defaults/main.yml
10 | with_items:
11 | - "contiv_network"
12 | - "swarm"
13 | - "kubernetes"
14 | - "docker"
15 | - "etcd"
16 | - include: roles/{{ item }}/tasks/cleanup.yml
17 | with_items:
18 | - swarm
19 | static: no
20 | ignore_errors: yes
21 |
--------------------------------------------------------------------------------
/uninstall_v2plugin.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook performs cleanup for v2plugin.
3 | #
4 | # Note: a cleanup failure should not abort the play, so we set ignore_errors to yes here
5 |
6 | - hosts: all
7 | become: true
8 | tasks:
9 | - include_vars: roles/v2plugin/defaults/main.yml
10 | - include: roles/v2plugin/tasks/cleanup.yml
11 | static: no
12 | ignore_errors: yes
13 |
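
Each of these uninstall playbooks is an independent entry point. A hypothetical wrapper (not part of the repo) chaining several of them with the playbook-level include style used throughout this codebase might look like:

    ---
    # Hypothetical wrapper playbook, illustration only
    - include: uninstall_scheduler.yml
    - include: uninstall_contiv.yml
    - include: uninstall_etcd.yml
    - include: uninstall_docker.yml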
--------------------------------------------------------------------------------