├── LICENSE ├── README.md ├── deploy_contiv_network_config.yml ├── deploy_istio.yml ├── deploy_sample_apps.yml ├── group_vars └── all ├── inventory ├── roles ├── common │ └── tasks │ │ └── main.yml ├── contiv │ └── tasks │ │ └── main.yml ├── contiv_network_cfg │ └── tasks │ │ └── main.yml ├── docker │ └── tasks │ │ └── main.yml ├── istio │ └── tasks │ │ └── main.yml ├── kubeadm │ └── tasks │ │ └── main.yml ├── master │ └── tasks │ │ └── main.yml ├── sample_apps │ ├── tasks │ │ ├── PHPGuestbook.yml │ │ ├── WordPressSQL.yml │ │ └── main.yml │ ├── templates │ │ ├── guestbook.yaml │ │ └── wordpresssql.yaml │ └── vars │ │ └── main.yml └── worker │ └── tasks │ └── main.yml └── site.yml /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Brad Downey 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Basic Install of Kubernetes and Contiv Networking 2 | ## standalone kubeadm deployment 3 | ### Original code credit goes to https://github.com/ben-st/ansible-kubeadm. I used it as a base and changed the CNI to Contiv (by Cisco). 4 | 5 | - Requires Ansible 2.4 or newer 6 | - Expects 3 ubuntu nodes (at least 16.04) 7 | - Expects passwordless sudo 8 | 9 | These playbooks deploy a very basic installation of kubeadm. 10 | To use them, first edit the "inventory" file to contain the 11 | hostnames of the machines on which you want kubeadm deployed, and edit the 12 | group_vars/ file to set any kubeadm configuration parameters you need. 13 | 14 | Then run the playbook, like this: 15 | 16 | `ansible-playbook -i inventory site.yml` 17 | 18 | 19 | This is a very simple playbook. It deploys the following: 20 | 21 | 1. Installs basic components (docker, kubeadm, contiv) 22 | 2. Basic kubernetes cluster using kubeadm (3 nodes is recommended) 23 | 3. Contiv CNI in vxlan mode 24 | 4. Adds three data networks plus the contivh1 infra network (see group_vars/all) 25 | 5. Adds two sample applications (guestbook and wordpress) 26 | 27 | 28 | -------------------------------------------------------------------------------- /deploy_contiv_network_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is used to configure basic networking options in contiv. 3 | # The expectation is it will run on the master host using netctl installed previously. 4 | # The k8s cluster should be set up and in a running state. 5 | # BGP Configuration is optional. Comment out BGP roles as necessary.
6 | 7 | 8 | - name: Gather facts on all hosts 9 | hosts: 10 | - master 11 | - worker 12 | remote_user: "{{ ansible_remote_user }}" 13 | gather_facts: True 14 | become: yes 15 | become_method: sudo 16 | 17 | - name: Configure networks 18 | hosts: master 19 | remote_user: "{{ ansible_remote_user }}" 20 | become: yes 21 | become_method: sudo 22 | roles: 23 | - contiv_network_cfg 24 | -------------------------------------------------------------------------------- /deploy_istio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is to install istio service mesh. See istio.io for more info. 3 | 4 | - name: Deploy Istio Service Mesh 5 | hosts: master 6 | remote_user: "{{ ansible_remote_user }}" 7 | become: yes 8 | become_method: sudo 9 | roles: 10 | - { role: istio, when: deploy_istio } -------------------------------------------------------------------------------- /deploy_sample_apps.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook installs two sample applications with contiv security groups and istio sidecars. 3 | 4 | - name: Deploy Sample Applications 5 | hosts: master 6 | remote_user: "{{ ansible_remote_user }}" 7 | become: yes 8 | become_method: sudo 9 | roles: 10 | - { role: sample_apps, when: deploy_sample_apps } -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | 2 | kubeadm_token: db85f7.cff657b31b20eed5 3 | 4 | master_ip: "{{ hostvars['master1']['ansible_eth0']['ipv4']['address'] }}" 5 | 6 | ansible_remote_user: ubuntu 7 | 8 | kubeadm_reset_before_init: true 9 | 10 | delete_kube_dns: false 11 | 12 | deploy_sample_apps: true 13 | 14 | deploy_istio: true 15 | 16 | contiv_nets: 17 | # This network name is required for host to access pods. 
18 | contivh1: 19 | net_type: -n infra 20 | net_sub: -s 192.0.2.0/24 21 | net_gw: -g 192.0.2.1 22 | # This is used for pods that do not have a io.contiv.network label 23 | default-net: 24 | net_type: -n data 25 | net_sub: -s 172.16.10.10-172.16.10.250/24 26 | net_gw: -g 172.16.10.1 27 | blue: 28 | net_type: -n data 29 | net_sub: -s 172.17.0.5-172.17.15.250/20 30 | net_gw: -g 172.17.0.1 31 | green: 32 | net_type: -n data 33 | net_sub: -s 172.18.0.5-172.18.15.250/20 34 | net_gw: -g 172.18.0.1 35 | -------------------------------------------------------------------------------- /inventory: -------------------------------------------------------------------------------- 1 | 2 | [master] 3 | master1 ansible_ssh_host=192.168.1.10 4 | 5 | [worker] 6 | 192.168.1.11 7 | 192.168.1.12 8 | 9 | [master:vars] 10 | ansible_ssh_common_args='-o StrictHostKeyChecking=no' 11 | 12 | [worker:vars] 13 | ansible_ssh_common_args='-o StrictHostKeyChecking=no' -------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install and Update Python 2 | raw: sudo bash -c "test -e /usr/bin/python || (apt -qqy update && apt install -qy python-minimal)" 3 | 4 | - name: Run the equivalent of "apt-get update" as a separate step 5 | apt: 6 | update_cache: yes 7 | become: yes 8 | become_method: sudo 9 | 10 | # This may require a reboot 11 | - name: Update all packages to the latest version 12 | apt: 13 | upgrade: dist 14 | become: yes 15 | become_method: sudo 16 | 17 | - name: install basic packages 18 | apt: package={{ item }} update_cache=yes 19 | with_items: 20 | - jq 21 | - tree 22 | - python-yaml 23 | become: yes 24 | become_method: sudo -------------------------------------------------------------------------------- /roles/contiv/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy files 
from contiv/netplugin on github 3 | get_url: 4 | url: "{{ item }}" 5 | dest: /tmp/ 6 | mode: 0755 7 | with_items: 8 | - https://raw.githubusercontent.com/contiv/netplugin/master/install/k8s/contiv/contiv-compose 9 | - https://raw.githubusercontent.com/contiv/netplugin/master/install/k8s/contiv/contiv-base.yaml 10 | 11 | - name: Contiv Compose 12 | shell: /tmp/contiv-compose use-release --k8s-api https://{{ master_ip }}:6443 /tmp/contiv-base.yaml > /tmp/contiv.yaml 13 | 14 | - name: Install Contiv Networking 15 | command: "kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /tmp/contiv.yaml" 16 | 17 | # Contiv provides name resolution via the EP name. Comment this out to keep kube-dns 18 | - name: Remove kube-dns 19 | command: kubectl --kubeconfig /etc/kubernetes/admin.conf {{ item }} 20 | with_items: 21 | - delete deployment/kube-dns -n kube-system 22 | - delete svc kube-dns -n kube-system 23 | - delete serviceaccounts kube-dns -n kube-system 24 | - delete clusterrolebindings system:kube-dns -n kube-system 25 | ignore_errors: yes 26 | when: delete_kube_dns 27 | 28 | - name: wait for netmaster to become active. 29 | wait_for: 30 | port: 9999 31 | delay: 10 32 | timeout: 900 33 | 34 | - name: Ensure there is a netmaster entry in /etc/hosts for this machine. Otherwise netctl doesn't work. 
35 | lineinfile: 36 | dest: /etc/hosts 37 | line: "{{ master_ip }} netmaster" 38 | 39 | - pause: 40 | minutes: 2 41 | prompt: "Make sure network pods are started" -------------------------------------------------------------------------------- /roles/contiv_network_cfg/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Configure Networks 4 | command: netctl net create -t default {{ item.value.net_type }} {{ item.value.net_sub }} {{ item.value.net_gw }} {{ item.key }} 5 | with_dict: "{{ contiv_nets }}" 6 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install docker 4 | apt: package={{ item }} update_cache=yes 5 | with_items: 6 | - docker.io 7 | 8 | # Use ansible_remote_user (defined in group_vars/all) -- ansible_user is not set anywhere in this project. 9 | - name: add remote user to group docker 10 | command: usermod -aG docker {{ ansible_remote_user }} 11 | 12 | - name: Start docker service 13 | service: 14 | name: docker 15 | state: restarted 16 | enabled: yes 17 | 18 | # This is needed based on Docker 1.13 update. Kubernetes and/or CNI could also fix this.
18 | - name: Re-enable ipchains FORWARD 19 | iptables: 20 | chain: FORWARD 21 | policy: ACCEPT -------------------------------------------------------------------------------- /roles/istio/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Get istio latest release version 4 | shell: 'curl -L -s https://api.github.com/repos/istio/istio/releases/latest | grep tag_name | sed "s/ *\"tag_name\": *\"\(.*\)\",*/\1/"' 5 | register: istio_ver 6 | 7 | - name: Untar istio 8 | unarchive: 9 | src: https://github.com/istio/istio/releases/download/{{ istio_ver.stdout }}/istio-{{ istio_ver.stdout }}-linux.tar.gz 10 | dest: /tmp 11 | remote_src: yes 12 | 13 | - name: Copy istioctl to /usr/bin 14 | copy: 15 | src: /tmp/istio-{{ istio_ver.stdout }}/bin/istioctl 16 | dest: /usr/bin 17 | remote_src: yes 18 | mode: 0755 19 | owner: root 20 | group: root 21 | 22 | - name: Install base Istio 23 | command: "kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /tmp/istio-{{ istio_ver.stdout }}/install/kubernetes/istio.yaml" 24 | register: task_result 25 | until: task_result.rc == 0 26 | retries: 3 27 | delay: 5 28 | ignore_errors: yes 29 | 30 | - pause: 31 | minutes: 1 32 | prompt: "Make sure istio pods are started" 33 | 34 | # Pipe instead of bash process substitution: the shell module runs /bin/sh (dash on Ubuntu), which does not support <(...). 35 | - name: Install Bookinfo Sample App 36 | shell: "istioctl kube-inject -f /tmp/istio-{{ istio_ver.stdout }}/samples/bookinfo/kube/bookinfo.yaml | kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f -" 37 | when: deploy_sample_apps 38 | 39 | - name: Location of Sample Files 40 | debug: 41 | msg: "The sample files are located at /tmp/istio-{{ istio_ver.stdout }}/samples/bookinfo/kube/" -------------------------------------------------------------------------------- /roles/kubeadm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: install transport-https 4 | apt: package={{ item }} update_cache=yes 5 | with_items: 6 | -
apt-transport-https 7 | 8 | - apt_key: 9 | url: "https://packages.cloud.google.com/apt/doc/apt-key.gpg" 10 | state: present 11 | 12 | - name: Add Kubernetes apt repo 13 | apt_repository: 14 | repo: 'deb http://apt.kubernetes.io/ kubernetes-xenial main' 15 | filename: kubernetes 16 | state: present 17 | 18 | - name: install kubeadm 19 | apt: package={{ item }} update_cache=yes 20 | with_items: 21 | - kubelet 22 | - kubeadm 23 | - kubectl 24 | - kubernetes-cni 25 | -------------------------------------------------------------------------------- /roles/master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reset kubeadm before init in case this is not the first run 3 | command: kubeadm reset 4 | when: kubeadm_reset_before_init 5 | 6 | - name: kubeadm init with pre generated token 7 | command: kubeadm init --token {{ kubeadm_token }} --service-cidr 10.96.0.0/12 8 | 9 | - name: wait for kubernetes to become active. 10 | wait_for: 11 | port: 6443 12 | delay: 10 13 | timeout: 300 14 | 15 | - name: allow permissive RBAC rules 16 | command: "kubectl --kubeconfig /etc/kubernetes/admin.conf create clusterrolebinding permissive-binding \ 17 | --clusterrole=cluster-admin \ 18 | --user=admin \ 19 | --user=kubelet \ 20 | --group=system:serviceaccounts" 21 | 22 | - name: taint master node 23 | command: kubectl --kubeconfig /etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master- 24 | 25 | - name: create .kube dir 26 | file: 27 | path: ~{{ansible_remote_user }}/.kube 28 | state: directory 29 | owner: "{{ ansible_remote_user }}" 30 | group: "{{ ansible_remote_user }}" 31 | 32 | - name: copy config file to HOME/.kube dir 33 | copy: 34 | src: /etc/kubernetes/admin.conf 35 | dest: ~{{ansible_remote_user }}/.kube/config 36 | remote_src: yes 37 | owner: "{{ ansible_remote_user }}" 38 | group: "{{ ansible_remote_user }}" 39 | force: yes 40 | mode: 0400 41 | 42 | - name: copy config file to root/.kube 
dir 43 | copy: 44 | src: /etc/kubernetes/admin.conf 45 | dest: /root/.kube/config 46 | remote_src: yes 47 | owner: root 48 | group: root 49 | force: yes 50 | mode: 0400 51 | 52 | - pause: 53 | minutes: 3 54 | prompt: "Make sure network pods are started" -------------------------------------------------------------------------------- /roles/sample_apps/tasks/PHPGuestbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove app if exists 3 | shell: if [ -e /tmp/guestbook.yaml ]; then kubectl --kubeconfig /etc/kubernetes/admin.conf delete -f /tmp/guestbook.yaml; rm /tmp/guestbook.yaml; else echo "Existing file not found"; fi 4 | 5 | - name: Remove Groups if exists 6 | command: netctl group rm {{ item.value.epg_name }} 7 | with_dict: "{{ contiv_groups['PHPGuestbook'] }}" 8 | ignore_errors: yes 9 | 10 | - name: Remove Policy Groups 11 | command: netctl policy rm {{ item.value.policy_name }} 12 | with_dict: "{{ contiv_groups['PHPGuestbook'] }}" 13 | ignore_errors: yes 14 | 15 | - name: Create Policy Groups 16 | command: netctl policy create {{ item.value.policy_name }} 17 | with_dict: "{{ contiv_groups['PHPGuestbook'] }}" 18 | 19 | - name: Configure Endpoint Groups 20 | command: netctl group create {{ item.value.network_name }} {{ item.value.epg_name }} -p {{ item.value.policy_name }} 21 | with_dict: "{{ contiv_groups['PHPGuestbook'] }}" 22 | 23 | - name: Create Policy Rules 24 | command: netctl policy rule-add {{ item.0.policy_name }} {{ item.1 }} 25 | with_subelements: 26 | - "{{ contiv_groups['PHPGuestbook'] }}" 27 | - policy_rules 28 | 29 | 30 | - name: Copy Guestbook YAML to host 31 | template: 32 | src: guestbook.yaml 33 | dest: /tmp/guestbook.yaml 34 | 35 | - name: Deploying PHP Guestbook application with Redis 36 | command: "kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /tmp/guestbook.yaml" 37 | 38 | -------------------------------------------------------------------------------- 
/roles/sample_apps/tasks/WordPressSQL.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove app if exists 3 | shell: if [ -e /tmp/wordpresssql.yaml ]; then kubectl --kubeconfig /etc/kubernetes/admin.conf delete -f /tmp/wordpresssql.yaml; rm /tmp/wordpresssql.yaml; else echo "Existing file not found"; fi 4 | 5 | - name: Remove Groups if exists 6 | command: netctl group rm {{ item.value.epg_name }} 7 | with_dict: "{{ contiv_groups['WordPressSQL'] }}" 8 | ignore_errors: yes 9 | 10 | - name: Remove Policy Groups 11 | command: netctl policy rm {{ item.value.policy_name }} 12 | with_dict: "{{ contiv_groups['WordPressSQL'] }}" 13 | ignore_errors: yes 14 | 15 | - name: Create Policy Groups 16 | command: netctl policy create {{ item.value.policy_name }} 17 | with_dict: "{{ contiv_groups['WordPressSQL'] }}" 18 | 19 | - name: Configure Endpoint Groups 20 | command: netctl group create {{ item.value.network_name }} {{ item.value.epg_name }} -p {{ item.value.policy_name }} 21 | with_dict: "{{ contiv_groups['WordPressSQL'] }}" 22 | 23 | - name: Create Policy Rules 24 | command: netctl policy rule-add {{ item.0.policy_name }} {{ item.1 }} 25 | with_subelements: 26 | - "{{ contiv_groups['WordPressSQL'] }}" 27 | - policy_rules 28 | 29 | - name: Copy WordPressSQL YAML to host 30 | template: 31 | src: wordpresssql.yaml 32 | dest: /tmp/wordpresssql.yaml 33 | 34 | - name: Create SQL Secret 35 | command: "kubectl --kubeconfig /etc/kubernetes/admin.conf {{ item }}" 36 | with_items: 37 | - "delete secret mysql-pass" 38 | - "create secret generic mysql-pass --from-literal=password=YOUR_PASSWORD" 39 | ignore_errors: yes 40 | 41 | - name: Deploying Wordpress SQL 42 | command: "kubectl --kubeconfig /etc/kubernetes/admin.conf apply -f /tmp/wordpresssql.yaml" 43 | 44 | -------------------------------------------------------------------------------- /roles/sample_apps/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | - import_tasks: PHPGuestbook.yml 2 | - import_tasks: WordPressSQL.yml -------------------------------------------------------------------------------- /roles/sample_apps/templates/guestbook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 2 | kind: Deployment 3 | metadata: 4 | name: frontend 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: guestbook 9 | tier: frontend 10 | replicas: 3 11 | template: 12 | metadata: 13 | labels: 14 | app: guestbook 15 | tier: frontend 16 | io.contiv.network: "{{ contiv_groups['PHPGuestbook']['epg-web']['network_name'] }}" 17 | io.contiv.net-group: "{{ contiv_groups['PHPGuestbook']['epg-web']['epg_name'] }}" 18 | spec: 19 | containers: 20 | - name: php-redis 21 | image: gcr.io/google-samples/gb-frontend:v4 22 | resources: 23 | requests: 24 | cpu: 100m 25 | memory: 100Mi 26 | env: 27 | - name: GET_HOSTS_FROM 28 | value: dns 29 | # Using `GET_HOSTS_FROM=dns` requires your cluster to 30 | # provide a dns service. As of Kubernetes 1.3, DNS is a built-in 31 | # service launched automatically. However, if the cluster you are using 32 | # does not have a built-in DNS service, you can instead 33 | # instead access an environment variable to find the master 34 | # service's host. To do so, comment out the 'value: dns' line above, and 35 | # uncomment the line below: 36 | # value: env 37 | ports: 38 | - containerPort: 80 39 | --- 40 | apiVersion: v1 41 | kind: Service 42 | metadata: 43 | name: frontend 44 | labels: 45 | app: guestbook 46 | tier: frontend 47 | spec: 48 | # comment or delete the following line if you want to use a LoadBalancer 49 | type: NodePort 50 | # if your cluster supports it, uncomment the following to automatically create 51 | # an external load-balanced IP for the frontend service. 
52 | # type: LoadBalancer 53 | ports: 54 | - port: 80 55 | selector: 56 | app: guestbook 57 | tier: frontend 58 | --- 59 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 60 | kind: Deployment 61 | metadata: 62 | name: redis-master 63 | spec: 64 | selector: 65 | matchLabels: 66 | app: redis 67 | role: master 68 | tier: backend 69 | replicas: 1 70 | template: 71 | metadata: 72 | labels: 73 | app: redis 74 | role: master 75 | tier: backend 76 | io.contiv.network: "{{ contiv_groups['PHPGuestbook']['epg-db']['network_name'] }}" 77 | io.contiv.net-group: "{{ contiv_groups['PHPGuestbook']['epg-db']['epg_name'] }}" 78 | spec: 79 | containers: 80 | - name: master 81 | image: k8s.gcr.io/redis:e2e # or just image: redis 82 | resources: 83 | requests: 84 | cpu: 100m 85 | memory: 100Mi 86 | ports: 87 | - containerPort: 6379 88 | --- 89 | apiVersion: v1 90 | kind: Service 91 | metadata: 92 | name: redis-master 93 | labels: 94 | app: redis 95 | role: master 96 | tier: backend 97 | spec: 98 | ports: 99 | - port: 6379 100 | targetPort: 6379 101 | selector: 102 | app: redis 103 | role: master 104 | tier: backend 105 | --- 106 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 107 | kind: Deployment 108 | metadata: 109 | name: redis-slave 110 | spec: 111 | selector: 112 | matchLabels: 113 | app: redis 114 | role: slave 115 | tier: backend 116 | replicas: 2 117 | template: 118 | metadata: 119 | labels: 120 | app: redis 121 | role: slave 122 | tier: backend 123 | io.contiv.network: "{{ contiv_groups['PHPGuestbook']['epg-db']['network_name'] }}" 124 | io.contiv.net-group: "{{ contiv_groups['PHPGuestbook']['epg-db']['epg_name'] }}" 125 | spec: 126 | containers: 127 | - name: slave 128 | image: gcr.io/google_samples/gb-redisslave:v1 129 | resources: 130 | requests: 131 | cpu: 100m 132 | memory: 100Mi 133 | env: 134 | - name: GET_HOSTS_FROM 135 | value: dns 136 | # Using `GET_HOSTS_FROM=dns` requires your cluster to 137 | # provide a dns service. 
As of Kubernetes 1.3, DNS is a built-in 138 | # service launched automatically. However, if the cluster you are using 139 | # does not have a built-in DNS service, you can instead 140 | # instead access an environment variable to find the master 141 | # service's host. To do so, comment out the 'value: dns' line above, and 142 | # uncomment the line below: 143 | # value: env 144 | ports: 145 | - containerPort: 6379 146 | --- 147 | apiVersion: v1 148 | kind: Service 149 | metadata: 150 | name: redis-slave 151 | labels: 152 | app: redis 153 | role: slave 154 | tier: backend 155 | spec: 156 | ports: 157 | - port: 6379 158 | selector: 159 | app: redis 160 | role: slave 161 | tier: backend 162 | -------------------------------------------------------------------------------- /roles/sample_apps/templates/wordpresssql.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: wp-pv-volume1 5 | labels: 6 | app: wordpress 7 | type: local 8 | spec: 9 | capacity: 10 | storage: 2Gi 11 | accessModes: 12 | - ReadWriteOnce 13 | hostPath: 14 | path: "/tmp/wordpress-data1" 15 | --- 16 | kind: PersistentVolume 17 | apiVersion: v1 18 | metadata: 19 | name: wp-pv-volume2 20 | labels: 21 | app: wordpress 22 | type: local 23 | spec: 24 | capacity: 25 | storage: 2Gi 26 | accessModes: 27 | - ReadWriteOnce 28 | hostPath: 29 | path: "/tmp/wordpress-data2" 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: wordpress-mysql 35 | labels: 36 | app: wordpress 37 | spec: 38 | ports: 39 | - port: 3306 40 | selector: 41 | app: wordpress 42 | tier: mysql 43 | type: ClusterIP 44 | --- 45 | apiVersion: v1 46 | kind: PersistentVolumeClaim 47 | metadata: 48 | name: mysql-pv-claim 49 | labels: 50 | app: wordpress 51 | spec: 52 | accessModes: 53 | - ReadWriteOnce 54 | resources: 55 | requests: 56 | storage: 2Gi 57 | --- 58 | apiVersion: apps/v1 # for versions before 1.9.0 use 
apps/v1beta2 59 | kind: Deployment 60 | metadata: 61 | name: wordpress-mysql 62 | labels: 63 | app: wordpress 64 | spec: 65 | selector: 66 | matchLabels: 67 | app: wordpress 68 | tier: mysql 69 | strategy: 70 | type: Recreate 71 | template: 72 | metadata: 73 | labels: 74 | app: wordpress 75 | tier: mysql 76 | io.contiv.network: "{{ contiv_groups['WordPressSQL']['epg-db']['network_name'] }}" 77 | io.contiv.net-group: "{{ contiv_groups['WordPressSQL']['epg-db']['epg_name'] }}" 78 | spec: 79 | containers: 80 | - image: mysql:5.6 81 | name: mysql 82 | env: 83 | - name: MYSQL_ROOT_PASSWORD 84 | valueFrom: 85 | secretKeyRef: 86 | name: mysql-pass 87 | key: password 88 | ports: 89 | - containerPort: 3306 90 | name: mysql 91 | volumeMounts: 92 | - name: mysql-persistent-storage 93 | mountPath: /var/lib/mysql 94 | volumes: 95 | - name: mysql-persistent-storage 96 | persistentVolumeClaim: 97 | claimName: mysql-pv-claim 98 | --- 99 | apiVersion: v1 100 | kind: Service 101 | metadata: 102 | name: wordpress 103 | labels: 104 | app: wordpress 105 | spec: 106 | ports: 107 | - port: 80 108 | selector: 109 | app: wordpress 110 | tier: frontend 111 | type: LoadBalancer 112 | --- 113 | apiVersion: v1 114 | kind: PersistentVolumeClaim 115 | metadata: 116 | name: wp-pv-claim 117 | labels: 118 | app: wordpress 119 | spec: 120 | accessModes: 121 | - ReadWriteOnce 122 | resources: 123 | requests: 124 | storage: 2Gi 125 | --- 126 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2 127 | kind: Deployment 128 | metadata: 129 | name: wordpress 130 | labels: 131 | app: wordpress 132 | io.contiv.network: "{{ contiv_groups['WordPressSQL']['epg-web']['network_name'] }}" 133 | io.contiv.net-group: "{{ contiv_groups['WordPressSQL']['epg-web']['epg_name'] }}" 134 | spec: 135 | selector: 136 | matchLabels: 137 | app: wordpress 138 | tier: frontend 139 | strategy: 140 | type: Recreate 141 | template: 142 | metadata: 143 | labels: 144 | app: wordpress 145 | tier: frontend 146 | spec: 147 
| containers: 148 | - image: wordpress:4.8-apache 149 | name: wordpress 150 | env: 151 | - name: WORDPRESS_DB_HOST 152 | value: wordpress-mysql 153 | - name: WORDPRESS_DB_PASSWORD 154 | valueFrom: 155 | secretKeyRef: 156 | name: mysql-pass 157 | key: password 158 | ports: 159 | - containerPort: 80 160 | name: wordpress 161 | volumeMounts: 162 | - name: wordpress-persistent-storage 163 | mountPath: /var/www/html 164 | volumes: 165 | - name: wordpress-persistent-storage 166 | persistentVolumeClaim: 167 | claimName: wp-pv-claim 168 | -------------------------------------------------------------------------------- /roles/sample_apps/vars/main.yml: -------------------------------------------------------------------------------- 1 | contiv_groups: 2 | PHPGuestbook: 3 | epg-web: 4 | network_name: blue 5 | epg_name: epg-blue-guestbook-web 6 | policy_name: policy-blue-guestbook-web 7 | policy_rules: 8 | - "10 -d in -l tcp -P 80 -j allow" 9 | epg-db: 10 | network_name: blue 11 | epg_name: epg-blue-guestbook-db 12 | policy_name: policy-blue-guestbook-db 13 | policy_rules: 14 | - "10 -d in -g epg-blue-guestbook-web -j allow" 15 | - "20 -d in -l icmp -j allow" 16 | WordPressSQL: 17 | epg-web: 18 | network_name: blue 19 | epg_name: epg-blue-wordpress-web 20 | policy_name: policy-blue-wordpress-web 21 | policy_rules: 22 | - "10 -d in -l tcp -P 80 -j allow" 23 | epg-db: 24 | network_name: blue 25 | epg_name: epg-blue-wordpress-db 26 | policy_name: policy-blue-wordpress-db 27 | policy_rules: 28 | - "10 -d in -g epg-blue-wordpress-web -j allow" 29 | - "20 -d in -l icmp -j allow" -------------------------------------------------------------------------------- /roles/worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Reset kubeadm before init in case this is not the first run 3 | command: kubeadm reset 4 | when: kubeadm_reset_before_init 5 | 6 | - name: kubeadm join with pre generated token 7 | command: kubeadm 
join --token {{ kubeadm_token }} {{ master_ip }}:6443 --discovery-token-unsafe-skip-ca-verification 8 | -------------------------------------------------------------------------------- /site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook deploys a simple kubeadm install. 3 | - name: Bootstrap Tasks 4 | hosts: 5 | - master 6 | - worker 7 | remote_user: "{{ ansible_remote_user }}" 8 | gather_facts: False 9 | roles: 10 | - common 11 | 12 | - name: Install Kubernetes master 13 | hosts: master 14 | remote_user: "{{ ansible_remote_user }}" 15 | become: yes 16 | become_method: sudo 17 | roles: 18 | - docker 19 | - kubeadm 20 | - master 21 | - contiv 22 | 23 | - name: Install nodes 24 | remote_user: "{{ ansible_remote_user }}" 25 | hosts: worker 26 | become: yes 27 | become_method: sudo 28 | roles: 29 | - docker 30 | - kubeadm 31 | - worker 32 | 33 | - import_playbook: deploy_contiv_network_config.yml 34 | - import_playbook: deploy_istio.yml 35 | - import_playbook: deploy_sample_apps.yml 36 | 37 | --------------------------------------------------------------------------------