├── playbooks ├── roles ├── ka-monitoring │ ├── private │ │ ├── roles │ │ ├── uninstall.yml │ │ └── config.yml │ ├── uninstall.yml │ └── config.yml ├── ka-init │ ├── init.yml │ └── group_vars │ │ ├── builder.yml │ │ └── all.yml ├── allhost-setup.yml ├── vm-teardown.yml ├── kube-teardown.yml ├── fedora-python-bootstrapper.yml ├── virthost-setup.yml ├── bmhost-setup.yml ├── kube-install.yml └── kube-install-ovn.yml ├── roles ├── kube-init │ ├── defaults │ │ └── main.yml │ ├── templates │ │ ├── kubeadm.cfg.v1alpha.j2 │ │ ├── kubeadm.cfg.v1beta.j2 │ │ ├── audit.yaml.j2 │ │ └── kubeadm.cfg.v1beta2.j2 │ └── tasks │ │ └── main.yml ├── kube-install │ ├── vars │ │ ├── RedHat-26.yml │ │ ├── RedHat-6.yml │ │ ├── RedHat-7.yml │ │ └── RedHat-8.yml │ ├── templates │ │ └── kubernetes.repo.j2 │ ├── defaults │ │ └── main.yml │ ├── tasks │ │ ├── variables.yml │ │ ├── system_setup.yml │ │ ├── binary_install.yml │ │ └── main.yml │ └── handlers │ │ └── main.yml ├── kube-niceties │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── configure-kubectl │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── cri-o-install │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ ├── pkg_copr_install.yml │ │ ├── binary_install.yml │ │ └── build_install.yml ├── kube-teardown │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── bridge-setup │ ├── templates │ │ ├── ifcfg-eth0.j2 │ │ └── ifcfg-cni0.j2 │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── kubectl-proxy-systemd │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── templates │ │ └── kubectl-proxy.service.j2 │ └── tasks │ │ └── main.yml ├── multus-2nics-setup │ ├── templates │ │ ├── ifcfg-eth1.1.j2 │ │ └── ifcfg-eth1.j2 │ └── tasks │ │ └── main.yml ├── optional-packages │ └── tasks │ │ └── main.yml ├── ovnkube-setup │ ├── files │ │ └── ovnkube-node.diff │ └── tasks │ │ └── main.yml ├── lb-setup │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── haproxy.cfg.j2 ├── 
kube-master-join-cluster │ └── tasks │ │ └── main.yml ├── kube-join-cluster │ └── tasks │ │ └── main.yml └── ovn-setup │ └── tasks │ └── main.yml ├── host_vars ├── kube-node-1.yml ├── kube-node-2.yml └── kube-master.yml ├── docs ├── images │ └── kube-ansible_overview.png ├── scratch.rbac.md ├── troubleshooting.md ├── additional_scenarios_and_usage.md └── scratch.md ├── inventory ├── examples │ ├── bmhost │ │ └── bmhost.inventory │ ├── virthost │ │ └── virthost.inventory │ ├── multus-2nics │ │ └── extra-vars.yml │ ├── vms │ │ └── vms.inventory │ └── crio │ │ └── crio.inventory └── ci │ └── virthost2.home.61will.space │ ├── vms.local │ └── engine.yml ├── ansible.cfg ├── AUTHORS ├── .gitignore ├── contrib └── scripts │ └── changelog.sh ├── requirements.yml ├── Jenkinsfile ├── .github └── workflows │ └── lint.yml ├── LICENSE ├── README.md └── CHANGELOG.md /playbooks/roles: -------------------------------------------------------------------------------- 1 | ../roles/ -------------------------------------------------------------------------------- /playbooks/ka-monitoring/private/roles: -------------------------------------------------------------------------------- 1 | ../../../roles/ -------------------------------------------------------------------------------- /roles/kube-init/defaults/main.yml: -------------------------------------------------------------------------------- 1 | control_plane_listen_all: false 2 | -------------------------------------------------------------------------------- /roles/kube-install/vars/RedHat-26.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __firewall_service: iptables 3 | -------------------------------------------------------------------------------- /roles/kube-install/vars/RedHat-6.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __firewall_service: iptables 3 | 
-------------------------------------------------------------------------------- /roles/kube-install/vars/RedHat-7.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __firewall_service: firewalld 3 | -------------------------------------------------------------------------------- /roles/kube-install/vars/RedHat-8.yml: -------------------------------------------------------------------------------- 1 | --- 2 | __firewall_service: firewalld 3 | -------------------------------------------------------------------------------- /roles/kube-niceties/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubectl_bash_completion: true 3 | -------------------------------------------------------------------------------- /host_vars/kube-node-1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cni_subnet: fd00:101::/64 3 | cni_gateway: fd00:101::1 4 | -------------------------------------------------------------------------------- /host_vars/kube-node-2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cni_subnet: fd00:102::/64 3 | cni_gateway: fd00:102::1 4 | -------------------------------------------------------------------------------- /host_vars/kube-master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | cni_subnet: fd00:1234::/110 3 | cni_gateway: fd00:1234::1 4 | -------------------------------------------------------------------------------- /roles/configure-kubectl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | kubectl_user: centos 2 | kubectl_group: centos 3 | kubectl_home: /home/centos 4 | -------------------------------------------------------------------------------- /roles/cri-o-install/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | crio_giturl: https://github.com/cri-o/cri-o.git 2 | crio_version: master 3 | 4 | -------------------------------------------------------------------------------- /roles/kube-teardown/defaults/main.yml: -------------------------------------------------------------------------------- 1 | kubectl_user: centos 2 | kubectl_group: centos 3 | kubectl_home: /home/centos 4 | -------------------------------------------------------------------------------- /docs/images/kube-ansible_overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-nfvpe/kube-ansible/HEAD/docs/images/kube-ansible_overview.png -------------------------------------------------------------------------------- /playbooks/ka-monitoring/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ../ka-init/init.yml 3 | 4 | - import_playbook: private/uninstall.yml 5 | -------------------------------------------------------------------------------- /roles/bridge-setup/templates/ifcfg-eth0.j2: -------------------------------------------------------------------------------- 1 | DEVICE="eth0" 2 | BOOTPROTO="none" 3 | ONBOOT="yes" 4 | NM_CONTROLLED=no 5 | BRIDGE=cni0 6 | -------------------------------------------------------------------------------- /roles/kubectl-proxy-systemd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubectl_home: /home/centos 3 | kubectl_user: centos 4 | kubectl_group: centos 5 | -------------------------------------------------------------------------------- /inventory/examples/bmhost/bmhost.inventory: -------------------------------------------------------------------------------- 1 | bmhost ansible_host=10.8.125.31 ansible_ssh_user=therbert 2 | 3 | [bmhosts] 4 | bmhost 5 | 6 | 
-------------------------------------------------------------------------------- /roles/kube-install/templates/kubernetes.repo.j2: -------------------------------------------------------------------------------- 1 | [kubernetes] 2 | name=Kubernetes 3 | baseurl={{ kube_baseurl }} 4 | enabled=1 5 | gpgcheck=0 6 | repo_gpgcheck=0 -------------------------------------------------------------------------------- /playbooks/ka-init/init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prerequisites 3 | hosts: all 4 | tasks: 5 | - name: include group vars 6 | include_vars: group_vars/all.yml 7 | -------------------------------------------------------------------------------- /roles/bridge-setup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart network 4 | service: 5 | name: network 6 | state: restarted 7 | listen: 'restart net' 8 | -------------------------------------------------------------------------------- /roles/kube-install/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | binary_install: false 3 | artifacts_sync_path: /opt/k8s/artifacts 4 | kubectl_home: /home/centos 5 | skip_reboot: true 6 | -------------------------------------------------------------------------------- /playbooks/allhost-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - import_playbook: virthost-setup.yml 5 | 6 | - import_playbook: bmhost-setup.yml 7 | 8 | -------------------------------------------------------------------------------- /inventory/examples/virthost/virthost.inventory: -------------------------------------------------------------------------------- 1 | vmhost ansible_host=127.0.0.1 ansible_ssh_user=root ansible_python_interpreter=/usr/bin/python3 2 | 3 | [virthost] 4 | vmhost 5 | 
-------------------------------------------------------------------------------- /roles/multus-2nics-setup/templates/ifcfg-eth1.1.j2: -------------------------------------------------------------------------------- 1 | DEVICE=eth1.1 2 | BOOTPROTO=static 3 | USERCTL=no 4 | VLAN=yes 5 | IPADDR=172.16.1.1 6 | NETMASK=255.255.255.0 7 | ONBOOT=yes 8 | -------------------------------------------------------------------------------- /playbooks/vm-teardown.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - hosts: virthost 5 | tasks: [] 6 | roles: 7 | - { role: redhat-nfvpe.vm-spinup, teardown: true } 8 | -------------------------------------------------------------------------------- /roles/kube-install/tasks/variables.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include variables for Red Hat systems 3 | include_vars: "{{ ansible_os_family }}-{{ ansible_distribution_major_version }}.yml" 4 | -------------------------------------------------------------------------------- /roles/kube-install/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart kubelet 3 | systemd: 4 | name: kubelet 5 | state: restarted 6 | daemon_reload: yes 7 | listen: 'restart kubelet' 8 | -------------------------------------------------------------------------------- /inventory/examples/multus-2nics/extra-vars.yml: -------------------------------------------------------------------------------- 1 | # Note: 2 | # You need to set -e 'network_type=2nics' in case of playbooks/virthost-setup.yml 3 | # to create 2nic environment. 
4 | --- 5 | multus_version: "master" 6 | -------------------------------------------------------------------------------- /roles/bridge-setup/templates/ifcfg-cni0.j2: -------------------------------------------------------------------------------- 1 | DEVICE=cni0 2 | TYPE=Bridge 3 | ONBOOT=yes 4 | BOOTPROTO="dhcp" 5 | PEERDNS="yes" 6 | IPV6INIT="no" 7 | PERSISTENT_DHCLIENT="1" 8 | NM_CONTROLLED=no 9 | DELAY=0 10 | -------------------------------------------------------------------------------- /playbooks/kube-teardown.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - hosts: nodes,master 5 | become: true 6 | become_user: root 7 | tasks: [] 8 | roles: 9 | - { role: kube-teardown } 10 | -------------------------------------------------------------------------------- /playbooks/ka-monitoring/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Initialize the run 3 | import_playbook: ../ka-init/init.yml 4 | 5 | # Adds monitoring to the system by using Prometheus Operator 6 | - import_playbook: private/config.yml 7 | -------------------------------------------------------------------------------- /roles/kubectl-proxy-systemd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Restart proxy when changed 2 | systemd: 3 | name: kubectl-proxy 4 | state: restarted 5 | daemon_reload: yes 6 | listen: template_kubectlproxy_unit_changed 7 | -------------------------------------------------------------------------------- /playbooks/ka-monitoring/private/uninstall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | tasks: 4 | - name: Run prometheus-operator uninstall role 5 | include_role: 6 | name: redhat-nfvpe.prometheus-operator 7 | tasks_from: uninstall.yml 8 | 
-------------------------------------------------------------------------------- /roles/optional-packages/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install optional packages 3 | package: 4 | name: "{{ item }}" 5 | state: present 6 | with_items: "{{ optional_packages }}" 7 | when: optional_packages is defined and optional_packages.0 is defined -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | retry_files_enabled = False 4 | deprecation_warnings = False 5 | roles_path = ./roles 6 | stdout_callback = skippy 7 | log_path=./logfile 8 | 9 | [paramiko_connection] 10 | record_host_keys = False 11 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Bradley Watkins 2 | dougbtv 3 | dougbtv 4 | Doug Smith 5 | Feng Pan 6 | Leif Madsen 7 | Leif Madsen 8 | Tomofumi Hayashi 9 | -------------------------------------------------------------------------------- /playbooks/ka-monitoring/private/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Prometheus Operator 3 | hosts: master 4 | tasks: 5 | - import_role: 6 | name: redhat-nfvpe.prometheus-operator 7 | tasks_from: install.yml 8 | when: 9 | - monitoring_install | default(False) | bool 10 | -------------------------------------------------------------------------------- /roles/kubectl-proxy-systemd/templates/kubectl-proxy.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubectl-proxy 3 | 4 | [Service] 5 | User={{ kubectl_user }} 6 | Group={{ kubectl_group }} 7 | ExecStart=/bin/bash -c "source {{ kubectl_home }}/.bashrc && 
/usr/bin/kubectl proxy --port={{ kubectl_proxy_port }}" 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | 12 | -------------------------------------------------------------------------------- /playbooks/fedora-python-bootstrapper.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master,nodes 3 | become: true 4 | become_user: root 5 | gather_facts: False 6 | tasks: 7 | # https://trello.com/c/XaiXEocS/239-bz-to-track-adding-python-to-the-fedora-cloud-images 8 | - name: install python2 and dnf stuff 9 | raw: (dnf -y install python-dnf python2-dnf libselinux-python) 10 | -------------------------------------------------------------------------------- /roles/kube-teardown/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Teardown kube nodes 3 | shell: > 4 | kubeadm reset -f 5 | 6 | - name: Remove all semaphores 7 | file: 8 | dest: "{{ item }}" 9 | state: absent 10 | with_items: 11 | - "/etc/.kubeadm-complete" 12 | - "{{ kubectl_home }}/.kubeadm-podnetwork-complete" 13 | - "/etc/.kubeadm-joined" 14 | - "{{ kubectl_home }}/admin.conf" 15 | -------------------------------------------------------------------------------- /inventory/ci/virthost2.home.61will.space/vms.local: -------------------------------------------------------------------------------- 1 | kube-master ansible_host=192.168.3.100 2 | kube-node-1 ansible_host=192.168.3.101 3 | kube-node-2 ansible_host=192.168.3.102 4 | 5 | [master] 6 | kube-master 7 | 8 | [nodes] 9 | kube-node-1 10 | kube-node-2 11 | 12 | [all:vars] 13 | ansible_user=centos 14 | ansible_ssh_common_args='-o StrictHostKeyChecking=no' 15 | ansible_ssh_private_key_file=/home/jenkins/.ssh/id_rsa 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | *.swp 3 | *.swo 4 | 5 | # ignore 
inventory except for the example inventory (allow for override) 6 | inventory/* 7 | !inventory/examples/ 8 | !inventory/ci/ 9 | 10 | # ignore externally installed roles 11 | roles/install-go 12 | roles/install-docker 13 | roles/auto-kube-dev 14 | roles/redhat-nfvpe.vm-spinup 15 | roles/redhat-nfvpe.prometheus-operator 16 | roles/oVirt.* 17 | 18 | #ignore locally generated logfile 19 | logfile 20 | -------------------------------------------------------------------------------- /playbooks/virthost-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - hosts: virthost 5 | tasks: 6 | - name: Reload dnsmasq config, so that VM's DNS cache can be refreshed. 7 | block: 8 | - debug: 9 | msg: "Reloading dnsmasq config." 10 | - name: Reload dnsmasq config 11 | command: pkill -HUP dnsmasq 12 | tags: dns-workaround 13 | 14 | roles: 15 | - { role: redhat-nfvpe.vm-spinup } 16 | -------------------------------------------------------------------------------- /contrib/scripts/changelog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "# Release $2" > /tmp/changelog.top 4 | git log $1...$2 --pretty=format:'* [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/%H) -- %s ' --reverse | grep -v "Merge" >> /tmp/changelog.top 5 | echo -e "\n\n" >> /tmp/changelog.top 6 | sed -i 's/[[:space:]]*$//' /tmp/changelog.top 7 | 8 | cat CHANGELOG.md >> /tmp/changelog.top 9 | mv /tmp/changelog.top CHANGELOG.md 10 | rm -f /tmp/changelog.top 11 | -------------------------------------------------------------------------------- /playbooks/ka-init/group_vars/builder.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ---------------------- 3 | # builder vars 4 | # ---------------------- 5 | # Hardware checks. 
6 | perform_hardware_checks: true 7 | recommend_ram_mb: 24576 8 | recommend_free_disk_gb: 36.1 9 | 10 | # builder setup 11 | use_planter: true 12 | skip_builder_setup: false 13 | skip_build: false 14 | 15 | # Kube repo & version 16 | kubernetes_repo_url: https://github.com/kubernetes/kubernetes.git 17 | kubernetes_version: master 18 | -------------------------------------------------------------------------------- /roles/kube-install/tasks/system_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Disable SELinux :(" 3 | selinux: 4 | state: disabled 5 | 6 | - name: "reboot machine" 7 | reboot: 8 | reboot_timeout: 600 9 | when: not skip_reboot 10 | 11 | - name: "Stop iptables :(" 12 | service: 13 | name: "{{ __firewall_service }}" 14 | state: stopped 15 | ignore_errors: yes 16 | 17 | - name: "Disable iptables :(" 18 | service: 19 | name: "{{ __firewall_service }}" 20 | enabled: no 21 | ignore_errors: yes 22 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - src: https://github.com/redhat-nfvpe/ansible-role-install-go 3 | name: install-go 4 | version: master 5 | - src: https://github.com/redhat-nfvpe/ansible-role-install-docker 6 | name: install-docker 7 | version: master 8 | - src: https://github.com/redhat-nfvpe/ansible-role-vm-spinup 9 | name: redhat-nfvpe.vm-spinup 10 | version: master 11 | - src: https://github.com/redhat-nfvpe/ansible-role-prometheus-operator 12 | name: redhat-nfvpe.prometheus-operator 13 | version: master 14 | -------------------------------------------------------------------------------- /roles/kubectl-proxy-systemd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Creates a systemd unit and starts kubectl proxy. 
2 | # https://kubernetes.io/docs/user-guide/kubectl/v1.8/#proxy 3 | 4 | - name: Template kubectl proxy systemd unit 5 | template: 6 | src: kubectl-proxy.service.j2 7 | dest: /etc/systemd/system/kubectl-proxy.service 8 | register: template_kubectlproxy_unit 9 | notify: template_kubectlproxy_unit_changed 10 | 11 | - name: Always ensure proxy is started 12 | systemd: 13 | name: kubectl-proxy 14 | state: started 15 | daemon_reload: yes 16 | -------------------------------------------------------------------------------- /roles/multus-2nics-setup/templates/ifcfg-eth1.j2: -------------------------------------------------------------------------------- 1 | DEVICE=eth1 2 | NAME=eth1 3 | USERCTL=no 4 | {% if ansible_hostname == 'kube-master' %} 5 | BOOTPROTO=static 6 | ONBOOT=yes 7 | IPADDR=10.1.1.1 8 | NETMASK=255.255.255.0 9 | {% elif ansible_hostname == 'kube-node-1' %} 10 | BOOTPROTO=static 11 | ONBOOT=yes 12 | IPADDR=10.1.1.11 13 | NETMASK=255.255.255.0 14 | {% elif ansible_hostname == 'kube-node-2' %} 15 | BOOTPROTO=static 16 | ONBOOT=yes 17 | IPADDR=10.1.1.12 18 | NETMASK=255.255.255.0 19 | {% elif ansible_hostname == 'kube-node-3' %} 20 | BOOTPROTO=static 21 | ONBOOT=yes 22 | IPADDR=10.1.1.13 23 | NETMASK=255.255.255.0 24 | {% else %} 25 | ONBOOT=no 26 | {% endif %} 27 | -------------------------------------------------------------------------------- /roles/kube-niceties/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check that kubectl is available 4 | stat: 5 | path: /bin/kubectl 6 | register: kubectl_stat 7 | 8 | - name: Setup kubectl bash completion 9 | block: 10 | - name: Install bash-completion 11 | become: true 12 | become_user: root 13 | package: 14 | name: bash-completion 15 | state: present 16 | 17 | - name: Add kubectl bash completion to .bash_profile 18 | lineinfile: 19 | dest: "{{ ansible_env.HOME }}/.bash_profile" 20 | line: "source <(kubectl completion bash)" 21 | insertafter: 
"export PATH" 22 | when: kubectl_bash_completion and kubectl_stat.stat.exists 23 | -------------------------------------------------------------------------------- /roles/ovnkube-setup/files/ovnkube-node.diff: -------------------------------------------------------------------------------- 1 | *** ovnkube-node.yaml Wed Jan 13 15:22:13 2021 2 | --- ovnkube-node.yaml.mod Wed Jan 13 15:34:55 2021 3 | *************** 4 | *** 101,107 **** 5 | securityContext: 6 | runAsUser: 0 7 | privileged: true 8 | ! 9 | 10 | terminationMessagePolicy: FallbackToLogsOnError 11 | volumeMounts: 12 | --- 101,108 ---- 13 | securityContext: 14 | runAsUser: 0 15 | privileged: true 16 | ! capabilities: 17 | ! add: ["NET_BIND_SERVICE", "NET_ADMIN", "NET_RAW", "SYS_RAWIO", "SYS_CHROOT", "SYS_ADMIN", "SYS_PTRACE"] 18 | 19 | terminationMessagePolicy: FallbackToLogsOnError 20 | volumeMounts: 21 | -------------------------------------------------------------------------------- /roles/lb-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: "Disable SELinux :(" 2 | selinux: 3 | state: disabled 4 | 5 | - name: Install haproxy 6 | package: 7 | name: haproxy 8 | state: present 9 | 10 | - name: check if haproxy config file exist 11 | stat: 12 | path: /etc/haproxy/haproxy.cfg 13 | register: file_status 14 | delegate_to: localhost 15 | 16 | - name: backup original 17 | copy: 18 | src: /etc/haproxy/haproxy.cfg 19 | dest: /etc/haproxy/haproxy.cfg.orig 20 | when: file_status.stat.exists 21 | 22 | - name: create haproxy.cfg 23 | template: 24 | src: haproxy.cfg.j2 25 | dest: /etc/haproxy/haproxy.cfg 26 | 27 | - name: start haproxy 28 | systemd: 29 | name: haproxy.service 30 | state: started 31 | enabled: yes 32 | -------------------------------------------------------------------------------- /inventory/examples/vms/vms.inventory: -------------------------------------------------------------------------------- 1 | kube-master 
ansible_host=192.168.1.155 2 | kube-node-1 ansible_host=192.168.1.168 3 | kube-node-2 ansible_host=192.168.1.65 4 | kube-node-3 ansible_host=192.168.1.149 5 | 6 | [master] 7 | kube-master 8 | 9 | [nodes] 10 | kube-node-1 11 | kube-node-2 12 | kube-node-3 13 | 14 | [master:vars] 15 | ansible_ssh_user=centos 16 | # ansible_become=true 17 | # ansible_become_user=root 18 | # ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p root@192.168.1.119"' 19 | ansible_ssh_private_key_file=/home/doug/.ssh/id_openshift_hosts 20 | 21 | [nodes:vars] 22 | ansible_ssh_user=centos 23 | # ansible_become=true 24 | # ansible_become_user=root 25 | # ansible_ssh_common_args='-o ProxyCommand="ssh -W %h:%p root@192.168.1.119"' 26 | ansible_ssh_private_key_file=/home/doug/.ssh/id_openshift_hosts 27 | -------------------------------------------------------------------------------- /roles/kube-master-join-cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Default cri-o flags to empty 2 | set_fact: 3 | arg_crio: "" 4 | 5 | - name: Set cri-o flags 6 | set_fact: 7 | arg_crio: "--ignore-preflight-errors=all" 8 | when: container_runtime == "crio" 9 | 10 | - name: Default cri-o flags to empty 11 | set_fact: 12 | kubeadm_master_cert: "" 13 | 14 | - name: set master config in kubeadm option 15 | set_fact: 16 | kubeadm_master_cert: "--control-plane --certificate-key {{ kubeadm_cert_key }}" 17 | 18 | - name: Join each node to the master with the join command 19 | shell: > 20 | {{ kubeadm_join_command }} {{ kubeadm_master_cert }} 21 | args: 22 | creates: /etc/.kubeadm-joined 23 | 24 | - name: Mark the nodes as joined 25 | file: 26 | path: /etc/.kubeadm-joined 27 | state: directory 28 | -------------------------------------------------------------------------------- /roles/kube-init/templates/kubeadm.cfg.v1alpha.j2: -------------------------------------------------------------------------------- 1 | # Full parameters @ 
https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/ 2 | apiVersion: kubeadm.k8s.io/v1alpha1 3 | kind: MasterConfiguration 4 | {% if control_plane_listen_all %} 5 | controllerManagerExtraArgs: 6 | address: 0.0.0.0 7 | schedulerExtraArgs: 8 | address: 0.0.0.0 9 | {% endif %} 10 | {% if enable_device_plugins %} 11 | apiServerExtraArgs: 12 | feature-gates: DevicePlugins=true 13 | {% endif %} 14 | networking: 15 | podSubnet: {{ pod_network_cidr }}/16 16 | kubeletConfiguration: 17 | baseConfig: 18 | cgroupDriver: systemd 19 | # kubeletCgroups: /systemd/system.slice 20 | # unsure if this fits for: runtime-cgroups 21 | # systemCgroups: /systemd/system.slice 22 | {% if container_runtime == "crio" %} 23 | criSocket: /var/run/crio/crio.sock 24 | {% endif %} 25 | -------------------------------------------------------------------------------- /roles/cri-o-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Force install by removing semaphor. 
4 | file: 5 | dest: /etc/.crio-installed 6 | state: absent 7 | when: crio_force|default(false) 8 | 9 | - name: Check for .crio-installed semaphor 10 | stat: 11 | path: /etc/.crio-installed 12 | register: crio_semaphor 13 | 14 | - name: Include the crio install plays 15 | block: 16 | - name: Get package and install crio 17 | block: 18 | - include: binary_install.yml 19 | when: not crio_use_copr|bool 20 | - include: pkg_copr_install.yml 21 | when: crio_use_copr|bool 22 | when: not crio_build_install|bool 23 | - name: Build and install crio 24 | include: build_install.yml 25 | when: crio_build_install|bool 26 | when: not crio_semaphor.stat.exists 27 | 28 | - name: Set .crio-installed semaphor file 29 | file: 30 | dest: /etc/.crio-installed 31 | state: directory 32 | -------------------------------------------------------------------------------- /roles/configure-kubectl/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # -------- Configure kubectl ------------- 2 | # It does the following 3 | # sudo cp /etc/kubernetes/admin.conf $HOME/.kube 4 | # sudo chown $(id -u):$(id -g) $HOME/.kube/admin.conf 5 | # export KUBECONFIG=$HOME/.kube/admin.conf 6 | 7 | - name: Ensure .kube folder exists 8 | file: 9 | path: "{{ kubectl_home }}/.kube/" 10 | state: directory 11 | owner: "{{ kubectl_user }}" 12 | group: "{{ kubectl_group }}" 13 | mode: 0755 14 | 15 | - name: Copy admin.conf to kubectl user's home 16 | shell: > 17 | cp -f /etc/kubernetes/admin.conf {{ kubectl_home }}/.kube/admin.conf 18 | args: 19 | creates: "{{ kubectl_home }}/admin.conf" 20 | 21 | - name: Set admin.conf ownership 22 | file: 23 | path: "{{ kubectl_home }}/.kube/admin.conf" 24 | owner: "{{ kubectl_user }}" 25 | group: "{{ kubectl_group }}" 26 | 27 | - name: Add KUBECONFIG env for admin.conf to .bashrc 28 | lineinfile: 29 | dest: "{{ kubectl_home }}/.bashrc" 30 | regexp: "KUBECONFIG" 31 | line: "export KUBECONFIG={{ kubectl_home }}/.kube/admin.conf" 32 | 
-------------------------------------------------------------------------------- /roles/kube-init/templates/kubeadm.cfg.v1beta.j2: -------------------------------------------------------------------------------- 1 | # Full parameters @ https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/ 2 | # for v1.13 (https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1) 3 | kind: ClusterConfiguration 4 | apiVersion: kubeadm.k8s.io/v1beta1 5 | apiServer: 6 | extraArgs: 7 | enable-admission-plugins: NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook 8 | networking: 9 | podSubnet: {{ pod_network_cidr }}/16 10 | {% if control_plane_listen_all %} 11 | controllerManager: 12 | extraArgs: 13 | address: 0.0.0.0 14 | scheduler: 15 | extraArgs: 16 | address: 0.0.0.0 17 | {% endif %} 18 | {% if enable_device_plugins %} 19 | featureGates: 20 | DevicePlugins: true 21 | {% endif %} 22 | --- 23 | kind: InitConfiguration 24 | apiVersion: kubeadm.k8s.io/v1beta1 25 | {% if container_runtime == "crio" %} 26 | nodeRegistration: 27 | criSocket: /var/run/crio/crio.sock 28 | {% endif %} 29 | --- 30 | kind: KubeletConfiguration 31 | apiVersion: kubelet.config.k8s.io/v1beta1 32 | {% if container_runtime == "crio" %} 33 | cgroupDriver: systemd 34 | {% endif %} 35 | -------------------------------------------------------------------------------- /roles/kube-join-cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # - debug: "msg={{ kubeadm_join_command }}" 3 | 4 | - name: Set skip_preflight_checks args if need be. 5 | set_fact: 6 | arg_crio: "--ignore-preflight-errors=all" 7 | when: skip_preflight_checks 8 | 9 | - name: Set cri-o args if need be. 
10 | set_fact: 11 | arg_crio: "--ignore-preflight-errors=all --cri-socket=\\/var\\/run\\/crio\\/crio.sock" 12 | when: container_runtime == "crio" 13 | 14 | - name: Change the given command 15 | shell: > 16 | set -o pipefail && echo {{ kubeadm_join_command }} | sed -e 's/join/join {{ arg_crio }} /' 17 | register: modified_command 18 | when: container_runtime == "crio" or skip_preflight_checks 19 | 20 | - name: Change the kubeadm_join_command fact when crio 21 | set_fact: 22 | kubeadm_join_command: "{{ modified_command.stdout }}" 23 | when: container_runtime == "crio" or skip_preflight_checks 24 | 25 | - name: Join each node to the master with the join command 26 | shell: > 27 | {{ kubeadm_join_command }} 28 | args: 29 | creates: /etc/.kubeadm-joined 30 | 31 | - name: Mark the nodes as joined 32 | file: 33 | path: /etc/.kubeadm-joined 34 | state: directory 35 | -------------------------------------------------------------------------------- /roles/bridge-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install bridge-utils and firewalld # noqa package-latest 3 | yum: 4 | name: bridge-utils,firewalld 5 | state: latest 6 | 7 | - name: enable firewalld 8 | systemd: 9 | name: firewalld 10 | state: started 11 | enabled: yes 12 | 13 | - name: Accept bridge forwarding and accept packets # noqa no-changed-when 14 | shell: > 15 | firewall-cmd --permanent --direct --passthrough ipv4 -I FORWARD -m physdev --physdev-is-bridged -j ACCEPT; 16 | firewall-cmd --permanent --direct --passthrough ipv4 -D INPUT -j REJECT --reject-with icmp-host-prohibited 17 | 18 | - name: Enable IPv4 Forwarding 19 | sysctl: 20 | name: net.ipv4.conf.all.forwarding 21 | value: 1 22 | sysctl_set: yes 23 | state: present 24 | 25 | - name: Configure cni0 for control plane 26 | template: 27 | src: ifcfg-cni0.j2 28 | dest: /etc/sysconfig/network-scripts/ifcfg-cni0 29 | 30 | - name: Configure eth0 for control plane 31 | template: 32 | 
src: ifcfg-eth0.j2 33 | dest: /etc/sysconfig/network-scripts/ifcfg-eth0 34 | notify: "restart net" 35 | 36 | # Note: 37 | # "restart net" sends DHCP request through eth0 for cni0. 38 | # Currently DHCP client will use eth0's MAC, hence IP 39 | # address should not changed. 40 | -------------------------------------------------------------------------------- /roles/kube-install/tasks/binary_install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check for download complete semaphor 4 | stat: 5 | path: "{{ kubectl_home }}/.kube-binary-download-complete" 6 | register: download_complete_semaphor 7 | 8 | - name: Delete existing binaries when necessary 9 | file: 10 | path: "{{ item.path }}" 11 | state: absent 12 | with_items: 13 | - path: /usr/bin/kubelet 14 | url_is_set: "{{ binary_kubelet_url is defined }}" 15 | - path: /usr/bin/kubectl 16 | url_is_set: "{{ binary_kubectl_url is defined }}" 17 | - path: /usr/bin/kubeadm 18 | url_is_set: "{{ binary_kubeadm_url is defined }}" 19 | when: > 20 | item.url_is_set and 21 | (not download_complete_semaphor.stat.exists|bool or binary_install_force_redownload) 22 | 23 | - name: Download kubelet/kubectl/kubeadm 24 | get_url: 25 | url: "{{ item.use_url }}" 26 | dest: "{{ item.to_path }}" 27 | mode: 0755 28 | force: "{{ binary_install_force_redownload }}" 29 | when: binary_kubelet_url is defined 30 | with_items: 31 | - use_url: "{{ binary_kubelet_url }}" 32 | to_path: "/usr/bin/kubelet" 33 | - use_url: "{{ binary_kubeadm_url }}" 34 | to_path: "/usr/bin/kubeadm" 35 | - use_url: "{{ binary_kubectl_url }}" 36 | to_path: "/usr/bin/kubectl" 37 | 38 | - name: Mark download complete 39 | file: 40 | path: "{{ kubectl_home }}/.kube-binary-download-complete" 41 | state: directory 42 | -------------------------------------------------------------------------------- /roles/multus-2nics-setup/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure requried packages for cri-o 3 | package: 4 | name: "{{ item }}" 5 | state: present 6 | with_items: 7 | - NetworkManager 8 | 9 | - name: Enable NetworkManager 10 | systemd: 11 | name: NetworkManager.service 12 | daemon_reload: yes 13 | state: started 14 | enabled: yes 15 | 16 | - name: create /etc/cni/multus/net.d 17 | file: 18 | path: /etc/cni/multus/net.d 19 | state: directory 20 | recurse: yes 21 | owner: root 22 | group: root 23 | mode: 0755 24 | 25 | - name: Delete automatically created connection # noqa no-changed-when 26 | shell: | 27 | nmcli d disconnect eth1 && \ 28 | nmcli c delete 'Wired connection 1' 29 | ignore_errors: true 30 | 31 | - name: Configure eth1 for data plane 32 | template: 33 | src: ifcfg-eth1.j2 34 | dest: /etc/sysconfig/network-scripts/ifcfg-eth1 35 | 36 | - name: Configure eth1.1 for data plane 37 | template: 38 | src: ifcfg-eth1.1.j2 39 | dest: /etc/sysconfig/network-scripts/ifcfg-eth1.1 40 | when: "'master' in group_names" 41 | 42 | - name: Reload NetworkManager connection # noqa no-changed-when 43 | command: nmcli connection reload 44 | 45 | - name: bring up eth1 # noqa no-changed-when 46 | command: nmcli c up eth1 47 | 48 | - name: rename eth1.1 connection name 49 | command: nmcli c modify 'Vlan eth1.1' connection.id 'eth1.1' 50 | when: "'master' in group_names" 51 | 52 | - name: bring up eth1.1 53 | command: nmcli c up 'eth1.1' 54 | when: "'master' in group_names" 55 | -------------------------------------------------------------------------------- /inventory/examples/crio/crio.inventory: -------------------------------------------------------------------------------- 1 | # This file is used for build/install crio, with crio_build_install 2 | kube-master ansible_host=master.example.local 3 | kube-node-1 ansible_host=node.example.local 4 | vmhost ansible_host=virt-host.example.local ansible_ssh_user=root 5 | 6 | [virthost] 7 | 
vmhost 8 | 9 | [virthost:vars] 10 | # Using Fedora 11 | centos_genericcloud_url=https://download.fedoraproject.org/pub/fedora/linux/releases/26/CloudImages/x86_64/images/Fedora-Cloud-Base-26-1.5.x86_64.qcow2 12 | image_destination_name=Fedora-Cloud-Base-26-1.5.x86_64.qcow2 13 | set_root_disk_gb=18 14 | 15 | [master] 16 | kube-master 17 | 18 | [nodes] 19 | kube-node-1 20 | 21 | [master:vars] 22 | # Using Fedora 23 | ansible_ssh_user=fedora 24 | ansible_ssh_private_key_file=/home/itsme/.ssh/id_testvms 25 | kubectl_home=/home/fedora 26 | kubectl_user=fedora 27 | kubectl_group=fedora 28 | # Using CRI-O (you must set this as an extra var, e.g. `-e "container_runtime=crio"`) 29 | # crio_build_install=true 30 | # container_runtime=crio 31 | 32 | [nodes:vars] 33 | # Using Fedora 34 | ansible_ssh_user=fedora 35 | ansible_ssh_private_key_file=/home/itsme/.ssh/id_testvms 36 | kubectl_home=/home/fedora 37 | kubectl_user=fedora 38 | kubectl_group=fedora 39 | # Using CRI-O (you must set this as an extra var, e.g. `-e "container_runtime=crio"`) 40 | # crio_build_install=true 41 | # container_runtime=crio 42 | 43 | # Need to set crio_versions for installed kubernetes version, see following URL for details. 
44 | # https://github.com/kubernetes-incubator/cri-o#compatibility-matrix-cri-o---kubernetes-clusters 45 | # crio_versions=v1.11.1 46 | -------------------------------------------------------------------------------- /roles/ovn-setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: update kernel 2 | block: 3 | - name: rpm import 4 | rpm_key: 5 | state: present 6 | key: https://www.elrepo.org/RPM-GPG-KEY-elrepo.org 7 | - name: install elrepo 8 | yum: 9 | name: https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm 10 | state: present 11 | - name: install kernel-ml 12 | yum: 13 | name: kernel-ml 14 | enablerepo: elrepo-kernel 15 | state: present 16 | - name: update grub default conf 17 | lineinfile: 18 | regexp: "GRUB_DEFAULT=saved" 19 | dest: /etc/default/grub 20 | line: "GRUB_DEFAULT=0" 21 | - name: update grub conf 22 | command: grub2-mkconfig -o /boot/grub2/grub.cfg 23 | - name: "reboot machine" 24 | reboot: 25 | reboot_timeout: 600 26 | when: ansible_kernel is version('4.6','<') 27 | 28 | - name: install openvswitch and required dependencies on the master/worker nodes 29 | block: 30 | - name: install openstack repo 31 | yum_repository: 32 | name: openstack-train 33 | description: openstack-train 34 | file: openstack-train 35 | baseurl: http://mirror.centos.org/centos/7/cloud/x86_64/openstack-train/ 36 | gpgcheck: no 37 | - name: install openvswitch 38 | yum: 39 | name: 40 | - openvswitch 41 | enablerepo: openstack-train 42 | state: present 43 | - name: install libibverbs 44 | yum: 45 | name: libibverbs 46 | state: present 47 | - name: enable epel 48 | yum: 49 | name: epel-release 50 | state: present 51 | - name: enable python-pip 52 | yum: 53 | name: python-pip 54 | state: present 55 | enablerepo: epel 56 | 57 | - name: install git 58 | yum: 59 | name: git 60 | state: present 61 | 62 | -------------------------------------------------------------------------------- 
/playbooks/bmhost-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - hosts: bmhosts 5 | become: true 6 | become_user: root 7 | gather_facts: True 8 | tasks: 9 | - name: Turn off swap 10 | shell: swapoff -a 11 | when: ansible_virtualization_role == "host" 12 | 13 | # Fedora cloud image doesn't necessarily come with Python2 by default, so... 14 | # https://trello.com/c/XaiXEocS/239-bz-to-track-adding-python-to-the-fedora-cloud-images 15 | - name: Check to see if there is a separate BM host. 16 | set_fact: 17 | - name: set host_type 18 | set_fact: 19 | host_type: "{{ host_type | default('centos') }}" 20 | - name: install epel, python2, and dnf stuff 21 | raw: (dnf -y install epel-release python-dnf python2-dnf libselinux-python) 22 | when: host_type == "fedora"|default("centos") 23 | - name: install packages 24 | package: 25 | name: ansible 26 | state: present 27 | 28 | # We will run NFV workloads and cluster and are using user space networking. 29 | # We may need to install provision and compile specific versions of dpdk such as 30 | # 1811 from upstream. 31 | - name: install dpdk 32 | package: 33 | name: dpdk 34 | state: present 35 | 36 | # Add all bm hosts to inventory group nodes. 37 | - name: Add bmhosts to inventory group nodes 38 | add_host: 39 | hostname: "{{ [bmhosts] }}" 40 | groups: 41 | - nodes 42 | 43 | # Add bmhosts to nodes in all inventory file. 
44 | # Nodes have already been written by set up vmhost play 45 | - name: add bmhosts to nodes in all inventory file 46 | connection: local 47 | command: 48 | sed -i '/\[nodes/a {{item}}' {{ playbook_dir }}/../inventory/{{ all_inventory }} 49 | args: 50 | warn: false 51 | with_inventory_hostnames: 52 | - all:bmhosts 53 | -------------------------------------------------------------------------------- /Jenkinsfile: -------------------------------------------------------------------------------- 1 | pipeline { 2 | agent { 3 | label 'jenkins-agent-ansible-274-centos7' 4 | } 5 | 6 | environment { 7 | ENGINE_CREDS = credentials('engine-management-creds') 8 | VM_SSH_KEY = credentials('ka-public-ssh-key') 9 | ANSIBLE_SSH_PASS = credentials('ka-ansible-ssh-pass') 10 | } 11 | 12 | stages { 13 | stage('Environment setup and prerequisites') { 14 | steps { 15 | echo "Running ${env.BUILD_ID} on ${env.JENKINS_URL}" 16 | sh 'ansible-galaxy install -r requirements.yml' 17 | sh "ansible-playbook -c local -i inventory/ci/virthost2.home.61will.space/engine.yml playbooks/ovirt_vm_infra.yml -e 'vm_state=absent'" 18 | } 19 | } 20 | 21 | stage('Build virtual machines') { 22 | steps { 23 | sh "ansible-playbook -c local -i inventory/ci/virthost2.home.61will.space/engine.yml playbooks/ovirt_vm_infra.yml -e 'vm_state=running'" 24 | } 25 | } 26 | 27 | stage('Install Kubernetes') { 28 | steps { 29 | sh "mkdir -p /home/jenkins/.ssh && chmod 0700 /home/jenkins/.ssh" 30 | // for now we're copying the keys over, but they are not used because I can't get the container image to ssh via keys into the virtual machines 31 | configFileProvider([configFile(fileId: 'kube-ansible-ssh-privkey', targetLocation: '/home/jenkins/.ssh/id_rsa')]) {} 32 | configFileProvider([configFile(fileId: 'kube-ansible-ssh-pubkey', targetLocation: '/home/jenkins/.ssh/id_rsa.pub')]) {} 33 | sh "ansible-playbook -e ansible_ssh_pass=${env.ANSIBLE_SSH_PASS} -i 
inventory/ci/virthost2.home.61will.space/vms.local playbooks/kube-install.yml" 34 | } 35 | } 36 | } 37 | post { 38 | always { 39 | echo "All done." 40 | } 41 | } 42 | } 43 | 44 | // vim: ft=groovy shiftwidth=4 tabstop=4 expandtab smartindent : 45 | -------------------------------------------------------------------------------- /roles/kube-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Setup variables 3 | - include: variables.yml 4 | 5 | - import_tasks: system_setup.yml 6 | 7 | - name: Template kube RPM repo 8 | template: 9 | src: kubernetes.repo.j2 10 | dest: /etc/yum.repos.d/kubernetes.repo 11 | 12 | - name: Default kube version fact 13 | set_fact: 14 | kube_version_parameter: "" 15 | 16 | - name: Set kube version fact to specific version when specified 17 | set_fact: 18 | kube_version_parameter: "-{{ kube_version }}" 19 | when: kube_version != "latest" 20 | 21 | - name: copy/install Kubernetes packages 22 | block: 23 | - name: copy rpm file 24 | copy: 25 | src: "{{ item }}" 26 | dest: /root 27 | owner: root 28 | group: root 29 | mode: "u=rw,g=r,o=r" 30 | with_items: 31 | - "{{ k8s_use_rpms }}/kubectl.rpm" 32 | - "{{ k8s_use_rpms }}/kubelet.rpm" 33 | - "{{ k8s_use_rpms }}/kubeadm.rpm" 34 | - "{{ k8s_use_rpms }}/kubernetes-cni.rpm" 35 | - name: Install required packages for Kubernetes deployment 36 | package: 37 | name: "{{ item }}" 38 | state: present 39 | with_items: 40 | - "/root/kubectl.rpm" 41 | - "/root/kubelet.rpm" 42 | - "/root/kubeadm.rpm" 43 | - "/root/kubernetes-cni.rpm" 44 | when: k8s_use_rpms is defined 45 | 46 | - name: Install required packages for Kubernetes deployment 47 | package: 48 | name: "{{ item }}" 49 | state: present 50 | with_items: 51 | - kubectl{{ kube_version_parameter }} 52 | - kubelet{{ kube_version_parameter }} 53 | - kubeadm{{ kube_version_parameter }} 54 | - kubernetes-cni 55 | when: k8s_use_rpms is not defined 56 | 57 | - name: Optionally include playbook for 
binary install 58 | include: binary_install.yml 59 | when: binary_install 60 | 61 | - name: Create /etc/cni/net.d 62 | file: 63 | path: /etc/cni/net.d 64 | state: directory 65 | 66 | - name: make sure kubelet is started and enabled 67 | service: 68 | name: "kubelet" 69 | state: started 70 | enabled: yes 71 | -------------------------------------------------------------------------------- /roles/kube-init/templates/audit.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: audit.k8s.io/v1 # This is required. 2 | kind: Policy 3 | # Don't generate audit events for all requests in RequestReceived stage. 4 | omitStages: 5 | - "RequestReceived" 6 | rules: 7 | # Log pod changes at RequestResponse level 8 | - level: RequestResponse 9 | resources: 10 | - group: "" 11 | # Resource "pods" doesn't match requests to any subresource of pods, 12 | # which is consistent with the RBAC policy. 13 | resources: ["pods"] 14 | # Log "pods/log", "pods/status" at Metadata level 15 | - level: Metadata 16 | resources: 17 | - group: "" 18 | resources: ["pods/log", "pods/status"] 19 | 20 | # Don't log requests to a configmap called "controller-leader" 21 | - level: None 22 | resources: 23 | - group: "" 24 | resources: ["configmaps"] 25 | resourceNames: ["controller-leader"] 26 | 27 | # Don't log watch requests by the "system:kube-proxy" on endpoints or services 28 | - level: None 29 | users: ["system:kube-proxy"] 30 | verbs: ["watch"] 31 | resources: 32 | - group: "" # core API group 33 | resources: ["endpoints", "services"] 34 | 35 | # Don't log authenticated requests to certain non-resource URL paths. 36 | - level: None 37 | userGroups: ["system:authenticated"] 38 | nonResourceURLs: 39 | - "/api*" # Wildcard matching. 40 | - "/version" 41 | 42 | # Log the request body of configmap changes in kube-system. 
43 | - level: Request 44 | resources: 45 | - group: "" # core API group 46 | resources: ["configmaps"] 47 | # This rule only applies to resources in the "kube-system" namespace. 48 | # The empty string "" can be used to select non-namespaced resources. 49 | namespaces: ["kube-system"] 50 | 51 | # Log configmap and secret changes in all other namespaces at the Metadata level. 52 | - level: Metadata 53 | resources: 54 | - group: "" # core API group 55 | resources: ["secrets", "configmaps"] 56 | 57 | # Log all other resources in core and extensions at the Request level. 58 | - level: Request 59 | resources: 60 | - group: "" # core API group 61 | - group: "extensions" # Version of group should NOT be included. 62 | 63 | # A catch-all rule to log all other requests at the Metadata level. 64 | - level: Metadata 65 | # Long-running requests like watches that fall under this rule will not 66 | # generate an audit event in RequestReceived. 67 | omitStages: 68 | - "RequestReceived" 69 | -------------------------------------------------------------------------------- /roles/kube-init/templates/kubeadm.cfg.v1beta2.j2: -------------------------------------------------------------------------------- 1 | # Full parameters @ https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm-init/ 2 | # for v1.13 (https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta1) 3 | kind: ClusterConfiguration 4 | apiVersion: kubeadm.k8s.io/v1beta2 5 | apiServer: 6 | extraArgs: 7 | enable-admission-plugins: NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook 8 | {% if enable_auditlog|default(false) %} 9 | audit-log-path: /var/log/kubernetes/audit.log 10 | audit-log-maxage: "30" 11 | audit-log-maxbackup: "10" 12 | audit-log-maxsize: "100" 13 | audit-policy-file: /etc/kubernetes/audit/audit.yaml 14 | authorization-mode: Node,RBAC 15 | timeoutForControlPlane: 4m0s 16 | extraVolumes: 17 | - name: "audit-policy" 18 | hostPath: /etc/kubernetes/audit 19 | mountPath: 
/etc/kubernetes/audit 20 | readOnly: false 21 | type: DirectoryOrCreate 22 | - name: "audit-log" 23 | hostPath: /var/log/kubernetes 24 | mountPath: /var/log/kubernetes 25 | readOnly: false 26 | type: DirectoryOrCreate 27 | {% endif %} 28 | {% if enable_endpointslice|default(false) %} 29 | feature-gates: "EndpointSlice=true" 30 | {% endif %} 31 | networking: 32 | podSubnet: {{ pod_network_cidr }}/16 33 | {% if control_plane_listen_all %} 34 | controllerManager: 35 | extraArgs: 36 | address: 0.0.0.0 37 | scheduler: 38 | extraArgs: 39 | address: 0.0.0.0 40 | {% endif %} 41 | {% if groups.lb is defined and groups.lb|length > 0 %} 42 | {% for node in groups["lb"] %} 43 | controlPlaneEndpoint: {{ hostvars[node]['ansible_host'] }}:6443 44 | {% endfor %} 45 | {% endif %} 46 | {% if enable_endpointslice|default(false) %} 47 | controllerManager: 48 | extraArgs: 49 | feature-gates: "EndpointSlice=true" 50 | {% endif %} 51 | {% if k8s_image_repository|default("") != "" %} 52 | imageRepository: {{ k8s_image_repository }} 53 | {% endif %} 54 | {% if kube_version != "latest" %} 55 | kubernetesVersion: {{ kube_version }} 56 | {% endif %} 57 | --- 58 | kind: InitConfiguration 59 | apiVersion: kubeadm.k8s.io/v1beta2 60 | {% if container_runtime == "crio" %} 61 | nodeRegistration: 62 | criSocket: /var/run/crio/crio.sock 63 | {% if enable_endpointslice|default(false) %} 64 | kubeletExtraArgs: 65 | feature-gates: "EndpointSlice=true" 66 | {% endif %} 67 | {% endif %} 68 | --- 69 | kind: KubeletConfiguration 70 | apiVersion: kubelet.config.k8s.io/v1beta1 71 | {% if container_runtime == "crio" %} 72 | cgroupDriver: systemd 73 | {% endif %} 74 | --- 75 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 76 | kind: KubeProxyConfiguration 77 | {% if enable_endpointslice|default(false) %} 78 | featureGates: 79 | EndpointSlice: true 80 | {% endif %} 81 | -------------------------------------------------------------------------------- /playbooks/kube-install.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - hosts: lb 5 | become: true 6 | become_user: root 7 | tasks: [] 8 | roles: 9 | - { role: lb-setup } 10 | 11 | - hosts: master,nodes,master_slave 12 | become: true 13 | become_user: root 14 | tasks: [] 15 | roles: 16 | - { role: multus-2nics-setup, when: network_type == '2nics' } 17 | - { role: bridge-setup, when: network_type == 'bridge' } 18 | - { role: optional-packages } 19 | # You can add "crio_force: true" if you need to run the builds again. 20 | - { role: cri-o-install, when: container_runtime == 'crio', crio_force: false } 21 | - { role: install-docker, when: container_runtime == 'docker' } 22 | - { role: kube-install } 23 | 24 | - hosts: master,nodes,master_slave 25 | become: true 26 | become_user: root 27 | tasks: 28 | - name: Set ipv4 ip_forward to 1 29 | sysctl: 30 | name: net.ipv4.ip_forward 31 | value: 1 32 | sysctl_set: yes 33 | state: present 34 | reload: yes 35 | - name: Load module if parameter is not in node # noqa no-changed-when 36 | shell: > 37 | if [ ! 
-f /proc/sys/net/bridge/bridge-nf-call-iptables ]; then \ 38 | modprobe br_netfilter; \ 39 | fi 40 | - name: Set bridge-nf-call-iptables to 1 41 | sysctl: 42 | name: net.bridge.bridge-nf-call-iptables 43 | value: 1 44 | sysctl_set: yes 45 | state: present 46 | reload: yes 47 | 48 | - hosts: master 49 | become: true 50 | become_user: root 51 | tasks: [] 52 | roles: 53 | - { role: kube-init } 54 | - { role: configure-kubectl } 55 | 56 | - hosts: master_slave 57 | become: true 58 | become_user: root 59 | pre_tasks: 60 | - name: Get cert related args from master 61 | set_fact: 62 | kubeadm_cert_key: "{{ hostvars[groups['master'][0]]['kubeadm_cert_key'] }}" 63 | - name: Get kubeadm_join_command from master 64 | set_fact: 65 | kubeadm_join_command: "{{ hostvars[groups['master'][0]]['kubeadm_join_command'] }}" 66 | tasks: [] 67 | roles: 68 | - { role: kube-master-join-cluster } 69 | - { role: configure-kubectl } 70 | 71 | # without become. 72 | - hosts: master, master_slave 73 | tasks: [] 74 | roles: 75 | - { role: kube-niceties } 76 | 77 | - hosts: nodes 78 | become: true 79 | become_user: root 80 | pre_tasks: 81 | - name: Get kubeadm_join_command from master 82 | set_fact: 83 | kubeadm_join_command: "{{ hostvars[groups['master'][0]]['kubeadm_join_command'] }}" 84 | tasks: [] 85 | roles: 86 | - { role: kube-join-cluster } 87 | 88 | - hosts: master 89 | become: true 90 | become_user: root 91 | tasks: [] 92 | roles: 93 | - { role: kubectl-proxy-systemd } 94 | -------------------------------------------------------------------------------- /roles/cri-o-install/tasks/pkg_copr_install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Some (lots!) 
borrowed from https://github.com/cri-o/cri-o-ansible 3 | 4 | - name: install copr plugin 5 | yum: 6 | name: yum-plugin-copr 7 | state: present 8 | disable_gpg_check: yes 9 | 10 | - name: yum-enable-copr 11 | block: 12 | - name: enable conmon 13 | command: yum copr enable -y s1061123/conmon 14 | args: 15 | warn: no 16 | - name: enable cri-o 17 | command: yum copr enable -y s1061123/cri-o 18 | args: 19 | warn: no 20 | - name: enable cri-tools 21 | command: yum copr enable -y s1061123/cri-tools 22 | args: 23 | warn: no 24 | 25 | - name: Install CRI-O/conmon/critools 26 | block: 27 | - name: conmon 28 | yum: 29 | name: conmon 30 | state: present 31 | disable_gpg_check: yes 32 | - name: cri-o 33 | yum: 34 | name: cri-o 35 | state: present 36 | disable_gpg_check: yes 37 | - name: cri-tools 38 | yum: 39 | name: cri-tools 40 | state: present 41 | disable_gpg_check: yes 42 | 43 | - name: enable and start CRI-O 44 | systemd: 45 | name: crio 46 | state: started 47 | enabled: yes 48 | daemon_reload: yes 49 | 50 | - name: remove default CNI for further k8s cni plugin install 51 | file: 52 | path: /etc/cni/net.d 53 | state: absent 54 | 55 | - name: re-create default CNI for further k8s cni plugin install 56 | file: 57 | path: /etc/cni/net.d 58 | state: directory 59 | 60 | - name: modprobe br_netfilter 61 | modprobe: 62 | name: br_netfilter 63 | state: present 64 | 65 | - name: tune sysctl 66 | sysctl: 67 | name: net.bridge.bridge-nf-call-iptables 68 | value: '1' 69 | reload: yes 70 | state: present 71 | sysctl_set: yes 72 | sysctl_file: /etc/sysctl.conf 73 | 74 | - name: Make directories 75 | file: 76 | path: "{{ item }}" 77 | state: directory 78 | with_items: 79 | - /etc/systemd/system/kubelet.service.d/ 80 | 81 | - name: systemd dropin for kubeadm # noqa no-changed-when 82 | shell: > 83 | sh -c 'echo "[Service] 84 | Environment=\"KUBELET_EXTRA_ARGS=--cgroup-driver=systemd 85 | --container-runtime=remote --runtime-request-timeout=15m 86 | --image-service-endpoint 
/var/run/crio/crio.sock 87 | --container-runtime-endpoint /var/run/crio/crio.sock\"" > /etc/systemd/system/kubelet.service.d/0-crio.conf' 88 | 89 | - name: flush iptables 90 | iptables: 91 | chain: "{{ item }}" 92 | flush: yes 93 | with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ] 94 | 95 | - name: enable and start CRI-O 96 | systemd: 97 | name: crio 98 | state: restarted 99 | enabled: yes 100 | daemon_reload: yes 101 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Ansible Lint # feel free to pick your own name 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | 8 | runs-on: ubuntu-latest 9 | 10 | steps: 11 | # Important: This sets up your GITHUB_WORKSPACE environment variable 12 | - uses: actions/checkout@v2 13 | 14 | # - name: Install Ansible galaxy roles 15 | # run: ansible-galaxy install -r requirements.yml 16 | 17 | - name: Lint Ansible Playbook 18 | # replace "master" with any valid ref 19 | uses: ansible/ansible-lint-action@master 20 | with: 21 | # [required] 22 | # Paths to ansible files (i.e., playbooks, tasks, handlers etc..) 23 | # or valid Ansible directories according to the Ansible role 24 | # directory structure. 25 | # If you want to lint multiple ansible files, use the following syntax 26 | # targets: | 27 | # playbook_1.yml 28 | # playbook_2.yml 29 | targets: | 30 | playbooks/virthost-setup.yml 31 | playbooks/kube-install.yml 32 | playbooks/kube-install-ovn.yml 33 | # [optional] 34 | # Arguments to override a package and its version to be set explicitly. 35 | # Must follow the example syntax. 
36 | override-deps: | 37 | ansible==2.10.6 38 | ansible-lint==5.0.2 39 | # [optional] 40 | # Arguments to be passed to the ansible-lint 41 | # Options: 42 | # -q quieter, although not silent output 43 | # -p parseable output in the format of pep8 44 | # --parseable-severity parseable output including severity of rule 45 | # -r RULESDIR specify one or more rules directories using one or 46 | # more -r arguments. Any -r flags override the default 47 | # rules in ansiblelint/rules, unless -R is also used. 48 | # -R Use default rules in ansiblelint/rules in addition to 49 | # any extra 50 | # rules directories specified with -r. There is no need 51 | # to specify this if no -r flags are used 52 | # -t TAGS only check rules whose id/tags match these values 53 | # -x SKIP_LIST only check rules whose id/tags do not match these 54 | # values 55 | # --nocolor disable colored output 56 | # --exclude=EXCLUDE_PATHS 57 | # path to directories or files to skip. This option is 58 | # repeatable. 59 | # -c C Specify configuration file to use. 
Defaults to ".ansible-lint" 60 | args: "-x risky-file-permissions -x role-name" 61 | -------------------------------------------------------------------------------- /docs/scratch.rbac.md: -------------------------------------------------------------------------------- 1 | # RBAC 2 | 3 | 4 | [Here's the bible](https://kubernetes.io/docs/admin/authorization/rbac/#service-account-permissions) 5 | 6 | Help from [@liggitt](https://github.com/liggitt) (Jordan Liggitt) 7 | 8 | ``` 9 | liggitt [1:08 PM] 10 | if you’re on 1.6 with RBAC, you’ll want to keep https://kubernetes.io/docs/admin/authorization/rbac/#service-account-permissions close at hand (edited) 11 | 12 | [1:10] 13 | most things don’t define their own roles (which is fine, the default `view`, `edit`, `admin`, `cluster-admin` roles cover a ton of use cases), but very few apps explain the API permissions they require (some need none, some need read-only access, some assume they are root, etc) 14 | 15 | dougbtv [1:12 PM] 16 | awesome, appreciate the pointer, insightful on the roles. looking forward to getting my feet wet with RBAC, too 17 | 18 | liggitt [1:12 PM] 19 | handing out permissions to the service accounts you’re running apps with is part of running an app on your cluster. if you don’t care what has access, you can grant really broad permissions and be done with it. if you want to know what is doing what, you can get more granular. (edited) 20 | ``` 21 | 22 | ## Flannel 23 | 24 | [flannel]https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml 25 | 26 | ## Now having trouble with flannel having network connections outside... 27 | 28 | [wan connectivity doesn't work](http://pasteall.org/338143) 29 | [IP tables forward didn't work](http://pasteall.org/338157) 30 | 31 | Some ideas from slack: 32 | 33 | ``` 34 | foxie [9:04 AM] 35 | @dougbtv when has it started? Have you by any chance updated to docker 1.13.x or 1.17.x? 
36 | 37 | dougbtv [9:11 AM] 38 | @foxie I'm on `Docker version 17.03.1-ce, build c6d412e` 39 | 40 | [9:11] 41 | good chance that when I was using kube 1.5 I was using 1.12.x 42 | 43 | [9:14] 44 | (I might try to reinstall the cluster with 1.12, that's a good possibility to eliminate! appreciate the brain cycles) 45 | 46 | foxie [9:16 AM] 47 | you may want to try 48 | 49 | [9:16] 50 | iptables -P FORWARD ACCEPT 51 | 52 | [9:16] 53 | they changed that with 1.13 54 | 55 | dougbtv [9:21 AM] 56 | awesome idea -- didn't exactly work for me, but, I think you're onto something, for some reason those iptables rules look like I'm missing something and I can't quite put my finger on it. fwiw, here's the results of giving that a try: http://pasteall.org/338157 57 | ``` 58 | 59 | 60 | Tried inserting the rule at the top for fun... 61 | 62 | ## Roll-back to docker 1.12 63 | 64 | Let's see what we can do... 65 | 66 | [Following this issue-comment](https://github.com/kubernetes/kubeadm/issues/212#issuecomment-291413672) 67 | 68 | We need to specifically do this one: 69 | 70 | > (/etc/systemd/system/kubelet.service.d/10-kubeadm.conf) add "--cgroup-driver=systemd" at the end of the last line. 71 | => This is because Docker uses systemd for cgroup-driver while kubelet uses cgroupfs for cgroup-driver. 
72 | 73 | -------------------------------------------------------------------------------- /roles/cri-o-install/tasks/binary_install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create a path for us 4 | set_fact: 5 | extended_path: "/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/usr/local/go/bin" 6 | 7 | - name: Ensure src dir 8 | file: 9 | path: ${HOME}/src 10 | state: directory 11 | 12 | - name: Ensure requried packages for cri-o 13 | package: 14 | name: "{{ item }}" 15 | state: present 16 | with_items: 17 | - make 18 | - iptables 19 | 20 | - name: Install cri-o 21 | block: 22 | - name: Ensure required dir for cri-o 23 | file: 24 | path: "{{ item }}" 25 | state: directory 26 | with_items: 27 | - /opt/cni/bin 28 | - /etc/crio 29 | - /usr/share/containers/oci/hooks.d 30 | - /usr/local/share/oci-umount/oci-umount.d 31 | - /usr/local/lib/systemd/system 32 | - name: Get cri-o 33 | get_url: 34 | url: "{{ crio_binary_url }}" 35 | dest: ${HOME}/src/crio.tar.gz 36 | - name: Extract cri-o 37 | unarchive: 38 | src: ${HOME}/src/crio.tar.gz 39 | dest: ${HOME}/src 40 | remote_src: yes 41 | - name: Install cri-o 42 | shell: | 43 | cd {{ ansible_env.HOME }}/src/crio-v1.20.1 && \ 44 | make install 45 | environment: 46 | PATH: "{{ extended_path }}" 47 | - name: remove CNI config 48 | file: 49 | path: /etc/cni/net.d/10-crio-bridge.conf 50 | state: absent 51 | 52 | - name: Install crictl 53 | block: 54 | - name: Get crictl 55 | get_url: 56 | url: "{{ crictl_binary_url }}" 57 | dest: ${HOME}/src/crictl.tar.gz 58 | - name: Extract crictl 59 | unarchive: 60 | src: ${HOME}/src/crictl.tar.gz 61 | dest: ${HOME}/src 62 | remote_src: yes 63 | - name: Copy crictl 64 | copy: 65 | src: ${HOME}/src/crictl 66 | dest: /usr/bin/crictl 67 | owner: root 68 | group: root 69 | mode: '755' 70 | remote_src: yes 71 | 72 | - name: Install conmon 73 | block: 74 | - name: Get conmon 75 | get_url: 76 | url: "{{ conmon_binary_url }}" 77 | 
dest: ${HOME}/src/conmon 78 | - name: Copy conmon 79 | copy: 80 | src: ${HOME}/src/conmon 81 | dest: /usr/local/bin/conmon 82 | owner: root 83 | group: root 84 | mode: '755' 85 | remote_src: yes 86 | 87 | - name: set fs.may_detach_mounts 88 | sysctl: 89 | name: fs.may_detach_mounts 90 | value: '1' 91 | sysctl_file: /etc/sysctl.d/crio.conf 92 | reload: yes 93 | 94 | - name: systemd daemon-reload 95 | systemd: 96 | daemon_reload: yes 97 | 98 | - name: add docker.io registry in /etc/crio/crio.conf 99 | lineinfile: 100 | path: /etc/crio/crio.conf 101 | insertafter: '^#registries = \[' 102 | line: 'registries = [ "docker.io" ]' 103 | 104 | - name: Enable cri-o 105 | systemd: 106 | name: crio.service 107 | state: started 108 | enabled: yes 109 | -------------------------------------------------------------------------------- /playbooks/kube-install-ovn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: ka-init/init.yml 3 | 4 | - hosts: lb 5 | become: true 6 | become_user: root 7 | tasks: [] 8 | roles: 9 | - { role: lb-setup } 10 | 11 | - hosts: master,nodes,master_slave 12 | become: true 13 | become_user: root 14 | tasks: [] 15 | roles: 16 | - { role: ovn-setup } 17 | - { role: multus-2nics-setup, when: network_type == '2nics' } 18 | - { role: bridge-setup, when: network_type == 'bridge' } 19 | - { role: optional-packages } 20 | # You can add "crio_force: true" if you need to run the builds again. 
21 | - { role: cri-o-install, when: container_runtime == 'crio', crio_force: false } 22 | - { role: install-docker, when: container_runtime == 'docker' } 23 | - { role: kube-install } 24 | 25 | - hosts: master,nodes,master_slave 26 | become: true 27 | become_user: root 28 | tasks: 29 | - name: Set ipv4 ip_forward to 1 30 | sysctl: 31 | name: net.ipv4.ip_forward 32 | value: 1 33 | sysctl_set: yes 34 | state: present 35 | reload: yes 36 | - name: Load module if parameter is not in node # noqa no-changed-when 37 | shell: > 38 | if [ ! -f /proc/sys/net/bridge/bridge-nf-call-iptables ]; then \ 39 | modprobe br_netfilter; \ 40 | fi 41 | - name: Set bridge-nf-call-iptables to 1 42 | sysctl: 43 | name: net.bridge.bridge-nf-call-iptables 44 | value: 1 45 | sysctl_set: yes 46 | state: present 47 | reload: yes 48 | 49 | - hosts: master 50 | become: true 51 | become_user: root 52 | tasks: [] 53 | roles: 54 | - { role: kube-init } 55 | - { role: configure-kubectl } 56 | 57 | 58 | - hosts: master_slave 59 | become: true 60 | become_user: root 61 | pre_tasks: 62 | - name: Get cert related args from master 63 | set_fact: 64 | kubeadm_cert_key: "{{ hostvars[groups['master'][0]]['kubeadm_cert_key'] }}" 65 | - name: Get kubeadm_join_command from master 66 | set_fact: 67 | kubeadm_join_command: "{{ hostvars[groups['master'][0]]['kubeadm_join_command'] }}" 68 | tasks: [] 69 | roles: 70 | - { role: kube-master-join-cluster } 71 | - { role: configure-kubectl } 72 | 73 | 74 | # ---- placeholder: kube-cni 75 | # without become. 
76 | 77 | - hosts: master, master_slave 78 | tasks: [] 79 | roles: 80 | - { role: kube-niceties } 81 | 82 | - hosts: nodes 83 | become: true 84 | become_user: root 85 | pre_tasks: 86 | - name: Get kubeadm_join_command from master 87 | set_fact: 88 | kubeadm_join_command: "{{ hostvars[groups['master'][0]]['kubeadm_join_command'] }}" 89 | tasks: [] 90 | roles: 91 | - { role: kube-join-cluster } 92 | 93 | - hosts: master 94 | become: true 95 | become_user: root 96 | tasks: 97 | - name: Install patch package 98 | yum: 99 | name: patch 100 | state: present 101 | 102 | - hosts: master 103 | become: true 104 | become_user: centos 105 | tasks: [] 106 | roles: 107 | - { role: ovnkube-setup } 108 | -------------------------------------------------------------------------------- /docs/troubleshooting.md: -------------------------------------------------------------------------------- 1 | This document contains detail about the possible issue that you might encounter while deploying Kubernetes cluster using kube-ansible. 2 | 3 | #### Python 2 binding related error 4 | 5 | **Issue:** You might encounter the following error if you specify `ansible_python_interpreter` in virthost.inventory file. 6 | ``` 7 | The Python 2 bindings for rpm are needed for this module. If you require Python 3 support use the `dnf` Ansible module instead.. The Python 2 yum module is needed for this module. If you require Python 3 support use the `dnf` Ansible module instead. 8 | 9 | ``` 10 | **Fix:** Remove the `ansible_python_interpreter` from virthost.inventory, it will resort for python2.7 interpreter. 11 | 12 | ### OVN DB (NB & SB) cluster doesn't come-up successfully and result in failed ovn-kubernetes deployment. 13 | K8S cluster hosted on VMs (libvirt), DNS of the hostname is resolved through the libvirt dnsmasq. 
and if you check the IP address of the host machine, it will be different from 192.168.122.41 as shown
in the example logs above. OVN-Kubernetes picks the IP address of the host using `getent ahostsv4 `, so if the host shows a different IP
address from `getent ahostsv4 ` & `ifconfig` on that host, it means your DNS cache is stale and needs a refresh.
***Note*** This workaround is already part of the kube-ansible playbooks. If you want to skip this workaround, please run the `virthost.yml` playbook with `--skip-tags dns-workaround`.
A line like the following can be added to 22 | # /etc/sysconfig/syslog 23 | # 24 | # local2.* /var/log/haproxy.log 25 | # 26 | log 127.0.0.1 local2 27 | 28 | chroot /var/lib/haproxy 29 | pidfile /var/run/haproxy.pid 30 | maxconn 4000 31 | user haproxy 32 | group haproxy 33 | daemon 34 | 35 | # turn on stats unix socket 36 | stats socket /var/lib/haproxy/stats 37 | 38 | #--------------------------------------------------------------------- 39 | # common defaults that all the 'listen' and 'backend' sections will 40 | # use if not designated in their block 41 | #--------------------------------------------------------------------- 42 | defaults 43 | mode http 44 | log global 45 | option httplog 46 | option dontlognull 47 | option http-server-close 48 | option forwardfor except 127.0.0.0/8 49 | option redispatch 50 | retries 3 51 | timeout http-request 10s 52 | timeout queue 1m 53 | timeout connect 10s 54 | timeout client 1m 55 | timeout server 1m 56 | timeout http-keep-alive 10s 57 | timeout check 10s 58 | maxconn 3000 59 | 60 | #--------------------------------------------------------------------- 61 | # main frontend which proxys to the backends 62 | #--------------------------------------------------------------------- 63 | frontend kubernetes 64 | bind {{ ansible_default_ipv4.address }}:6443 65 | option tcplog 66 | mode tcp 67 | default_backend kubernetes-master-nodes 68 | #--------------------------------------------------------------------- 69 | # static backend for serving up images, stylesheets and such 70 | #--------------------------------------------------------------------- 71 | backend static 72 | balance roundrobin 73 | server static 127.0.0.1:4331 check 74 | #--------------------------------------------------------------------- 75 | # round robin balancing between the various backends 76 | #--------------------------------------------------------------------- 77 | backend kubernetes-master-nodes 78 | mode tcp 79 | balance roundrobin 80 | option 
{% for node in groups["master_slave"] | default([]) %}
server {{ node }} {{ hostvars[node]['ansible_host'] }}:6443 check
{% endfor %}
# Apply the default (single-instance, non-RAFT) OVN database manifests.
# This is the counterpart of the RAFT task above, so it must run whenever
# RAFT mode is NOT requested: either the variable is absent, or it is
# explicitly false.
#
# Fix: the original condition was
#   when: enable_ovn_raft is not defined and not enable_ovn_raft
# which is broken in both directions:
#   - defined-and-false: the first operand is false, "and" short-circuits,
#     and the non-raft manifests are never applied (no CNI gets installed);
#   - undefined: the first operand is true, so the second operand is
#     evaluated and raises an undefined-variable error.
# "or" gives the intended complement of the RAFT task's condition.
- name: Add ovn-kube ymls
  command: kubectl create -f {{ item }}
  args:
    chdir: $HOME/work/src/github.com/ovn-org/ovn-kubernetes/dist/yaml
  with_items:
    - ovn-setup.yaml
    - ovs-node.yaml
    - ovnkube-db.yaml
    - ovnkube-master.yaml
    - ovnkube-node.yaml
  when: enable_ovn_raft is not defined or not enable_ovn_raft
-------------------------------------------------------------------------------- /roles/kube-init/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Alright we're using the kubeadm init 3 | # You can reset this with: 4 | # [root@kube-master centos]# kubeadm --help | grep reset 5 | # reset Run this to revert any changes made to this host by 'kubeadm init' or 'kubeadm join'. 6 | 7 | - name: create kubeadm config file 8 | block: 9 | - name: Install jq binary 10 | get_url: 11 | url: https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 12 | dest: /usr/bin/jq 13 | 14 | - name: set jq binary permission 15 | file: 16 | path: /usr/bin/jq 17 | mode: 0755 18 | 19 | - name: check kubeadm version (major) 20 | shell: > 21 | kubeadm version -o json | jq -r .clientVersion.major 22 | register: kubeadm_version_major 23 | 24 | - name: check kubeadm version (minor) 25 | shell: > 26 | kubeadm version -o json | jq -r .clientVersion.minor | sed -e 's/\+//g' 27 | register: kubeadm_version_minor 28 | 29 | - name: create kubeadm config file (<= 1.12) 30 | template: 31 | src: kubeadm.cfg.v1alpha.j2 32 | dest: /root/kubeadm.cfg 33 | when: kubeadm_version_minor.stdout|int <= 12 34 | 35 | - name: create kubeadm config file (>= 1.13) 36 | template: 37 | src: kubeadm.cfg.v1beta.j2 38 | dest: /root/kubeadm.cfg 39 | when: kubeadm_version_minor.stdout|int >= 13 40 | 41 | - name: create kubeadm config file (>= 1.16) 42 | template: 43 | src: kubeadm.cfg.v1beta2.j2 44 | dest: /root/kubeadm.cfg 45 | when: kubeadm_version_minor.stdout|int >= 16 46 | 47 | - name: configure audit 48 | block: 49 | - name: create audit directory 50 | file: 51 | path: /etc/kubernetes/audit 52 | state: directory 53 | 54 | - name: create audit policy file 55 | template: 56 | src: audit.yaml.j2 57 | dest: /etc/kubernetes/audit/audit.yaml 58 | when: enable_auditlog | default(false) | bool 59 | 60 | - name: Default cri-o flags to empty 61 | set_fact: 62 | 
arg_crio: "" 63 | 64 | - name: Set cri-o flags 65 | set_fact: 66 | arg_crio: "--ignore-preflight-errors=all" 67 | when: container_runtime == "crio" 68 | 69 | - name: Default k8s version to empty 70 | set_fact: 71 | k8s_version: "" 72 | 73 | - name: set upload-certs in kubeadm option 74 | set_fact: 75 | k8s_upload_certs: "" 76 | 77 | - name: set upload-certs in kubeadm option 78 | set_fact: 79 | k8s_upload_certs: "--upload-certs" 80 | when: groups.master_slave is defined and groups.master_slave|length > 0 81 | 82 | # Was trying to use flannel and running with: 83 | # kubeadm init > /etc/kubeadm.init.txt 84 | # abandonded for now... 85 | - name: Run kubeadm init 86 | shell: > 87 | kubeadm init {{ k8s_version }} {{ arg_crio }} {{ k8s_upload_certs }} --config=/root/kubeadm.cfg > /var/log/kubeadm.init.log 88 | args: 89 | creates: /etc/.kubeadm-complete 90 | 91 | - name: Mark init complete 92 | file: 93 | path: /etc/.kubeadm-complete 94 | state: directory 95 | 96 | - name: Get join command # noqa no-changed-when 97 | shell: > 98 | kubeadm token create --print-join-command 99 | register: kubeadm_join_output 100 | 101 | - name: Set fact with join command 102 | set_fact: 103 | kubeadm_join_command: "{{ kubeadm_join_output.stdout }}" 104 | 105 | - name: Get certificate-key 106 | shell: > 107 | set -o pipefail && kubeadm init phase upload-certs --upload-certs -v 0 2> /dev/null | tail -n 1 108 | register: kubeadm_cert_key_output 109 | when: groups.master_slave is defined and groups.master_slave|length > 0 110 | 111 | - name: Set fact with certificate-key 112 | set_fact: 113 | kubeadm_cert_key: "{{ kubeadm_cert_key_output.stdout }}" 114 | when: groups.master_slave is defined and groups.master_slave|length > 0 115 | -------------------------------------------------------------------------------- /inventory/ci/virthost2.home.61will.space/engine.yml: -------------------------------------------------------------------------------- 1 | all: 2 | hosts: 3 | manager: 4 | ansible_host: 
- nic_name: eth0   # was "name_name" (typo) — the ovirt.vm-infra role expects nic_name, and silently ignored the bad key, leaving eth0 unconfigured
  nic_ip_address: 192.168.1.100
  nic_boot_protocol: static
  nic_netmask: 255.255.255.0
  nic_gateway: 192.168.1.1
  nic_on_boot: true
- nic_name: eth0   # was "name_name" (typo) — must match the nic_name key used by the eth1 entry below
  nic_ip_address: 192.168.1.102
  nic_boot_protocol: static
  nic_netmask: 255.255.255.0
  nic_gateway: 192.168.1.1
  nic_on_boot: true
However, if you'd like to install specific binaries for
either the kubelet, kubeadm or kubectl -- you can do so by specifying that you'd
like to perform a binary install and specify URLs (that point to, say, binaries
in a GitHub release).
59 | 60 | ## Deploying Cluster with OVN-Kubernetes CNI 61 | ``` 62 | ansible-playbook -i inventory/vms.local.generated -e 'network_type=2nics' \ 63 | -e 'container_runtime=crio' -e 'ovn_image_repo=' \ 64 | playbooks/kube-install-ovn.yml 65 | ``` 66 | 67 | ## Using Fedora 68 | 69 | Take a gander at the `./inventory/examples/crio/crio.inventory` for an example 70 | of how to override the proper variables to use Fedora. 71 | 72 | ## Installing optional packages 73 | 74 | Sometimes, one might like to install additional packages across the hosts in 75 | their cluster, one can do so by setting the `optional_packages` variable as a 76 | list of RPM packages. 77 | 78 | For example, one may have a `./inventory/optional.yaml` and therein specify: 79 | 80 | ``` 81 | --- 82 | optional_packages: 83 | - tcpdump 84 | - bind-utils 85 | ``` 86 | 87 | You may then include that when you kick off your playbook for example... 88 | 89 | ``` 90 | ansible-playbook -i inventory/your.inventory -e "@./inventory/optional.yaml" playbooks/kube-install.yaml 91 | ``` 92 | 93 | ## Installing Prometheus Operator 94 | 95 | You can install [Prometheus 96 | Operator](https://github.com/coreos/prometheus-operator) as part of your 97 | cluster deployment, providing a monitoring service to help with scale testing. 
98 | 99 | To enable Prometheus Operator, add the following variables to your inventory or 100 | variables file (or pass to `ansible-playbook` with `-e`): 101 | 102 | ``` 103 | monitoring_install: true 104 | control_plane_listen_all: true 105 | ``` 106 | 107 | Once you've setup the variables, you can post-install Prometheus-Operator with 108 | the following `ansible-playbook` command: 109 | 110 | ``` 111 | ansible-playbook -i inventory/vms.local.generated -e "@./inventory/my_vars.yml" playbooks/ka-monitoring/config.yml 112 | ``` 113 | -------------------------------------------------------------------------------- /roles/cri-o-install/tasks/build_install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Some (lots!) borrowed from https://github.com/cri-o/cri-o-ansible 3 | 4 | - name: Create a path for us 5 | set_fact: 6 | extended_path: "/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin:/home/centos/.local/bin:/home/centos/bin/:/usr/local/go/bin" 7 | 8 | - name: Install required packages for cri-o 9 | package: 10 | name: "{{ item }}" 11 | state: present 12 | with_items: 13 | - wget 14 | - git 15 | - make 16 | - gcc 17 | - tar 18 | - libseccomp-devel 19 | - glib2-devel 20 | - glibc-static 21 | - container-selinux 22 | - btrfs-progs-devel 23 | - device-mapper-devel 24 | - glibc-devel 25 | - gpgme-devel 26 | - libassuan-devel 27 | - libgpg-error-devel 28 | - pkgconfig 29 | - json-glib-devel 30 | - skopeo-containers 31 | - ostree 32 | - ostree-devel 33 | - iptables 34 | 35 | - name: Make directories 36 | file: 37 | path: "{{ item }}" 38 | state: directory 39 | with_items: 40 | - /etc/systemd/system/kubelet.service.d/ 41 | - /var/lib/etcd 42 | - /etc/cni/net.d 43 | - /etc/containers 44 | 45 | - name: clone runc 46 | git: 47 | repo: https://github.com/opencontainers/runc 48 | dest: "{{ ansible_env.HOME }}/{{ gopath }}/src/github.com/opencontainers/runc" 49 | version: master 50 | 51 | - name: clone CRI-O 52 | git: 53 | 
repo: "{{ crio_giturl|default('https://github.com/cri-o/cri-o.git') }}" 54 | dest: "{{ ansible_env.HOME }}/{{ gopath }}/src/github.com/cri-o/cri-o" 55 | version: "{{ crio_version|default('master') }}" 56 | 57 | - name: clone conmon 58 | git: 59 | repo: https://github.com/containers/conmon.git 60 | dest: "{{ ansible_env.HOME }}/{{ gopath }}/src/github.com/containers/conmon" 61 | version: master 62 | 63 | - name: clone cri-tools 64 | git: 65 | repo: https://github.com/kubernetes-sigs/cri-tools.git 66 | dest: "{{ ansible_env.HOME }}/{{ gopath }}/src/github.com/kubernetes-sigs/cri-tools" 67 | version: master 68 | 69 | - name: clone CNI 70 | git: 71 | repo: https://github.com/containernetworking/plugins 72 | dest: "{{ ansible_env.HOME }}/{{ gopath }}/src/github.com/containernetworking/plugins" 73 | version: master 74 | 75 | - name: build runc # noqa no-changed-when 76 | shell: | 77 | cd {{ ansible_env.HOME }}/{{ gopath }}/src/github.com/opencontainers/runc && \ 78 | export GOPATH={{ ansible_env.HOME }}/{{ gopath }} && \ 79 | make BUILDTAGS="seccomp selinux" && make install 80 | environment: 81 | PATH: "{{ extended_path }}" 82 | 83 | - name: link runc # noqa no-changed-when 84 | file: 85 | src: /usr/local/sbin/runc 86 | dest: /usr/bin/runc 87 | state: link 88 | 89 | - name: build cri-o # noqa no-changed-when 90 | shell: | 91 | cd {{ ansible_env.HOME }}/{{ gopath }}/src/github.com/cri-o/cri-o && \ 92 | make && \ 93 | echo "fs.may_detach_mounts=1" >> /etc/sysctl.conf && \ 94 | sysctl -p && \ 95 | make install && \ 96 | make install.systemd && \ 97 | make install.config 98 | environment: 99 | PATH: "{{ extended_path }}" 100 | GOPATH: "{{ ansible_env.HOME }}/{{ gopath }}" 101 | 102 | - name: build conmon # noqa no-changed-when 103 | shell: | 104 | cd {{ ansible_env.HOME }}/{{ gopath }}/src/github.com/containers/conmon && \ 105 | make && \ 106 | make install 107 | environment: 108 | PATH: "{{ extended_path }}" 109 | GOPATH: "{{ ansible_env.HOME }}/{{ gopath }}" 110 | 111 | - 
name: build cri-tools # noqa no-changed-when 112 | shell: | 113 | cd {{ ansible_env.HOME }}/{{ gopath }}/src/github.com/kubernetes-sigs/cri-tools && \ 114 | make && \ 115 | make install 116 | environment: 117 | PATH: "{{ extended_path }}" 118 | GOPATH: "{{ ansible_env.HOME }}/{{ gopath }}" 119 | 120 | - name: build CNI stuff # noqa no-changed-when 121 | shell: | 122 | cd {{ ansible_env.HOME }}/{{ gopath }}/src/github.com/containernetworking/plugins && \ 123 | ./build_linux.sh && \ 124 | mkdir -p /opt/cni/bin && \ 125 | cp bin/* /opt/cni/bin/ 126 | environment: 127 | PATH: "{{ extended_path }}" 128 | 129 | - name: run CRI-O with systemd cgroup manager 130 | replace: 131 | regexp: 'cgroupfs' 132 | replace: 'systemd' 133 | name: /etc/crio/crio.conf 134 | backup: yes 135 | 136 | - name: enable and start CRI-O 137 | systemd: 138 | name: crio 139 | state: started 140 | enabled: yes 141 | daemon_reload: yes 142 | 143 | - name: modprobe br_netfilter 144 | modprobe: 145 | name: br_netfilter 146 | state: present 147 | 148 | - name: tune sysctl 149 | sysctl: 150 | name: net.bridge.bridge-nf-call-iptables 151 | value: '1' 152 | reload: yes 153 | state: present 154 | sysctl_set: yes 155 | sysctl_file: /etc/sysctl.conf 156 | 157 | - name: systemd dropin for kubeadm # noqa no-changed-when 158 | shell: > 159 | sh -c 'echo "[Service] 160 | Environment=\"KUBELET_EXTRA_ARGS=--container-runtime=remote 161 | --runtime-request-timeout=15m --image-service-endpoint /var/run/crio/crio.sock 162 | --container-runtime-endpoint /var/run/crio/crio.sock\"" > /etc/systemd/system/kubelet.service.d/0-crio.conf' 163 | 164 | - name: flush iptables 165 | iptables: 166 | chain: "{{ item }}" 167 | flush: yes 168 | with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ] 169 | -------------------------------------------------------------------------------- /docs/scratch.md: -------------------------------------------------------------------------------- 1 | ## Showing some pertinent details about our nginx pods... 
2 | 3 | [root@kube-master centos]# kubectl get pods | tail -n +2 | awk '{print $1}' | xargs -i kubectl describe pod {} | grep -Pi "(^Node|^Name[^s]|^IP)" 4 | 5 | ## On CNI 6 | 7 | Run into an issue that looks kind of [like this](https://github.com/kubernetes/kubernetes/issues/36575) so I added `--pod-network-cidr=` but then I got: 8 | 9 | ``` 10 | FirstSeen LastSeen Count From SubObjectPath Type Reason Message 11 | --------- -------- ----- ---- ------------- -------- ------ ------- 12 | 6m 6m 1 {default-scheduler } Normal Scheduled Successfully assigned nginx-17db8 to kube-node-2 13 | 6m 6m 3 {kubelet kube-node-2} Warning FailedSync Error syncing pod, skipping: failed to "SetupNetwork" for "nginx-17db8_default" with SetupNetworkError: "Failed to setup network for pod \"nginx-17db8_default(1a2b64f2-f39b-11e6-a7ae-52540078670a)\" using network plugins \"cni\": cni config unintialized; Skipping pod" 14 | ``` 15 | 16 | My guess is because I'm being stubborn and trying to use Flannel plainly. I think I should try weave as shown in... basically every single example I see. 
17 | 18 | 19 | ---- 20 | 21 | ## History from kube master on successful run 22 | 23 | ``` 24 | [root@kube-master centos]# history 25 | 1 kubectl get nodes 26 | 2 yum install -y nano mlocate 27 | 3 nano nginx_pod.yaml 28 | 4 kubectl create -f nginx_pod.yaml 29 | 5 kubectl get pods 30 | 6 kubectl get pods 31 | 7 kubectl describe pod nginx-507br 32 | 8 updatedb 33 | 9 locate cni 34 | 10 cd /etc/cni/net.d/ 35 | 11 ls 36 | 12 cat 10-weave.conf 37 | 13 cd /var/log/ 38 | 14 ls -lathr 39 | 15 cat podnetwork-apply.log 40 | 16 kubectl get pods 41 | 17 kubectl describe pod nginx-507br 42 | 18 kubectl status 43 | 19 kubectl get status 44 | 20 kubectl get pods 45 | 21 kubectl apply -f https://git.io/weave-kube 46 | 22 kubectl get pods 47 | 23 cd /etc/ 48 | 24 ls -lathr 49 | 25 cat kubeadm.init.txt 50 | 26 kubectl get namespaces 51 | 27 kubectl get pods --namespace=kube-system 52 | 28 kubectl get pods --namespace=default 53 | 29 kubectl describe pod nginx-507br 54 | 30 kubectl get pods --namespace=kube-system 55 | 31 kubectl get pods 56 | 32 kubectl get pods 57 | 33 kubectl describe pod nginx-507br 58 | 34 ifconfig 59 | 35 curl 10.38.0.1 60 | 36 route -n 61 | 37 kubectl describe pod nginx-507br 62 | 38 kubectl get pods 63 | 39 kubectl describe pod nginx-507br 64 | 40 kubectl describe pod nginx-wwnl8 65 | 41 cd ~ 66 | 42 ls 67 | 43 updatedb 68 | 44 locate nginx_pod 69 | 45 cd /home/centos/ 70 | 46 ls 71 | 47 cat nginx_pod.yaml 72 | 48 kubectl expose rc nginx --port=8999 --target-port=8000 73 | 49 kubectl get svc 74 | 50 curl 10.111.204.122:8999 75 | 51 curl 10.111.204.122:8999 76 | 52 route -n 77 | 53 iptables -L 78 | 54 systemctl status firewalld 79 | 55 iptables -A INPUT -p tcp --dport 8999 -j ACCEPT 80 | 56 iptables -L 81 | 57 iptables -n -L 82 | 58 curl 10.111.204.122:8999 83 | 59 route -n 84 | 60 ifconfig 85 | 61 kubectl get svc 86 | 62 curl -k https://10.96.0.1 87 | 63 iptables -n -L 88 | 64 curl -v 10.111.204.122:8999 89 | 65 ping 10.111.204.122 90 | 66 kubectl get svc 91 
| 67 kubectl get pods 92 | 68 kubectl describe pod nginx-wwnl8 93 | 69 kubectl describe pod nginx-507br 94 | 70 curl 10.38.0.1 95 | 71 curl 10.32.0.2 96 | 72 kubectl describe pod nginx-507br | grep -Pi "(^Node)" 97 | 73 kubectl describe pod nginx-507br | grep -Pi "(^Node|^IP)" 98 | 74 kubectl get pods 99 | 75 kubectl get pods | tail -n +1 100 | 76 kubectl get pods | tail -n +2 101 | 77 kubectl get pods | tail -n +2 | awk '{print $1}' 102 | 78 kubectl get pods | tail -n +2 | awk '{print $1}' | xargs -i kubectl describe pod {} | grep -Pi "(^Node|^IP)" 103 | 79 kubectl get pods | tail -n +2 | awk '{print $1}' | xargs -i kubectl describe pod {} | grep -Pi "(^Node|^Name[^s]|^IP)" 104 | 80 kubectl get svc 105 | 81 curl 10.111.204.122 106 | 82 curl 10.111.204.122:8999 107 | 83 kubectl describe svc nginx 108 | 84 kubectl delete svc nginx 109 | 85 history | grep -i svc 110 | 86 history | grep -i service 111 | 87 history | grep -i expose 112 | 88 kubectl expose rc nginx --port=8999 --target-port=80 113 | 89 curl 10.111.204.122:8999 114 | 90 kubectl get svc 115 | 91 curl 10.96.194.77:8999 116 | 92 curl 10.96.194.77:8999 117 | 93 iptables -L INPUT 118 | 94 iptables -L INPUT --linenumbers 119 | 95 iptables -D 120 | 96 iptables -n -L INPUT --linenumbers 121 | 97 iptables -n -L INPUT --line-numbers 122 | 98 iptables -D 7 INPUT 123 | 99 iptables -D INPUT 7 124 | 100 curl 10.96.194.77:8999 125 | 101 kubectl get svc 126 | 102 history | grep expose 127 | 103 ifconfig | grep 192 128 | 104 kubectl get svc 129 | 105 kubectl delete svc nginx 130 | 106 kubectl expose rc nginx --port=8999 --target-port=80 --external-ip 192.168.122.213 131 | 107 kubectl get svc 132 | 108 curl 192.168.122.213:8999 133 | 109 history 134 | ``` 135 | 136 | ``` 137 | [root@kube-master centos]# cat nginx_pod.yaml 138 | apiVersion: v1 139 | kind: ReplicationController 140 | metadata: 141 | name: nginx 142 | spec: 143 | replicas: 2 144 | selector: 145 | app: nginx 146 | template: 147 | metadata: 148 | name: nginx 
149 | labels: 150 | app: nginx 151 | spec: 152 | containers: 153 | - name: nginx 154 | image: nginx 155 | ports: 156 | - containerPort: 80 157 | 158 | ``` -------------------------------------------------------------------------------- /playbooks/ka-init/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # --------------------------- - 3 | # Changes for bare metal - - 4 | # Name of inventory file - - 5 | # --------------------------- - 6 | all_inventory: "all.local.generated" 7 | 8 | # What container runtime do we use? 9 | # valid values: 10 | # - docker 11 | # - crio 12 | container_runtime: docker 13 | 14 | # --------------------------- - 15 | # docker vars - - 16 | # --------------------------- - 17 | docker_install_suppress_newgrp: true 18 | 19 | # --------------------------- - 20 | # crio vars - - 21 | # --------------------------- - 22 | # Which version of crio? 23 | # (doesn't matter if docker is container runtime) 24 | crio_build_version: v1.11.1 25 | crio_build_install: False 26 | crio_use_copr: False 27 | # for binary install 28 | crio_binary_url: https://storage.googleapis.com/k8s-conform-cri-o/artifacts/crio-v1.20.1.tar.gz 29 | crictl_binary_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.20.0/crictl-v1.20.0-linux-amd64.tar.gz 30 | conmon_binary_url: https://github.com/containers/conmon/releases/download/v2.0.27/conmon.amd64 31 | 32 | 33 | # Network type (2nics or default) 34 | network_type: "default" 35 | # Pod net work CIDR 36 | pod_network_cidr: "10.244.0.0" 37 | 38 | # General config 39 | 40 | # At 1.7.2 you need this cause of a bug in kubeadm join. 41 | # Turn it off later, or, try it if a join fails. 42 | skip_preflight_checks: true 43 | 44 | # Stable. 
(was busted at 1.6 release, may work now, untested for a couple months) 45 | kube_baseurl: http://yum.kubernetes.io/repos/kubernetes-el7-x86_64 46 | 47 | # Unstable 48 | # kube_baseurl: http://yum.kubernetes.io/repos/kubernetes-el7-x86_64-unstable 49 | 50 | # Kube Version 51 | # Accepts "latest" or the version part of an RPM (typically based on the kubelet RPM). 52 | # For example if you were to look at `yum search kubelet --showduplicates` 53 | # You'd see things like "kubelet-1.7.5-0.x86_64" 54 | # You'd use "1.7.5-0" here, such as: 55 | # kube_version: 1.7.5-0 56 | # The default is... "latest" 57 | kube_version: "latest" 58 | 59 | # Binary install 60 | # Essentially replaces the RPM installed binaries with a specific set of binaries from URLs. 61 | # binary_install: true 62 | # binary_install_force_redownload: false 63 | 64 | images_directory: /home/images 65 | system_default_ram_mb: 4096 66 | system_default_cpus: 4 67 | 68 | # Define all VM's that need to be created and their respective roles. 69 | # There are three roles user can defined 70 | # - master: Kubernets primary master node 71 | # - master_slave: Kubernets secondary master nodes that joins primary master 72 | # - nodes : Kubernetes nodes (worker) 73 | virtual_machines: 74 | - name: kube-master1 75 | node_type: master 76 | - name: kube-node-1 77 | node_type: nodes 78 | - name: kube-node-2 79 | node_type: nodes 80 | # Uncomment following (lb/master_slave) for k8s master HA cluster 81 | # - name: kube-lb 82 | # node_type: lb 83 | # - name: kube-master2 84 | # node_type: master_slave 85 | # - name: kube-master3 86 | # node_type: master_slave 87 | 88 | # - name: builder 89 | # node_type: builder 90 | # system_ram_mb: 24576 91 | # - name: my-support-node 92 | # node_type: other 93 | # system_ram_mb: 8192 94 | # system_cpus: 8 95 | 96 | # Kubectl proxy. 
97 | kubectl_proxy_port: 8088 98 | 99 | # Allow the kubernetes control plane to listen on all interfaces 100 | #control_plane_listen_all: true 101 | 102 | # ---------------------------- 103 | # ovn vars. 104 | # ---------------------------- 105 | #ovn_image_repo: "docker.io/ovnkube/ovn-daemonset-u:latest" 106 | ovn_image_repo: "docker.io/nfvpe/ovn-daemonset-u:latest" 107 | 108 | # OVN Kubernets repo and branch 109 | # ovn_kubernetes_repo: https://github.com/ovn-org/ovn-kubernetes 110 | # ovn_kubernetes_branch: master 111 | # Setup ovn-kubernetes in clustered HA mode (Raft based) 112 | # enable_ovn_raft: True 113 | 114 | # Set logging parameters for different OVN components 115 | # Log level for ovnkube master 116 | # ovnkube_master_loglevel: "5" 117 | 118 | # Log level for ovnkube node 119 | # ovnkube_node_loglevel: "5" 120 | 121 | # Log config for ovn northd 122 | # ovn_loglevel_northd: "-vconsole:info -vfile:info" 123 | 124 | # Log config for OVN Northbound Database 125 | # ovn_loglevel_nb: "-vconsole:info -vfile:info" 126 | 127 | # Log config for OVN Southbound Database 128 | # ovn_loglevel_sb: "-vconsole:info -vfile:info" 129 | 130 | # Log config for OVN Controller 131 | # ovn_loglevel_controller: "-vconsole:info" 132 | 133 | # Log config for OVN NBCTL daemon 134 | # ovn_loglevel_nbctld: "-vconsole:info" 135 | 136 | # ---------------------------- 137 | # virt-host vars. 138 | # ---------------------------- 139 | 140 | # Allows one to skip the steps to initially setup a virthost 141 | # convenient when iterating quickly. 
142 | skip_virthost_depedencies: false 143 | 144 | # Enables a bridge to the outside LAN 145 | # (as opposed to using virbr0) 146 | bridge_networking: false 147 | bridge_name: virbr0 148 | bridge_physical_nic: "enp1s0f1" 149 | bridge_network_name: "br0" 150 | bridge_network_cidr: 192.168.1.0/24 151 | 152 | # ---------------------------- 153 | # device plugins 154 | # ---------------------------- 155 | enable_device_plugins: false 156 | 157 | # ---------------------------- 158 | # builder vars 159 | # ---------------------------- 160 | 161 | # NOTE: these builder vars are here and not in the group_vars/builder.yml file 162 | # because these values are used across different types of nodes, and not just 163 | # directly on the builder server itself. 164 | 165 | # artifact paths 166 | artifacts_sync_path: /opt/k8s/artifacts 167 | 168 | # builder archive list 169 | archive_list: 170 | - rpms/kubeadm-x86_64.rpm 171 | - rpms/kubectl-x86_64.rpm 172 | - rpms/kubelet-x86_64.rpm 173 | - rpms/kubernetes-cni-x86_64.rpm 174 | - cloud-controller-manager.tar 175 | - kube-apiserver.tar 176 | - kube-controller-manager.tar 177 | - kube-proxy.tar 178 | - kube-scheduler.tar 179 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # kube-ansible 2 | 3 | `kube-ansible` is a set of Ansible playbooks and roles that allows 4 | you to instantiate a vanilla Kubernetes cluster on (primarily) CentOS virtual 5 | machines or baremetal. 6 | 7 | Additionally, kube-ansible includes CNI pod networking (defaulting to Flannel, 8 | with an ability to deploy Weave, Multus and OVN Kubernetes). 9 | 10 | The purpose of kube-ansible is to provide a simpler lab environment that allows 11 | prototyping and proof of concepts. For staging and production deployments, we 12 | recommend that you utilize 13 | [OpenShift-Ansible](https://github.com/openshift/openshift-ansible) 14 | 15 | ## Playbooks 16 | 17 | Playbooks are located in the `playbooks/` directory. 
18 | 19 | | Playbook | Inventory | Purpose | 20 | | ---------------------------------------- | ------------------------------------- | ------------------------------------------------------------------ | 21 | | `virthost-setup.yml` | `./inventory/virthost/` | Provision a virtual machine host | 22 | | `bmhost-setup.yml` | `./inventory/bmhost/` | Provision a bare metal host and add to group nodes. | 23 | | `allhost-setup.yml` | `./inventory/allhosts/` | Provision both a virtual machine host and a bare metal host. | 24 | | `kube-install.yml` | `./inventory/all.local.generated` | Install and configure a k8s cluster using all hosts in group nodes | 25 | | `kube-install-ovn.yml` | `./inventory/all.local.generated` | Install and configure a k8s cluster with OVN network using all hosts in group nodes | 26 | | `kube-teardown.yml` | `./inventory/all.local.generated` | Runs `kubeadm reset` on all nodes to tear down k8s | 27 | | `vm-teardown.yml` | `./inventory/virthost/` | Destroys VMs on the virtual machine host | 28 | | `fedora-python-bootstrapper.yml` | `./inventory/vms.local.generated` | Bootstrapping Python dependencies on cloud images | 29 | 30 | *(Table generated with [markdown tables](http://www.tablesgenerator.com/markdown_tables))* 31 | 32 | ## Overview 33 | 34 | kube-ansible provides the means to install and setup KVM as a virtual host 35 | platform on which virtual machines can be created, and used as the foundation 36 | of a Kubernetes cluster installation. 
37 | 38 | ![kube-ansible Topology Overview](docs/images/kube-ansible_overview.png) 39 | 40 | There are generally two steps to this deployment: 41 | 42 | * Installation of KVM on the baremetal system and virtual machines instantiation 43 | * Kubernetes environment installation and setup on the virtual machines 44 | 45 | Start with configuring the `virthost/` inventory to match the required working 46 | environment, including DNS or IP address of the baremetal system, that will be 47 | installed and configured on the KVM platform. It also setup the network (KVM 48 | network, whether that be a bridged interface, or a NAT interface), and then 49 | define the system topology that needs to be deployed (e.g number of virtual 50 | machines to instantiate). 51 | 52 | All the above mentioned configuration is done by `virthost-setup.yml` playbook, 53 | which performs the virtual host basic configuration, virtual machine 54 | instantiation, and extra virtual disk creation when configuring persistent 55 | storage with GlusterFS. 56 | 57 | During the `virthost-setup.yml` a `vms.local.generated` inventory file is 58 | created with the IP addresses and hostname of the virtual machines. The 59 | `vms.local.generated` file can then be used with Kubernetes installation 60 | playbooks like `kube-install.yml` or `kube-install-ovn.yml`. 61 | 62 | ## Usage 63 | 64 | ### Step 0. Install dependent roles 65 | 66 | Install role dependencies with `ansible-galaxy`. This step will install the main 67 | dependencies like (go and docker) and also brings other roles that is required 68 | for setting up the VMs. 69 | 70 | ``` 71 | ansible-galaxy install -r requirements.yml 72 | ``` 73 | 74 | ### Step 1. Create virtual host inventory 75 | 76 | Copy the example `virthost` inventory into a new directory. 
77 | 78 | ``` 79 | cp -r inventory/examples/virthost inventory/virthost/ 80 | ``` 81 | Modify `./inventory/virthost/virthost.inventory` to setup a virtual 82 | host (If inventory is already present, please skip this step). 83 | 84 | ### Step 2. Override the default configuration if requires 85 | All the default configuration settings used by kube-ansible playbooks are present 86 | in the [all.yml](playbooks/ka-init/group_vars/all.yml) file. 87 | 88 | For instance by default kube-ansible creates one master and two worker node setup 89 | only (please refer to ordered list under `virtual_machines` in [all.yml](playbooks/ka-init/group_vars/all.yml)), 90 | but if HA cluster deployment (stacked control plane nodes) is required, 91 | edit the [all.yml](playbooks/ka-init/group_vars/all.yml) file and change the 92 | configuration to something on the line of 93 | 94 | ``` 95 | virtual_machines: 96 | - name: kube-lb 97 | node_type: lb 98 | - name: kube-master1 99 | node_type: master 100 | - name: kube-master2 101 | node_type: master_slave 102 | - name: kube-master3 103 | node_type: master_slave 104 | - name: kube-node-1 105 | node_type: nodes 106 | - name: kube-node-2 107 | node_type: nodes 108 | ``` 109 | Above configuration change will create 3 node HA cluster with 2 worker nodes and 110 | a LB node. 111 | 112 | You can also define separate vCPU and vRAM for each of the virtual machines with 113 | `system_ram_mb` and `system_cpus`. The default values are setup via `system_default_ram_mb` 114 | and `system_default_cpus` which can also be overridden if you wish different 115 | default values. (Current defaults are 2048MB and 4 vCPU.) 116 | 117 | 118 | > **WARNING** 119 | > 120 | > If you're not going to be connecting to the virtual machines from the same 121 | > network as your source machine, you'll need to make sure you setup the 122 | > `ssh_proxy_enabled: true` and other related `ssh_proxy_...` variables to 123 | > allow the `kube-install.yml` playbook to work properly. 
See next **NOTE** for 124 | > more information. 125 | 126 | ### Step 3. Create the virtual machines defined in [all.yml](./playbooks/ka-init/group_vars/all.yml) 127 | 128 | Once the default configuration is being changed as per the setup requirements, 129 | execute the following instruction to create the VMs and generate the final inventory 130 | with all the details required for Kubernetes installation on these VMs. 131 | 132 | > **NOTE** 133 | > 134 | > There are a few extra variables you may wish to set against the virtual host 135 | > which can be satisfied in the `inventory/virthost/group_vars/virthost.yml` 136 | > file of your local inventory configuration in `inventory/virthost/` that you 137 | > just created. 138 | > 139 | > Primarily, this is for overriding the default variables located in the 140 | > [all.yml](playbooks/ka-init/group_vars/all.yml) file, or overriding the default values 141 | > associated with the roles. 142 | > 143 | > Some common variables you may wish to override include: 144 | > 145 | > * `bridge_networking: false` _disable bridge networking setup_ 146 | > * `images_directory: /home/images/kubelab` _override image directory 147 | > location_ 148 | > * `spare_disk_location: /home/images/kubelab` _override spare disk location_ 149 | > 150 | > The following values are used in the generation of the final inventory file 151 | > `vms.local.generated` 152 | > 153 | > * `ssh_proxy_enabled: true` _proxy via jump host (remote virthost)_ 154 | > * `ssh_proxy_user: root` _username to SSH into virthost_ 155 | > * `ssh_proxy_host: virthost` _hostname or IP of virthost_ 156 | > * `ssh_proxy_port: 2222` _port of the virthost (optional, default 22)_ 157 | > * `vm_ssh_key_path: /home/lmadsen/.ssh/id_vm_rsa` _path to local SSH key_ 158 | 159 | **Running on virthost directly** 160 | ``` 161 | ansible-playbook -i inventory/virthost/ playbooks/virthost-setup.yml 162 | ``` 163 | 164 | **Setting up virthost as a jump host** 165 | ``` 166 | ansible-playbook -i 
inventory/virthost/ -e ssh_proxy_enabled=true playbooks/virthost-setup.yml 167 | ``` 168 | 169 | Both the commands above will generate a new inventory file `vm.local.generated` 170 | in `inventory` directory. This inventory file will be used by the Kubernetes 171 | installation playbooks to install Kubernetes on the provisioned VMs. For instance, 172 | below content is an example of `vm.local.generated` file for 3 node HA Kubernetes cluster 173 | 174 | ``` 175 | kube-lb ansible_host=192.168.122.31 176 | kube-master1 ansible_host=192.168.122.117 177 | kube-master2 ansible_host=192.168.122.160 178 | kube-master3 ansible_host=192.168.122.143 179 | kube-node-1 ansible_host=192.168.122.53 180 | kube-node-2 ansible_host=192.168.122.60 181 | 182 | [lb] 183 | kube-lb 184 | 185 | [master] 186 | kube-master1 187 | 188 | [master_slave] 189 | kube-master2 190 | kube-master3 191 | 192 | [nodes] 193 | kube-node-1 194 | kube-node-2 195 | 196 | 197 | [all:vars] 198 | ansible_user=centos 199 | ansible_ssh_private_key_file=/root/.ssh/dev-server/id_vm_rsa 200 | ``` 201 | > **Tip** 202 | > User can override the configuration values from command line as well 203 | 204 | ``` 205 | # ansible-playbook -i inventory/virthost.inventory -e 'network_type=2nics' playbooks/virthost-setup.yml 206 | ``` 207 | 208 | ### Step 4. Install Kubernetes on the instantiated virtual machines 209 | 210 | During the execution of _Step 3_ a local inventory file `inventory/vms.local.generated` 211 | should have been generated. This inventory file contains the virtual machines and their 212 | IP addresses. Alternatively you can ignore the generated inventory and copy the example 213 | inventory directory from `inventory/examples/vms/` and modify to your hearts 214 | content. 215 | 216 | This inventory file need to be passed to the Kubernetes Installation playbooks 217 | (`kube-install.yml \ kube-install-ovn.yml`). 
218 | 219 | 220 | ``` 221 | ansible-playbook -i inventory/vms.local.generated playbooks/kube-install.yml 222 | ``` 223 | 224 | > **NOTE** 225 | > 226 | > If you're not running the Ansible playbooks from the virtual host itself, 227 | > it's possible to connect to the virtual machines via SSH proxy. You can do 228 | > this by setting up the `ssh_proxy_...` variables as noted in _Step 3_. 229 | 230 | #### Options 231 | 232 | kube-ansible supports following options and these options can be configured in [all.yml](playbooks/ka-init/group_vars/all.yml): 233 | 234 | - `network_type` (optional, string): specify network topology for the virthost, each master/worker has one interface (eth0) in default: 235 | - `2nics`: each master/worker node has two interfaces: eth0 and eth1 236 | - `bridge`: add linux bridge (`cni0`) and move `eth0` under `cni0`. This is useful to use linux bridge CNI for Kubernetes Pod's network 237 | - `container_runtime` (optional, string): specify container runtime that Kubernetess uses. Default uses Docker. 238 | - `crio`: install [cri-o](https://cri-o.io/) for the container runtime 239 | - `crio_use_copr` (optional, boolean): (only in case of cri-o) set true if [copr cri-o RPM](http://copr.fedorainfracloud.org/coprs/s1061123/cri-o) is used 240 | - `ovn_image_repo` (optional, string): set the container image (e.g. `docker.io/ovnkube/ovn-daemonset-u:latest`) 241 | - `enable_endpointslice` (optional, boolean): set `True` if endpointslice is used instead of endpoints 242 | - `enable_auditlog` (optional, boolean): set `True` if auditing logs 243 | - `enable_ovn_raft` (optional, boolean): (`kube-install-ovn.yml` only) set `True` if you want to OVN with raft mode 244 | - `ovn_image_repo` (optional, string): Replace the url if image needs to be pull from other location. 
245 | 246 | > **NOTE** 247 | > 248 | > In case of `enable_ovn_raft=True`, you need to build your own image from the upstream `ovn-kubernetes` 249 | > repo and push it to your account and configure `ovn_image_repo` to point to that newly built image, 250 | > because the current official ovn-kubernetes image does not support raft. 251 | 252 | **Tip** 253 | Users can override the [all.yml](playbooks/ka-init/group_vars/all.yml) configuration values from the command line 254 | as well. Here's an example: 255 | 256 | - Install Kubernetes with the cri-o runtime, where each host has two NICs (eth0, eth1): 257 | 258 | ``` 259 | # ansible-playbook -i inventory/vms.local.generated -e 'network_type=2nics' -e 'container_runtime=crio' playbooks/kube-install.yml 260 | ``` 261 | 262 | Once ansible-playbook executes successfully, to verify the installation log in to the Kubernetes master 263 | virtual machine and run `kubectl get nodes` and verify that all the nodes are in a _Ready_ state. 264 | (It may take some time for everything to coalesce and the nodes to report back to the Kubernetes master node.) 265 | 266 | In order to log in to the nodes, you may need to `ssh-add 267 | ~/.ssh/vmhost/id_vm_rsa`. The private key created on the virtual host will be 268 | automatically fetched to your local machine, allowing you to connect to the 269 | nodes when proxying. 270 | 271 | > **Pro Tip** 272 | > 273 | > You can create a `~/.bashrc` alias to SSH into the virtual machines if you're 274 | > not executing the Ansible playbooks directly from your virtual host (i.e. 275 | > from your laptop or desktop). To SSH into the nodes via SSH proxy, add the 276 | > following alias: 277 | > 278 | > ``` 279 | > alias ssh-virthost='ssh -o ProxyCommand="ssh -W %h:%p root@virthost"' 280 | > ``` 281 | > It's assumed you're logging into the virtual host as the `root` user and at 282 | > hostname `virthost`. Change as required. 
283 | > 284 | > **Usage**: `source ~/.bashrc ; ssh-virthost centos@kube-master` 285 | 286 | ### Step 5. Verify the installation 287 | 288 | Once you're logged into your Kubernetes master node, run the following command 289 | to check the state of your cluster. 290 | 291 | ``` 292 | $ kubectl get nodes 293 | NAME STATUS ROLES AGE VERSION 294 | kube-master1 Ready master 18h v1.17.3 295 | kube-master2 Ready master 18h v1.17.3 296 | kube-master3 Ready master 18h v1.17.3 297 | kube-node-1 Ready 18h v1.17.3 298 | kube-node-2 Ready 18h v1.17.3 299 | ``` 300 | 301 | Everything should be marked as ready. If so, you're good to go! 302 | 303 | ## Example Setup and configuration instructions 304 | The following instructions create an HA Kubernetes cluster with two worker nodes and `OVN-Kubernetes` 305 | in Raft mode as a CNI. All these instructions are executed from the physical server where the virtual machines 306 | will be created to deploy the Kubernetes cluster. 307 | 308 | **Install requirements** 309 | 310 | ``` 311 | ansible-galaxy install -r requirements.yml 312 | ``` 313 | 314 | **Create inventory** 315 | ``` 316 | cp -r inventory/examples/virthost inventory/virthost/ 317 | ``` 318 | **Configure inventory** 319 | Content of `inventory/virthost/virthost.inventory` 320 | ``` 321 | dev-server ansible_host=127.0.0.1 ansible_ssh_user=root 322 | [virthost] 323 | dev-server 324 | ``` 325 | **Configure default values** 326 | Overridden configuration values in [all.yml](playbooks/ka-init/group_vars/all.yml) 327 | ``` 328 | container_runtime: crio 329 | virtual_machines: 330 | - name: kube-master1 331 | node_type: master 332 | - name: kube-node-1 333 | node_type: nodes 334 | - name: kube-node-2 335 | node_type: nodes 336 | # Uncomment following (lb/master_slave) for k8s master HA cluster 337 | - name: kube-lb 338 | node_type: lb 339 | - name: kube-master2 340 | node_type: master_slave 341 | - name: kube-master3 342 | node_type: master_slave 343 | 344 | ovn_image_repo: 
"docker.io/avishnoi/ovn-kubernetes:latest" 345 | enable_ovn_raft: True 346 | ``` 347 | 348 | **Create Virtual Machines for Kubernetes deployment and generate final inventory** 349 | ``` 350 | ansible-playbook -i inventory/virthost/ playbooks/virthost-setup.yml 351 | ``` 352 | This playbook creates the required VMs and generates the final inventory file (`vms.local.generated`). 353 | `virsh list` lists all the created VMs. 354 | 355 | ``` 356 | # virsh list 357 | Id Name State 358 | ---------------------------------------------------- 359 | 4 kube-master1 running 360 | 5 kube-node-1 running 361 | 6 kube-node-2 running 362 | 7 kube-lb running 363 | 8 kube-master2 running 364 | 9 kube-master3 running 365 | ``` 366 | 367 | Generated `vms.local.generated` file 368 | ``` 369 | # cat ./inventory/vms.local.generated 370 | kube-lb ansible_host=192.168.122.31 371 | kube-master1 ansible_host=192.168.122.117 372 | kube-master2 ansible_host=192.168.122.160 373 | kube-master3 ansible_host=192.168.122.143 374 | kube-node-1 ansible_host=192.168.122.53 375 | kube-node-2 ansible_host=192.168.122.60 376 | 377 | [lb] 378 | kube-lb 379 | 380 | [master] 381 | kube-master1 382 | 383 | [master_slave] 384 | kube-master2 385 | kube-master3 386 | 387 | [nodes] 388 | kube-node-1 389 | kube-node-2 390 | 391 | 392 | [all:vars] 393 | ansible_user=centos 394 | ansible_ssh_private_key_file=/root/.ssh/dev-server/id_vm_rsa 395 | ``` 396 | **Install Kubernetes** 397 | 398 | ``` 399 | ansible-playbook -i inventory/vms.local.generated playbooks/kube-install-ovn.yml 400 | ``` 401 | **Verify Setup** 402 | Log in to the Kubernetes master node 403 | ``` 404 | ssh -i ~/.ssh/dev-server/id_vm_rsa centos@kube-master1 405 | ``` 406 | 407 | Verify that all the nodes have joined the cluster 
kube-node-1 Ready 18h v1.17.3 416 | kube-node-2 Ready 18h v1.17.3 417 | 418 | $ kubectl version 419 | Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.3", GitCommit:"06ad960bfd03b39c8310aaf92d1e7c12ce618213", GitTreeState:"clean", BuildDate:"2020-02-11T18:14:22Z", GoVersion:"go1.13.6", Compiler:"gc", Platform:"linux/amd64"} 420 | Server Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.3", GitCommit:"06ad960bfd03b39c8310aaf92d1e7c12ce618213", GitTreeState:"clean", BuildDate:"2020-02-11T18:07:13Z", GoVersion:"go1.13.6", Compiler:"gc", Platform:"linux/amd64"} 421 | ``` 422 | 423 | 424 | # About 425 | 426 | Initially inspired by: 427 | 428 | * [k8s 1.5 on Centos](http://linoxide.com/containers/setup-kubernetes-kubeadm-centos/) 429 | * [kubeadm getting started](https://kubernetes.io/docs/getting-started-guides/kubeadm/) 430 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Release v0.5.0 2 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/7c71ca1f59b4f0670f1dc8cb9dfb527476e230de) -- [refactor] Point redhat-nfvpe.vm-spinup at develop 3 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/55a5a3dddde441bb8cbcd1f3ea47eee708a202ff) -- [hotfix] Resolve issues introduced in 37da9afa49548852b63135278f125e09d8795cdf 4 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/f261c4e859be31deea138d5c88894a2ad41aac1b) -- Refactor playbook location to match openshift-ansible 5 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/5370f922e2adb2de1e3bb7b675d1ad8505eb93c9) -- [hotfix] Fix missing all.yml in ka-init 6 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/6bb1c3102a3804b37b3081f211f2ac3072fb5fd9) -- [monitoring] Add Prometheus-Operator role and playbooks 7 | * [view 
commit](http://github.com/redhat-nfvpe/kube-ansible/commit/b8302877338c886fb247d38ef45f4f1f460e40f5) -- [hotfix] Point PromOps at redhat-nfvpe fork 8 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/f2df919c0988525c3d7dfe9709bdfd8ae8bdc7fd) -- [enhance] Make GlusterFS volume template contents dynamic 9 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/3e0b7294d72586bb7bae368cba2e334f2abf9179) -- [enhance] Allow multiple StorageClass creation 10 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/69eedeae848a93de49d2312cc202e3910bf4c40e) -- [enhance] Template and deploy PromOps via Ansible 11 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/e026982eaf65829faca40c38d9975e81d38a6cf1) -- [release] Point roles back at master components 12 | 13 | # Release v0.3.0 14 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/05d23e2b70863f718df0e43e5958ce58aeca48a3) -- Allow k8s ctrlplane listen all interfaces 15 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/8ec7f8424453a493c8b314237952d05407dbb8f3) -- Add more available value candidate in 'pod_network_type' 16 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/6a007057d8d095a1f66ebd792224ecf3a7c52713) -- [kubelet] Enable authorization token webhook 17 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/e9f79a51f6f71e722a78fc693921ab876ff68451) -- [bugfix] Addresses #180 by templating kubeadm config file, and including the pod network cidr within 18 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/e0d2d230356a4119be076a0a946ebaad01cec50b) -- [bugfix] Adds kubelet & runtime cgroup params to fix issue reported in #195 19 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/a59fe2455b800735ab4ef08bd56987897ee276ee) -- [flannel][bugfix] Fixes flannel for use against Docker 1.13 (works with 1.12 as well), reported in #200 20 | * [view 
commit](http://github.com/redhat-nfvpe/kube-ansible/commit/fe95355af5b2fc12843861b6e80cb90281ba594a) -- [hotfix] Fixes for flannel configurations for Multus & Multus+CRD 21 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/d382900828440f1ed1db9dc5f9ac286c2cc3543a) -- [changelog] Update CHANGELOG for 0.2.1 release 22 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/ab033082049276ef19c1915127f85bcbcc94e837) -- [release_mgmt] Update changelog script to script trailing spaces 23 | 24 | # Release 0.2.1 25 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/300e9bdc2909291d22c7ed2e7f196dd5162d5930) -- Allows builder.yml to pass ansible-lint 26 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/3757fe7b17baeebf6ba9b638c99ac3490bba948d) -- [vm-spinup][delete] remove roles 27 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/b56432dbfff55ea6da5b30e2a7908ba53e067ac2) -- [vm-spinup] terse changes to allow use of new role 28 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/10b73f0661cfbf96233637d940a335fb073f0d09) -- [enhance] Provide method to only sync artifacts 29 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/2def04e121c2a86efbe06bf64664f20b6b9d890b) -- [enhance] Only sync artifacts from list if they exist 30 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/05c802cfd81676f3e1e78e933250787d35c6632c) -- [enhance] Allow instantiation of k8s clusters from built artifacts 31 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/84edc80775d0f4b8266bc81c6c7917d64ad7620c) -- [hotfix] Artifact build fixes detected in testing 32 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/e4511ca31b9b204c72552adcfcb143376e9a7333) -- Bridge network support in kube-ansible 33 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/c90397ef51d93ca9d22e256e7f2c49e50d50351f) -- Add kokonet-bridge 
support 34 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/b417f2af7e6d5cf1eaef65f8a00c243ae95277b7) -- Incorporate Leif's comments in PR #169. 35 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/3966723affcca04d4c1a255e5ed4d5b0afd59f7f) -- Incorporate Feng's comments. 36 | 37 | # Release v0.2.0 38 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/25feeffc78ff312892709e0e70643e5a196c70b8) -- Use Docker CE repo for builder VM 39 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/2d59aa57b7bbe534652e840d83a2a75315c62066) -- Update project name to kube-ansible 40 | 41 | # Release v0.1.8 42 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/662b0fff8d75200f95ffd3af4097f6c2efa28dbc) -- [typos][minor] Replaces kubadm with kubeadm 43 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/077b4eb2ff43e367e122a646ed894308f70d3525) -- [feature][optional-packages] Adds role that can handle installing optional packages should deployers wish to extend the packages available on master & nodes 44 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/7f703b41c67e39084c2b405af47dc43687b031cb) -- [hotfix][multus-crd] Typo in template name, more appropriate CNI config name: param 45 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/4400aa8990c10dcdd610c16400bf6b98711d81ab) -- [bugfix][multus-cni] Missing customized clusterrole for node api calls 46 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/bf4991b7755dd6e06c7b1c1789110c8b5d41f036) -- Copies artifacts from the builder to the k8s nodes 47 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/1267af8c833a18d78045837d7f258529f616c092) -- Adds group_vars/builder.yml 48 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/3d00560945a4afb8d74a738bf6cb4214d9ee76c0) -- Cleans up Ansible linting for virthost-setup 49 | * [view 
commit](http://github.com/redhat-nfvpe/kube-ansible/commit/303cfbf7f83ce57e1c1305738e88985beee0f60d) -- [hotfix] Revert erroneous lint fix in attach disks 50 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/f78e4eebb0cedbb630905edd7d3eb121343ab056) -- [feature][minor] adds variable to skip virthost depedencies 51 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/460b9720420dcb079b4be6efed35230ffd05ed2b) -- Avoids duplicating builder plays 52 | 53 | # Release v0.1.7 54 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/ba786202b0fcf7916fb2fb131e713a3a18f5e80f) -- Allow customization of VM vCPU and vRAM values 55 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/a742a2221df5cf614fe963da45caa812f3afbd9d) -- [WIP] Update to using install-go external role 56 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/704e91e42307d9759f43b1c6fb7e3a2f06cba37c) -- Drop when clause in var include for kube-install 57 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/809bf2b738fb5ce6e662bcea8865c9d7ce12c540) -- Migrate to using install-docker role 58 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/5e579caba56761a17cd33a29386161439c33be3a) -- Fix proxy ssh failure in vms.local.generated 59 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/f0443b46c343b0d52b06871934af3eab836f114a) -- Adds ability to build k8s 60 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/9317b6ef1f46f587403c434d688ac8659d182986) -- Moves additional scenarios and usage 61 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/9b04578b38ad99488f1c67b18f45651df1f45c2a) -- Fix link to IPv6 documentation 62 | * [view commit](http://github.com/redhat-nfvpe/kube-ansible/commit/35090f565a96892593c4cae90b428f0fe62c2422) -- Changes related to release v0.1.7 63 | 64 | # Release 0.1.6 65 | * [view 
commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/44136f7f4af41d4af94e3872726f206635e3f821) -- Convert kube-centos-ansible to kucean 66 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/920fc8381410a2ea96601c2947c72c60303b2310) -- Avoid use of ansible_host and machine UUID 67 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/03408112e2d93b1ff109f25196f19243e0ba2cf2) -- [feature] adds 'binary install' option to override kubelet, kubectl and kubeadm with custom binaries. addresses #81 68 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/199d37964987151666aeffe15fc38167e33d36cc) -- Drop the use of all_vms 69 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/2071e69431d56d90d5ffa457cd0ddcaaeb384235) -- Add new role to gather kube niceties 70 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/1a0e5305c5d7722f1cf7477538669357b49bf615) -- Load virthost privkey onto source machine 71 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/186696d759cbbcdd99c8b250276c49e6a491d4a6) -- Drop old GlusterFS playbooks 72 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/8464f881d7398286c8877cff9bbd111e6b0bbe74) -- Enhance documentation and usage experience 73 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/aea24e7a22e600d3a156c67f28f12621788a1888) -- [HotFix] Complete missing documentation for kubectl 74 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/d8ff428e0074629387160f6f4a12194fcb793b7b) -- Add LICENSE file to the project 75 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/0822a412af3905c337db4d498b5a59a7816b3031) -- [multus][upgrade] Adds latest Multus including CRD functionality. 
Address #78 and #72 76 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/509ca38a99370137b0ae4c6f369b4952d648c4cc) -- Add AUTHORS and CHANGELOG 77 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/38dca9194515bb1a61a017b21f9b6ef11f351da4) -- Add base IPv6 lab functionality (#106) 78 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b1db349673aa565227a1662e7b56e3726016aaef) -- [ipv6][docs][bugfix] Has some required (minor) fixes to get the IPv6 deploy running and includes some docs to help one initially get there 79 | 80 | # Release 0.1.5 81 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/a916eb5697e5d6fbc7d510c7184f27e5f0511648) -- [bugfix][workaround] workaround to get proper kubectl version 82 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/2711d41882db0c2ae10a329c86d21a7a5f73d69c) -- Lock version of kubernetes 83 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/9fee8152ed499d541f1790caff30f03d620f3a22) -- Fix port for ssh proxy 84 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/179c18f84388dfc24a0feb5e7ab64360da0a4f66) -- Add ssh_proxy_port and tag k8s to a known working version 85 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/259cf6426b36e94637d2034561ab1dfd95baaf07) -- [kube1.8] updates to have a working glusterfs in kubernetes 1.8 per #63 86 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/fd626f2c71d484fcd6cf05c5e7b69cc7ab8c1f75) -- [kube1.8] updates to use systemd module for kubectl-proxy service, removes blank file 87 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/9c5a6574c00283ee3de233bd4a8ae236d996b277) -- [minor] updates to latest stable Heketi 88 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/41e4e09606454cd3ce705dbf7d09ac3d25b30155) -- Clean up some 
missing values 89 | 90 | # Release 0.1.4 91 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/96be541ae41062c1be30391bb99315219c2a7a33) -- [docs] sweeping find & replace of 'minion' with 'node' 92 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/c40c9ea706353b678a35ab4e32005a4d687bd0b2) -- [bugfix][glusterfs] oops missed removing environment after refactoring admin.conf for kube 93 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/c6e58b212caa7b0b7de51b743d9c785f97273934) -- [gluster][workaround] Works around upstream gluster-kubernetes with pinning heketi image tag 94 | 95 | # Release 0.1.3 96 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/7ce47b6ae3738eb6b3992bc7be7c0b1dc4e3c7d1) -- [crio][bugfix] Turns out with Fedora, you sometimes need to expand the root disk, on the VM side 97 | 98 | # Release 0.1.2 99 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/12b6c616b0b10dd17ef933c8eb738e3ef0640738) -- Further cleanup of the inventory 100 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/27e8fad9748ebaecd23f6538f3da4efc666d90da) -- [hotfix][version-pin] update for pinning gluster-kubernetes 101 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/415a48e27cf5240d5bb03769f899dcb0fedaccda) -- Remove VM disk image on teardown 102 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/976b376cbd80ed4290380023eac4ea59fbce982b) -- [hotfix][minor] update spare disk creation to use qemu-img to speed it up 103 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/d86f8357fa465654a06601220f095e0dca341d93) -- [hotfix] Revert minion 3 delete 104 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/5ab66a4dae960c8723cca5f33a307f300dab4c4c) -- [bugfix][glusterfs] Pins version to last known good commit, by Doug's analysis 105 | * [view 
commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/6c31e6d2525876031d99fbb32e71f99148da4a51) -- [hotfix][minor] Drop hard coded TLD hostname 106 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/20573ffcb84452bdb4b25c391bdf5fb4a15c303e) -- [major] Dynamically generate and control VM inventory 107 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/2def0f63bce924e27ba34a4d0deaa01881aadfac) -- [hotfix] Revert breaking indent 108 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b5d12cbbffa7ce54630123e23a13c69ced21b898) -- [gluster] defaults to running attach disk in virt-host-setup 109 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/338f1d28f7dfa7d113babf76057059e949b3afe8) -- [hotfix] Don't become root when it breaks installation 110 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/5558ba75c1840a143fbc0c02d331cb3b420be22e) -- [minor] Clean up some documentation errors 111 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/a54e65619fd883790624d9f07b1d5a482de21bea) -- [crio] updates to later cri-o tag for kube 1.7 112 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/f0ed4a8d30b3505f0e327d3882a4359d18abb50b) -- [minor][refactor] remove dictionary for vm parameters (makes it easier on adding extra vars) 113 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/120d7e3f2798efccdb25c27f17f0f29c1a549c1f) -- [crio] adds example for crio with buildah 114 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/cc1369b9cf9d1420205378cf14a78686ef4383f4) -- [crio][refactor] Addresses #52 for refactor to support Fedora (specifically to address Buildah requirements) 115 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/00092dd59a0fc9a291287665f9e2739ecf838e07) -- [crio][docs] update docs for readme clarity / voice, and 
clean up inventory 116 | 117 | # Release 0.1.1 118 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b10f9912a4101fec790216e65c8f5a8ced7d6e18) -- [minor][inventory] current inventory style 119 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/a4380c09246ff6982b24aff1e7614c5440df1de0) -- [minor] adds a handy-dandy get-ip script for getting IPs of VMs 120 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/d3ab8b01ad0c4ef999bc627f04034c6e3d6333b2) -- [crio] updates for crio, trying to get to work with 1.7 but didn't work 121 | 122 | # Release 0.1.0 123 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/ae9f7f4f51fb52377589ec439520a6134243d5bd) -- [glusterdynamic][stub] stubbing in json template for glusterfs topology 124 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/3ed501088e81da551ad68fe8eaa96f1935abd40f) -- [glusterdynamic][increment] has basic steps, but, erroring out at gluster-kubernetes 290 125 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/07521d9cc6fb62ba132ff4bff567a664d952c2a7) -- [hotfix] Adds kubernetes version specification capability 126 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/a716993ea75ab8d31511e12754381063118df247) -- [glusterdynamic][significant] allows for manual running of gk-deploy which finishes successfully (unverified) 127 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/3c01fef4ea322863ce906a37ff1cbf2991f34b85) -- [glusterdynamic] steps for running the gk-deploy and creating storage class 128 | 129 | # Release 0.0.6 130 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/3cc2ea6b25edb2e68917f16c49d5f708597d3f05) -- [ansible23][minor] ignore errors in undefine all vms play 131 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/2828f3ed92700435173e652e456d3b888f3f852b) -- 
[ansible23] fixes for virt host setup 132 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/ab155a269018dfafa08041c2b76b0b8c115fd21f) -- [ansible23] syntax fixes for deprecation of jinja2 in conditionals 133 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/bd1f113d8fe74f6bfca9aa3ffd24dce7d3bded6a) -- [bugfix][minor] missing refactor for group vars in gluster-recreate-volumes playbook 134 | 135 | # Release 0.0.5 136 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/463605c0254ff79369c4b81826cabbccc42d802d) -- Clean up some Ansible lint issues 137 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b827da2c3a6ca43e44639e36737361641b3ae0dd) -- Break out firewall service name 138 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/0852798e81caf69add9495bb723c8716fca13862) -- [bridge-networking] adds option to use bridged networking instead of NAT'd 139 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/db901b322c15198641715ada1b83ae30982f5c99) -- [vars] change default to bridged networking 140 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/9c27633212a85010aeefb995f64cf209fafe07fa) -- [minor] proper conditional for get ips for bridge networking 141 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b6fd6e681c5c7c02c8062cca42ca126c83d45647) -- [bugfix] sets /proc/sys/net/bridge/bridge-nf-call-iptables in tasks 142 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/54fad01761991b02f95495a03fac57d636974239) -- [docs] updates readme to remove 1.5 reference 143 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/9e651231888791dba62512ba976a67b0b2831377) -- [minor] skip network restart when bridge network is not freshly templated 144 | * [view 
commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/125b3179bda867443f0825dd1a509ad1dd9eaa29) -- [vars] adds option to skip-preflight-checks when necessary (in this case to bypass kubeadm join bug in kube 1.7.2 dist) 145 | 146 | # Release 0.0.4 147 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/34f72e065f7342cd8cc0d90cc887f13b4a645108) -- [crio][stub] mostly stubs in from existing crio ansible playbook 148 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b6c0792d821474d2a50366b16587075699b90568) -- [cri-o][significant] has a running cri-o, but, no ancillary tools 149 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/cf3e1373fc9055c46e8e2fcfeed25115a052e2fe) -- [buildah] has a successful buildah install (it doesn't do much for now) 150 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/d8da31e344147ce06c8b7a7dbb951b2974bfb6f5) -- [cri-o][bugfix] fixes cri-o build error (the one requiring a second run), disables buildah for now 151 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/e13802cbea4ddaa4ef128983ac4c9a5236dad639) -- [cri-o] cleanup inventory and vars 152 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/bff2a4e2744b407358bf1c8a3fe8da89e3ae3861) -- [cri-o][docs] updates readme for extra var 153 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/c31b0edfb2e22b7b6174e5aba2e7e4c60e32cf74) -- Allow for easier variable overrides 154 | 155 | # Release 0.0.3 156 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/29a2117fb4cb8f819266b243c2e111f416692b12) -- [glusterfs] has playbook for attaching spare disk to VMs 157 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/a0cb7dab8ec46cd410eafe02e6fa057d18d36d39) -- [glusterfs] has creation of 'bricks' the xfs physical volumes & volume groups 158 | * [view 
commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/f8593687f17c8e64305c30d1d1148283795dd157) -- [glusterfs] has peers joining 159 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/6bdcf20dea40de134084e87ee38866d32aa37e01) -- [glusterfs][significant] has a working glusterfs cluster 160 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/5dc97de635ddba4fe98d4d9d5a9a37ad70f2e12d) -- [glusterfs] has templates put on disk on master (but not applied) 161 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/30da8d2bf7ebc4dd1e6bf031397d45b5c11ce5a6) -- [glusterfs] breaks out volume creation from peer probe to create multiple glusterfs volumes 162 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/e4fc92d50aab5f662900e608ea1912ccd105578c) -- [glusterfs][minor] changes reclaim policy to delete from recycle (recycle failed, and delete makes sense for a prototype) 163 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/4d834710b53c2e034f2e1e6dafb65cbdbba8c258) -- [glustefs][minor] adds storage class to try to work around glitch 164 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/81cdb80965771c2edbb92bdbda42b96d613b1676) -- [glusterfs] add playbook just for recreating volumes (otherwise, difficult to delete and start fresh) 165 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/916a74b872bc6ea4c3d09f2ffb3aa4c2aaac6975) -- [glusterfs] has proper method for actually deleting date when recreating volumes 166 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/6da71bd3b6f091c13b743e674b11cf19b1992275) -- [glusterfs][oops] didn't have proper volume names in volumes yaml 167 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/6c287e992a7c37e6a3a7b3fc9bcbf7b46b4fd0c3) -- [glusterfs][oops] I had a debug when in there, needed to flip it back, also had 
wrong template name 168 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/d3a0a900cba81f4882748ebc73a7b30d52a48dd2) -- Convert yum to package 169 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/0b08998cf2449ec15a99aad8051ee7ec49c8ce47) -- Clean up use of mustaches in when conditionals 170 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/c0de028b17513d68fce9b95950c0ee15d11844f9) -- Ignore everything in inventory (except samples) 171 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/8962459faa68af1a53b351f1e8ecd91e85d5b385) -- Also need to ignore the inventory/ directory... 172 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/98dd9a5842850073b1ee77576c9554aee794b170) -- Revert "Clean up use of mustaches in when conditionals" 173 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/c2b18cb8de1fee5365f674ebcf94c77426274c5e) -- Add ansible.cfg to avoid hostkey checking 174 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/58ae61d2ae97966eb18923c5a685c2518d0959d3) -- [bugfix] fixes #6 for bridge-nf-call-iptables 175 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/c94b6ee7a7939fca1783676bc1fb25633704dbbd) -- [fixbridge][bugfix] wrong spot for changing /proc/sys/net/bridge/bridge-nf-call-iptables 176 | 177 | # Release 0.0.2 178 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/138ddf4e4b54731a44d3de9dc6b009065e1184f0) -- [increment][no-cni] has an install generally working, no e2e test complete with it. 
DNS pod not coming up, flannel not coming up 179 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/b81f47123ac9a216245b452bac37cd0aa22a9c3a) -- [upgrade16] adds flannel rbac, and has had a successful run getting flannel pods up (and dns pod) but can't curl an nginx pod, pods can't reach wan 180 | * [view commit](http://github.com/redhat-nfvpe/kube-centos-ansible/commit/258fdb38ccd7dbd584ff917bdbff80c362a9ff7f) -- [upgrade16] has a working k8s 1.6.1 beta install, downgraded docker to 1.12.x 181 | --------------------------------------------------------------------------------