├── meta
│   └── main.yml
├── inventories
│   ├── group_vars
│   │   ├── all.yml
│   │   ├── mycluster_worker.yml
│   │   ├── mycluster_master.yml
│   │   ├── mycluster.yml
│   │   └── rke_cluster_rancher.yml
│   ├── host_vars
│   │   ├── localhost.yml
│   │   ├── rancher_mycluster.yml
│   │   └── cluster_rancher.yml
│   └── site
├── roles
│   ├── docker
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   └── docker_setup.yml
│   │   ├── defaults
│   │   │   └── main.yml
│   │   └── README.md
│   ├── firewalld
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   └── firewall.yml
│   │   └── README.md
│   ├── rancher_keepalived
│   │   ├── tasks
│   │   │   ├── main.yml
│   │   │   └── configure-keepalived.yml
│   │   ├── files
│   │   │   ├── psp:privileged.yml
│   │   │   ├── default:psp:privileged.yml
│   │   │   ├── cm-local-notify.yml
│   │   │   └── cm-cloudscale-notify.yml
│   │   ├── templates
│   │   │   └── ds_failover.yml.j2
│   │   ├── defaults
│   │   │   └── main.yml
│   │   └── README.md
│   ├── rke_rancher_clusters
│   │   ├── tasks
│   │   │   ├── rke-config.yml
│   │   │   ├── rke-up.yml
│   │   │   ├── dependencies.yml
│   │   │   ├── main.yml
│   │   │   ├── rancher-initial-setup.yml
│   │   │   └── rancher.yml
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── README.md
│   │   └── templates
│   │       └── cluster.yml.j2
│   └── custom_k8s_cluster
│       ├── tasks
│       │   ├── main.yml
│       │   ├── nodes.yml
│       │   └── cluster.yml
│       ├── defaults
│       │   └── main.yml
│       ├── templates
│       │   └── cluster.json.j2
│       └── README.md
├── plays
│   ├── site.yml
│   ├── prepare_k8s_nodes.yml
│   ├── deploy_rancher.yml
│   ├── deploy_k8s_cluster.yml
│   └── cleanup_k8snodes.yml
├── .gitignore
├── Pipfile
├── ansible.cfg
├── README.md
└── Pipfile.lock
/meta/main.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /inventories/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | ansible_user: centos -------------------------------------------------------------------------------- /inventories/group_vars/mycluster_worker.yml: -------------------------------------------------------------------------------- 1 | k8s_roles: 2 | - worker -------------------------------------------------------------------------------- /inventories/group_vars/mycluster_master.yml: -------------------------------------------------------------------------------- 1 | k8s_roles: 2 | - controlplane 3 | - etcd -------------------------------------------------------------------------------- /inventories/group_vars/mycluster.yml: -------------------------------------------------------------------------------- 1 | k8s_labels: [] 2 | # - name: env 3 | # value: int 4 | -------------------------------------------------------------------------------- /inventories/group_vars/rke_cluster_rancher.yml: -------------------------------------------------------------------------------- 1 | k8s_roles: 2 | - controlplane 3 | - etcd 4 | - worker -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: basic docker setup 4 | import_tasks: docker_setup.yml 5 | -------------------------------------------------------------------------------- /inventories/host_vars/localhost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # don't use sudo for localhost 3 | ansible_become: false 4 | become: false 5 | -------------------------------------------------------------------------------- /plays/site.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: 
prepare_k8s_nodes.yml 3 | - import_playbook: deploy_rancher.yml 4 | - import_playbook: deploy_k8s_cluster.yml 5 | -------------------------------------------------------------------------------- /roles/firewalld/defaults/main.yml: -------------------------------------------------------------------------------- 1 | k8s_roles: 2 | - controlplane 3 | - etcd 4 | - worker 5 | enable_firewalld: true 6 | manage_rancher_related_firewalld_rules: true -------------------------------------------------------------------------------- /plays/prepare_k8s_nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: playbook to install/prepare k8s nodes for Rancher 3 | hosts: rancher_k8s_nodes 4 | roles: 5 | - role: docker 6 | - role: firewalld 7 | -------------------------------------------------------------------------------- /inventories/host_vars/rancher_mycluster.yml: -------------------------------------------------------------------------------- 1 | custom_k8s_cluster_api_key: "" # Create a new token on your Rancher Control Plane 2 | custom_k8s_cluster_rancher_host: "" # URL of your Rancher control Plane -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /.cache 2 | *.retry 3 | .vscode 4 | 5 | # Files downloaded or created during Rancher Plays 6 | plays/helm 7 | plays/kubectl-* 8 | plays/rke-* 9 | plays/linux-amd64/* 10 | plays/custom_k8s_cluster/* 11 | -------------------------------------------------------------------------------- /plays/deploy_rancher.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: playbook to install a k8s cluster with rke and deploy Rancher Control Plane 3 | hosts: rke_rancher_clusters 4 | gather_facts: no 5 | roles: 6 | - role: rke_rancher_clusters -------------------------------------------------------------------------------- /roles/rancher_keepalived/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Configure keepalived 4 | delegate_to: localhost 5 | import_tasks: configure-keepalived.yml 6 | tags: 7 | - keepalived_installation 8 | when: 9 | - keepalived_enabled 10 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | ansible = ">2.9,<4.3" 8 | openshift = "*" 9 | jmespath = "*" 10 | selinux = "*" 11 | 12 | [dev-packages] 13 | pylint = "<2.0.0" 14 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/tasks/rke-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create RKE Config 4 | template: 5 | src: cluster.yml.j2 6 | dest: "{{ rke_cluster_config }}" 7 | mode: 0666 8 | vars: 9 | cluster: "{{ groups['rke_' + inventory_hostname] }}" 10 | changed_when: false -------------------------------------------------------------------------------- /roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for docker_buildnode 3 | docker_required_packages: 4 | - git 5 | - yum-utils 6 | - device-mapper-persistent-data 7 | - lvm2 8 | - pygpgme 9 | 10 | # 
See https://github.com/rancher/install-docker for available versions. 11 | docker_version: "19.03" 12 | -------------------------------------------------------------------------------- /plays/deploy_k8s_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Gather facts from cluster nodes 3 | hosts: rancher_k8s_nodes 4 | tasks: 5 | - setup: 6 | 7 | - name: playbook to install a custom k8s cluster with a Rancher Control Plane 8 | hosts: custom_k8s_clusters 9 | gather_facts: no 10 | roles: 11 | - role: custom_k8s_cluster -------------------------------------------------------------------------------- /roles/rancher_keepalived/files/psp:privileged.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: psp:privileged 5 | namespace: ipfailover 6 | rules: 7 | - apiGroups: 8 | - extensions 9 | resourceNames: 10 | - unrestricted-psp 11 | resources: 12 | - podsecuritypolicy 13 | verbs: 14 | - use 15 | -------------------------------------------------------------------------------- /roles/rancher_keepalived/files/default:psp:privileged.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: default:psp:privileged 5 | namespace: ipfailover 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: psp:privileged 10 | subjects: 11 | - kind: ServiceAccount 12 | name: default 13 | namespace: ipfailover 14 | -------------------------------------------------------------------------------- /inventories/host_vars/cluster_rancher.yml: -------------------------------------------------------------------------------- 1 | rancher_failover_ip: [] 2 | # - vip: 192.168.1.1 3 | # router_id: 50 4 | # master: rancher01 5 | rancher_failover_ipv6: [] 6 | 7 | certmanager_enabled: true 8 | rancher_letsencrypt_email: my@email.com # Only needed when certmanager_enabled is true 9 | 10 | rancher_hostname: "" # Make sure this is set 11 | rancher_admin_password: "mys3cret" # local admin user account 12 | -------------------------------------------------------------------------------- /roles/firewalld/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install firewalld 4 | yum: 5 | name: "firewalld" 6 | state: present 7 | when: enable_firewalld 8 | 9 | - name: Enable and start firewalld 10 | systemd: 11 | state: started 12 | name: firewalld 13 | enabled: yes 14 | when: enable_firewalld 15 | 16 | - name: Configure Firewall 17 | import_tasks: firewall.yml 18 | when: manage_rancher_related_firewalld_rules 19 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/tasks/rke-up.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "RUN rke up" 3 | command: "{{ rke_cluster_bin }} up --config {{ rke_cluster_config }}" 4 | register: rke_up_result 5 | changed_when: false 6 | 7 | - name: "rke up Result" 8 | debug: 9 | msg: "{{ rke_up_result.stdout }}" 10 | when: rke_up_result is not skipped 11 | 12 | - name: Check that KubeConfig exists 13 | stat: 14 | path: "{{ rke_cluster_kube_config }}" 15 | register: kubeconfig_result -------------------------------------------------------------------------------- /inventories/site: 
-------------------------------------------------------------------------------- 1 | ### Special Groups for RKE & Rancher cluster ### 2 | # Dummy Hosts representing a cluster 3 | 4 | [custom_k8s_clusters] 5 | rancher_mycluster # Belongs to Ansible Group mycluster 6 | 7 | [rke_rancher_clusters] 8 | cluster_rancher # Belongs to Ansible Group rke_cluster_rancher 9 | 10 | ### Hosts 11 | 12 | [rancher_k8s_nodes:children] 13 | rke_cluster_rancher 14 | mycluster 15 | 16 | [mycluster:children] 17 | mycluster_master 18 | mycluster_worker 19 | 20 | [mycluster_master] 21 | master01 22 | 23 | [mycluster_worker] 24 | worker01 25 | 26 | [rke_cluster_rancher] 27 | rancher01 28 | rancher02 29 | rancher03 -------------------------------------------------------------------------------- /roles/firewalld/README.md: -------------------------------------------------------------------------------- 1 | ansible-role-firewalld 2 | ================== 3 | 4 | Node preparation for hosts to be used with RKE 5 | 6 | Requirements 7 | ------------ 8 | 9 | Only uses python and ansible. 10 | 11 | 12 | Role Variables 13 | -------------- 14 | 15 | ```yaml 16 | --- 17 | k8s_roles: 18 | - controlplane 19 | - etcd 20 | - worker 21 | enable_firewalld: true 22 | manage_rancher_related_firewalld_rules: true 23 | ``` 24 | 25 | Dependencies 26 | ------------ 27 | 28 | * none 29 | 30 | License 31 | ------- 32 | 33 | GPLv3 34 | 35 | Author Information 36 | ------------------ 37 | 38 | * Sebastian Plattner (plattner@puzzle.ch) 39 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/tasks/dependencies.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Download RKE Binary from Github 3 | get_url: 4 | url: "{{ rke_binary_download_url }}" 5 | dest: "./{{ rke_cluster_bin }}" 6 | mode: 0777 7 | changed_when: false 8 | 9 | - name: Download kubectl binary 10 | get_url: 11 | url: "{{ kubectl_binary_download_url }}" 12 | dest: "{{ kubectl_binary }}" 13 | mode: 0777 14 | changed_when: false 15 | 16 | - name: Retrieve helm binary archive 17 | unarchive: 18 | src: "{{ helm_binary_download_url }}" 19 | dest: "./" 20 | remote_src: yes 21 | changed_when: false 22 | check_mode: no 23 | 24 | - name: Move helm binary into place 25 | copy: 26 | src: "linux-amd64/helm" 27 | dest: "{{ helm_binary }}" 28 | mode: 0755 29 | changed_when: false 30 | -------------------------------------------------------------------------------- /roles/custom_k8s_cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create or get Cluster 4 | delegate_to: localhost 5 | import_tasks: cluster.yml 6 | 7 | - name: Nodes 8 | delegate_to: localhost 9 | import_tasks: nodes.yml 10 | 11 | - name: Configure keepalived 12 | include_role: 13 | name: rancher_keepalived 14 | public: yes 15 | vars: 16 | keepalived_cluster_group_inventory_name: "{{ custom_k8s_cluster_group_inventory_name }}" 17 | keepalived_deployment_on_custom_cluster: true 18 | keepalived_deployment_rancher_api: "{{ custom_k8s_cluster_rancher_api }}" 19 | keepalived_deployment_rancher_api_key: "{{ custom_k8s_cluster_api_key }}" 20 | keepalived_deployment_rancher_api_verify_ssl: "{{ custom_k8s_cluster_verify_ssl }}" 21 | keepalived_deployment_rancher_cluster_id: "{{ cluster.id }}" 22 | -------------------------------------------------------------------------------- /ansible.cfg: 
-------------------------------------------------------------------------------- 1 | [defaults] 2 | forks = 20 3 | 4 | gathering = smart 5 | fact_caching = jsonfile 6 | fact_caching_connection = .cache/facts 7 | fact_caching_timeout = 86400 8 | 9 | retry_files_enabled = False 10 | roles_path = roles 11 | 12 | # Use the YAML callback plugin. 13 | stdout_callback = yaml 14 | # Don't use the stdout_callback when running ad-hoc commands. e.g: ansible -m setup 15 | bin_ansible_callbacks = False 16 | 17 | # don't automatically convert "false" string to bool, use |bool filter if required. 18 | conditional_bare_variables = False 19 | 20 | [privilege_escalation] 21 | become=True 22 | become_method=sudo 23 | become_user=root 24 | become_ask_pass=False 25 | 26 | [ssh_connection] 27 | ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 28 | pipelining = true 29 | 30 | [inventory] 31 | enable_plugins = ini 32 | -------------------------------------------------------------------------------- /roles/docker/README.md: -------------------------------------------------------------------------------- 1 | ansible-role-docker 2 | ============================= 3 | 4 | This role is used to deploy the Docker engine on a CentOS/RHEL system. 5 | 6 | ToDo 7 | 8 | Requirements 9 | ------------ 10 | 11 | To be able to use this role you need the following: 12 | * ansible 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Most of the vars should be quite self-explanatory. 18 | 19 | ```yaml 20 | --- 21 | docker_packages: 22 | - git 23 | - yum-utils 24 | - device-mapper-persistent-data 25 | - lvm2 26 | docker_pkg_repo: "https://download.docker.com/linux/centos/docker-ce.repo" 27 | docker_package: docker-ce 28 | ``` 29 | 30 | Dependencies 31 | ------------ 32 | 33 | None 34 | 35 | Example Playbook 36 | ---------------- 37 | 38 | A simple example of how to deploy the Docker engine: 39 | 40 | ```yaml 41 | - name: docker engine 42 | hosts: docker_nodes 43 | roles: 44 | - role: docker 45 | ``` 46 | 47 | License 48 | ------- 49 | 50 | GPLv3 51 | 52 | Author Information 53 | ------------------ 54 | 55 | * Sebastian Plattner (plattner@puzzle.ch) 56 | -------------------------------------------------------------------------------- /roles/rancher_keepalived/files/cm-local-notify.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | notify.sh: |- 4 | #!/bin/bash 5 | 6 | # for ANY state transition. 7 | # "notify" script is called AFTER the 8 | # notify_* script(s) and is executed 9 | # with 3 arguments provided by keepalived 10 | # (ie don't include parameters in the notify line). 11 | # arguments 12 | # $1 = "GROUP"|"INSTANCE" 13 | # $2 = name of group or instance 14 | # $3 = target state of transition 15 | # ("MASTER"|"BACKUP"|"FAULT") 16 | 17 | TYPE=$1 18 | NAME=$2 19 | STATE=$3 20 | 21 | case $STATE in 22 | "MASTER") echo "I'm the MASTER! Whup whup." > /proc/1/fd/1 23 | exit 0 24 | ;; 25 | "BACKUP") echo "Ok, i'm just a backup, great." > /proc/1/fd/1 26 | exit 0 27 | ;; 28 | "FAULT") echo "Fault, what ?" 
> /proc/1/fd/1 29 | exit 0 30 | ;; 31 | *) echo "Unknown state" > /proc/1/fd/1 32 | exit 1 33 | ;; 34 | esac 35 | kind: ConfigMap 36 | metadata: 37 | name: keepalived-failover 38 | namespace: ipfailover 39 | -------------------------------------------------------------------------------- /plays/cleanup_k8snodes.yml: -------------------------------------------------------------------------------- 1 | - name: Cleanup Nodes 2 | hosts: all 3 | tasks: 4 | - name: Stop Docker Container 5 | shell: 6 | cmd: "docker rm -f $(docker ps -qa)" 7 | ignore_errors: True 8 | - name: Unmount kubelet mounts 9 | shell: 10 | cmd: "for mount in $(mount | grep tmpfs | grep '/var/lib/kubelet' | awk '{ print $3 }') /var/lib/kubelet /var/lib/rancher; do umount $mount; done" 11 | ignore_errors: True 12 | - name: Remove Docker Images 13 | shell: 14 | cmd: "docker rmi -f $(docker images -q)" 15 | ignore_errors: True 16 | - name: Remove Docker Volumes 17 | shell: 18 | cmd: "docker volume rm $(docker volume ls -q)" 19 | ignore_errors: True 20 | - name: Remove Directories and Files 21 | file: 22 | path: "{{ item }}" 23 | state: absent 24 | with_items: 25 | - /etc/ceph 26 | - /etc/cni 27 | - /etc/kubernetes 28 | - /opt/cni 29 | - /opt/rke 30 | - /run/secrets/kubernetes.io 31 | - /run/calico 32 | - /run/flannel 33 | - /var/lib/calico 34 | - /var/lib/etcd 35 | - /var/lib/cni 36 | - /var/lib/kubelet 37 | - /var/lib/rancher/rke/log 38 | - /var/log/containers 39 | - /var/log/pods 40 | - /var/log/pods 41 | - /var/run/calico 42 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Download binary dependencies 4 | delegate_to: localhost 5 | import_tasks: dependencies.yml 6 | 7 | - name: RKE Config 8 | delegate_to: localhost 9 | import_tasks: rke-config.yml 10 | 11 | - name: RKE Up 12 | delegate_to: localhost 13 | import_tasks: rke-up.yml 14 | 15 | - name: Configure keepalived 16 | include_role: 17 | name: rancher_keepalived 18 | public: yes 19 | vars: 20 | kubeconfigfile: "{{ rke_cluster_kube_config }}" 21 | keepalived_cluster_group_inventory_name: "{{ rke_cluster_group_inventory_name }}" 22 | 23 | - name: Get Rancher Install Status for first time setup check 24 | delegate_to: localhost 25 | uri: 26 | url: "https://{{ rancher_hostname }}/ping" 27 | validate_certs: no 28 | return_content: true 29 | status_code: 30 | - -1 # For cases where nginx ingress controller is not yet ready 31 | - 200 # Rancher should be installed 32 | - 404 # Rancher is not yet installed 33 | register: rancher_installed 34 | check_mode: no 35 | changed_when: false 36 | 37 | - name: Set Rancher install status 38 | set_fact: 39 | rancher_installed: "{{ rancher_installed.content.find('pong') != -1 }}" 40 | 41 | - name: Install/update Rancher 42 | delegate_to: localhost 43 | import_tasks: rancher.yml 44 | when: 45 | - kubeconfig_result.stat.exists 46 | 47 | - name: Rancher initial setup (only on first time setup) 48 | delegate_to: localhost 49 | import_tasks: rancher-initial-setup.yml 50 | when: 51 | - not rancher_installed 52 | -------------------------------------------------------------------------------- /roles/firewalld/tasks/firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # https://rancher.com/docs/rancher/v2.x/en/installation/requirements/ 4 | # https://rancher.com/docs/rancher/v2.x/en/installation/options/firewall/ 5 
| 6 | 7 | - name: Open firewall for Nodes with role etcd 8 | firewalld: 9 | port: "{{ item }}" 10 | permanent: true 11 | immediate: true 12 | state: enabled 13 | with_items: 14 | - 2376/tcp 15 | - 2379/tcp 16 | - 2380/tcp 17 | - 8472/udp 18 | - 9099/tcp 19 | - 10250/tcp 20 | when: 21 | - "'etcd' in k8s_roles" 22 | 23 | - name: Open firewall for Nodes with role control_plane 24 | firewalld: 25 | port: "{{ item }}" 26 | permanent: true 27 | immediate: true 28 | state: enabled 29 | with_items: 30 | - 80/tcp 31 | - 443/tcp 32 | - 2376/tcp 33 | - 6443/tcp 34 | - 8472/udp 35 | - 9099/tcp 36 | - 10250/tcp 37 | - 10254/tcp 38 | - 30000-32767/tcp 39 | - 30000-32767/udp 40 | when: 41 | - "'controlplane' in k8s_roles" 42 | 43 | - name: Open firewall for Nodes with role worker 44 | firewalld: 45 | port: "{{ item }}" 46 | permanent: true 47 | immediate: true 48 | state: enabled 49 | with_items: 50 | - 22/tcp 51 | - 80/tcp 52 | - 443/tcp 53 | - 2376/tcp 54 | - 8472/udp 55 | - 9099/tcp 56 | - 10250/tcp 57 | - 10254/tcp 58 | - 30000-32767/tcp 59 | - 30000-32767/udp 60 | when: 61 | - "'worker' in k8s_roles" 62 | 63 | - name: Open vrrp for keepalived 64 | firewalld: 65 | rich_rule: rule protocol value="vrrp" accept 66 | permanent: yes 67 | immediate: true 68 | state: enabled 69 | when: 70 | - "'worker' in k8s_roles" 71 | -------------------------------------------------------------------------------- /roles/custom_k8s_cluster/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Rancher API Key 3 | custom_k8s_cluster_api_key: "" 4 | # Rancher API Host 5 | custom_k8s_cluster_rancher_host: "" 6 | custom_k8s_cluster_rancher_api: "https://{{ custom_k8s_cluster_rancher_host }}/v3" 7 | custom_k8s_cluster_verify_ssl: yes 8 | custom_k8s_cluster_self_signed_certificate: false 9 | custom_k8s_cluster_ca_checksum_param: "{{ '--ca-checksum' if custom_k8s_cluster_self_signed_certificate else '' }}" 10 | 11 | # Cluster Name, defaults to the inventory Name [custom_k8s_clusters] Ansible Group without the 'rancher_' Prefix 12 | # As Cluster in Rancher cannot use _ in Names, we need to replace it with - 13 | custom_k8s_cluster_name: "{{ inventory_hostname | regex_replace('rancher_') | regex_replace('_','-') }}" 14 | custom_k8s_cluster_group_inventory_name: "{{ inventory_hostname | regex_replace('rancher_') }}" 15 | 16 | # Enable Pod Security Policy 17 | custom_k8s_cluster_enable_psp: true 18 | # Default Pod Security Policy 19 | custom_k8s_clusters_default_psp: "restricted" 20 | # Enable Network Segregation between Projects 21 | custom_k8s_cluster_enable_network_policy: true 22 | # Kubernetes Version 23 | custom_k8s_cluster_kubernetes_version: "v1.18.12-rancher1-1" 24 | # Ingress Provider (none, nginx) 25 | custom_k8s_clusters_ingress_provider: "none" 26 | 27 | # Rancher Agent to be used 28 | custom_k8s_cluster_agent_version: "v2.4.11" 29 | 30 | # Base command for the Ranger Agent 31 | custom_k8s_cluster_docker_commmand_base: "docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:{{ custom_k8s_cluster_agent_version}} --server https://{{ custom_k8s_cluster_rancher_host }}" 32 | 33 | # Internal Interface 34 | # See https://rancher.com/docs/rke/latest/en/config-options/nodes/#internal-address 35 | # & https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/#ip-address-options 36 | custom_k8s_cluster_ingress_node_internal_iface: eth0 37 | 38 
| custom_k8s_cluster_use_fqdn_nodename: true 39 | -------------------------------------------------------------------------------- /roles/custom_k8s_cluster/tasks/nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get current nodes from Rancher Control Plane 3 | uri: 4 | url: "{{ custom_k8s_cluster_rancher_api }}/clusters/{{ cluster.id }}/nodes" 5 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 6 | return_content: yes 7 | headers: 8 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 9 | register: cluster_nodes 10 | check_mode: no 11 | 12 | - name: Show already added nodes 13 | debug: 14 | msg: "{{ cluster_nodes | json_query(\"json.data[*].hostname\") }}" 15 | 16 | - name: Show nodes which are going to be added 17 | debug: 18 | msg: "{{ hostvars[item]['ansible_facts']['hostname'] }}{{ '.' if hostvars[item]['ansible_facts']['domain'] else '' }}{{ hostvars[item]['ansible_facts']['domain'] }}" 19 | when: 20 | - (cluster_nodes | json_query("json.data[?hostname == '" + hostvars[item]['ansible_facts']['hostname'] + ("." + hostvars[item]['ansible_facts']['domain'] if hostvars[item]['ansible_facts']['domain'] and custom_k8s_cluster_use_fqdn_nodename else "") +"']") | length) == 0 21 | with_items: 22 | - "{{ groups[custom_k8s_cluster_group_inventory_name] }}" 23 | 24 | - name: Add Nodes when not already added 25 | delegate_to: "{{ item }}" 26 | command: "{{ custom_k8s_cluster_docker_commmand_base }} {{ custom_k8s_cluster_ca_checksum_param }} {{ clustercachecksum | default('') }} --token {{ clusterregistrationtoken }} --internal-address {{ custom_k8s_cluster_ingress_node_internal_iface }} --node-name {{ hostvars[item]['ansible_facts']['hostname'] }}{{ '.' + hostvars[item]['ansible_facts']['domain'] if hostvars[item]['ansible_facts']['domain'] and custom_k8s_cluster_use_fqdn_nodename else '' }}{% for label in hostvars[item]['k8s_labels'] %} --label {{ label.name }}={{ label.value }}{% endfor %}{% for role in hostvars[item]['k8s_roles'] %} --{{ role }}{% endfor %}" 27 | when: 28 | - (cluster_nodes | json_query("json.data[?hostname == '" + hostvars[item]['ansible_facts']['hostname'] + ("." 
+ hostvars[item]['ansible_facts']['domain'] if hostvars[item]['ansible_facts']['domain'] and custom_k8s_cluster_use_fqdn_nodename else "") +"']") | length) == 0 29 | with_items: 30 | - "{{ groups[custom_k8s_cluster_group_inventory_name] }}" 31 | ignore_errors: yes 32 | -------------------------------------------------------------------------------- /roles/custom_k8s_cluster/templates/cluster.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "dockerRootDir": "/var/lib/docker", 3 | "enableClusterAlerting": false, 4 | "enableClusterMonitoring": false, 5 | "enableNetworkPolicy": {{ custom_k8s_cluster_enable_network_policy | lower }}, 6 | "type": "cluster", 7 | "name": "{{ custom_k8s_cluster_name }}", 8 | "rancherKubernetesEngineConfig": { 9 | "addonJobTimeout": 30, 10 | "ignoreDockerVersion": true, 11 | "sshAgentAuth": false, 12 | "type": "rancherKubernetesEngineConfig", 13 | "kubernetesVersion": "{{ custom_k8s_cluster_kubernetes_version }}", 14 | "authentication": { 15 | "strategy": "x509", 16 | "type": "authnConfig" 17 | }, 18 | "network": { 19 | "plugin": "canal", 20 | "type": "networkConfig", 21 | "options": { 22 | "flannel_backend_type": "vxlan" 23 | } 24 | }, 25 | "ingress": { 26 | "provider": "{{ custom_k8s_clusters_ingress_provider }}", 27 | "type": "ingressConfig" 28 | }, 29 | "monitoring": { 30 | "provider": "metrics-server", 31 | "type": "monitoringConfig" 32 | }, 33 | "services": { 34 | "type": "rkeConfigServices", 35 | "kubeApi": { 36 | "extra_args": { 37 | "feature-gates": "MountPropagation=true" 38 | }, 39 | "alwaysPullImages": false, 40 | "podSecurityPolicy": true, 41 | "serviceNodePortRange": "30000-32767", 42 | "type": "kubeAPIService" 43 | }, 44 | "kubelet": { 45 | "extra_args": { 46 | "feature-gates": "MountPropagation=true" 47 | } 48 | }, 49 | "etcd": { 50 | "creation": "12h", 51 | "extraArgs": { 52 | "heartbeat-interval": 500, 53 | "election-timeout": 5000 54 | }, 55 | "retention": "72h", 56 | "snapshot": false, 57 | "type": "etcdService", 58 | "backupConfig": { 59 | "enabled": true, 60 | "intervalHours": 6, 61 | "retention": 12, 62 | "type": "backupConfig" 63 | } 64 | } 65 | } 66 | }, 67 | "localClusterAuthEndpoint": { 68 | "enabled": true, 69 | "type": "localClusterAuthEndpoint" 70 | }, 71 | "defaultPodSecurityPolicyTemplateId": "{{ custom_k8s_clusters_default_psp | lower }}" 72 | } -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/tasks/rancher-initial-setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Get Login Token 4 | uri: 5 | url: "https://{{ rancher_hostname }}/v3-public/localProviders/local?action=login" 6 | return_content: yes 7 | body: "{ 'username': 'admin', 'password': '{{ rancher_admin_initial_password }}'}" 8 | body_format: json 9 | method: POST 10 | status_code: 201 11 | validate_certs: no 12 | register: json_response 13 | 14 | - name: Set Rancher Login Token 15 | set_fact: 16 | rancher_login_token: "{{ json_response.json.token }}" 17 | 18 | - name: Change admin Password 19 | uri: 20 | url: "https://{{ rancher_hostname }}/v3/users?action=changepassword" 21 | return_content: yes 22 | body: 23 | currentPassword: "{{ rancher_admin_initial_password }}" 24 | newPassword: "{{ rancher_admin_password }}" 25 | body_format: json 26 | method: POST 27 | status_code: 28 | - 200 29 | - 201 30 | - 422 31 | validate_certs: no 32 | headers: 33 | Authorization: "Bearer {{ rancher_login_token }}" 34 | 
35 | - name: Create API Key 36 | uri: 37 | url: "https://{{ rancher_hostname }}/v3/token" 38 | return_content: yes 39 | body: 40 | description: ansible_automation 41 | body_format: json 42 | method: POST 43 | status_code: 44 | - 201 45 | validate_certs: no 46 | headers: 47 | Authorization: "Bearer {{ rancher_login_token }}" 48 | register: json_response 49 | 50 | - name: Set Rancher API 51 | set_fact: 52 | rancher_api_token: "{{ json_response.json.token }}" 53 | 54 | - name: Add RANCHER_TOKEN_HOLDER dummy host 55 | add_host: 56 | name: "RANCHER_TOKEN_HOLDER" 57 | api_token: "{{ rancher_api_token }}" 58 | 59 | - name: Change server-url 60 | uri: 61 | url: "https://{{ rancher_hostname }}/v3/settings/server-url" 62 | return_content: yes 63 | body: 64 | value: "https://{{ rancher_hostname }}" 65 | body_format: json 66 | method: PUT 67 | status_code: 68 | - 200 69 | validate_certs: no 70 | headers: 71 | Authorization: "Bearer {{ rancher_api_token }}" 72 | 73 | - name: Telemetry opt-in/out 74 | uri: 75 | url: "https://{{ rancher_hostname }}/v3/settings/telemetry-opt" 76 | return_content: yes 77 | body: 78 | value: "{{ rancher_telemetry }}" 79 | body_format: json 80 | method: PUT 81 | status_code: 82 | - 200 83 | validate_certs: no 84 | headers: 85 | Authorization: "Bearer {{ rancher_api_token }}" -------------------------------------------------------------------------------- /roles/custom_k8s_cluster/README.md: -------------------------------------------------------------------------------- 1 | custom_k8s_cluster 2 | ================== 3 | 4 | Create a new custom K8S cluster on the Rancher host and add master and worker nodes 5 | 6 | Requirements 7 | ------------ 8 | 9 | Only uses python and ansible. 10 | 11 | 12 | Role Variables 13 | -------------- 14 | 15 | ```yaml 16 | --- 17 | # Rancher API Key 18 | custom_k8s_cluster_api_key: "" 19 | # Rancher API Host 20 | custom_k8s_cluster_rancher_host: "" 21 | custom_k8s_cluster_rancher_api: "https://{{ custom_k8s_cluster_rancher_host }}/v3" 22 | custom_k8s_cluster_verify_ssl: yes 23 | custom_k8s_cluster_self_signed_certificate: false 24 | custom_k8s_cluster_ca_checksum_param: "{{ '--ca-checksum' if custom_k8s_cluster_self_signed_certificate else '' }}" 25 | 26 | # Cluster Name, defaults to the inventory Name [custom_k8s_clusters] Ansible Group without the 'rancher_' Prefix 27 | # As Cluster in Rancher cannot use _ in Names, we need to replace it with - 28 | custom_k8s_cluster_name: "{{ inventory_hostname | regex_replace('rancher_') | regex_replace('_','-') }}" 29 | custom_k8s_cluster_group_inventory_name: "{{ inventory_hostname | regex_replace('rancher_') }}" 30 | 31 | # Enable Pod Security Policy 32 | custom_k8s_cluster_enable_psp: true 33 | # Default Pod Security Policy 34 | custom_k8s_clusters_default_psp: "restricted" 35 | # Enable Network Segregation between Projects 36 | custom_k8s_cluster_enable_network_policy: true 37 | # Kubernetes Version 38 | custom_k8s_cluster_kubernetes_version: "v1.19.4-rancher1-1" 39 | # Ingress Provider (none, nginx) 40 | custom_k8s_clusters_ingress_provider: "none" 41 | 42 | # Rancher Agent to be used 43 | custom_k8s_cluster_agent_version: "v2.4.11" 44 | 45 | # Base command for the Ranger Agent 46 | custom_k8s_cluster_docker_commmand_base: "docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:{{ custom_k8s_cluster_agent_version}} --server https://{{ custom_k8s_cluster_rancher_host }}" 47 | 48 | # Internal Interface 49 | # See 
https://rancher.com/docs/rke/latest/en/config-options/nodes/#internal-address 50 | # & https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/custom-nodes/agent-options/#ip-address-options 51 | custom_k8s_cluster_ingress_node_internal_iface: eth0 52 | 53 | custom_k8s_cluster_use_fqdn_nodename: true 54 | ``` 55 | 56 | Dependencies 57 | ------------ 58 | 59 | * none 60 | 61 | License 62 | ------- 63 | 64 | GPLv3 65 | 66 | Author Information 67 | ------------------ 68 | 69 | * Sebastian Plattner (plattner@puzzle.ch) 70 | -------------------------------------------------------------------------------- /roles/docker/tasks/docker_setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Basic Setup for Docker Buildnodes (CentOS) 4 | - name: install required packages 5 | yum: 6 | name: "{{ docker_required_packages }}" 7 | state: present 8 | 9 | - name: gather the package facts 10 | package_facts: 11 | manager: auto 12 | 13 | - name: check if docker is installed and if so, get the docker version 14 | shell: "docker -v | cut -d ' ' -f 3 | cut -d ',' -f 1" 15 | changed_when: false 16 | when: 17 | - "'docker-ce' in ansible_facts.packages" 18 | register: installed_docker_version 19 | 20 | - name: create temporary installation script directory 21 | changed_when: false 22 | tempfile: 23 | state: directory 24 | suffix: _docker_installation 25 | register: docker_installation_dir 26 | 27 | - name: install Docker service 28 | block: 29 | - name: set docker installation script fact 30 | set_fact: 31 | docker_installation_script: "{{ docker_version }}.sh" 32 | 33 | - name: set docker installation script path fact 34 | set_fact: 35 | docker_installation_script_full_path: "{{ docker_installation_dir.path }}/{{ docker_installation_script }}" 36 | 37 | - name: get Rancher's Docker installation script 38 | get_url: 39 | url: "https://releases.rancher.com/install-docker/{{ docker_installation_script }}" 40 | dest: "{{ docker_installation_script_full_path }}" 41 | mode: '0755' 42 | 43 | - name: execute the Docker installation script 44 | shell: "bash {{ docker_installation_script_full_path }}" 45 | 46 | - name: remove the Docker installation script 47 | file: 48 | path: "{{ docker_installation_script_full_path }}" 49 | state: absent 50 | when: (installed_docker_version is skipped or not docker_version in installed_docker_version.stdout) 51 | 52 | - name: cleanup docker_installation_dir 53 | changed_when: false 54 | file: 55 | path: "{{ docker_installation_dir.path }}" 56 | state: absent 57 | when: docker_installation_dir is defined 58 | 59 | - name: Enable IP Forwarding 60 | sysctl: 61 | name: net.ipv4.ip_forward 62 | value: '1' 63 | sysctl_set: yes 64 | state: present 65 | reload: yes 66 | 67 | - name: enable and start docker 68 | systemd: 69 | name: docker 70 | daemon-reload: yes 71 | enabled: yes 72 | state: started 73 | 74 | - name: setup a cronjob for docker cleanup 75 | changed_when: false 76 | cron: 77 | name: docker cleanup 78 | minute: "00" 79 | hour: "21" 80 | user: root 81 | job: 'docker system prune -f --filter "until=2h" -a >/dev/null 2>&1' 82 | cron_file: docker_cleanup 83 | 84 | - name: setup a cronjob for docker volumes cleanup 85 | changed_when: false 86 | cron: 87 | name: docker volumes cleanup 88 | minute: "15" 89 | hour: "21" 90 | user: root 91 | job: 'for vol in $(docker volume ls -q -f dangling=true); do docker volume rm $vol >/dev/null 2>&1; done' 92 | cron_file: docker_cleanup 93 | 94 | - name: adding existing user '{{ 
ansible_user }}' to group docker 95 | user: 96 | name: '{{ ansible_user }}' 97 | groups: 98 | - docker 99 | append: yes 100 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### Helm Settings 3 | helm_binary_version: v3.4.2 4 | helm_binary_download_url: "https://get.helm.sh/helm-{{ helm_binary_version }}-linux-amd64.tar.gz" 5 | helm_binary: "./helm" 6 | helm_home: ".helm" 7 | helm_rancher_repo: rancher-stable 8 | helm_rancher_repo_url: "https://releases.rancher.com/server-charts/stable" 9 | helm_rancher_version: v2.4.11 10 | # You only need to configure these "helm_certmanager_*" values if you enable 11 | # Let's Encrypt support in the "Rancher Cluster Settings" section! 12 | helm_certmanager_version: 1.1.0 13 | helm_certmanager_jetstack_repo: jetstack 14 | helm_certmanager_jetstack_repo_url: https://charts.jetstack.io 15 | 16 | ### Kubectl Settings 17 | kubectl_binary_version: v1.20.1 18 | kubectl_binary_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubectl_binary_version }}/bin/linux/amd64/kubectl" 19 | kubectl_binary: "./kubectl-{{ kubectl_binary_version }}" 20 | 21 | ### RKE Cluster Settings 22 | rke_binary_version: v1.2.3 23 | rke_binary_download_url: "https://github.com/rancher/rke/releases/download/{{ rke_binary_version }}/rke_linux-amd64" 24 | rke_cluster_bin: "./rke-{{ rke_binary_version }}" 25 | rke_cluster_kube_config: "kube_config_config-{{ inventory_hostname }}.yml" 26 | rke_cluster_config: "config-{{ inventory_hostname }}.yml" 27 | rke_kubernetes_version: v1.19.4-rancher1-1 28 | rke_ssh_user: centos # due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 29 | rke_ssh_agent_auth: true 30 | rke_network_interface: eth0 31 | rke_cluster_group_inventory_name: "{{ groups['rke_' + inventory_hostname] }}" 32 | 33 | ### Rancher Cluster Settings 34 | # General Rancher Settings 35 | rancher_hostname: "" 36 | rancher_admin_password: "" 37 | rancher_admin_initial_password: admin 38 | rancher_telemetry: out 39 | ## Rancher SSL Configuration 40 | # See https://rancher.com/docs/rancher/v2.x/en/installation/k8s-install/helm-rancher/#4-choose-your-ssl-configuration 41 | # for more information! You have to chose one (and exactly one!) method - disable the others! 42 | # 43 | # If you chose Method 1 or 2, ensure to also enable Cert-Manager (otherwide disable it!): 44 | rancher_certmanager_enabled: false 45 | # 46 | # Method 1: "Rancher-generated TLS certificate" (fully handled by Rancher via Cert-Manager) 47 | # 48 | rancher_generated_self_signed_certificates: false 49 | # 50 | # Method 2: "Lets Encrypt Configuration" (fully handled by Rancher via Cert-Manager) 51 | # 52 | rancher_lets_encrypt_generated_certificated: false 53 | rancher_letsencrypt_email: me@example.com 54 | rancher_certmanager_crd_url: "https://github.com/jetstack/cert-manager/releases/download/v{{ helm_certmanager_version }}/cert-manager.crds.yaml" 55 | # 56 | # Method 3: "Bring your own certificate" (officially signed or self-signed configuration) 57 | # 58 | rancher_bring_your_own_certificates: true 59 | rancher_tls_crt: "" 60 | rancher_tls_key: "" 61 | rancher_tls_cacerts: "" 62 | # rancher_tls_self_signed needs to be true if the Rancher TLS cert is self-signed and not generated by Rancher itself, then also the 63 | # full CA chain needs to be provided via rancher_tls_cacerts. 
64 | # See https://rancher.com/blog/2020/transport-layer-security-p2 for more information 65 | rancher_tls_self_signed: false -------------------------------------------------------------------------------- /roles/rancher_keepalived/templates/ds_failover.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: ipfailover-{{ item.vip | replace(":","-")}} 5 | namespace: {{ keepalived_ns }} 6 | labels: 7 | failover: failover-{{ item.vip | replace(":","-") }} 8 | spec: 9 | updateStrategy: 10 | rollingUpdate: 11 | maxUnavailable: 1 12 | type: RollingUpdate 13 | selector: 14 | matchLabels: 15 | failover: failover-{{ item.vip | replace(":","-") }} 16 | template: 17 | metadata: 18 | labels: 19 | failover: failover-{{ item.vip | replace(":","-") }} 20 | spec: 21 | {% if node_selector != "" -%} 22 | affinity: 23 | nodeAffinity: 24 | requiredDuringSchedulingIgnoredDuringExecution: 25 | nodeSelectorTerms: 26 | - matchExpressions: 27 | - key: {{ node_selector }} 28 | operator: In 29 | values: 30 | - "true" 31 | {% endif -%} 32 | {% if node_toleration != "" -%} 33 | tolerations: 34 | - key: {{ node_toleration }} 35 | operator: "Equal" 36 | value: "true" 37 | effect: "NoSchedule" 38 | {% endif -%} 39 | hostNetwork: true 40 | containers: 41 | - env: 42 | {% if with_cloud_notify -%} 43 | - name: API_TOKEN 44 | value: {{ keepalived_cloudscale_api_token }} 45 | - name: SERVER_IDS 46 | value: '#PYTHON2BASH:[{{ server_ids }}]' 47 | - name: KEEPALIVED_NOTIFY 48 | value: /tmp/failoverscripts/notify.sh 49 | {% endif -%} 50 | - name: KEEPALIVED_ROUTER_ID 51 | value: "{{ item.router_id }}" 52 | - name: KEEPALIVED_INTERFACE 53 | value: "{{ interface }}" 54 | - name: KEEPALIVED_TRACK_INTERFACE 55 | value: "{{ track_interface }}" 56 | - name: KEEPALIVED_UNICAST_PEERS 57 | value: '#PYTHON2BASH:[{{ unicast_peers }}]' 58 | - name: KEEPALIVED_VIRTUAL_IPS 59 | value: '#PYTHON2BASH:[''{{ item.vip }}'']' 60 | - name: MASTER_HOST 61 | value: {{ item.master }} 62 | - name: KEEPALIVED_PASSWORD 63 | value: {{ item.password }} 64 | image: {{ keepalived_image }} 65 | imagePullPolicy: Always 66 | {% if with_cloud_notify -%} 67 | livenessProbe: 68 | exec: 69 | command: 70 | - /tmp/failoverscripts/check.py 71 | failureThreshold: 2 72 | initialDelaySeconds: 10 73 | periodSeconds: 60 74 | successThreshold: 1 75 | timeoutSeconds: 2 76 | {% endif -%} 77 | name: ipfailover 78 | {% if with_cloud_notify -%} 79 | volumeMounts: 80 | - mountPath: /tmp/failoverscripts/ 81 | name: config 82 | {% endif -%} 83 | securityContext: 84 | allowPrivilegeEscalation: false 85 | capabilities: 86 | add: 87 | - NET_ADMIN 88 | privileged: false 89 | procMount: Default 90 | readOnlyRootFilesystem: false 91 | runAsNonRoot: false 92 | terminationMessagePath: /dev/termination-log 93 | terminationMessagePolicy: File 94 | {% if with_cloud_notify -%} 95 | volumes: 96 | - configMap: 97 | defaultMode: 511 98 | name: keepalived-failover 99 | optional: false 100 | name: config 101 | {% endif -%} -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/README.md: -------------------------------------------------------------------------------- 1 | ansible-role-rke_rancher_clusters 2 | ================== 3 | 4 | Create a new Kubernetes Cluster with RKE and deploys Rancher on it 5 | 6 | Requirements 7 | ------------ 8 | 9 | Only uses python and ansible. 
10 | 11 | 12 | Role Variables 13 | -------------- 14 | 15 | ```yaml 16 | --- 17 | ### Helm Settings 18 | helm_binary_version: v3.4.2 19 | helm_binary_download_url: "https://get.helm.sh/helm-{{ helm_binary_version }}-linux-amd64.tar.gz" 20 | helm_binary: "./helm" 21 | helm_home: ".helm" 22 | helm_rancher_repo: rancher-stable 23 | helm_rancher_repo_url: "https://releases.rancher.com/server-charts/stable" 24 | helm_rancher_version: v2.4.11 25 | # You only need to configure these "helm_certmanager_*" values if you enable 26 | # Let's Encrypt support in the "Rancher Cluster Settings" section! 27 | helm_certmanager_version: 1.1.0 28 | helm_certmanager_jetstack_repo: jetstack 29 | helm_certmanager_jetstack_repo_url: https://charts.jetstack.io 30 | 31 | ### Kubectl Settings 32 | kubectl_binary_version: v1.20.1 33 | kubectl_binary_download_url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubectl_binary_version }}/bin/linux/amd64/kubectl" 34 | kubectl_binary: "./kubectl-{{ kubectl_binary_version }}" 35 | 36 | ### RKE Cluster Settings 37 | rke_binary_version: v1.2.3 38 | rke_binary_download_url: "https://github.com/rancher/rke/releases/download/{{ rke_binary_version }}/rke_linux-amd64" 39 | rke_cluster_bin: "./rke-{{ rke_binary_version }}" 40 | rke_cluster_kube_config: "kube_config_config-{{ inventory_hostname }}.yml" 41 | rke_cluster_config: "config-{{ inventory_hostname }}.yml" 42 | rke_kubernetes_version: v1.19.4-rancher1-1 43 | rke_ssh_user: centos # due to https://bugzilla.redhat.com/show_bug.cgi?id=1527565 44 | rke_ssh_agent_auth: true 45 | rke_network_interface: eth0 46 | rke_cluster_group_inventory_name: "{{ groups['rke_' + inventory_hostname] }}" 47 | 48 | ### Rancher Cluster Settings 49 | # General Rancher Settings 50 | rancher_hostname: "" 51 | rancher_admin_password: "" 52 | rancher_admin_initial_password: admin 53 | rancher_telemetry: out 54 | ## Rancher SSL Configuration 55 | # See https://rancher.com/docs/rancher/v2.x/en/installation/k8s-install/helm-rancher/#4-choose-your-ssl-configuration 56 | # for more information! You have to choose one (and exactly one!) method - disable the others! 57 | # 58 | # If you choose Method 1 or 2, ensure to also enable Cert-Manager (otherwise disable it!): 59 | rancher_certmanager_enabled: false 60 | # 61 | # Method 1: "Rancher-generated TLS certificate" (fully handled by Rancher via Cert-Manager) 62 | # 63 | rancher_generated_self_signed_certificates: false 64 | # 65 | # Method 2: "Lets Encrypt Configuration" (fully handled by Rancher via Cert-Manager) 66 | # 67 | rancher_lets_encrypt_generated_certificated: false 68 | rancher_letsencrypt_email: me@example.com 69 | rancher_certmanager_crd_url: "https://github.com/jetstack/cert-manager/releases/download/v{{ helm_certmanager_version }}/cert-manager.crds.yaml" 70 | # 71 | # Method 3: "Bring your own certificate" (officially signed or self-signed configuration) 72 | # 73 | rancher_bring_your_own_certificates: true 74 | rancher_tls_crt: "" 75 | rancher_tls_key: "" 76 | rancher_tls_cacerts: "" 77 | # rancher_tls_self_signed needs to be true if the Rancher TLS cert is self-signed and not generated by Rancher itself, then also the 78 | # full CA chain needs to be provided via rancher_tls_cacerts. 
79 | # See https://rancher.com/blog/2020/transport-layer-security-p2 for more information 80 | rancher_tls_self_signed: false 81 | ``` 82 | 83 | Dependencies 84 | ------------ 85 | 86 | * none 87 | 88 | License 89 | ------- 90 | 91 | GPLv3 92 | 93 | Author Information 94 | ------------------ 95 | 96 | * Sebastian Plattner (plattner@puzzle.ch) 97 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/templates/cluster.yml.j2: -------------------------------------------------------------------------------- 1 | nodes: 2 | {% for node in cluster %} 3 | - address: {{ hostvars[node]['ansible_' + rke_network_interface]['ipv4']['address'] }} 4 | internal_address: {{ hostvars[node]['ansible_' + rke_network_interface]['ipv4']['address'] }} 5 | user: {{ rke_ssh_user }} 6 | role: [{% for role in hostvars[node]['k8s_roles'] %}{{ role }},{% endfor %}] 7 | 8 | {% endfor %} 9 | 10 | kubernetes_version: "{{ rke_kubernetes_version }}" 11 | 12 | addon_job_timeout: 30 13 | authentication: 14 | strategy: x509 15 | ignore_docker_version: true 16 | # 17 | # # Currently only nginx ingress provider is supported. 18 | # # To disable ingress controller, set `provider: none` 19 | # # To enable ingress on specific nodes, use the node_selector, eg: 20 | # provider: nginx 21 | # node_selector: 22 | # app: ingress 23 | # 24 | ingress: 25 | provider: nginx 26 | monitoring: 27 | provider: metrics-server 28 | # 29 | # If you are using calico on AWS 30 | # 31 | # network: 32 | # plugin: calico 33 | # calico_network_provider: 34 | # cloud_provider: aws 35 | # 36 | # # To specify flannel interface 37 | # 38 | # network: 39 | # plugin: flannel 40 | # flannel_network_provider: 41 | # iface: eth1 42 | # 43 | # # To specify flannel interface for canal plugin 44 | # 45 | # network: 46 | # plugin: canal 47 | # canal_network_provider: 48 | # iface: eth1 49 | # 50 | network: 51 | options: 52 | flannel_backend_type: vxlan 53 | plugin: canal 54 | # 55 | # services: 56 | # kube-api: 57 | # service_cluster_ip_range: 10.43.0.0/16 58 | # kube-controller: 59 | # cluster_cidr: 10.42.0.0/16 60 | # service_cluster_ip_range: 10.43.0.0/16 61 | # kubelet: 62 | # cluster_domain: cluster.local 63 | # cluster_dns_server: 10.43.0.10 64 | # 65 | services: 66 | etcd: 67 | backup_config: 68 | interval_hours: 12 69 | retention: 6 70 | safe_timestamp: false 71 | creation: 12h 72 | extra_args: 73 | election-timeout: 5000 74 | heartbeat-interval: 500 75 | gid: 1000 76 | retention: 72h 77 | snapshot: false 78 | uid: 1000 79 | kube_api: 80 | always_pull_images: true 81 | pod_security_policy: true 82 | service_node_port_range: 30000-32767 83 | event_rate_limit: 84 | enabled: true 85 | audit_log: 86 | enabled: true 87 | secrets_encryption_config: 88 | enabled: true 89 | extra_args: 90 | anonymous-auth: "false" 91 | enable-admission-plugins: "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds,AlwaysPullImages,DenyEscalatingExec,NodeRestriction,EventRateLimit,PodSecurityPolicy" 92 | profiling: "false" 93 | service-account-lookup: "true" 94 | tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" 95 | extra_binds: 96 | - "/opt/kubernetes:/opt/kubernetes" 97 | kubelet: 98 | 
generate_serving_certificate: true 99 | extra_args: 100 | feature-gates: "RotateKubeletServerCertificate=true" 101 | protect-kernel-defaults: "false" 102 | tls-cipher-suites: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_RSA_WITH_AES_128_GCM_SHA256" 103 | kube-controller: 104 | extra_args: 105 | profiling: "false" 106 | address: "127.0.0.1" 107 | terminated-pod-gc-threshold: "1000" 108 | feature-gates: "RotateKubeletServerCertificate=true" 109 | scheduler: 110 | extra_args: 111 | profiling: "false" 112 | address: "127.0.0.1" 113 | ssh_agent_auth: {{ rke_ssh_agent_auth }} 114 | -------------------------------------------------------------------------------- /roles/rancher_keepalived/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Custom K8s cluster vIP HA setup 3 | # Useful when you want built-in HA for your custom K8s cluster ingress controller without external LB 4 | keepalived_enabled: false 5 | # Set the keepaliveds namespace name 6 | keepalived_ns: ipfailover 7 | # Move the keelalived namespace to the Rancher System project 8 | keepalived_ns_to_system_project: true 9 | # Configure Rancher System project PSP to: "none", "restricted" or "unrestricted" (only works when "keepalived_ns_to_system_project" is true) 10 | keepalived_rancher_system_project_psp: unrestricted 11 | # Specify if the keepalived setup should only use a private IP. 12 | # If so, set "keepalived_private_only" to "true" 13 | # and leave all "*_public_*" configuration options down here empty. 14 | keepalived_private_only: false 15 | # Specify if the keepalived setup should only use IPv4. 16 | # If so, set "keepalived_ipv4_only" to "true" 17 | # and leave all "*_ipv6" configuration options down here empty. 18 | keepalived_ipv4_only: false 19 | # Specify a node selector labels if keepalived containers should only run on certain nodes 20 | # If left empty, the daemonset will deploy a replica per node. For example "vip_public" and "vip_private". 21 | keepalived_private_node_selector: "" 22 | keepalived_public_node_selector: "" 23 | # Specify a node toleration label if keepalived containers should be running on tainted certain nodes. 24 | # For example "public_ingress". 25 | keepalived_private_node_toleration: "" 26 | keepalived_public_node_toleration: "" 27 | # Specify where the custom K8s cluster is running. Currently supported environments are: 28 | # - "local": Local keepalived setup 29 | # - "cloudscale": Keepalived setup with cloudscale floating IP 30 | keepalived_setup_env: local 31 | # If "keepalived_setup_env" is set to "cloudscale", a cloudscale API token needs to be provided. 32 | keepalived_cloudscale_api_token: "" 33 | # Keepalived service Docker image 34 | keepalived_image: puzzle/keepalived:2.0.20 35 | # Specify if keepalived daemonset deployment destination is on a custom K8s cluster. 36 | # If set to true, the keepalived role waits with its tasks until the destination cluster is ready and not in transitioning state 37 | keepalived_deployment_on_custom_cluster: false 38 | # If "keepalived_deployment_on_custom_cluster" is set to true the following Rancher API related variables ("keepalived_deployment_rancher_*") need to be set too. 
39 | keepalived_deployment_rancher_api: "{{ custom_k8s_cluster_rancher_api }}" 40 | keepalived_deployment_rancher_api_key: "{{ custom_k8s_cluster_api_key }}" 41 | keepalived_deployment_rancher_api_verify_ssl: yes 42 | keepalived_deployment_rancher_cluster_id: "" 43 | # Keepalived IP address configuration 44 | keepalived_private_failover_track_interface_ip: eth0 45 | keepalived_private_failover_ip: 46 | - vip: "192.0.2.254" 47 | router_id: 1 48 | master: rancher01 49 | password: my-top-secret-password1-here 50 | keepalived_public_failover_track_interface_ip: eth0 51 | keepalived_public_failover_ip: 52 | - vip: "198.51.100.254" 53 | router_id: 2 54 | master: rancher01 55 | password: my-top-secret-password2-here 56 | keepalived_public_failover_track_interface_ipv6: eth0 57 | keepalived_public_failover_ipv6: 58 | - vip: "2001:db8::ffff" 59 | router_id: 3 60 | master: rancher01 61 | password: my-top-secret-password3-here 62 | # Node groups to deploy keepalived on 63 | # Usually the default variable "keepalived_cluster_group_inventory_name" is replaced with "custom_k8s_cluster_group_inventory_name" or 64 | # "rke_cluster_group_inventory_name" for example - depending from which other role this keepalived role is called. 65 | keepalived_cluster_group_inventory_name: "{{ inventory_hostname | regex_replace('rancher_') }}" 66 | # Node groups 67 | # Example: If you would like to split keepalived daemonsets to different host groups: 68 | # - private IPv4: "{{ groups[keepalived_cluster_group_inventory_name + '_master'] }}" 69 | # - public IPv4: "{{ groups[keepalived_cluster_group_inventory_name + '_ingress'] }}" 70 | # - public IPv6: "{{ groups[keepalived_cluster_group_inventory_name + '_ingress'] }}" 71 | keepalived_private_node_group_ipv4: "{{ keepalived_cluster_group_inventory_name }}" 72 | keepalived_public_node_group_ipv4: "{{ keepalived_cluster_group_inventory_name }}" 73 | keepalived_public_node_group_ipv6: "{{ keepalived_cluster_group_inventory_name }}" 74 | -------------------------------------------------------------------------------- /roles/rancher_keepalived/README.md: -------------------------------------------------------------------------------- 1 | keepalived 2 | ================== 3 | 4 | Install keepalived daemonset on an existing K8s cluster 5 | 6 | Requirements 7 | ------------ 8 | 9 | - Only uses python and ansible. 10 | - The keepalived daemonset pods only come up when the host port 80 becomes reachable (≃ an ingress controller is running) 11 | 12 | 13 | Role Variables 14 | -------------- 15 | 16 | ```yaml 17 | --- 18 | # Custom K8s cluster vIP HA setup 19 | # Useful when you want built-in HA for your custom K8s cluster ingress controller without external LB 20 | keepalived_enabled: false 21 | # Set the keepaliveds namespace name 22 | keepalived_ns: ipfailover 23 | # Move the keelalived namespace to the Rancher System project 24 | keepalived_ns_to_system_project: true 25 | # Configure Rancher System project PSP to: "none", "restricted" or "unrestricted" (only works when "keepalived_ns_to_system_project" is true) 26 | keepalived_rancher_system_project_psp: unrestricted 27 | # Specify if the keepalived setup should only use a private IP. 28 | # If so, set "keepalived_private_only" to "true" 29 | # and leave all "*_public_*" configuration options down here empty. 30 | keepalived_private_only: false 31 | # Specify if the keepalived setup should only use IPv4. 32 | # If so, set "keepalived_ipv4_only" to "true" 33 | # and leave all "*_ipv6" configuration options down here empty. 
34 | keepalived_ipv4_only: false 35 | # Specify node selector labels if keepalived containers should only run on certain nodes, for example "vip_public" and "vip_private". 36 | # If left empty, the daemonset will deploy a replica on every node. 37 | keepalived_private_node_selector: "" 38 | keepalived_public_node_selector: "" 39 | # Specify a node toleration label if keepalived containers should be running on certain tainted nodes. 40 | # For example "public_ingress". 41 | keepalived_private_node_toleration: "" 42 | keepalived_public_node_toleration: "" 43 | # Specify where the custom K8s cluster is running. Currently supported environments are: 44 | # - "local": Local keepalived setup 45 | # - "cloudscale": Keepalived setup with cloudscale floating IP 46 | keepalived_setup_env: local 47 | # If "keepalived_setup_env" is set to "cloudscale", a cloudscale API token needs to be provided. 48 | keepalived_cloudscale_api_token: "" 49 | # Keepalived service Docker image 50 | keepalived_image: puzzle/keepalived:2.0.20 51 | # Specify if keepalived daemonset deployment destination is on a custom K8s cluster. 52 | # If set to true, the keepalived role waits with its tasks until the destination cluster is ready and not in transitioning state 53 | keepalived_deployment_on_custom_cluster: false 54 | # If "keepalived_deployment_on_custom_cluster" is set to true the following Rancher API related variables ("keepalived_deployment_rancher_*") need to be set too. 55 | keepalived_deployment_rancher_api: "{{ custom_k8s_cluster_rancher_api }}" 56 | keepalived_deployment_rancher_api_key: "{{ custom_k8s_cluster_api_key }}" 57 | keepalived_deployment_rancher_api_verify_ssl: yes 58 | keepalived_deployment_rancher_cluster_id: "" 59 | # Keepalived IP address configuration 60 | keepalived_private_failover_track_interface_ip: eth0 61 | keepalived_private_failover_ip: 62 | - vip: "192.0.2.254" 63 | router_id: 1 64 | master: rancher01 65 | password: my-top-secret-password1-here 66 | keepalived_public_failover_track_interface_ip: eth0 67 | keepalived_public_failover_ip: 68 | - vip: "198.51.100.254" 69 | router_id: 2 70 | master: rancher01 71 | password: my-top-secret-password2-here 72 | keepalived_public_failover_track_interface_ipv6: eth0 73 | keepalived_public_failover_ipv6: 74 | - vip: "2001:db8::ffff" 75 | router_id: 3 76 | master: rancher01 77 | password: my-top-secret-password3-here 78 | # Node groups to deploy keepalived on 79 | # Usually the default variable "keepalived_cluster_group_inventory_name" is replaced with "custom_k8s_cluster_group_inventory_name" or 80 | # "rke_cluster_group_inventory_name" for example - depending on which other role this keepalived role is called from. 
81 | keepalived_cluster_group_inventory_name: "{{ inventory_hostname | regex_replace('rancher_') }}" 82 | # Node groups 83 | # Example: If you would like to split keepalived daemonsets to different host groups: 84 | # - private IPv4: "{{ groups[keepalived_cluster_group_inventory_name + '_master'] }}" 85 | # - public IPv4: "{{ groups[keepalived_cluster_group_inventory_name + '_ingress'] }}" 86 | # - public IPv6: "{{ groups[keepalived_cluster_group_inventory_name + '_ingress'] }}" 87 | keepalived_private_node_group_ipv4: "{{ keepalived_cluster_group_inventory_name }}" 88 | keepalived_public_node_group_ipv4: "{{ keepalived_cluster_group_inventory_name }}" 89 | keepalived_public_node_group_ipv6: "{{ keepalived_cluster_group_inventory_name }}" 90 | ``` 91 | 92 | Dependencies 93 | ------------ 94 | 95 | * none 96 | 97 | License 98 | ------- 99 | 100 | GPLv3 101 | 102 | Author Information 103 | ------------------ 104 | 105 | * Sebastian Plattner (plattner@puzzle.ch) 106 | -------------------------------------------------------------------------------- /roles/rancher_keepalived/files/cm-cloudscale-notify.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | check.py: |- 4 | #!/usr/bin/env python 5 | 6 | import requests 7 | import os 8 | import sys 9 | import subprocess 10 | 11 | cloudscale_api = "https://api.cloudscale.ch/" 12 | cloudscale_api_token = os.environ['API_TOKEN'] 13 | 14 | hostname = os.environ['HOSTNAME'].split(".")[0] 15 | vips = eval(os.environ['KEEPALIVED_VIRTUAL_IPS'].replace("#PYTHON2BASH:","")) 16 | 17 | def findMySelf(): 18 | 19 | try: 20 | response = requests.get("{}/v1/servers".format(cloudscale_api), headers={'Authorization': "Bearer {}".format(cloudscale_api_token)}) 21 | response.raise_for_status() 22 | servers = response.json() 23 | except requests.ConnectionError as e: 24 | return None 25 | except requests.HTTPError as e: 26 | return None 27 | 28 | me = None 29 | 30 | if "detail" in servers: 31 | # There was an error 32 | return None 33 | 34 | for server in servers: 35 | if server["name"] == hostname: 36 | me = server 37 | break 38 | 39 | return me 40 | 41 | def getFloatingIP(ip): 42 | 43 | try: 44 | response = requests.get("{}/v1/floating-ips/{}".format(cloudscale_api,ip), headers={'Authorization': "Bearer {}".format(cloudscale_api_token)}) 45 | response.raise_for_status() 46 | floating_ip = response.json() 47 | except requests.ConnectionError as e: 48 | return None 49 | except requests.HTTPError as e: 50 | return None 51 | 52 | if "detail" in floating_ip: 53 | # There was an error 54 | return None 55 | 56 | return floating_ip 57 | 58 | def main(): 59 | 60 | me = findMySelf() 61 | 62 | if me is None: 63 | # API seems wrong, exit 0 to not cause an IP failover 64 | sys.exit(0) 65 | 66 | for vip in vips: 67 | floating_ip = getFloatingIP(vip) 68 | 69 | if floating_ip is None: 70 | # API seems wrong, exit 0 to not cause an IP failover 71 | sys.exit(0) 72 | 73 | 74 | result = subprocess.Popen("/sbin/ip a show dev eth0 | /bin/grep {}".format(vip), shell=True, stdout=subprocess.PIPE) 75 | text = result.communicate()[0] 76 | 77 | is_active = result.returncode == 0 78 | 79 | if is_active: 80 | # I'm active, so I should have the floating IP 81 | if floating_ip["server"]["uuid"] == me["uuid"]: 82 | print("Interface has IP {} and it is assigned correctly".format(vip)) 83 | sys.exit(0) 84 | else: 85 | print("Interface has IP {} but it is not assigned correctly".format(vip)) 86 | sys.exit(1) 87 | else: 88 | # I'm not active, so I should not 
have the floating IP 89 | if floating_ip["server"]["uuid"] == me["uuid"]: 90 | print("Interface does not have IP {} and it is not assigned correctly".format(vip)) 91 | sys.exit(1) 92 | else: 93 | print("Interface does not have IP {} and it is assigned correctly".format(vip)) 94 | sys.exit(0) 95 | 96 | if __name__ == "__main__": 97 | main() 98 | notify.sh: "#!/bin/bash\n\n# for ANY state transition.\n# \"notify\" script is called 99 | AFTER the\n# notify_* script(s) and is executed\n# with 3 arguments provided by 100 | keepalived\n# (ie don't include parameters in the notify line).\n# arguments\n# 101 | $1 = \"GROUP\"|\"INSTANCE\"\n# $2 = name of group or instance\n# $3 = target state 102 | of transition\n# (\"MASTER\"|\"BACKUP\"|\"FAULT\")\n\nTYPE=$1\nNAME=$2\nSTATE=$3\n\nSERVERNAME=\"$(hostname)\"\n\nfor 103 | info in $(complex-bash-env iterate SERVER_IDS)\ndo\n\n if [ $(complex-bash-env 104 | isRow \"${!info}\") = true ]; then\n\n key=$(complex-bash-env getRowKeyVarName 105 | \"${!info}\")\n value=$(complex-bash-env getRowValueVarName \"${!info}\")\n\n\t\t 106 | \ currentserver=${!key}\n if [ \"$currentserver\" == 107 | \"$SERVERNAME\" ]; then\n SERVER_ID=${!value}\n fi\n\n 108 | \ fi\ndone\n\necho \"I'm $SERVERNAME, with ID: $SERVER_ID\" > /proc/1/fd/1\n\ncase 109 | $STATE in\n \"MASTER\") echo \"I'm the MASTER! Whup whup.\" > /proc/1/fd/1\n 110 | \ sleep 1\n for vip in $(complex-bash-env iterate 111 | KEEPALIVED_VIRTUAL_IPS)\n do\n VIRTUAL_IP=${!vip}\n 112 | \ ip a show dev eth0 | grep ${VIRTUAL_IP}\n if 113 | [ $? -eq 0 ]; then\n echo \"Update $SERVER_ID with IP ${VIRTUAL_IP}\" 114 | > /proc/1/fd/1\n curl -H \"Authorization: Bearer $API_TOKEN\" 115 | -F server=$SERVER_ID https://api.cloudscale.ch/v1/floating-ips/{$VIRTUAL_IP} > 116 | /proc/1/fd/1\n else\n echo \"not updating 117 | because IP ${VIRTUAL_IP} not assigned to eth0\" > /proc/1/fd/1\n fi\n 118 | \ done\n\n exit 0\n ;;\n \"BACKUP\") 119 | echo \"Ok, i'm just a backup, great.\" > /proc/1/fd/1\n exit 120 | 0\n ;;\n \"FAULT\") echo \"Fault, what ?\" > /proc/1/fd/1\n 121 | \ exit 0\n ;;\n *) echo \"Unknown 122 | state\" > /proc/1/fd/1\n exit 1\n ;;\nesac" 123 | kind: ConfigMap 124 | metadata: 125 | name: keepalived-failover 126 | namespace: ipfailover 127 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible Playbooks and Roles for Rancher 2 | 3 | **Disclaimer!**: We use this as a base for our own and customer setups at Puzzle. It is heavily work in progress and there are a lot of things that can be improved. 4 | Feel free to contribute. We are happy to assist. 5 | 6 | These Ansible playbooks and roles can be used to: 7 | * create a [Rancher Control Plane](https://rancher.com/) using [rke](https://github.com/rancher/rke) and [helm](https://helm.sh/) 8 | * add a custom Kubernetes cluster to an existing Rancher Control Plane 9 | 10 | ## Prerequisites 11 | We recommend running these playbooks inside a [pipenv](https://pipenv.pypa.io/en/latest/). 12 | 13 | All dependencies are managed with pipenv. To get a virtual environment, use: 14 | ```bash 15 | # Only if you don't have pipenv yet: 16 | pip install --user pipenv 17 | ``` 18 | 19 | Switch to the virtual environment and install the dependencies into it: 20 | ```bash 21 | pipenv shell --three 22 | pipenv install 23 | # Now you can run ansible-playbook commands inside this pipenv shell: 24 | ansible-playbook ... 
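# For example, a run of the main playbook might look like this (only a sketch, assuming the
# sample inventory under inventories/site and the site play shipped in this repository;
# adjust the inventory path to your own setup):
ansible-playbook -i inventories/site plays/site.yml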
25 | ``` 26 | 27 | You can verify the installed dependencies using `pipenv graph` (inside the pipenv shell): 28 | ```bash 29 | $ pipenv graph 30 | ansible==2.9.12 31 | - cryptography [required: Any, installed: 3.2] 32 | <--- output truncated ---> 33 | - PyYAML [required: Any, installed: 5.3.1] 34 | jmespath==0.10.0 35 | openshift==0.11.2 36 | - jinja2 [required: Any, installed: 2.11.2] 37 | - MarkupSafe [required: >=0.23, installed: 1.1.1] 38 | - kubernetes [required: ~=11.0.0, installed: 11.0.0] 39 | - certifi [required: >=14.05.14, installed: 2020.6.20] 40 | <--- output truncated ---> 41 | - six [required: Any, installed: 1.15.0] 42 | selinux==0.2.1 43 | - distro [required: >=1.3.0, installed: 1.5.0] 44 | - setuptools [required: >=39.0, installed: 50.3.2] 45 | ``` 46 | 47 | ## Inventory 48 | 49 | Check [inventories/site](./inventories/site) for a sample inventory. 50 | 51 | There are two special Ansible groups: 52 | * `rke_rancher_clusters`: Hosts in this group represent a Rancher Control Plane instance 53 | * `custom_k8s_clusters`: Hosts in this group represent a custom Kubernetes cluster added to a Rancher Control Plane 54 | 55 | Members (nodes) of the Rancher Control Plane and of the Kubernetes clusters are managed with the following Ansible groups. 56 | 57 | ### Rancher Control Plane 58 | 59 | For the Rancher Control Plane: assuming we have a Rancher Control Plane with the name `cluster_rancher`, we add the `cluster_rancher` host to the `rke_rancher_clusters` group and then add all of its nodes to the group `rke_cluster_rancher`, i.e. the Rancher Control Plane name with an `rke_` prefix. 60 | 61 | ```ini 62 | [rke_rancher_clusters] 63 | cluster_rancher # Belongs to Ansible Group rke_cluster_rancher 64 | 65 | [rke_cluster_rancher] 66 | rancher01 67 | rancher02 68 | rancher03 69 | 70 | ``` 71 | 72 | Make sure to set at least the following vars: 73 | * For the `cluster_rancher` special host, you have to set `rancher_hostname`, `rancher_admin_password` and `rancher_failover_ip` with its sub-configurations (see [roles/rke_rancher_clusters/README.md](roles/rke_rancher_clusters/README.md) for more information). Check [inventories/host_vars/cluster_rancher.yml](./inventories/host_vars/cluster_rancher.yml) and [roles/rke_rancher_clusters/defaults/main.yml](./roles/rke_rancher_clusters/defaults/main.yml) for more details. 74 | * Set the `k8s_roles` each of the members should have (for a Rancher Control Plane this is normally `controlplane`, `etcd`, `worker`). See [inventories/group_vars/rke_cluster_rancher.yml](./inventories/group_vars/rke_cluster_rancher.yml) as an example. 75 | 76 | 77 | ### Custom Kubernetes Cluster 78 | 79 | For a custom Kubernetes cluster managed with a Rancher Control Plane: assuming our cluster has the name `mycluster`, we create a host `rancher_mycluster` in the `custom_k8s_clusters` group (so the cluster name with a `rancher_` prefix). The member nodes of this cluster are then added to a group with the name `mycluster`. To apply dedicated roles to some of the nodes, you can use additional Ansible groups which are children of the `mycluster` group. 80 | 81 | 82 | ```ini 83 | [custom_k8s_clusters] 84 | rancher_mycluster 85 | 86 | [mycluster:children] 87 | mycluster_master 88 | mycluster_worker 89 | 90 | [mycluster_master] 91 | master01 92 | 93 | [mycluster_worker] 94 | worker01 95 | ``` 96 | 97 | Make sure to set at least the following vars: 98 | * For the `rancher_mycluster` special host you have to set at least the `custom_k8s_cluster_api_key` & `custom_k8s_cluster_rancher_host` variables. 
Check [roles/custom_k8s_cluster/defaults/main.yml](./roles/custom_k8s_cluster/defaults/main.yml) for more details and variables. 99 | * Set the `k8s_roles` each member should have. See [inventories/group_vars/mycluster_master.yml](./inventories/group_vars/mycluster_master.yml) and [inventories/group_vars/mycluster_worker.yml](./inventories/group_vars/mycluster_worker.yml) as an example. 100 | 101 | ## Playbooks 102 | 103 | ### site.yml 104 | 105 | Playbook to apply `docker`, `firewalld`, `rke_rancher_clusters` & `custom_k8s_cluster`. Check [plays/prepare_k8s_nodes.yml](./plays/prepare_k8s_nodes.yml), [plays/deploy_rancher.yml](./plays/deploy_rancher.yml) & [plays/deploy_k8s_cluster.yml](./plays/deploy_k8s_cluster.yml) for details. 106 | 107 | ### cleanup_k8snodes.yml 108 | 109 | With this playbook you can clean up a node which was already added to a Kubernetes cluster. Based on https://rancher.com/docs/rancher/v2.x/en/cluster-admin/cleaning-cluster-nodes/ (an example invocation is sketched at the end of the Roles section below). 110 | 111 | ## Roles 112 | 113 | ### docker 114 | 115 | Simple role to install Docker. Check [roles/docker/README.md](./roles/docker/README.md) for more details. 116 | 117 | ### firewalld 118 | 119 | The role only configures firewalld depending on the `k8s_roles` the node has (this behaviour can also be disabled if you want to). Based on https://rancher.com/docs/rancher/v2.x/en/installation/options/firewall/ 120 | 121 | ### rke_rancher_clusters 122 | 123 | Role to deploy a Rancher Control Plane with `rke` and `helm`. Check [roles/rke_rancher_clusters/README.md](./roles/rke_rancher_clusters/README.md) for more details. 124 | 125 | ### custom_k8s_cluster 126 | 127 | Role to create a custom Kubernetes cluster on a Rancher Control Plane and add nodes to the cluster. Check [roles/custom_k8s_cluster/README.md](./roles/custom_k8s_cluster/README.md) for more details. 128 | 129 | ### rancher_keepalived 130 | 131 | Role to deploy keepalived daemonsets on the Rancher Control Plane **and** on custom Kubernetes clusters. Provides one or multiple highly available virtual IPv4/IPv6 address(es) to the respective cluster. Usually called directly from `rke_rancher_clusters` and `custom_k8s_cluster`. 
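As a rough sketch, a node cleanup run could look like the following (assuming the sample inventory under `inventories/site`; the `--limit worker01` part is only a hypothetical way to restrict the play to a single node - check the play itself for how it selects its hosts):

```bash
# Hypothetical example: clean up the node "worker01" before re-adding it to a cluster.
ansible-playbook -i inventories/site plays/cleanup_k8snodes.yml --limit worker01
```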
132 | 133 | ## License 134 | 135 | GPLv3 136 | 137 | ## Author Information 138 | 139 | * Sebastian Plattner 140 | * Philip Schmid 141 | -------------------------------------------------------------------------------- /roles/rke_rancher_clusters/tasks/rancher.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create namespace for Rancher Control Plane 3 | k8s: 4 | kubeconfig: "{{ rke_cluster_kube_config }}" 5 | state: present 6 | api_version: v1 7 | kind: Namespace 8 | name: cattle-system 9 | vars: 10 | cluster: "{{ groups['rke_' + inventory_hostname] }}" 11 | 12 | - name: Create namespace for cert-manager 13 | k8s: 14 | kubeconfig: "{{ rke_cluster_kube_config }}" 15 | state: present 16 | api_version: v1 17 | kind: Namespace 18 | name: cert-manager 19 | vars: 20 | cluster: "{{ groups['rke_' + inventory_hostname] }}" 21 | when: 22 | - rancher_certmanager_enabled 23 | 24 | - name: Add Rancher Repo to Helm 25 | command: "{{ helm_binary }} repo add --kubeconfig {{ rke_cluster_kube_config }} {{ helm_rancher_repo }} {{ helm_rancher_repo_url }}" 26 | changed_when: false 27 | 28 | - name: Add Jetstack Cert-Manager Repo to Helm 29 | command: "{{ helm_binary }} repo add --kubeconfig {{ rke_cluster_kube_config }} {{ helm_certmanager_jetstack_repo }} {{ helm_certmanager_jetstack_repo_url }}" 30 | changed_when: false 31 | when: 32 | - rancher_certmanager_enabled 33 | 34 | - name: Update Helm Repo 35 | command: "{{ helm_binary }} repo update --kubeconfig {{ rke_cluster_kube_config }}" 36 | changed_when: false 37 | 38 | - name: Install CRD's for CertManager 39 | command: "{{ kubectl_binary }} --kubeconfig {{ rke_cluster_kube_config }} apply --validate=false -f {{ rancher_certmanager_crd_url }}" 40 | changed_when: false 41 | ignore_errors: yes 42 | when: 43 | - rancher_certmanager_enabled 44 | 45 | - name: Install Cert-Manager 46 | command: "{{ helm_binary }} upgrade --install --kubeconfig {{ rke_cluster_kube_config }} --namespace cert-manager --version {{ helm_certmanager_version }} cert-manager {{ helm_certmanager_jetstack_repo }}/cert-manager" 47 | changed_when: false 48 | when: 49 | - rancher_certmanager_enabled 50 | 51 | - name: Wait for certmanager to be ready 52 | command: "{{ kubectl_binary }} -n cert-manager --kubeconfig {{ rke_cluster_kube_config }} rollout status deploy/cert-manager" 53 | changed_when: false 54 | when: 55 | - rancher_certmanager_enabled 56 | 57 | - name: Wait for certmanager cainjector to be ready 58 | command: "{{ kubectl_binary }} -n cert-manager --kubeconfig {{ rke_cluster_kube_config }} rollout status deploy/cert-manager-cainjector" 59 | changed_when: false 60 | when: 61 | - rancher_certmanager_enabled 62 | 63 | - name: Wait for certmanager webhook to be ready 64 | command: "{{ kubectl_binary }} -n cert-manager --kubeconfig {{ rke_cluster_kube_config }} rollout status deploy/cert-manager-webhook" 65 | changed_when: false 66 | when: 67 | - rancher_certmanager_enabled 68 | 69 | - name: Wait 20 seconds for cert-manager to properly run 70 | pause: 71 | seconds: "20" 72 | 73 | - name: Create tls secret when using "Bring your own certificate" Rancher certificate method (method 3) 74 | k8s: 75 | kubeconfig: "{{ rke_cluster_kube_config }}" 76 | state: present 77 | definition: 78 | apiVersion: v1 79 | kind: Secret 80 | metadata: 81 | name: tls-rancher-ingress 82 | namespace: cattle-system 83 | data: 84 | tls.crt: "{{ rancher_tls_crt }}" 85 | tls.key: "{{ rancher_tls_key }}" 86 | type: kubernetes.io/tls 87 | when: 88 | - 
rancher_bring_your_own_certificates 89 | 90 | - name: Create tls cacerts secret when using "Bring your own certificate" Rancher certificate method (method 3) and using self-signed Rancher TLS cert 91 | k8s: 92 | kubeconfig: "{{ rke_cluster_kube_config }}" 93 | state: present 94 | definition: 95 | apiVersion: v1 96 | kind: Secret 97 | metadata: 98 | name: tls-ca 99 | namespace: cattle-system 100 | data: 101 | cacerts.pem: "{{ rancher_tls_cacerts }}" 102 | when: 103 | - rancher_bring_your_own_certificates 104 | - rancher_tls_self_signed 105 | 106 | # Rancher Certificate Method 1: 107 | - name: Install Rancher with generated self-signed certificates (via Cert-Manager) 108 | command: "{{ helm_binary }} upgrade --install --kubeconfig {{ rke_cluster_kube_config }} --namespace cattle-system --set hostname={{ rancher_hostname }} --set ingress.tls.source=rancher --version {{ helm_rancher_version }} rancher {{ helm_rancher_repo }}/rancher" 109 | changed_when: false 110 | when: 111 | - rancher_certmanager_enabled 112 | - rancher_generated_self_signed_certificates 113 | - not rancher_lets_encrypt_generated_certificated 114 | - not rancher_bring_your_own_certificates 115 | 116 | # Rancher Certificate Method 2: 117 | - name: Install Rancher with Let's Encrypt (via Cert-Manager) 118 | command: "{{ helm_binary }} upgrade --install --kubeconfig {{ rke_cluster_kube_config }} --namespace cattle-system --set hostname={{ rancher_hostname }} --set ingress.tls.source=letsEncrypt --set letsEncrypt.email={{ rancher_letsencrypt_email }} --version {{ helm_rancher_version }} rancher {{ helm_rancher_repo }}/rancher" 119 | changed_when: false 120 | when: 121 | - rancher_certmanager_enabled 122 | - not rancher_generated_self_signed_certificates 123 | - rancher_lets_encrypt_generated_certificated 124 | - not rancher_bring_your_own_certificates 125 | 126 | # Rancher Certificate Method 3a: 127 | - name: Install Rancher with officially signed certificates 128 | command: "{{ helm_binary }} upgrade --install --kubeconfig {{ rke_cluster_kube_config }} --namespace cattle-system --set hostname={{ rancher_hostname }} --set ingress.tls.source=secret --version {{ helm_rancher_version }} rancher {{ helm_rancher_repo }}/rancher" 129 | changed_when: false 130 | when: 131 | - not rancher_certmanager_enabled 132 | - not rancher_generated_self_signed_certificates 133 | - not rancher_lets_encrypt_generated_certificated 134 | - rancher_bring_your_own_certificates 135 | - not rancher_tls_self_signed 136 | 137 | # Rancher Certificate Method 3b: 138 | - name: Install Rancher with self-signed certificates 139 | command: "{{ helm_binary }} upgrade --install --kubeconfig {{ rke_cluster_kube_config }} --namespace cattle-system --set hostname={{ rancher_hostname }} --set ingress.tls.source=secret --set privateCA=true --version {{ helm_rancher_version }} rancher {{ helm_rancher_repo }}/rancher" 140 | changed_when: false 141 | when: 142 | - not rancher_certmanager_enabled 143 | - not rancher_generated_self_signed_certificates 144 | - not rancher_lets_encrypt_generated_certificated 145 | - rancher_bring_your_own_certificates 146 | - rancher_tls_self_signed 147 | 148 | - name: Wait for Rancher Deployment to be ready 149 | command: "{{ kubectl_binary }} -n cattle-system --kubeconfig {{ rke_cluster_kube_config }} rollout status deploy/rancher" 150 | changed_when: false 151 | 152 | - name: Wait for Rancher to be ready 153 | command: "curl -k --silent https://{{ rancher_hostname }}/ping" 154 | args: 155 | warn: no 156 | register: rancher_ready 157 | 
until: rancher_ready.stdout.find("pong") != -1 158 | retries: 24 159 | delay: 5 160 | changed_when: false 161 | check_mode: no 162 | -------------------------------------------------------------------------------- /roles/rancher_keepalived/tasks/configure-keepalived.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Wait until Cluster is ready and not in transitioning state 4 | uri: 5 | url: "{{ keepalived_deployment_rancher_api }}/clusters/{{ keepalived_deployment_rancher_cluster_id }}" 6 | validate_certs: "{{ keepalived_deployment_rancher_api_verify_ssl }}" 7 | return_content: yes 8 | headers: 9 | Authorization: "Bearer {{ keepalived_deployment_rancher_api_key }}" 10 | register: clusterstate 11 | until: clusterstate.json.state == "active" and clusterstate.json.transitioning == "no" 12 | retries: 30 13 | delay: 60 14 | when: 15 | - keepalived_deployment_on_custom_cluster 16 | 17 | - name: Create Namespace for IP Failover objects 18 | k8s: 19 | kubeconfig: "{{ kubeconfigfile }}" 20 | state: present 21 | api_version: v1 22 | kind: Namespace 23 | name: "{{ keepalived_ns }}" 24 | 25 | - name: Create Role for privileged PSP for IP Failover 26 | k8s: 27 | kubeconfig: "{{ kubeconfigfile }}" 28 | state: present 29 | definition: "{{ lookup('file', 'psp:privileged.yml') }}" 30 | 31 | - name: Create RoleBinding for privileged PSP for IP Failover 32 | k8s: 33 | kubeconfig: "{{ kubeconfigfile }}" 34 | state: present 35 | definition: "{{ lookup('file', 'default:psp:privileged.yml') }}" 36 | 37 | - name: Get all Rancher projects 38 | uri: 39 | url: "{{ keepalived_deployment_rancher_api }}/clusters/{{ keepalived_deployment_rancher_cluster_id }}/projects" 40 | validate_certs: "{{ keepalived_deployment_rancher_api_verify_ssl }}" 41 | return_content: yes 42 | headers: 43 | Authorization: "Bearer {{ keepalived_deployment_rancher_api_key }}" 44 | register: rancherprojects 45 | when: 46 | - keepalived_deployment_on_custom_cluster 47 | - keepalived_ns_to_system_project 48 | 49 | - name: Get the Rancher System project ID 50 | set_fact: 51 | ranchersystemprojectid: "{{ rancherprojects | json_query(\"json.data[?name == 'System'].id\") }}" 52 | when: 53 | - keepalived_deployment_on_custom_cluster 54 | - keepalived_ns_to_system_project 55 | - (rancherprojects | length) > 0 56 | 57 | - name: Print Rancher System project ID 58 | debug: 59 | msg: "{{ ranchersystemprojectid }}" 60 | when: 61 | - keepalived_deployment_on_custom_cluster 62 | - keepalived_ns_to_system_project 63 | - (ranchersystemprojectid | length) > 0 64 | 65 | - name: Move the keepalived namespace to the Rancher System project 66 | uri: 67 | url: "{{ keepalived_deployment_rancher_api }}/clusters/{{ keepalived_deployment_rancher_cluster_id }}/namespaces/{{ keepalived_ns }}?action=move" 68 | validate_certs: "{{ keepalived_deployment_rancher_api_verify_ssl }}" 69 | return_content: yes 70 | headers: 71 | Authorization: "Bearer {{ keepalived_deployment_rancher_api_key }}" 72 | body: 73 | projectId: "{{ ranchersystemprojectid }}" 74 | body_format: json 75 | status_code: 200 76 | method: POST 77 | when: 78 | - keepalived_deployment_on_custom_cluster 79 | - keepalived_ns_to_system_project 80 | - (ranchersystemprojectid | length) > 0 81 | 82 | - name: Set System project to PSP "unrestricted" 83 | uri: 84 | url: "{{ keepalived_deployment_rancher_api }}/projects/{{ ranchersystemprojectid | first }}?action=setpodsecuritypolicytemplate" 85 | validate_certs: "{{ keepalived_deployment_rancher_api_verify_ssl 
}}" 86 | return_content: yes 87 | headers: 88 | Authorization: "Bearer {{ keepalived_deployment_rancher_api_key }}" 89 | body: 90 | podSecurityPolicyTemplateId: "{{ keepalived_rancher_system_project_psp }}" 91 | body_format: json 92 | status_code: 200 93 | method: POST 94 | when: 95 | - keepalived_deployment_on_custom_cluster 96 | - keepalived_ns_to_system_project 97 | - (ranchersystemprojectid | length) > 0 98 | 99 | - name: Create ConfigMap with check.py and/or notify.sh for IP Failover 100 | k8s: 101 | kubeconfig: "{{ kubeconfigfile }}" 102 | state: present 103 | definition: "{{ lookup('file', 'cm-{{ keepalived_setup_env }}-notify.yml') }}" 104 | 105 | - name: Create IP Failover DaemonSets for Internal IP 106 | k8s: 107 | kubeconfig: "{{ kubeconfigfile }}" 108 | state: present 109 | definition: "{{ lookup('template', 'ds_failover.yml.j2') }}" 110 | with_items: 111 | - "{{ keepalived_private_failover_ip }}" 112 | vars: 113 | nodes: "{{ groups[keepalived_private_node_group_ipv4] }}" 114 | unicast_peers: "{% for node in nodes %}''{{ hostvars[node]['ansible_' + keepalived_private_failover_track_interface_ip]['ipv4']['address'] }}'',{% endfor %}" 115 | server_ids: "{% for node in nodes if keepalived_setup_env == 'cloudscale' %}{''{{ hostvars[node]['inventory_hostname'] }}'':''{{ hostvars[node]['cloudscale_vm']['uuid'] }}''},{% endfor %}" 116 | with_cloud_notify: "{{ 'True' if keepalived_setup_env == 'cloudscale' else 'False' }}" 117 | node_selector: "{{ keepalived_private_node_selector }}" 118 | node_toleration: "{{ keepalived_private_node_toleration }}" 119 | interface: "{{ keepalived_private_failover_track_interface_ip }}" 120 | track_interface: "{{ keepalived_private_failover_track_interface_ip }}" 121 | 122 | - name: Create IP Failover DaemonSets for Public IP 123 | k8s: 124 | kubeconfig: "{{ kubeconfigfile }}" 125 | state: present 126 | definition: "{{ lookup('template', 'ds_failover.yml.j2') }}" 127 | with_items: 128 | - "{{ keepalived_public_failover_ip }}" 129 | vars: 130 | nodes: "{{ groups[keepalived_public_node_group_ipv4] }}" 131 | unicast_peers: "{% for node in nodes %}''{{ hostvars[node]['ansible_' + keepalived_public_failover_track_interface_ip]['ipv4']['address'] }}'',{% endfor %}" 132 | server_ids: "{% for node in nodes if keepalived_setup_env == 'cloudscale' %}{''{{ hostvars[node]['inventory_hostname'] }}'':''{{ hostvars[node]['cloudscale_vm']['uuid'] }}''},{% endfor %}" 133 | with_cloud_notify: "{{ 'True' if keepalived_setup_env == 'cloudscale' else 'False' }}" 134 | node_selector: "{{ keepalived_public_node_selector }}" 135 | node_toleration: "{{ keepalived_public_node_toleration }}" 136 | interface: "{{ keepalived_public_failover_track_interface_ip }}" 137 | track_interface: "{{ keepalived_public_failover_track_interface_ip }}" 138 | when: 139 | - not keepalived_private_only 140 | 141 | - name: Create IP Failover DaemonSets for Public IPv6 142 | k8s: 143 | kubeconfig: "{{ kubeconfigfile }}" 144 | state: present 145 | definition: "{{ lookup('template', 'ds_failover.yml.j2') }}" 146 | with_items: 147 | - "{{ keepalived_public_failover_ipv6 }}" 148 | vars: 149 | nodes: "{{ groups[keepalived_public_node_group_ipv6] }}" 150 | unicast_peers: "{% for node in nodes %}''{{ hostvars[node]['ansible_default_ipv6']['address'] }}'',{% endfor %}" 151 | server_ids: "{% for node in nodes if keepalived_setup_env == 'cloudscale' %}{''{{ hostvars[node]['inventory_hostname'] }}'':''{{ hostvars[node]['cloudscale_vm']['uuid'] }}''},{% endfor %}" 152 | with_cloud_notify: "{{ 'True' if 
keepalived_setup_env == 'cloudscale' else 'False' }}" 153 | node_selector: "{{ keepalived_public_node_selector }}" 154 | node_toleration: "{{ keepalived_public_node_toleration }}" 155 | interface: "{{ keepalived_public_failover_track_interface_ipv6 }}" 156 | track_interface: "{{ keepalived_public_failover_track_interface_ipv6 }}" 157 | when: 158 | - not keepalived_private_only 159 | - not keepalived_ipv4_only -------------------------------------------------------------------------------- /roles/custom_k8s_cluster/tasks/cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if api_token is already present (rke_rancher_clusters was run before) 3 | set_fact: 4 | custom_k8s_cluster_api_key: "{{ hostvars['RANCHER_TOKEN_HOLDER']['api_token'] }}" 5 | when: 6 | - hostvars['RANCHER_TOKEN_HOLDER'] is defined 7 | - hostvars['RANCHER_TOKEN_HOLDER']['api_token'] is defined 8 | - (hostvars['RANCHER_TOKEN_HOLDER']['api_token'] | length) > 0 9 | - (custom_k8s_cluster_api_key | length) == 0 10 | 11 | - name: Get all Cluster from Rancher Control Plane 12 | uri: 13 | url: "{{ custom_k8s_cluster_rancher_api }}/clusters" 14 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 15 | return_content: yes 16 | headers: 17 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 18 | register: rancher_cluster 19 | check_mode: no 20 | 21 | - name: Set Fact for current cluster 22 | delegate_to: localhost 23 | set_fact: 24 | cluster: "{{ rancher_cluster | json_query(\"json.data[?name == '\" + custom_k8s_cluster_name + \"']\") }}" 25 | check_mode: no 26 | 27 | - name: Set Fact for current cluster version, if it already exists 28 | delegate_to: localhost 29 | set_fact: 30 | cluster_networkpolicy: "{{ cluster[0]['enableNetworkPolicy'] }}" 31 | cluster_version: "{{ cluster[0]['rancherKubernetesEngineConfig']['kubernetesVersion'] }}" 32 | cluster_ingress: "{{ cluster[0]['rancherKubernetesEngineConfig']['ingress']['provider'] }}" 33 | cluster_psp_template: "{{ cluster[0]['defaultPodSecurityPolicyTemplateId'] }}" 34 | cluster_id: "{{ cluster[0]['id'] }}" 35 | check_mode: no 36 | when: 37 | - (cluster | length) > 0 38 | 39 | - name: Print cluster facts from the Rancher API 40 | debug: 41 | msg: 42 | - "Currently applied cluster version: {{ cluster_version | lower }}" 43 | - " -> Cluster version configuration: {{ custom_k8s_cluster_kubernetes_version | lower }}" 44 | - "Currently applied cluster networkpolicy configuration: {{ cluster_networkpolicy | lower }}" 45 | - " -> Cluster networkpolicy configuration: {{ custom_k8s_cluster_enable_network_policy | lower }}" 46 | - "Currently applied cluster ingress controller: {{ cluster_ingress | lower }}" 47 | - " -> Cluster ingress controller configuration: {{ custom_k8s_clusters_ingress_provider | lower }}" 48 | - "Currently applied cluster PSP template: {{ cluster_psp_template | lower }}" 49 | - " -> Cluster PSP template configuration: {{ custom_k8s_clusters_default_psp | lower }}" 50 | when: 51 | - (cluster | length) > 0 52 | 53 | - name: Cluster does not exist, create new Cluster 54 | uri: 55 | url: "{{ custom_k8s_cluster_rancher_api }}/clusters" 56 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 57 | return_content: yes 58 | headers: 59 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 60 | body: "{{ lookup('template','cluster.json.j2') | regex_replace('\n',' ') }}" 61 | body_format: json 62 | status_code: 201 63 | method: POST 64 | register: cluster_new 65 | when: 66 | - (cluster | length) == 
0 67 | 68 | - name: Cluster does exist, check if update is requested 69 | uri: 70 | url: "{{ custom_k8s_cluster_rancher_api }}/clusters/{{ cluster_id }}" 71 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 72 | return_content: yes 73 | headers: 74 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 75 | body: "{{ lookup('template','cluster.json.j2') | regex_replace('\n',' ') }}" 76 | body_format: json 77 | status_code: 200 78 | method: PUT 79 | when: | 80 | ((cluster | length) > 0) and 81 | ((custom_k8s_cluster_enable_network_policy | lower ) != (cluster_networkpolicy | lower) or 82 | (custom_k8s_cluster_kubernetes_version | lower) != (cluster_version | lower) or 83 | (custom_k8s_clusters_ingress_provider | lower) != (cluster_ingress | lower) or 84 | (custom_k8s_clusters_default_psp | lower) != (cluster_psp_template | lower)) 85 | 86 | - name: Set fact if existing cluster 87 | set_fact: 88 | cluster: "{{ cluster[0] }}" 89 | when: 90 | - (cluster | length) > 0 91 | 92 | - name: Set fact if new cluster 93 | set_fact: 94 | cluster: "{{ cluster_new.json }}" 95 | when: 96 | - cluster_new is not skipped 97 | 98 | - name: Get Cluster Registration Token 99 | uri: 100 | url: "{{ custom_k8s_cluster_rancher_api }}/clusterregistrationtoken?id={{ cluster.id }}" 101 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 102 | return_content: yes 103 | headers: 104 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 105 | register: clusterregistrationtoken 106 | check_mode: no 107 | 108 | - name: Filter for correct clusterRegistrationToken 109 | set_fact: 110 | clusterregistrationtoken: "{{ clusterregistrationtoken | json_query(\"json.data[?clusterId == '\" + cluster.id + \"']\") }}" 111 | 112 | - name: Create cluster registration token when not already exists 113 | uri: 114 | url: "{{ custom_k8s_cluster_rancher_api }}/clusterregistrationtoken" 115 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 116 | return_content: yes 117 | headers: 118 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 119 | body: 120 | type: "clusterRegistrationToken" 121 | clusterId: "{{ cluster.id }}" 122 | body_format: json 123 | status_code: 201 124 | method: POST 125 | register: newclusterRegistrationToken 126 | when: 127 | - ( clusterregistrationtoken | length ) == 0 128 | 129 | - name: Set cluster registration token if new token 130 | set_fact: 131 | clusterregistrationtoken: "{{ newclusterRegistrationToken.json.token }}" 132 | when: 133 | - newclusterRegistrationToken is not skipped 134 | 135 | - name: Set cluster registration token if existing token 136 | set_fact: 137 | clusterregistrationtoken: "{{ clusterregistrationtoken[0].token }}" 138 | when: 139 | - newclusterRegistrationToken is skipped 140 | - ( clusterregistrationtoken | length ) > 0 141 | 142 | - name: Cluster registration token 143 | debug: 144 | msg: "{{ clusterregistrationtoken }}" 145 | 146 | - name: Get CA checksum 147 | uri: 148 | url: "{{ custom_k8s_cluster_rancher_api }}/clusterregistrationtoken?id={{ cluster.id }}" 149 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 150 | return_content: yes 151 | headers: 152 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 153 | register: clusternodecommand 154 | check_mode: no 155 | when: 156 | - custom_k8s_cluster_self_signed_certificate 157 | 158 | - name: Filter for correct cluster clusterNodeCommand 159 | set_fact: 160 | clusternodecommand: "{{ clusternodecommand | json_query(\"json.data[?clusterId == '\" + cluster.id + \"']\") }}" 161 | when: 162 | - 
custom_k8s_cluster_self_signed_certificate 163 | 164 | - name: Set cluster node command 165 | set_fact: 166 | clusternodecommand: "{{ clusternodecommand[0].nodeCommand }}" 167 | when: 168 | - custom_k8s_cluster_self_signed_certificate 169 | 170 | - name: Parse cluster ca-checksum 171 | set_fact: 172 | clustercachecksum: "{{ clusternodecommand | regex_search(custom_k8s_cluster_ca_checksum_param | string + '(\\s+)' +'(.*)', '\\2') | first }}" 173 | when: 174 | - custom_k8s_cluster_self_signed_certificate 175 | 176 | - name: Parsed ca-checksum from cluster 177 | debug: 178 | msg: "{{ clustercachecksum }}" 179 | when: 180 | - custom_k8s_cluster_self_signed_certificate 181 | 182 | - name: Get KubeConfig from Rancher Control Plane 183 | uri: 184 | url: "{{ custom_k8s_cluster_rancher_api }}/clusters/{{ cluster.id }}?action=generateKubeconfig" 185 | validate_certs: "{{ custom_k8s_cluster_verify_ssl }}" 186 | return_content: yes 187 | headers: 188 | Authorization: "Bearer {{ custom_k8s_cluster_api_key }}" 189 | body_format: json 190 | status_code: 200 191 | method: POST 192 | register: kubeconfig 193 | 194 | - name: Create local download directory 195 | delegate_to: localhost 196 | file: 197 | path: "./custom_k8s_cluster" 198 | state: directory 199 | mode: 0755 200 | run_once: true 201 | 202 | - name: Set KubeConfig filename 203 | set_fact: 204 | kubeconfigfile: "./custom_k8s_cluster/{{ cluster.id }}" 205 | when: 206 | - kubeconfig is not skipped 207 | 208 | - name: Store KubeConfig 209 | copy: 210 | content: "{{ kubeconfig.json.config }}" 211 | dest: "{{ kubeconfigfile }}" 212 | when: 213 | - kubeconfig is not skipped 214 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "c0f953807ea076c242a5948b9606070f5a3e80cd89bb13a98fb953483fe20b30" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": {}, 8 | "sources": [ 9 | { 10 | "name": "pypi", 11 | "url": "https://pypi.org/simple", 12 | "verify_ssl": true 13 | } 14 | ] 15 | }, 16 | "default": { 17 | "ansible": { 18 | "hashes": [ 19 | "sha256:737d819ffbd7a80c28795b4edd93e59ad21e6e6d53af0d19f57412814f9260d0" 20 | ], 21 | "index": "pypi", 22 | "version": "==4.2.0" 23 | }, 24 | "ansible-core": { 25 | "hashes": [ 26 | "sha256:7d3ce47014122907454704363485e48513f8ad0f00138b88870eb6d88953d121" 27 | ], 28 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'", 29 | "version": "==2.11.5" 30 | }, 31 | "cachetools": { 32 | "hashes": [ 33 | "sha256:2cc0b89715337ab6dbba85b5b50effe2b0c74e035d83ee8ed637cf52f12ae001", 34 | "sha256:61b5ed1e22a0924aed1d23b478f37e8d52549ff8a961de2909c69bf950020cff" 35 | ], 36 | "markers": "python_version ~= '3.5'", 37 | "version": "==4.2.2" 38 | }, 39 | "certifi": { 40 | "hashes": [ 41 | "sha256:2bbf76fd432960138b3ef6dda3dde0544f27cbf8546c458e60baf371917ba9ee", 42 | "sha256:50b1e4f8446b06f41be7dd6338db18e0990601dce795c2b1686458aa7e8fa7d8" 43 | ], 44 | "version": "==2021.5.30" 45 | }, 46 | "cffi": { 47 | "hashes": [ 48 | "sha256:06c54a68935738d206570b20da5ef2b6b6d92b38ef3ec45c5422c0ebaf338d4d", 49 | "sha256:0c0591bee64e438883b0c92a7bed78f6290d40bf02e54c5bf0978eaf36061771", 50 | "sha256:19ca0dbdeda3b2615421d54bef8985f72af6e0c47082a8d26122adac81a95872", 51 | "sha256:22b9c3c320171c108e903d61a3723b51e37aaa8c81255b5e7ce102775bd01e2c", 52 | "sha256:26bb2549b72708c833f5abe62b756176022a7b9a7f689b571e74c8478ead51dc", 53 | 
"sha256:33791e8a2dc2953f28b8d8d300dde42dd929ac28f974c4b4c6272cb2955cb762", 54 | "sha256:3c8d896becff2fa653dc4438b54a5a25a971d1f4110b32bd3068db3722c80202", 55 | "sha256:4373612d59c404baeb7cbd788a18b2b2a8331abcc84c3ba40051fcd18b17a4d5", 56 | "sha256:487d63e1454627c8e47dd230025780e91869cfba4c753a74fda196a1f6ad6548", 57 | "sha256:48916e459c54c4a70e52745639f1db524542140433599e13911b2f329834276a", 58 | "sha256:4922cd707b25e623b902c86188aca466d3620892db76c0bdd7b99a3d5e61d35f", 59 | "sha256:55af55e32ae468e9946f741a5d51f9896da6b9bf0bbdd326843fec05c730eb20", 60 | "sha256:57e555a9feb4a8460415f1aac331a2dc833b1115284f7ded7278b54afc5bd218", 61 | "sha256:5d4b68e216fc65e9fe4f524c177b54964af043dde734807586cf5435af84045c", 62 | "sha256:64fda793737bc4037521d4899be780534b9aea552eb673b9833b01f945904c2e", 63 | "sha256:6d6169cb3c6c2ad50db5b868db6491a790300ade1ed5d1da29289d73bbe40b56", 64 | "sha256:7bcac9a2b4fdbed2c16fa5681356d7121ecabf041f18d97ed5b8e0dd38a80224", 65 | "sha256:80b06212075346b5546b0417b9f2bf467fea3bfe7352f781ffc05a8ab24ba14a", 66 | "sha256:818014c754cd3dba7229c0f5884396264d51ffb87ec86e927ef0be140bfdb0d2", 67 | "sha256:8eb687582ed7cd8c4bdbff3df6c0da443eb89c3c72e6e5dcdd9c81729712791a", 68 | "sha256:99f27fefe34c37ba9875f224a8f36e31d744d8083e00f520f133cab79ad5e819", 69 | "sha256:9f3e33c28cd39d1b655ed1ba7247133b6f7fc16fa16887b120c0c670e35ce346", 70 | "sha256:a8661b2ce9694ca01c529bfa204dbb144b275a31685a075ce123f12331be790b", 71 | "sha256:a9da7010cec5a12193d1af9872a00888f396aba3dc79186604a09ea3ee7c029e", 72 | "sha256:aedb15f0a5a5949ecb129a82b72b19df97bbbca024081ed2ef88bd5c0a610534", 73 | "sha256:b315d709717a99f4b27b59b021e6207c64620790ca3e0bde636a6c7f14618abb", 74 | "sha256:ba6f2b3f452e150945d58f4badd92310449876c4c954836cfb1803bdd7b422f0", 75 | "sha256:c33d18eb6e6bc36f09d793c0dc58b0211fccc6ae5149b808da4a62660678b156", 76 | "sha256:c9a875ce9d7fe32887784274dd533c57909b7b1dcadcc128a2ac21331a9765dd", 77 | "sha256:c9e005e9bd57bc987764c32a1bee4364c44fdc11a3cc20a40b93b444984f2b87", 78 | "sha256:d2ad4d668a5c0645d281dcd17aff2be3212bc109b33814bbb15c4939f44181cc", 79 | "sha256:d950695ae4381ecd856bcaf2b1e866720e4ab9a1498cba61c602e56630ca7195", 80 | "sha256:e22dcb48709fc51a7b58a927391b23ab37eb3737a98ac4338e2448bef8559b33", 81 | "sha256:e8c6a99be100371dbb046880e7a282152aa5d6127ae01783e37662ef73850d8f", 82 | "sha256:e9dc245e3ac69c92ee4c167fbdd7428ec1956d4e754223124991ef29eb57a09d", 83 | "sha256:eb687a11f0a7a1839719edd80f41e459cc5366857ecbed383ff376c4e3cc6afd", 84 | "sha256:eb9e2a346c5238a30a746893f23a9535e700f8192a68c07c0258e7ece6ff3728", 85 | "sha256:ed38b924ce794e505647f7c331b22a693bee1538fdf46b0222c4717b42f744e7", 86 | "sha256:f0010c6f9d1a4011e429109fda55a225921e3206e7f62a0c22a35344bfd13cca", 87 | "sha256:f0c5d1acbfca6ebdd6b1e3eded8d261affb6ddcf2186205518f1428b8569bb99", 88 | "sha256:f10afb1004f102c7868ebfe91c28f4a712227fe4cb24974350ace1f90e1febbf", 89 | "sha256:f174135f5609428cc6e1b9090f9268f5c8935fddb1b25ccb8255a2d50de6789e", 90 | "sha256:f3ebe6e73c319340830a9b2825d32eb6d8475c1dac020b4f0aa774ee3b898d1c", 91 | "sha256:f627688813d0a4140153ff532537fbe4afea5a3dffce1f9deb7f91f848a832b5", 92 | "sha256:fd4305f86f53dfd8cd3522269ed7fc34856a8ee3709a5e28b2836b2db9d4cd69" 93 | ], 94 | "version": "==1.14.6" 95 | }, 96 | "charset-normalizer": { 97 | "hashes": [ 98 | "sha256:5d209c0a931f215cee683b6445e2d77677e7e75e159f78def0db09d68fafcaa6", 99 | "sha256:5ec46d183433dcbd0ab716f2d7f29d8dee50505b3fdb40c6b985c7c4f5a3591f" 100 | ], 101 | "markers": "python_version >= '3'", 102 | "version": "==2.0.6" 103 | }, 104 | "cryptography": { 
105 | "hashes": [ 106 | "sha256:0a7dcbcd3f1913f664aca35d47c1331fce738d44ec34b7be8b9d332151b0b01e", 107 | "sha256:1eb7bb0df6f6f583dd8e054689def236255161ebbcf62b226454ab9ec663746b", 108 | "sha256:21ca464b3a4b8d8e86ba0ee5045e103a1fcfac3b39319727bc0fc58c09c6aff7", 109 | "sha256:34dae04a0dce5730d8eb7894eab617d8a70d0c97da76b905de9efb7128ad7085", 110 | "sha256:3520667fda779eb788ea00080124875be18f2d8f0848ec00733c0ec3bb8219fc", 111 | "sha256:3c4129fc3fdc0fa8e40861b5ac0c673315b3c902bbdc05fc176764815b43dd1d", 112 | "sha256:3fa3a7ccf96e826affdf1a0a9432be74dc73423125c8f96a909e3835a5ef194a", 113 | "sha256:5b0fbfae7ff7febdb74b574055c7466da334a5371f253732d7e2e7525d570498", 114 | "sha256:695104a9223a7239d155d7627ad912953b540929ef97ae0c34c7b8bf30857e89", 115 | "sha256:8695456444f277af73a4877db9fc979849cd3ee74c198d04fc0776ebc3db52b9", 116 | "sha256:94cc5ed4ceaefcbe5bf38c8fba6a21fc1d365bb8fb826ea1688e3370b2e24a1c", 117 | "sha256:94fff993ee9bc1b2440d3b7243d488c6a3d9724cc2b09cdb297f6a886d040ef7", 118 | "sha256:9965c46c674ba8cc572bc09a03f4c649292ee73e1b683adb1ce81e82e9a6a0fb", 119 | "sha256:a00cf305f07b26c351d8d4e1af84ad7501eca8a342dedf24a7acb0e7b7406e14", 120 | "sha256:a305600e7a6b7b855cd798e00278161b681ad6e9b7eca94c721d5f588ab212af", 121 | "sha256:cd65b60cfe004790c795cc35f272e41a3df4631e2fb6b35aa7ac6ef2859d554e", 122 | "sha256:d2a6e5ef66503da51d2110edf6c403dc6b494cc0082f85db12f54e9c5d4c3ec5", 123 | "sha256:d9ec0e67a14f9d1d48dd87a2531009a9b251c02ea42851c060b25c782516ff06", 124 | "sha256:f44d141b8c4ea5eb4dbc9b3ad992d45580c1d22bf5e24363f2fbf50c2d7ae8a7" 125 | ], 126 | "markers": "python_version >= '3.6'", 127 | "version": "==3.4.8" 128 | }, 129 | "distro": { 130 | "hashes": [ 131 | "sha256:83f5e5a09f9c5f68f60173de572930effbcc0287bb84fdc4426cb4168c088424", 132 | "sha256:c8713330ab31a034623a9515663ed87696700b55f04556b97c39cd261aa70dc7" 133 | ], 134 | "version": "==1.6.0" 135 | }, 136 | "google-auth": { 137 | "hashes": [ 138 | "sha256:7ae5eda089d393ca01658b550df24913cbbbdd34e9e6dedc1cea747485ae0c04", 139 | "sha256:bde03220ed56e4e147dec92339c90ce95159dce657e2cccd0ac1fe82f6a96284" 140 | ], 141 | "markers": "python_version >= '3.6'", 142 | "version": "==2.1.0" 143 | }, 144 | "idna": { 145 | "hashes": [ 146 | "sha256:14475042e284991034cb48e06f6851428fb14c4dc953acd9be9a5e95c7b6dd7a", 147 | "sha256:467fbad99067910785144ce333826c71fb0e63a425657295239737f7ecd125f3" 148 | ], 149 | "markers": "python_version >= '3'", 150 | "version": "==3.2" 151 | }, 152 | "jinja2": { 153 | "hashes": [ 154 | "sha256:1f06f2da51e7b56b8f238affdd6b4e2c61e39598a378cc49345bc1bd42a978a4", 155 | "sha256:703f484b47a6af502e743c9122595cc812b0271f661722403114f71a79d0f5a4" 156 | ], 157 | "markers": "python_version >= '3.6'", 158 | "version": "==3.0.1" 159 | }, 160 | "jmespath": { 161 | "hashes": [ 162 | "sha256:b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9", 163 | "sha256:cdf6525904cc597730141d61b36f2e4b8ecc257c420fa2f4549bac2c2d0cb72f" 164 | ], 165 | "index": "pypi", 166 | "version": "==0.10.0" 167 | }, 168 | "kubernetes": { 169 | "hashes": [ 170 | "sha256:1a2472f8b01bc6aa87e3a34781f859bded5a5c8ff791a53d889a8bd6cc550430", 171 | "sha256:4af81201520977139a143f96123fb789fa351879df37f122916b9b6ed050bbaf" 172 | ], 173 | "version": "==11.0.0" 174 | }, 175 | "markupsafe": { 176 | "hashes": [ 177 | "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298", 178 | "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64", 179 | "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b", 180 | 
"sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567", 181 | "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff", 182 | "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724", 183 | "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74", 184 | "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646", 185 | "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35", 186 | "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6", 187 | "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6", 188 | "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad", 189 | "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26", 190 | "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38", 191 | "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac", 192 | "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7", 193 | "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6", 194 | "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75", 195 | "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f", 196 | "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135", 197 | "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8", 198 | "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a", 199 | "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a", 200 | "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9", 201 | "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864", 202 | "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914", 203 | "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18", 204 | "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8", 205 | "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2", 206 | "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d", 207 | "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b", 208 | "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b", 209 | "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f", 210 | "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb", 211 | "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833", 212 | "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28", 213 | "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415", 214 | "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902", 215 | "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d", 216 | "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9", 217 | "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d", 218 | "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145", 219 | "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066", 220 | "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c", 221 | "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1", 222 | "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f", 223 | 
"sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53", 224 | "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134", 225 | "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85", 226 | "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5", 227 | "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94", 228 | "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509", 229 | "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51", 230 | "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872" 231 | ], 232 | "markers": "python_version >= '3.6'", 233 | "version": "==2.0.1" 234 | }, 235 | "oauthlib": { 236 | "hashes": [ 237 | "sha256:42bf6354c2ed8c6acb54d971fce6f88193d97297e18602a3a886603f9d7730cc", 238 | "sha256:8f0215fcc533dd8dd1bee6f4c412d4f0cd7297307d43ac61666389e3bc3198a3" 239 | ], 240 | "markers": "python_version >= '3.6'", 241 | "version": "==3.1.1" 242 | }, 243 | "openshift": { 244 | "hashes": [ 245 | "sha256:110b0d3c84a83500f0fd150ab26dee29615157e6659bf72808788aa79fc17afc" 246 | ], 247 | "index": "pypi", 248 | "version": "==0.11.2" 249 | }, 250 | "packaging": { 251 | "hashes": [ 252 | "sha256:7dc96269f53a4ccec5c0670940a4281106dd0bb343f47b7471f779df49c2fbe7", 253 | "sha256:c86254f9220d55e31cc94d69bade760f0847da8000def4dfe1c6b872fd14ff14" 254 | ], 255 | "markers": "python_version >= '3.6'", 256 | "version": "==21.0" 257 | }, 258 | "pyasn1": { 259 | "hashes": [ 260 | "sha256:014c0e9976956a08139dc0712ae195324a75e142284d5f87f1a87ee1b068a359", 261 | "sha256:03840c999ba71680a131cfaee6fab142e1ed9bbd9c693e285cc6aca0d555e576", 262 | "sha256:0458773cfe65b153891ac249bcf1b5f8f320b7c2ce462151f8fa74de8934becf", 263 | "sha256:08c3c53b75eaa48d71cf8c710312316392ed40899cb34710d092e96745a358b7", 264 | "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d", 265 | "sha256:5c9414dcfede6e441f7e8f81b43b34e834731003427e5b09e4e00e3172a10f00", 266 | "sha256:6e7545f1a61025a4e58bb336952c5061697da694db1cae97b116e9c46abcf7c8", 267 | "sha256:78fa6da68ed2727915c4767bb386ab32cdba863caa7dbe473eaae45f9959da86", 268 | "sha256:7ab8a544af125fb704feadb008c99a88805126fb525280b2270bb25cc1d78a12", 269 | "sha256:99fcc3c8d804d1bc6d9a099921e39d827026409a58f2a720dcdb89374ea0c776", 270 | "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba", 271 | "sha256:e89bf84b5437b532b0803ba5c9a5e054d21fec423a89952a74f87fa2c9b7bce2", 272 | "sha256:fec3e9d8e36808a28efb59b489e4528c10ad0f480e57dcc32b4de5c9d8c9fdf3" 273 | ], 274 | "version": "==0.4.8" 275 | }, 276 | "pyasn1-modules": { 277 | "hashes": [ 278 | "sha256:0845a5582f6a02bb3e1bde9ecfc4bfcae6ec3210dd270522fee602365430c3f8", 279 | "sha256:0fe1b68d1e486a1ed5473f1302bd991c1611d319bba158e98b106ff86e1d7199", 280 | "sha256:15b7c67fabc7fc240d87fb9aabf999cf82311a6d6fb2c70d00d3d0604878c811", 281 | "sha256:426edb7a5e8879f1ec54a1864f16b882c2837bfd06eee62f2c982315ee2473ed", 282 | "sha256:65cebbaffc913f4fe9e4808735c95ea22d7a7775646ab690518c056784bc21b4", 283 | "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e", 284 | "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74", 285 | "sha256:a99324196732f53093a84c4369c996713eb8c89d360a496b599fb1a9c47fc3eb", 286 | "sha256:b80486a6c77252ea3a3e9b1e360bc9cf28eaac41263d173c032581ad2f20fe45", 287 | "sha256:c29a5e5cc7a3f05926aff34e097e84f8589cd790ce0ed41b67aed6857b26aafd", 288 | 
"sha256:cbac4bc38d117f2a49aeedec4407d23e8866ea4ac27ff2cf7fb3e5b570df19e0", 289 | "sha256:f39edd8c4ecaa4556e989147ebf219227e2cd2e8a43c7e7fcb1f1c18c5fd6a3d", 290 | "sha256:fe0644d9ab041506b62782e92b06b8c68cca799e1a9636ec398675459e031405" 291 | ], 292 | "version": "==0.2.8" 293 | }, 294 | "pycparser": { 295 | "hashes": [ 296 | "sha256:2d475327684562c3a96cc71adf7dc8c4f0565175cf86b6d7a404ff4c771f15f0", 297 | "sha256:7582ad22678f0fcd81102833f60ef8d0e57288b6b5fb00323d101be910e35705" 298 | ], 299 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 300 | "version": "==2.20" 301 | }, 302 | "pyparsing": { 303 | "hashes": [ 304 | "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1", 305 | "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b" 306 | ], 307 | "markers": "python_version >= '2.6' and python_version not in '3.0, 3.1, 3.2, 3.3'", 308 | "version": "==2.4.7" 309 | }, 310 | "python-dateutil": { 311 | "hashes": [ 312 | "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86", 313 | "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9" 314 | ], 315 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 316 | "version": "==2.8.2" 317 | }, 318 | "python-string-utils": { 319 | "hashes": [ 320 | "sha256:dcf9060b03f07647c0a603408dc8b03f807f3b54a05c6e19eb14460256fac0cb", 321 | "sha256:f1a88700baf99db1a9b6953f44181ad9ca56623c81e257e6009707e2e7851fa4" 322 | ], 323 | "markers": "python_version >= '3.5'", 324 | "version": "==1.0.0" 325 | }, 326 | "pyyaml": { 327 | "hashes": [ 328 | "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf", 329 | "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696", 330 | "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393", 331 | "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77", 332 | "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922", 333 | "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5", 334 | "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8", 335 | "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10", 336 | "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc", 337 | "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018", 338 | "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e", 339 | "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253", 340 | "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347", 341 | "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183", 342 | "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541", 343 | "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb", 344 | "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185", 345 | "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc", 346 | "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db", 347 | "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa", 348 | "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46", 349 | "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122", 350 | "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b", 351 | 
"sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63", 352 | "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df", 353 | "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc", 354 | "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247", 355 | "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6", 356 | "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0" 357 | ], 358 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", 359 | "version": "==5.4.1" 360 | }, 361 | "requests": { 362 | "hashes": [ 363 | "sha256:6c1246513ecd5ecd4528a0906f910e8f0f9c6b8ec72030dc9fd154dc1a6efd24", 364 | "sha256:b8aa58f8cf793ffd8782d3d8cb19e66ef36f7aba4353eec859e74678b01b07a7" 365 | ], 366 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", 367 | "version": "==2.26.0" 368 | }, 369 | "requests-oauthlib": { 370 | "hashes": [ 371 | "sha256:7f71572defaecd16372f9006f33c2ec8c077c3cfa6f5911a9a90202beb513f3d", 372 | "sha256:b4261601a71fd721a8bd6d7aa1cc1d6a8a93b4a9f5e96626f8e4d91e8beeaa6a", 373 | "sha256:fa6c47b933f01060936d87ae9327fead68768b69c6c9ea2109c48be30f2d4dbc" 374 | ], 375 | "version": "==1.3.0" 376 | }, 377 | "resolvelib": { 378 | "hashes": [ 379 | "sha256:8113ae3ed6d33c6be0bcbf03ffeb06c0995c099b7b8aaa5ddf2e9b3b3df4e915", 380 | "sha256:9b9b80d5c60e4c2a8b7fbf0712c3449dc01d74e215632e5199850c9eca687628" 381 | ], 382 | "version": "==0.5.4" 383 | }, 384 | "rsa": { 385 | "hashes": [ 386 | "sha256:78f9a9bf4e7be0c5ded4583326e7461e3a3c5aae24073648b4bdfa797d78c9d2", 387 | "sha256:9d689e6ca1b3038bc82bf8d23e944b6b6037bc02301a574935b2dd946e0353b9" 388 | ], 389 | "markers": "python_version >= '3.5' and python_version < '4'", 390 | "version": "==4.7.2" 391 | }, 392 | "ruamel.yaml": { 393 | "hashes": [ 394 | "sha256:1a771fc92d3823682b7f0893ad56cb5a5c87c48e62b5399d6f42c8759a583b33", 395 | "sha256:ea21da1198c4b41b8e7a259301cc9710d3b972bf8ba52f06218478e6802dd1f1" 396 | ], 397 | "markers": "python_version >= '3'", 398 | "version": "==0.17.16" 399 | }, 400 | "ruamel.yaml.clib": { 401 | "hashes": [ 402 | "sha256:0847201b767447fc33b9c235780d3aa90357d20dd6108b92be544427bea197dd", 403 | "sha256:1866cf2c284a03b9524a5cc00daca56d80057c5ce3cdc86a52020f4c720856f0", 404 | "sha256:31ea73e564a7b5fbbe8188ab8b334393e06d997914a4e184975348f204790277", 405 | "sha256:3fb9575a5acd13031c57a62cc7823e5d2ff8bc3835ba4d94b921b4e6ee664104", 406 | "sha256:4ff604ce439abb20794f05613c374759ce10e3595d1867764dd1ae675b85acbd", 407 | "sha256:72a2b8b2ff0a627496aad76f37a652bcef400fd861721744201ef1b45199ab78", 408 | "sha256:78988ed190206672da0f5d50c61afef8f67daa718d614377dcd5e3ed85ab4a99", 409 | "sha256:7b2927e92feb51d830f531de4ccb11b320255ee95e791022555971c466af4527", 410 | "sha256:7f7ecb53ae6848f959db6ae93bdff1740e651809780822270eab111500842a84", 411 | "sha256:825d5fccef6da42f3c8eccd4281af399f21c02b32d98e113dbc631ea6a6ecbc7", 412 | "sha256:846fc8336443106fe23f9b6d6b8c14a53d38cef9a375149d61f99d78782ea468", 413 | "sha256:89221ec6d6026f8ae859c09b9718799fea22c0e8da8b766b0b2c9a9ba2db326b", 414 | "sha256:9efef4aab5353387b07f6b22ace0867032b900d8e91674b5d8ea9150db5cae94", 415 | "sha256:a32f8d81ea0c6173ab1b3da956869114cae53ba1e9f72374032e33ba3118c233", 416 | "sha256:a49e0161897901d1ac9c4a79984b8410f450565bbad64dbfcbf76152743a0cdb", 417 | "sha256:ada3f400d9923a190ea8b59c8f60680c4ef8a4b0dfae134d2f2ff68429adfab5", 418 | 
"sha256:bf75d28fa071645c529b5474a550a44686821decebdd00e21127ef1fd566eabe", 419 | "sha256:cfdb9389d888c5b74af297e51ce357b800dd844898af9d4a547ffc143fa56751", 420 | "sha256:d67f273097c368265a7b81e152e07fb90ed395df6e552b9fa858c6d2c9f42502", 421 | "sha256:dc6a613d6c74eef5a14a214d433d06291526145431c3b964f5e16529b1842bed", 422 | "sha256:de9c6b8a1ba52919ae919f3ae96abb72b994dd0350226e28f3686cb4f142165c" 423 | ], 424 | "markers": "python_version < '3.10' and platform_python_implementation == 'CPython'", 425 | "version": "==0.2.6" 426 | }, 427 | "selinux": { 428 | "hashes": [ 429 | "sha256:820adcf1b4451c9cc7759848797703263ba0eb6a4cad76d73548a9e0d57b7926", 430 | "sha256:d435f514e834e3fdc0941f6a29d086b80b2ea51b28112aee6254bd104ee42a74" 431 | ], 432 | "index": "pypi", 433 | "version": "==0.2.1" 434 | }, 435 | "six": { 436 | "hashes": [ 437 | "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", 438 | "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" 439 | ], 440 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 441 | "version": "==1.16.0" 442 | }, 443 | "urllib3": { 444 | "hashes": [ 445 | "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece", 446 | "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844" 447 | ], 448 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4' and python_version < '4'", 449 | "version": "==1.26.7" 450 | }, 451 | "websocket-client": { 452 | "hashes": [ 453 | "sha256:0133d2f784858e59959ce82ddac316634229da55b498aac311f1620567a710ec", 454 | "sha256:8dfb715d8a992f5712fff8c843adae94e22b22a99b2c5e6b0ec4a1a981cc4e0d" 455 | ], 456 | "markers": "python_version >= '3.6'", 457 | "version": "==1.2.1" 458 | } 459 | }, 460 | "develop": { 461 | "astroid": { 462 | "hashes": [ 463 | "sha256:87de48a92e29cedf7210ffa853d11441e7ad94cb47bacd91b023499b51cbc756", 464 | "sha256:d25869fc7f44f1d9fb7d24fd7ea0639656f5355fc3089cd1f3d18c6ec6b124c7" 465 | ], 466 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 467 | "version": "==1.6.6" 468 | }, 469 | "isort": { 470 | "hashes": [ 471 | "sha256:9c2ea1e62d871267b78307fe511c0838ba0da28698c5732d54e2790bf3ba9899", 472 | "sha256:e17d6e2b81095c9db0a03a8025a957f334d6ea30b26f9ec70805411e5c7c81f2" 473 | ], 474 | "markers": "python_version < '4.0' and python_full_version >= '3.6.1'", 475 | "version": "==5.9.3" 476 | }, 477 | "lazy-object-proxy": { 478 | "hashes": [ 479 | "sha256:17e0967ba374fc24141738c69736da90e94419338fd4c7c7bef01ee26b339653", 480 | "sha256:1fee665d2638491f4d6e55bd483e15ef21f6c8c2095f235fef72601021e64f61", 481 | "sha256:22ddd618cefe54305df49e4c069fa65715be4ad0e78e8d252a33debf00f6ede2", 482 | "sha256:24a5045889cc2729033b3e604d496c2b6f588c754f7a62027ad4437a7ecc4837", 483 | "sha256:410283732af311b51b837894fa2f24f2c0039aa7f220135192b38fcc42bd43d3", 484 | "sha256:4732c765372bd78a2d6b2150a6e99d00a78ec963375f236979c0626b97ed8e43", 485 | "sha256:489000d368377571c6f982fba6497f2aa13c6d1facc40660963da62f5c379726", 486 | "sha256:4f60460e9f1eb632584c9685bccea152f4ac2130e299784dbaf9fae9f49891b3", 487 | "sha256:5743a5ab42ae40caa8421b320ebf3a998f89c85cdc8376d6b2e00bd12bd1b587", 488 | "sha256:85fb7608121fd5621cc4377a8961d0b32ccf84a7285b4f1d21988b2eae2868e8", 489 | "sha256:9698110e36e2df951c7c36b6729e96429c9c32b3331989ef19976592c5f3c77a", 490 | "sha256:9d397bf41caad3f489e10774667310d73cb9c4258e9aed94b9ec734b34b495fd", 491 | 
"sha256:b579f8acbf2bdd9ea200b1d5dea36abd93cabf56cf626ab9c744a432e15c815f", 492 | "sha256:b865b01a2e7f96db0c5d12cfea590f98d8c5ba64ad222300d93ce6ff9138bcad", 493 | "sha256:bf34e368e8dd976423396555078def5cfc3039ebc6fc06d1ae2c5a65eebbcde4", 494 | "sha256:c6938967f8528b3668622a9ed3b31d145fab161a32f5891ea7b84f6b790be05b", 495 | "sha256:d1c2676e3d840852a2de7c7d5d76407c772927addff8d742b9808fe0afccebdf", 496 | "sha256:d7124f52f3bd259f510651450e18e0fd081ed82f3c08541dffc7b94b883aa981", 497 | "sha256:d900d949b707778696fdf01036f58c9876a0d8bfe116e8d220cfd4b15f14e741", 498 | "sha256:ebfd274dcd5133e0afae738e6d9da4323c3eb021b3e13052d8cbd0e457b1256e", 499 | "sha256:ed361bb83436f117f9917d282a456f9e5009ea12fd6de8742d1a4752c3017e93", 500 | "sha256:f5144c75445ae3ca2057faac03fda5a902eff196702b0a24daf1d6ce0650514b" 501 | ], 502 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'", 503 | "version": "==1.6.0" 504 | }, 505 | "mccabe": { 506 | "hashes": [ 507 | "sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", 508 | "sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f" 509 | ], 510 | "version": "==0.6.1" 511 | }, 512 | "pylint": { 513 | "hashes": [ 514 | "sha256:367e3d49813d349a905390ac27989eff82ab84958731c5ef0bef867452cfdc42", 515 | "sha256:97a42df23d436c70132971d1dcb9efad2fe5c0c6add55b90161e773caf729300" 516 | ], 517 | "index": "pypi", 518 | "version": "==1.9.5" 519 | }, 520 | "six": { 521 | "hashes": [ 522 | "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926", 523 | "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254" 524 | ], 525 | "markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'", 526 | "version": "==1.16.0" 527 | }, 528 | "wrapt": { 529 | "hashes": [ 530 | "sha256:b62ffa81fb85f4332a4f609cab4ac40709470da05643a082ec1eb88e6d9b97d7" 531 | ], 532 | "version": "==1.12.1" 533 | } 534 | } 535 | } 536 | --------------------------------------------------------------------------------