├── .gitignore ├── LICENSE ├── README.md ├── ansible.cfg ├── inventory └── group_vars │ └── all.yml ├── playbooks ├── addons │ ├── cicd.yml │ ├── cockroachdb.yml │ ├── helm.yml │ ├── istio.yml │ ├── kubelego.yml │ └── prometheus.yml ├── cluster │ ├── build.yml │ ├── cni.yml │ ├── coredns.yml │ ├── dashboard.yml │ ├── etcd.yml │ ├── external.yml │ ├── ingress.yml │ ├── kubedns.yml │ ├── master.yml │ ├── node.yml │ ├── policy.yml │ ├── registry.yml │ ├── stats.yml │ └── user.yml ├── network │ ├── lb.yml │ └── setup.yml ├── prepare-gce.yml ├── prepare-vps.yml ├── setup-addons.yml ├── setup-base.yml ├── setup-build.yml ├── setup-cicd.yml ├── setup-gateway.yml ├── ssl │ ├── build.yml │ ├── master.yml │ └── setup.yml ├── storage │ ├── cloud.yml │ └── volumes.yml ├── system │ ├── firewall.yml │ ├── gateway-lb.yml │ ├── logs-rotation.yaml │ ├── master-lb.yml │ ├── node-lb.yml │ ├── permissions.yml │ └── update.yml ├── toolchain │ ├── build.yml │ └── setup.yml └── vm │ ├── setup.yml │ └── templates │ └── inventory.j2 └── roles ├── build ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ └── docker-config.json.j2 ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── cicd ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-k8s-community-secrets.sh.j2 │ ├── deploy-k8s-community-user-db.sh │ └── k8s-community-secrets.yaml.j2 ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── cni ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── canal.yaml │ ├── deploy-cni.sh │ └── romana.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── cockroachdb ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── 
templates │ ├── cockroachdb-backup.yaml │ ├── cockroachdb-client.yaml │ ├── cockroachdb-init.yaml │ ├── cockroachdb-volumes.yaml │ ├── cockroachdb.yaml │ └── deploy-cockroachdb.sh ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── coredns ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── coredns.yaml │ └── deploy-coredns.sh ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── dashboard ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── dashboard.yaml │ └── deploy-dashboard.sh ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── etcd ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── etcd.conf.j2 │ └── profile.sh.j2 ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── external ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-external-services.sh │ └── external-services.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── haproxy ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ └── haproxy.cfg.j2 ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── helm ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-tiller.sh │ └── helm-tiller.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── ingress ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-ingress.sh │ ├── echoheaders.yaml │ ├── gce-ingress-controller.yaml │ ├── 
haproxy-ingress-controller.yaml │ └── nginx-ingress-controller.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── instance ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── istio ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── accounts.yaml │ ├── config.yaml │ ├── deploy-istio.sh │ ├── grafana.yaml │ ├── ingress-certs-secret.yaml │ ├── initializer.yaml │ ├── istio.yaml │ ├── prometheus.yaml │ ├── servicegraph.yaml │ └── zipkin.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── journald ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── kubedns ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-kubedns.sh │ ├── kubedns-autoscaler.yaml │ └── kubedns.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── kubelego ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── alert-manager.yaml │ ├── deploy-kubelego.sh │ ├── kube-lego.yaml │ ├── kube-stats.yaml │ └── node-exporter.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── kubernetes ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── config │ ├── etcd-ep-profile.sh │ ├── kubelet │ ├── kubelet.service │ ├── kubernetes-accounting.conf │ ├── manifests │ │ ├── kube-apiserver.yaml │ │ ├── kube-controller-manager.yaml │ │ ├── kube-proxy.yaml │ │ └── kube-scheduler.yaml │ └── policy │ │ ├── basic-auth.csv │ │ ├── 
known-tokens.csv │ │ └── kubeconfig.client ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── lb ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── logrotate ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── network ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── policy ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── admin-clusterrolebinding.yaml │ ├── ceph-secret.yaml │ ├── cluster-reader-clusterrole.yaml │ ├── cluster-reader-clusterrolebinding.yaml │ ├── deploy-policy.sh │ ├── namespaces.yaml │ ├── release-role.yaml │ ├── release-rolebinding.yaml │ └── tls-secret.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── prometheus ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── alert-manager.yaml │ ├── alerts │ │ ├── all.yml │ │ ├── app.yml │ │ ├── cockroachdb.yml │ │ ├── common.yml │ │ ├── deployment.yml │ │ ├── ingress.yml │ │ ├── kubernetes.yml │ │ ├── node.yml │ │ ├── pods.yml │ │ ├── prometheus.yml │ │ └── service.yml │ ├── blackbox-exporter.yaml │ ├── config.yaml │ ├── deploy-prometheus.sh │ ├── grafana-dashboards │ │ ├── all.yml │ │ ├── capacity-planning.json │ │ ├── cluster-health.json │ │ ├── cluster-monitoring.json │ │ ├── cluster-status.json │ │ ├── cockroachdb │ │ │ ├── replicas.json │ │ │ ├── runtime.json │ │ │ ├── sql.json │ │ │ └── storage.json │ │ ├── control-plane-status.json │ │ ├── deployment.json │ │ ├── nodes.json │ │ ├── 
pods.json │ │ ├── resource-requests.json │ │ └── template.json │ ├── grafana.yaml │ ├── kube-state-metrics.yaml │ ├── node-exporter.yaml │ ├── prometheus.yaml │ ├── push-gateway.yaml │ ├── scrape_configs │ │ ├── all.yml │ │ ├── cockroachdb.yml │ │ ├── istio.yml │ │ ├── kubernetes.yml │ │ └── prometheus.yml │ └── server.yaml └── vars │ └── main.yml ├── readiness ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── registry ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-registry.sh │ └── kube-registry.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── ssl ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── config.json.j2 │ ├── csr-ca.json.j2 │ ├── csr-client.json.j2 │ └── csr-common.json.j2 ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── stats ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ ├── deploy-stats.sh │ └── heapster.yaml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── storage ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── toolchain ├── README.md ├── defaults │ └── main.yml ├── files │ └── docker ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── tests │ ├── inventory │ └── test.yml └── vars │ └── main.yml ├── user ├── README.md ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml ├── templates │ └── kubeconfig.default ├── tests │ ├── inventory │ └── test.yml └── vars │ └── 
main.yml └── volumes ├── README.md ├── defaults └── main.yml ├── handlers └── main.yml ├── meta └── main.yml ├── tasks └── main.yml ├── templates ├── ceph-storage.yaml ├── deploy-volumes.sh ├── gce-storage.yaml ├── local-storage.yaml └── volumes.yaml ├── tests ├── inventory └── test.yml └── vars └── main.yml /.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | community 3 | dev 4 | stage 5 | stable 6 | inventory/cluster 7 | inventory/.certs 8 | .idea 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Kubernetes Community 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ./roles 3 | host_key_checking = False 4 | inventory = inventory 5 | # use alternative inventory 6 | # inventory = gce 7 | 8 | remote_user = deploy 9 | # uncomment it if would like to use special key for ssh operations 10 | # private_key_file = ~/.ssh/cluster-deploy.key 11 | -------------------------------------------------------------------------------- /playbooks/addons/cicd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: build 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - role: cicd 7 | -------------------------------------------------------------------------------- /playbooks/addons/cockroachdb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: addons 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: cockroachdb 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | k8s_node_hosts: '{{ groups["node"] }}' 10 | -------------------------------------------------------------------------------- /playbooks/addons/helm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: addons 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: helm 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/addons/istio.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: addons 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: istio 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | k8s_node_hosts: '{{ groups["node"] }}' 10 | 
-------------------------------------------------------------------------------- /playbooks/addons/kubelego.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: addons 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: kubelego 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/addons/prometheus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: addons 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - role: prometheus 7 | k8s_master_hosts: '{{ groups["master"] }}' 8 | k8s_node_hosts: '{{ groups["node"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/cluster/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: build 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - user 7 | - build 8 | 9 | - hosts: build 10 | remote_user: '{{ k8s_build_ssh_user }}' 11 | roles: 12 | - user 13 | - build 14 | -------------------------------------------------------------------------------- /playbooks/cluster/cni.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - node 5 | remote_user: '{{ k8s_ssh_user }}' 6 | become: true 7 | roles: 8 | - role: cni 9 | k8s_master_hosts: '{{ groups["master"] }}' 10 | 11 | - hosts: master 12 | remote_user: '{{ k8s_ssh_user }}' 13 | become: true 14 | 15 | vars: 16 | # Kubernetes configs path 17 | k8s_conf_dir: /etc/kubernetes 18 | k8s_cni_dir: '{{ k8s_conf_dir }}/cni' 19 | 20 | tasks: 21 | 22 | - name: Awaiting for k8s API 23 | wait_for: 24 | host: 127.0.0.1 25 | port: 8080 26 | 27 | - name: Awaiting for nodes 28 | wait_for: 29 | host: '{{ item }}' 30 | port: 10250 31 | state: drained 32 | 
with_items: '{{ groups["node"] }}' 33 | 34 | - name: Start cni 35 | command: '{{ k8s_cni_dir }}/deploy-cni.sh' 36 | when: inventory_hostname in groups["master"][0] 37 | -------------------------------------------------------------------------------- /playbooks/cluster/coredns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: coredns 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/cluster/dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: dashboard 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/cluster/etcd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: etcd 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - role: etcd 7 | etcd_hosts: '{{ groups["etcd"] }}' 8 | -------------------------------------------------------------------------------- /playbooks/cluster/external.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - role: external 7 | k8s_master_hosts: '{{ groups["master"] }}' 8 | -------------------------------------------------------------------------------- /playbooks/cluster/ingress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: ingress 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 
| k8s_node_hosts: '{{ groups["node"] }}' 10 | -------------------------------------------------------------------------------- /playbooks/cluster/kubedns.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: kubedns 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/cluster/master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | # use serial = 1 in production environment 6 | # serial: 1 7 | roles: 8 | - role: kubernetes 9 | k8s_master_hosts: '{{ groups["master"] }}' 10 | k8s_node_hosts: '{{ groups["node"] }}' 11 | -------------------------------------------------------------------------------- /playbooks/cluster/node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: node 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | # use serial = 1 in production environment 6 | # serial: 1 7 | roles: 8 | - role: kubernetes 9 | k8s_master_hosts: '{{ groups["master"] }}' 10 | k8s_node_hosts: '{{ groups["node"] }}' 11 | -------------------------------------------------------------------------------- /playbooks/cluster/policy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - role: policy 7 | k8s_master_hosts: '{{ groups["master"] }}' 8 | -------------------------------------------------------------------------------- /playbooks/cluster/registry.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | 
roles: 6 | - readiness 7 | - role: registry 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | k8s_storage_hosts: '{{ groups["storage"] }}' 10 | -------------------------------------------------------------------------------- /playbooks/cluster/stats.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: stats 8 | k8s_master_hosts: '{{ groups["master"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/cluster/user.yml: -------------------------------------------------------------------------------- 1 | 2 | - hosts: 3 | - master 4 | - node 5 | remote_user: '{{ k8s_ssh_user }}' 6 | become: true 7 | roles: 8 | - role: user 9 | k8s_master_hosts: '{{ groups["master"] }}' 10 | 11 | - hosts: 12 | - master 13 | - node 14 | remote_user: '{{ k8s_ssh_user }}' 15 | roles: 16 | - role: user 17 | k8s_master_hosts: '{{ groups["master"] }}' 18 | -------------------------------------------------------------------------------- /playbooks/network/lb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | roles: 5 | - lb 6 | -------------------------------------------------------------------------------- /playbooks/network/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | roles: 5 | - network 6 | -------------------------------------------------------------------------------- /playbooks/prepare-gce.yml: -------------------------------------------------------------------------------- 1 | - include: network/setup.yml 2 | - include: vm/setup.yml 3 | - include: network/lb.yml 4 | when: k8s_lb_type == 'nginx' 5 | -------------------------------------------------------------------------------- 
/playbooks/prepare-vps.yml: -------------------------------------------------------------------------------- 1 | - include: system/permissions.yml 2 | - include: system/update.yml 3 | - include: system/firewall.yml 4 | - include: system/master-lb.yml 5 | - include: system/node-lb.yml 6 | - include: system/gateway-lb.yml 7 | - include: system/logs-rotation.yml 8 | -------------------------------------------------------------------------------- /playbooks/setup-addons.yml: -------------------------------------------------------------------------------- 1 | - include: addons/helm.yml 2 | - include: addons/cockroachdb.yml 3 | - include: addons/prometheus.yml 4 | -------------------------------------------------------------------------------- /playbooks/setup-base.yml: -------------------------------------------------------------------------------- 1 | - include: toolchain/setup.yml 2 | - include: ssl/setup.yml 3 | - include: ssl/master.yml 4 | - include: cluster/etcd.yml 5 | - include: cluster/user.yml 6 | - include: cluster/master.yml 7 | - include: cluster/node.yml 8 | - include: cluster/cni.yml 9 | - include: cluster/policy.yml 10 | - include: cluster/kubedns.yml 11 | - include: cluster/ingress.yml 12 | - include: cluster/stats.yml 13 | - include: cluster/registry.yml 14 | - include: cluster/external.yml 15 | - include: cluster/dashboard.yml 16 | -------------------------------------------------------------------------------- /playbooks/setup-build.yml: -------------------------------------------------------------------------------- 1 | # TODO We should wait for Helm Tiller initialization (approx.: 1 Minute) 2 | - include: toolchain/build.yml 3 | - include: ssl/build.yml 4 | - include: cluster/build.yml 5 | -------------------------------------------------------------------------------- /playbooks/setup-cicd.yml: -------------------------------------------------------------------------------- 1 | - include: addons/cicd.yml 2 | 
-------------------------------------------------------------------------------- /playbooks/setup-gateway.yml: -------------------------------------------------------------------------------- 1 | - include: addons/istio.yml 2 | -------------------------------------------------------------------------------- /playbooks/ssl/build.yml: -------------------------------------------------------------------------------- 1 | - hosts: build 2 | remote_user: '{{ k8s_ssh_user }}' 3 | become: true 4 | pre_tasks: 5 | - name: Copy CA 6 | copy: 7 | src: '{{ inventory_dir }}/.certs/ca/' 8 | dest: '{{ ssl_dir }}' 9 | roles: 10 | - role: ssl 11 | ssl_hosts: {} 12 | ssl_ips: {} 13 | ssl_custom: 14 | - '{{ k8s_master_name }}' 15 | - '{{ k8s_services_name }}' 16 | - '{{ k8s_registry_name }}' 17 | - '127.0.0.1' 18 | ssl_clients: 19 | - name: admin 20 | cn: system:admin 21 | org: 22 | - system:masters 23 | -------------------------------------------------------------------------------- /playbooks/ssl/master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | remote_user: '{{ k8s_ssh_user }}' 5 | become: true 6 | pre_tasks: 7 | - name: Copy CA 8 | copy: 9 | src: '{{ inventory_dir }}/.certs/ca/' 10 | dest: '{{ ssl_dir }}' 11 | when: inventory_hostname in groups['master'][0] 12 | roles: 13 | - role: ssl 14 | ssl_name: master 15 | ssl_hosts: "{{ groups['master'] }}" 16 | ssl_ips: "{{ groups['master']|map('extract', hostvars, 'ansible_default_ipv4')|map(attribute='address')|list }}" 17 | ssl_custom: 18 | - 'kubernetes' 19 | - 'kubernetes.default' 20 | - 'kubernetes.default.svc' 21 | - 'kubernetes.default.svc.{{ k8s_cluster_domain }}' 22 | - 'api.kube-public.svc' 23 | - '{{ k8s_master_name }}' 24 | - '{{ k8s_services_name }}' 25 | - '{{ k8s_registry_name }}' 26 | - '127.0.0.1' 27 | - 'localhost' 28 | - '{{ k8s_cluster_service_ip }}' 29 | - '{{ k8s_master_external_ip | default("master") }}' 30 | when: inventory_hostname in 
groups['master'][0] 31 | # Copy certificates to all masters (we should use the same certificate absolutely identical on all masters) 32 | post_tasks: 33 | - name: Get master certificate locally 34 | fetch: 35 | src: '{{ ssl_dir }}/master.pem' 36 | dest: '{{ inventory_dir }}/.certs/master/' 37 | flat: yes 38 | when: inventory_hostname in groups['master'][0] 39 | 40 | - name: Get master key locally 41 | fetch: 42 | src: '{{ ssl_dir }}/master-key.pem' 43 | dest: '{{ inventory_dir }}/.certs/master/' 44 | flat: yes 45 | when: inventory_hostname in groups['master'][0] 46 | 47 | - name: Copy master certificate and key on all masters 48 | copy: 49 | src: '{{ inventory_dir }}/.certs/master/' 50 | dest: '{{ ssl_dir }}' 51 | when: inventory_hostname not in groups['master'][0] 52 | -------------------------------------------------------------------------------- /playbooks/storage/cloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | roles: 5 | - storage 6 | -------------------------------------------------------------------------------- /playbooks/storage/volumes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: addons 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | roles: 6 | - readiness 7 | - role: volumes 8 | k8s_storage_hosts: '{{ groups["storage"] }}' 9 | -------------------------------------------------------------------------------- /playbooks/system/gateway-lb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: node 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | 6 | - hosts: gateway_lb 7 | remote_user: '{{ k8s_ssh_user }}' 8 | become: true 9 | roles: 10 | - role: haproxy 11 | haproxy_hosts: '{{ groups["node"] }}' 12 | haproxy_services: '{{ gateway_services }}' 13 | 
-------------------------------------------------------------------------------- /playbooks/system/logs-rotation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - node 5 | - build 6 | - master_lb 7 | - node_lb 8 | - gateway_lb 9 | remote_user: '{{ k8s_ssh_user }}' 10 | become: true 11 | 12 | roles: 13 | - role: logrotate 14 | - role: journald 15 | -------------------------------------------------------------------------------- /playbooks/system/master-lb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: master 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | 6 | - hosts: master_lb 7 | remote_user: '{{ k8s_ssh_user }}' 8 | become: true 9 | roles: 10 | - role: haproxy 11 | haproxy_hosts: '{{ groups["master"] }}' 12 | haproxy_services: '{{ master_services }}' 13 | -------------------------------------------------------------------------------- /playbooks/system/node-lb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: node 3 | remote_user: '{{ k8s_ssh_user }}' 4 | become: true 5 | 6 | - hosts: node_lb 7 | remote_user: '{{ k8s_ssh_user }}' 8 | become: true 9 | roles: 10 | - role: haproxy 11 | haproxy_hosts: '{{ groups["node"] }}' 12 | haproxy_services: '{{ node_services }}' 13 | -------------------------------------------------------------------------------- /playbooks/system/permissions.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - node 5 | - build 6 | - master_lb 7 | - node_lb 8 | - gateway_lb 9 | remote_user: root 10 | 11 | tasks: 12 | 13 | - name: Added ssh permissions for allowed hosts 14 | lineinfile: 15 | dest: /etc/ssh/sshd_config 16 | regexp: "^AllowUsers {{ k8s_ssh_user }}@{{ item }}" 17 | line: "AllowUsers {{ k8s_ssh_user }}@{{ item }}" 18 | with_items: 19 | - "{{ 
k8s_ssh_allowed_ips }}" 20 | notify: 21 | - restart sshd 22 | 23 | handlers: 24 | - name: restart sshd 25 | service: 26 | name: sshd 27 | state: restarted 28 | -------------------------------------------------------------------------------- /playbooks/system/update.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - node 5 | - build 6 | - master_lb 7 | - node_lb 8 | - gateway_lb 9 | remote_user: '{{ k8s_ssh_user }}' 10 | become: true 11 | 12 | tasks: 13 | - name: Update CentOS packages 14 | yum: 15 | name: '*' 16 | state: latest 17 | notify: 18 | - reboot vps 19 | - wait to come up 20 | 21 | handlers: 22 | 23 | - name: reboot vps 24 | command: /sbin/shutdown -r +1 25 | async: 0 26 | poll: 0 27 | 28 | - name: wait to come up 29 | wait_for: 30 | host: '{{ inventory_hostname }}' 31 | port: 22 32 | delay: 90 33 | delegate_to: localhost 34 | become: false 35 | -------------------------------------------------------------------------------- /playbooks/toolchain/build.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - build 4 | remote_user: '{{ k8s_ssh_user }}' 5 | become: true 6 | roles: 7 | - toolchain 8 | -------------------------------------------------------------------------------- /playbooks/toolchain/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - node 5 | remote_user: '{{ k8s_ssh_user }}' 6 | become: true 7 | roles: 8 | - toolchain 9 | -------------------------------------------------------------------------------- /playbooks/vm/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | pre_tasks: 5 | - name: Check inventory hosts file 6 | stat: 7 | path: '{{ k8s_inventory_file }}' 8 | register: st 9 | - name: Create inventory hosts file from template 10 | 
template: 11 | src: inventory.j2 12 | dest: '{{ k8s_inventory_file }}' 13 | when: not st.stat.exists 14 | tasks: 15 | - name: Create VM instances groups 16 | include_role: 17 | name: instance 18 | vars: 19 | gce_instance_group: '{{ gce_group.name }}' 20 | gce_instance_names: '{{ gce_group.nodes | join(",") }}' 21 | gce_machine_type: '{{ gce_group.type }}' 22 | with_items: '{{ gce_groups }}' 23 | loop_control: 24 | loop_var: gce_group 25 | -------------------------------------------------------------------------------- /playbooks/vm/templates/inventory.j2: -------------------------------------------------------------------------------- 1 | # This inventory hosts file should be generated automatically. 2 | # Please do not edit all that bellow 3 | 4 | {% for item in gce_groups %} 5 | [{{ item.name }}] 6 | 7 | {% if item.name == 'build' %} 8 | [all:vars] 9 | 10 | [etcd:children] 11 | master 12 | 13 | [addons:children] 14 | master 15 | 16 | [storage:children] 17 | master 18 | 19 | [master_lb] 20 | 21 | [node_lb] 22 | 23 | [gateway_lb] 24 | 25 | {% endif %} 26 | {% endfor %} 27 | -------------------------------------------------------------------------------- /roles/build/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Build Machine 2 | =========================== 3 | 4 | This role install Build Machine on Redhat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 
12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | 20 | Kubernetes hyperkube version 21 | ```yaml 22 | k8s_version: 1.7.6 23 | ``` 24 | 25 | Helm package manager version 26 | ```yaml 27 | helm_version: 2.6.1 28 | ``` 29 | 30 | Golang compiler version 31 | ```yaml 32 | go_version: 1.9 33 | ``` 34 | 35 | Account name of remote user. Ansible will use this user account to ssh into 36 | the build machines. The user must be able to use sudo without asking 37 | for password for build components 38 | ```yaml 39 | k8s_build_ssh_user: dev 40 | ``` 41 | 42 | Docker registry host name 43 | ```yaml 44 | k8s_registry_name: registry.your-domain-name 45 | ``` 46 | 47 | Docker registry auth code 48 | ```yaml 49 | k8s_docker_registry_auth_code: 'docker-registry-auth-code' 50 | ``` 51 | 52 | Executable files path 53 | ```yaml 54 | k8s_bin_dir: /usr/bin 55 | ``` 56 | 57 | Go tools path 58 | ```yaml 59 | go_dir: /usr/local 60 | ``` 61 | 62 | Go sources/packages path in home directory 63 | ```yaml 64 | go_path: 'gocode' 65 | ``` 66 | 67 | 68 | Example Playbook 69 | ---------------- 70 | 71 | - hosts: build 72 | roles: 73 | - build 74 | 75 | License 76 | ------- 77 | 78 | MIT 79 | 80 | Author Information 81 | ------------------ 82 | 83 | Kubernetes Community [k8s-community](https://github.com/k8s-community) 84 | -------------------------------------------------------------------------------- /roles/build/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Kubernetes version 3 | k8s_version: 1.8.5 4 | 5 | # Helm package manager version 6 | helm_version: 2.7.2 7 | 8 | # Golang compiler version 9 | go_version: 1.9.2 10 | 11 | # Account name of remote user. Ansible will use this user account to ssh into 12 | # the build machines.
The user must be able to use sudo without asking 13 | # for password for build components 14 | k8s_build_ssh_user: dev 15 | 16 | # Docker registry host name 17 | k8s_registry_name: registry.your-domain-name 18 | 19 | # Docker registry auth code 20 | k8s_docker_registry_auth_code: 'docker-registry-auth-code' 21 | 22 | # Executable files path 23 | k8s_bin_dir: /usr/bin 24 | 25 | # Go tools path 26 | go_dir: /usr/local 27 | 28 | # Go sources/packages path in home directory 29 | go_path: 'gocode' 30 | 31 | tmp_dir: /tmp 32 | -------------------------------------------------------------------------------- /roles/build/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/build/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup build machine 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 
30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/build/templates/docker-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "auths": { 3 | "{{ k8s_registry_name }}": { 4 | "auth": "{{ k8s_docker_registry_auth_code }}" 5 | } 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /roles/build/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/build/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - build 6 | -------------------------------------------------------------------------------- /roles/build/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for kubernetes 3 | -------------------------------------------------------------------------------- 
/roles/cicd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # CICD integration services 3 | k8s_cicd_services: 4 | - github-integration 5 | - user-manager 6 | - ui 7 | 8 | # CICD repository 9 | k8s_cicd_repo: github.com/k8s-community/cicd 10 | 11 | # Kubernetes configs path 12 | k8s_conf_dir: /etc/kubernetes 13 | k8s_policy_dir: '{{ k8s_conf_dir }}/policy' 14 | 15 | # Go tools path 16 | go_dir: /usr/local 17 | 18 | # Go sources/packages path in home directory 19 | go_path: 'gocode' 20 | 21 | tmp_dir: /tmp 22 | 23 | # k8s.community services exchange token 24 | k8s_community_token: 'k8s-community-token' 25 | 26 | # k8s.community services databases credentials 27 | k8s_community_db_username: 'k8s-community' 28 | k8s_community_db_password: 'k8s.community' 29 | k8s_github_integration_db_username: 'github-integration' 30 | k8s_github_integration_db_password: 'github.integration' 31 | 32 | # k8s.community Github integration services secrets 33 | k8s_github_client_id: 'github client id here' 34 | k8s_github_client_secret: 'github client secret here' 35 | k8s_github_state: 'github state here' 36 | k8s_github_integration_id: 'github integration id here' 37 | k8s_github_integration_token: 'github integration token here' 38 | k8s_github_integration_private_key: | 39 | -----BEGIN RSA PRIVATE KEY----- 40 | - Your RSA private key here - 41 | -----END RSA PRIVATE KEY----- 42 | 43 | # SSL certificate and private key for k8s-community services 44 | k8s_community_cert: | 45 | -----BEGIN CERTIFICATE----- 46 | - Your CERTIFICATE here - 47 | ------END CERTIFICATE------ 48 | k8s_community_key: | 49 | -----BEGIN RSA PRIVATE KEY----- 50 | - Your RSA private key here - 51 | -----END RSA PRIVATE KEY----- 52 | 53 | # SSL certificate and private key for k8s-community code generator 54 | k8s_codegen_cert: | 55 | -----BEGIN CERTIFICATE----- 56 | - Your CERTIFICATE here - 57 | ------END CERTIFICATE------ 58 | k8s_codegen_key: | 59 | 
-----BEGIN RSA PRIVATE KEY----- 60 | - Your RSA private key here - 61 | -----END RSA PRIVATE KEY----- 62 | -------------------------------------------------------------------------------- /roles/cicd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/cicd/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup CICD 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. 
Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/cicd/templates/deploy-k8s-community-secrets.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_k8s_community_secrets { 4 | if kubectl get secrets --namespace=k8s-community | grep github-oauth &> /dev/null; then 5 | echo "k8s.community secret already exists" 6 | else 7 | echo "Creating k8s.community secrets" 8 | kubectl apply -f {{ k8s_policy_dir }}/k8s-community-secrets.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | deploy_k8s_community_secrets 15 | -------------------------------------------------------------------------------- /roles/cicd/templates/deploy-k8s-community-user-db.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # Drop users if they already exist 6 | function user_rm() { 7 | kubectl exec -n k8s-community "cockroachdb-${1}" -- /cockroach/cockroach user rm ${2} \ 8 | --host "cockroachdb-${1}.cockroachdb" \ 9 | --insecure 10 | } 11 | 12 | user_rm 0 {{ k8s_community_db_username }} 13 | user_rm 0 {{ k8s_github_integration_db_username }} 14 | 15 | # Create database and user with priveleges. 
16 | function sql() { 17 | kubectl exec -n k8s-community "cockroachdb-${1}" -- /cockroach/cockroach sql \ 18 | --host "cockroachdb-${1}.cockroachdb" \ 19 | --insecure \ 20 | -e "$(cat /dev/stdin)" 21 | } 22 | 23 | cat < /dev/null; then 5 | echo "Romana daemonset already exists" 6 | else 7 | echo "Creating romana daemonset" 8 | kubectl apply -f {{ k8s_cni_dir }}/romana.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | function deploy_canal { 15 | if kubectl get daemonset -l k8s-app=canal --namespace=kube-system | grep canal &> /dev/null; then 16 | echo "Canal network policy daemonset already exists" 17 | else 18 | echo "Creating canal network policy daemonset" 19 | kubectl apply -f {{ k8s_cni_dir }}/canal.yaml 20 | fi 21 | 22 | echo 23 | } 24 | 25 | {% if cni_type == 'calico' %} 26 | deploy_canal 27 | {% endif %} 28 | {% if cni_type == 'romana' %} 29 | deploy_romana 30 | {% endif %} 31 | -------------------------------------------------------------------------------- /roles/cni/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/cni/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - cni 6 | -------------------------------------------------------------------------------- /roles/cni/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for kubernetes 3 | -------------------------------------------------------------------------------- /roles/cockroachdb/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Cockroach DB 2 | ========================== 3 | 4 | This role install Cockroach DB on Red Hat linux based systems. 
5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Kubernetes configs path 20 | ```yaml 21 | k8s_conf_dir: /etc/kubernetes 22 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 23 | ``` 24 | 25 | Secure deployment (recommended for production) 26 | ```yaml 27 | k8s_cockroachdb_secure: false 28 | ``` 29 | 30 | Enable/Disable privileged mode 31 | It's useful if linux running with enforcing selinux mode 32 | ```yaml 33 | k8s_cockroachdb_privileged: false 34 | ``` 35 | 36 | Namespace for Cockroach DB 37 | ```yaml 38 | k8s_cockroachdb_namespace: default 39 | ``` 40 | 41 | Cockroach DB data dir 42 | ```yaml 43 | k8s_cockroachdb_dir: /var/lib/cockroachdb 44 | ``` 45 | 46 | Cockroach DB volume and cache size 47 | ```yaml 48 | k8s_cockroachdb_volume_size: 1Gi 49 | k8s_cockroachdb_cache_size: 25% 50 | k8s_cockroachdb_max_sql_memory: 25% 51 | ``` 52 | 53 | List of databases for backup 54 | ```yaml 55 | k8s_cockroachdb_backup: {} 56 | ``` 57 | 58 | Master hosts names 59 | ```yaml 60 | k8s_master_hosts: {} 61 | ``` 62 | 63 | Node hosts names 64 | ```yaml 65 | k8s_node_hosts: {} 66 | ``` 67 | 68 | Example Playbook 69 | ---------------- 70 | 71 | - hosts: addons 72 | roles: 73 | - cockroachdb 74 | 75 | License 76 | ------- 77 | 78 | MIT 79 | 80 | Author Information 81 | ------------------ 82 | 83 | Kubernetes Community [k8s-community](https://github.com/k8s-community) 84 | -------------------------------------------------------------------------------- /roles/cockroachdb/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # It will be used as the Internal dns domain name if DNS is
enabled. 3 | # Services will be discoverable under 4 | # ..svc.., e.g. 5 | # myservice.default.svc.k8s.cluster 6 | k8s_domain_name: k8s 7 | k8s_cluster_name: cluster 8 | k8s_cluster_domain: '{{ k8s_domain_name }}.{{ k8s_cluster_name }}' 9 | 10 | # Kubernetes services host names 11 | k8s_services_name: services.your-domain-name 12 | 13 | # Kubernetes configs path 14 | k8s_conf_dir: /etc/kubernetes 15 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 16 | 17 | # Secure deployment (recommended for production) 18 | k8s_cockroachdb_secure: false 19 | 20 | # Enable/Disable privileged mode 21 | # It's useful if linux running with enforcing selinux mode 22 | k8s_cockroachdb_privileged: false 23 | 24 | # Namespace for Cockroach DB 25 | k8s_cockroachdb_namespace: default 26 | 27 | # Cockroach DB data dir 28 | k8s_cockroachdb_dir: /var/lib/cockroachdb 29 | 30 | # List of databases for backup 31 | k8s_cockroachdb_backup: {} 32 | 33 | # Master hosts names 34 | k8s_master_hosts: {} 35 | 36 | # Nodes hosts names 37 | k8s_node_hosts: {} 38 | 39 | # Cockroach DB volume and cache size 40 | k8s_cockroachdb_volume_size: 1Gi 41 | k8s_cockroachdb_cache_size: 25% 42 | k8s_cockroachdb_max_sql_memory: 25% 43 | 44 | # Cockroach DB images 45 | k8s_cockroachdb_image: cockroachdb/cockroach 46 | k8s_cockroachdb_image_tag: v1.1.3 47 | k8s_cockroachdb_request_cert_image: cockroachdb/cockroach-k8s-request-cert 48 | k8s_cockroachdb_request_cert_image_tag: 0.2 49 | -------------------------------------------------------------------------------- /roles/cockroachdb/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/cockroachdb/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Cockroach DB 4 | company: Kubernetes
Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/cockroachdb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | - '{{ k8s_cockroachdb_dir }}' 10 | 11 | - name: Cockroach DB 12 | template: 13 | src: "{{ item }}" 14 | dest: "{{ k8s_addons_dir }}/{{ item }}" 15 | with_items: 16 | - cockroachdb.yaml 17 | - cockroachdb-init.yaml 18 | - cockroachdb-volumes.yaml 19 | - cockroachdb-client.yaml 20 | - cockroachdb-backup.yaml 21 | 22 | - name: Deploy script for Cockroach DB 23 | template: 24 | src: deploy-cockroachdb.sh 25 | dest: "{{ k8s_addons_dir }}/deploy-cockroachdb.sh" 26 | mode: 0755 27 | 28 | - name: Run deploy script for cockroachdb 29 | command: "{{ k8s_addons_dir }}/deploy-cockroachdb.sh" 30 | when: inventory_hostname in k8s_master_hosts[0] 31 | -------------------------------------------------------------------------------- /roles/cockroachdb/templates/cockroachdb-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: cockroachdb-client 7 | namespace: {{ k8s_cockroachdb_namespace }} 8 | labels: 9 | app: cockroachdb-client 10 | spec: 11 | {% if k8s_cockroachdb_secure %} 12 | serviceAccountName: cockroachdb 13 | initContainers: 14 | # The init-certs container sends a certificate signing request to the 15 | # kubernetes cluster. 16 | # You can see pending requests using: kubectl get csr 17 | # CSRs can be approved using: kubectl certificate approve 18 | # 19 | # In addition to the client certificate and key, the init-certs entrypoint will symlink 20 | # the cluster CA to the certs directory. 
21 | - name: init-certs 22 | image: {{ k8s_cockroachdb_request_cert_image }}:{{ k8s_cockroachdb_request_cert_image_tag }} 23 | imagePullPolicy: IfNotPresent 24 | command: 25 | - "/bin/ash" 26 | - "-ecx" 27 | - "/request-cert -namespace=${POD_NAMESPACE} -certs-dir=/cockroach-certs -type=client -user=root -symlink-ca-from=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" 28 | env: 29 | - name: POD_NAMESPACE 30 | valueFrom: 31 | fieldRef: 32 | fieldPath: metadata.namespace 33 | volumeMounts: 34 | - name: client-certs 35 | mountPath: /cockroach-certs 36 | {% endif %} 37 | containers: 38 | - name: cockroachdb-client 39 | image: {{ k8s_cockroachdb_image }}:{{ k8s_cockroachdb_image_tag }} 40 | imagePullPolicy: IfNotPresent 41 | # Keep a pod open indefinitely so kubectl exec can be used to get a shell to it 42 | # and run cockroach client commands, such as cockroach sql, cockroach node status, etc. 43 | command: 44 | - sleep 45 | - "2147483648" # 2^31 46 | {% if k8s_cockroachdb_secure %} 47 | volumeMounts: 48 | - name: client-certs 49 | mountPath: /cockroach-certs 50 | volumes: 51 | - name: client-certs 52 | emptyDir: {} 53 | {% endif %} 54 | -------------------------------------------------------------------------------- /roles/cockroachdb/templates/cockroachdb-volumes.yaml: -------------------------------------------------------------------------------- 1 | {% for host in k8s_node_hosts %} 2 | --- 3 | 4 | kind: PersistentVolume 5 | apiVersion: v1 6 | metadata: 7 | name: cockroachdb-volume-{{ loop.index0 }} 8 | labels: 9 | app: cockroachdb 10 | db: db-{{ loop.index0 }} 11 | kubernetes.io/cluster-service: "true" 12 | addonmanager.kubernetes.io/mode: Reconcile 13 | spec: 14 | accessModes: 15 | - ReadWriteOnce 16 | capacity: 17 | storage: {{ k8s_cockroachdb_volume_size }} 18 | hostPath: 19 | path: {{ k8s_cockroachdb_dir }}/{{ loop.index0 }} 20 | persistentVolumeReclaimPolicy: Recycle 21 | 22 | --- 23 | 24 | kind: PersistentVolumeClaim 25 | apiVersion: v1 26 | metadata: 
27 | name: datadir-cockroachdb-{{ loop.index0 }} 28 | labels: 29 | app: cockroachdb 30 | kubernetes.io/cluster-service: "true" 31 | addonmanager.kubernetes.io/mode: Reconcile 32 | namespace: {{ k8s_cockroachdb_namespace }} 33 | spec: 34 | accessModes: 35 | - ReadWriteOnce 36 | resources: 37 | requests: 38 | storage: {{ k8s_cockroachdb_volume_size }} 39 | selector: 40 | matchLabels: 41 | db: db-{{ loop.index0 }} 42 | 43 | {% endfor %} 44 | -------------------------------------------------------------------------------- /roles/cockroachdb/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/cockroachdb/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - cockroachdb 6 | -------------------------------------------------------------------------------- /roles/cockroachdb/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/coredns/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # It will be used as the Internal dns domain name if DNS is enabled. 3 | # Services will be discoverable under 4 | # ..svc.., e.g. 5 | # myservice.default.svc.k8s.cluster 6 | k8s_domain_name: k8s 7 | k8s_cluster_name: cluster 8 | k8s_cluster_domain: '{{ k8s_domain_name }}.{{ k8s_cluster_name }}' 9 | 10 | # Kubernetes internal network for services. 11 | # Kubernetes services will get fake IP addresses from this range. 12 | # This range must not conflict with anything in your infrastructure. These 13 | # addresses do not need to be routable and must just be an unused block of space. 
14 | k8s_services_network: 10.254.0.0/16 15 | 16 | # IP address of Kube DNS 17 | # It should be in range of services subnet 18 | k8s_cluster_dns: 10.254.0.10 19 | 20 | # Internal overlay network. It will assign IP 21 | # addresses from this range to individual pods. 22 | # This network must be unused block of space. 23 | k8s_cluster_cidr: 10.20.0.0/16 24 | 25 | # Kubernetes configs path 26 | k8s_conf_dir: /etc/kubernetes 27 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 28 | 29 | # Master hosts nsmes 30 | k8s_master_hosts: {} 31 | 32 | # KubeDNS images 33 | k8s_coredns_image: coredns/coredns 34 | k8s_coredns_image_tag: 1.0.2 35 | -------------------------------------------------------------------------------- /roles/coredns/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/coredns/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Core DNS 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. 
If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/coredns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Core DNS 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - coredns.yaml 16 | 17 | - name: Deploy script for Core DNS 18 | template: 19 | src: deploy-coredns.sh 20 | dest: "{{ k8s_addons_dir }}/deploy-coredns.sh" 21 | mode: 0755 22 | 23 | - name: Run deploy script for Core DNS 24 | command: "{{ k8s_addons_dir }}/deploy-coredns.sh" 25 | when: inventory_hostname in k8s_master_hosts[0] 26 | -------------------------------------------------------------------------------- /roles/coredns/templates/deploy-coredns.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_dns { 4 | if kubectl get deploy -l k8s-app=coredns --namespace=kube-system | grep coredns &> /dev/null; then 5 | echo "CoreDNS deployment already exists" 6 | else 7 | echo "Creating CoreDNS deployment" 8 | kubectl create -f {{ k8s_addons_dir }}/coredns.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | 15 | deploy_dns 16 | -------------------------------------------------------------------------------- /roles/coredns/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/coredns/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - coredns 6 | -------------------------------------------------------------------------------- /roles/coredns/vars/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/dashboard/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Dashboard 2 | ======================= 3 | 4 | This role install Dashboard on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Kubernetes configs path 20 | ```yaml 21 | k8s_conf_dir: /etc/kubernetes 22 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 23 | ``` 24 | 25 | Master hosts nsmes 26 | ``` 27 | k8s_master_hosts: {} 28 | ``` 29 | 30 | Example Playbook 31 | ---------------- 32 | 33 | - hosts: master 34 | roles: 35 | - dashboard 36 | 37 | License 38 | ------- 39 | 40 | MIT 41 | 42 | Author Information 43 | ------------------ 44 | 45 | Kubernets Community [k8s-community](https://github.com/k8s-community) 46 | -------------------------------------------------------------------------------- /roles/dashboard/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Kubernetes configs path 3 | k8s_conf_dir: /etc/kubernetes 4 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 5 | 6 | # Master hosts nsmes 7 | k8s_master_hosts: {} 8 | 9 | # Dashboard image 10 | k8s_dashboard_image: gcr.io/google_containers/kubernetes-dashboard-amd64 11 | k8s_dashboard_image_tag: v1.8.0 12 | 13 | # SSL certificate and private key for running dashboard into Kubernetes 14 | k8s_services_cert: | 15 | ----BEGIN CERTIFICATE---- 16 | - Your 
certificate here - 17 | -----END CERTIFICATE----- 18 | k8s_services_cert_key: | 19 | ----BEGIN PRIVATE KEY---- 20 | - Your private key here - 21 | -----END PRIVATE KEY----- 22 | -------------------------------------------------------------------------------- /roles/dashboard/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/dashboard/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Dashboard 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 
34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/dashboard/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Dashboard 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - dashboard.yaml 16 | 17 | - name: Deploy script for dashboard 18 | template: 19 | src: deploy-dashboard.sh 20 | dest: "{{ k8s_addons_dir }}/deploy-dashboard.sh" 21 | mode: 0755 22 | 23 | - name: Run deploy script for dashboard 24 | command: "{{ k8s_addons_dir }}/deploy-dashboard.sh" 25 | when: inventory_hostname in k8s_master_hosts[0] 26 | -------------------------------------------------------------------------------- /roles/dashboard/templates/deploy-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_dashboard { 4 | if kubectl get deploy -l k8s-app=kubernetes-dashboard --namespace=kube-system | grep kubernetes-dashboard &> /dev/null; then 5 | echo "Kubernetes Dashboard deployment 
already exists" 6 | else 7 | echo "Creating Kubernetes Dashboard deployment" 8 | kubectl apply -f {{ k8s_addons_dir }}/dashboard.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | deploy_dashboard 15 | -------------------------------------------------------------------------------- /roles/dashboard/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/dashboard/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - dashboard 6 | -------------------------------------------------------------------------------- /roles/dashboard/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/etcd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload systemd 3 | command: systemctl daemon-reload 4 | 5 | - name: restart etcd 6 | service: 7 | name: etcd 8 | state: restarted 9 | -------------------------------------------------------------------------------- /roles/etcd/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup ETCD registry 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum 
Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
-------------------------------------------------------------------------------- /roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install etcd 3 | yum: 4 | name: etcd 5 | state: present 6 | 7 | - name: Enable etcd 8 | service: 9 | name: etcd 10 | enabled: yes 11 | 12 | - name: Check etcd directories 13 | file: 14 | path: '{{ item }}' 15 | state: directory 16 | mode: 0755 17 | with_items: 18 | - '{{ ssl_dir }}' 19 | - '{{ etcd_conf_dir }}' 20 | - '{{ etcd_data_dir }}' 21 | 22 | - name: Setup etcd 23 | template: 24 | src: etcd.conf.j2 25 | dest: '{{ etcd_conf_dir }}/etcd.conf' 26 | mode: 0644 27 | notify: 28 | - reload systemd 29 | - restart etcd 30 | 31 | - name: Set profile 32 | template: 33 | src: profile.sh.j2 34 | dest: /etc/profile.d/etcd.sh 35 | -------------------------------------------------------------------------------- /roles/etcd/templates/profile.sh.j2: -------------------------------------------------------------------------------- 1 | export ETCDCTL_ENDPOINT=https://127.0.0.1:{{ etcd_client_port }} 2 | -------------------------------------------------------------------------------- /roles/etcd/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/etcd/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - etcd -------------------------------------------------------------------------------- /roles/etcd/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for etcd -------------------------------------------------------------------------------- /roles/external/README.md: 
-------------------------------------------------------------------------------- 1 | Ansible Role: External Services 2 | =============================== 3 | 4 | This role install External Services on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | External services which can be outside of k8s cluster 20 | ```yaml 21 | external_services: {} 22 | ``` 23 | 24 | Additional Kubernetes namespaces 25 | ```yaml 26 | k8s_namespaces: 27 | - dev 28 | ``` 29 | 30 | Kubernetes configs path 31 | ```yaml 32 | k8s_conf_dir: /etc/kubernetes 33 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 34 | ``` 35 | 36 | Master hosts nsmes 37 | ```yaml 38 | k8s_master_hosts: {} 39 | ``` 40 | 41 | Example Playbook 42 | ---------------- 43 | 44 | - hosts: 45 | - master 46 | roles: 47 | - external 48 | 49 | License 50 | ------- 51 | 52 | MIT 53 | 54 | Author Information 55 | ------------------ 56 | 57 | Kubernets Community [k8s-community](https://github.com/k8s-community) 58 | -------------------------------------------------------------------------------- /roles/external/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # External services which can be outside of k8s cluster 3 | external_services: {} 4 | 5 | # Additional Kubernetes namespaces 6 | k8s_namespaces: 7 | - dev 8 | 9 | # Kubernetes configs path 10 | k8s_conf_dir: /etc/kubernetes 11 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 12 | 13 | # Master hosts nsmes 14 | k8s_master_hosts: {} 15 | -------------------------------------------------------------------------------- 
/roles/external/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/external/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup External services 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 
51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/external/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check kubernetes directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: External services 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - external-services.yaml 16 | 17 | - name: Deploy script for external services 18 | template: 19 | src: deploy-external-services.sh 20 | dest: "{{ k8s_addons_dir }}/deploy-external-services.sh" 21 | mode: 0755 22 | 23 | - name: Awaiting for kubernetes API 24 | wait_for: 25 | host: 127.0.0.1 26 | port: 8080 27 | when: inventory_hostname in k8s_master_hosts[0] 28 | 29 | - name: Run deploy script for external services 30 | command: "{{ k8s_addons_dir }}/deploy-external-services.sh" 31 | when: inventory_hostname in k8s_master_hosts[0] 32 | -------------------------------------------------------------------------------- /roles/external/templates/deploy-external-services.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_external_services { 4 | if kubectl get -n {{ k8s_namespaces[0] }} services | grep {% if external_services | length > 0 %} {{ external_services[0].name }} {% endif %} &> /dev/null; then 5 | echo "External services already exist" 6 | else 7 | echo "Creating external services" 8 | kubectl apply -f {{ k8s_addons_dir }}/external-services.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | 15 | deploy_external_services 16 
| -------------------------------------------------------------------------------- /roles/external/templates/external-services.yaml: -------------------------------------------------------------------------------- 1 | {% for namespace in k8s_namespaces %} 2 | {% for service in external_services %} 3 | --- 4 | 5 | apiVersion: v1 6 | kind: Service 7 | metadata: 8 | name: {{ service.name }} 9 | namespace: {{ namespace }} 10 | spec: 11 | externalName: {{ service.host }} 12 | ports: 13 | - port: {{ service.port }} 14 | protocol: TCP 15 | type: ExternalName 16 | 17 | {% endfor %} 18 | {% endfor %} 19 | -------------------------------------------------------------------------------- /roles/external/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/external/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - external 6 | -------------------------------------------------------------------------------- /roles/external/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for kubernetes 3 | -------------------------------------------------------------------------------- /roles/haproxy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # HAProxy port used for stats 3 | haproxy_stats_port: 7111 4 | 5 | # HAProxy check attributes 6 | haproxy_check_interval: 5000 7 | haproxy_check_rise: 3 8 | haproxy_check_fall: 3 9 | 10 | # HAProxy services 11 | # List of services which use TCP LB 12 | haproxy_services: {} 13 | # Example: 14 | # - name: node-80 15 | # port: 80 16 | # httpRedirect: true 17 | # - name: node-443 18 | # port: 443 19 | # nodePort: 443 20 | # httpCheck: 21 | # path: /healthz 
22 | # status: 200 23 | # sslProxy: true 24 | # - name: whoisd 25 | # port: 43 26 | # nodePort: 30043 27 | 28 | # Hosts names 29 | haproxy_hosts: {} 30 | 31 | # SSL dir for haproxy 32 | haproxy_ssl_dir: /etc/haproxy/ssl 33 | 34 | # SSL certificate and private key for running user services into Kubernetes 35 | k8s_services_cert: | 36 | ----BEGIN CERTIFICATE---- 37 | - Your certificate here - 38 | -----END CERTIFICATE----- 39 | k8s_services_cert_key: | 40 | ----BEGIN PRIVATE KEY---- 41 | - Your private key here - 42 | -----END PRIVATE KEY----- 43 | -------------------------------------------------------------------------------- /roles/haproxy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: enable haproxy 3 | service: 4 | name: haproxy 5 | enabled: yes 6 | 7 | - name: reload haproxy 8 | service: 9 | name: haproxy 10 | state: reloaded 11 | 12 | - name: restart haproxy 13 | service: 14 | name: haproxy 15 | state: restarted 16 | 17 | -------------------------------------------------------------------------------- /roles/haproxy/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup HAProxy balancer 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. 
During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install HAProxy and dependencies 3 | yum: 4 | name: '{{ item }}' 5 | state: present 6 | with_items: 7 | - haproxy 8 | notify: 9 | - enable haproxy 10 | - restart haproxy 11 | 12 | - name: Check directories 13 | file: 14 | path: '{{ item }}' 15 | state: directory 16 | mode: 0755 17 | with_items: 18 | - '{{ haproxy_ssl_dir }}' 19 | 20 | - name: Create SSL certificate 21 | copy: 22 | content: '{{ k8s_services_cert }}{{ k8s_services_cert_key }}' 23 | dest: '{{ haproxy_ssl_dir }}/certs.pem' 24 | 25 | - name: Configure balancer 26 | template: 27 | src: haproxy.cfg.j2 28 | dest: /etc/haproxy/haproxy.cfg 29 | notify: 30 | - reload haproxy 31 | -------------------------------------------------------------------------------- /roles/haproxy/templates/haproxy.cfg.j2: -------------------------------------------------------------------------------- 1 | global 2 | log /dev/log local0 3 | log /dev/log local1 notice 4 | tune.ssl.default-dh-param 2048 5 | stats socket /var/run/haproxy.sock mode 666 level user 6 | maxconn 8192 7 | user haproxy 8 | group haproxy 9 | daemon 10 | 11 | defaults 12 | log global 13 | mode tcp 14 | option tcpka 15 | option tcplog 16 | option dontlognull 17 | option redispatch 18 | option log-health-checks 19 | retries 3 20 | maxconn 2000 21 | timeout check 5s 22 | timeout queue 5s 23 | timeout connect 5s 24 | timeout client 50s 25 | timeout server 50s 26 | timeout tunnel 1h # to use with WebSocket 27 | 28 | {% for item in haproxy_services %} 29 | listen {{ item.name }} 30 | bind *:{{ item.port }}{% if item.sslTerminate is defined and item.sslTerminate %} ssl crt {{ haproxy_ssl_dir }}/certs.pem no-sslv3{% endif %} 31 | 32 | {% if item.httpRedirect is defined and item.httpRedirect %} 33 | mode http 34 | redirect scheme https 35 | {% else %} 36 | 
{% if item.httpCheck is defined %} 37 | option httpchk GET {{ item.httpCheck.path }} 38 | http-check expect status {{ item.httpCheck.status }} 39 | {% endif %} 40 | default-server inter {{ haproxy_check_interval }} rise {{ haproxy_check_rise }} fall {{ haproxy_check_fall }} 41 | {% for host in haproxy_hosts %} 42 | server {{ host }} {{ hostvars[host].ansible_default_ipv4.address }}:{{ item.nodePort }}{% if item.sslProxy is defined and item.sslProxy %} verify none check-ssl{% endif %}{% if item.sslTerminate is defined and item.sslTerminate %} ssl verify none{% endif %} check 43 | {% endfor %} 44 | {% endif %} 45 | 46 | {% endfor %} 47 | listen stats 48 | bind *:{{ haproxy_stats_port }} ssl crt {{ haproxy_ssl_dir }}/certs.pem no-sslv3 49 | mode http 50 | option forwardfor 51 | stats enable 52 | stats uri / 53 | -------------------------------------------------------------------------------- /roles/haproxy/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/haproxy/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - haproxy 6 | -------------------------------------------------------------------------------- /roles/haproxy/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/helm/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Helm tiller 2 | ========================= 3 | 4 | This role install Helm tiller on Red Hat linux based systems. 
5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Helm package manager version 20 | ```yaml 21 | helm_version: 2.6.1 22 | ``` 23 | 24 | Kubernetes configs path 25 | ```yaml 26 | k8s_conf_dir: /etc/kubernetes 27 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 28 | ``` 29 | 30 | Master hosts nsmes 31 | ```yaml 32 | k8s_master_hosts: {} 33 | ``` 34 | 35 | Example Playbook 36 | ---------------- 37 | 38 | - hosts: addons 39 | roles: 40 | - helm 41 | 42 | License 43 | ------- 44 | 45 | MIT 46 | 47 | Author Information 48 | ------------------ 49 | 50 | Kubernets Community [k8s-community](https://github.com/k8s-community) 51 | -------------------------------------------------------------------------------- /roles/helm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Helm package manager version 3 | helm_version: 2.7.2 4 | 5 | # Kubernetes configs path 6 | k8s_conf_dir: /etc/kubernetes 7 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 8 | 9 | # Master hosts nsmes 10 | k8s_master_hosts: {} 11 | 12 | # Helm Tiller image 13 | k8s_helm_tiller_image: gcr.io/kubernetes-helm/tiller 14 | k8s_helm_tiller_image_tag: v{{ helm_version }} 15 | -------------------------------------------------------------------------------- /roles/helm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/helm/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 
| author: Igor Dolzhikov 3 | description: Setup Helm 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/helm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Helm tiller 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - helm-tiller.yaml 16 | 17 | - name: Deploy script for tiller 18 | template: 19 | src: deploy-tiller.sh 20 | dest: "{{ k8s_addons_dir }}/deploy-tiller.sh" 21 | mode: 0755 22 | 23 | - name: Run deploy script for tiller 24 | command: "{{ k8s_addons_dir }}/deploy-tiller.sh" 25 | when: inventory_hostname in k8s_master_hosts[0] 26 | -------------------------------------------------------------------------------- /roles/helm/templates/deploy-tiller.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_helm_tiller { 4 | if kubectl get deploy -l app=helm,name=tiller --namespace=kube-system | grep tiller-deploy &> /dev/null; then 5 | echo "Helm tiller already exists" 6 | else 7 | echo "Creating Helm tiller" 8 | kubectl apply -f {{ k8s_addons_dir }}/helm-tiller.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | deploy_helm_tiller 15 | -------------------------------------------------------------------------------- /roles/helm/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/helm/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - helm 6 | -------------------------------------------------------------------------------- /roles/helm/vars/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/ingress/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Ingress controller 2 | ================================ 3 | 4 | This role install Ingress Controller on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Kubernetes load balancer type, valid values: `nginx` 20 | TODO: load balancer `gce`, `haproxy` 21 | ```yaml 22 | k8s_lb_type: nginx 23 | ``` 24 | 25 | PROXY protocol for ingress 26 | https://www.haproxy.com/blog/haproxy/proxy-protocol/ 27 | ```yaml 28 | k8s_ingress_proxy_protocol: false 29 | ``` 30 | 31 | Kubernetes configs path 32 | ```yaml 33 | k8s_conf_dir: /etc/kubernetes 34 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 35 | ``` 36 | 37 | Master hosts nsmes 38 | ```yaml 39 | k8s_master_hosts: {} 40 | ``` 41 | 42 | Node hosts nsmes 43 | ```yaml 44 | k8s_node_hosts: {} 45 | ``` 46 | 47 | Example Playbook 48 | ---------------- 49 | 50 | - hosts: addons 51 | roles: 52 | - ingress 53 | 54 | License 55 | ------- 56 | 57 | MIT 58 | 59 | Author Information 60 | ------------------ 61 | 62 | Kubernets Community [k8s-community](https://github.com/k8s-community) 63 | -------------------------------------------------------------------------------- /roles/ingress/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Kubernetes load balancer type, valid values: `gce`, 
`nginx`, `haproxy` 3 | # TODO: load balancer `gce`, `haproxy` 4 | k8s_lb_type: nginx 5 | 6 | # PROXY protocol for ingress 7 | # https://www.haproxy.com/blog/haproxy/proxy-protocol/ 8 | k8s_ingress_proxy_protocol: false 9 | 10 | # Kubernetes configs path 11 | k8s_conf_dir: /etc/kubernetes 12 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 13 | 14 | # Master hosts nsmes 15 | k8s_master_hosts: {} 16 | 17 | # Nodes hosts names 18 | k8s_node_hosts: {} 19 | 20 | # Nginx Ingress controller images 21 | k8s_defaultbackend_image: gcr.io/google_containers/defaultbackend 22 | k8s_defaultbackend_image_tag: 1.4 23 | k8s_echoserver_image: gcr.io/google_containers/echoserver 24 | k8s_echoserver_image_tag: 1.8 25 | k8s_nginx_ingress_controller_image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller 26 | k8s_nginx_ingress_controller_image_tag: 0.9.0 27 | -------------------------------------------------------------------------------- /roles/ingress/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/ingress/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Ingress 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. 
During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/ingress/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Ingress services 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - nginx-ingress-controller.yaml 16 | - haproxy-ingress-controller.yaml 17 | - gce-ingress-controller.yaml 18 | - echoheaders.yaml 19 | 20 | - name: Deploy script for ingress 21 | template: 22 | src: deploy-ingress.sh 23 | dest: "{{ k8s_addons_dir }}/deploy-ingress.sh" 24 | mode: 0755 25 | 26 | - name: Run deploy script for ingress 27 | command: "{{ k8s_addons_dir }}/deploy-ingress.sh" 28 | when: inventory_hostname in k8s_master_hosts[0] 29 | -------------------------------------------------------------------------------- /roles/ingress/templates/deploy-ingress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_nginx_ingress_controller { 4 | if kubectl get ds -l k8s-app=nginx-ingress-controller --namespace=kube-system | grep nginx-ingress-controller &> /dev/null; then 5 | echo "Nginx Ingress controller already exists" 6 | else 7 | echo "Creating Nginx Ingress controller" 8 | kubectl apply -f {{ k8s_addons_dir }}/nginx-ingress-controller.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | function deploy_haproxy_ingress_controller { 15 | if kubectl get deploy -l k8s-app=haproxy-ingress-controller --namespace=kube-system | grep haproxy-ingress-controller &> /dev/null; then 16 | echo "HAProxy Ingress controller already exists" 17 | else 18 | echo "Creating HAProxy Ingress controller" 19 | kubectl apply -f {{ k8s_addons_dir }}/haproxy-ingress-controller.yaml 20 | fi 21 | 22 | echo 23 | } 24 | 25 | function 
deploy_gce_ingress_controller { 26 | if kubectl get deploy -l k8s-app=gce-ingress-controller --namespace=kube-system | grep gce-ingress-controller &> /dev/null; then 27 | echo "Google L7 Ingress controller already exists" 28 | else 29 | echo "Creating Google L7 Ingress controller" 30 | kubectl apply -f {{ k8s_addons_dir }}/gce-ingress-controller.yaml 31 | fi 32 | 33 | echo 34 | } 35 | 36 | 37 | {% if k8s_lb_type == 'nginx' %} 38 | deploy_nginx_ingress_controller 39 | {% endif %} 40 | {% if k8s_lb_type == 'haproxy' %} 41 | deploy_haproxy_ingress_controller 42 | {% endif %} 43 | {% if k8s_lb_type == 'gce' %} 44 | deploy_gce_ingress_controller 45 | {% endif %} 46 | -------------------------------------------------------------------------------- /roles/ingress/templates/echoheaders.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: extensions/v1beta1 4 | kind: Deployment 5 | metadata: 6 | name: echoheaders 7 | spec: 8 | replicas: 2 9 | template: 10 | metadata: 11 | labels: 12 | app: echoheaders 13 | spec: 14 | containers: 15 | - name: echoheaders 16 | image: {{ k8s_echoserver_image }}:{{ k8s_echoserver_image_tag }} 17 | ports: 18 | - containerPort: 8080 19 | readinessProbe: 20 | httpGet: 21 | path: /healthz 22 | port: 8080 23 | periodSeconds: 1 24 | timeoutSeconds: 1 25 | successThreshold: 1 26 | failureThreshold: 10 27 | 28 | --- 29 | 30 | apiVersion: v1 31 | kind: Service 32 | metadata: 33 | name: echoheaders 34 | labels: 35 | app: echoheaders 36 | spec: 37 | ports: 38 | - port: 80 39 | targetPort: 8080 40 | protocol: TCP 41 | name: http 42 | selector: 43 | app: echoheaders 44 | 45 | --- 46 | 47 | apiVersion: extensions/v1beta1 48 | kind: Ingress 49 | metadata: 50 | annotations: 51 | ingress.kubernetes.io/rewrite-target: / 52 | name: echoheaders 53 | spec: 54 | tls: 55 | - hosts: 56 | - {{ k8s_services_name }} 57 | secretName: tls-secret 58 | rules: 59 | - host: {{ k8s_services_name }} 60 | http: 61 | 
paths: 62 | - path: /echo1 63 | backend: 64 | serviceName: echoheaders 65 | servicePort: 80 66 | - path: /echo2 67 | backend: 68 | serviceName: echoheaders 69 | servicePort: 80 70 | -------------------------------------------------------------------------------- /roles/ingress/templates/gce-ingress-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/ingress/templates/haproxy-ingress-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/ingress/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/ingress/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ingress 6 | -------------------------------------------------------------------------------- /roles/ingress/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/instance/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for instance -------------------------------------------------------------------------------- /roles/instance/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup VM instance 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line 
and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
--------------------------------------------------------------------------------
/roles/instance/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for instance
#
# Provisions a group of GCE virtual machines, waits until each one is
# reachable over SSH, and appends the new hosts to the Ansible inventory
# file under the group's section.

- name: Create VM instances group {{ gce_instance_group }}
  # NOTE(review): the `gce` module is deprecated in newer Ansible releases in
  # favour of `gcp_compute_instance` -- confirm the Ansible version in use.
  gce:
    instance_names: '{{ gce_instance_names }}'
    zone: '{{ gce_instances_zone }}'
    machine_type: '{{ gce_machine_type }}'
    image: '{{ gce_image }}'
    disk_size: '{{ gce_disk_size }}'
    network: '{{ gce_network_name }}'
    subnetwork: '{{ gce_subnet_name }}'
    ip_forward: '{{ gce_ip_forward | lower }}'
    metadata: '{{ gce_metadata }}'
    service_account_permissions: '{{ gce_sa_permissions }}'
    service_account_email: '{{ gce_service_account_email }}'
    credentials_file: '{{ gce_credentials_file }}'
    project_id: '{{ gce_project_id }}'
  register: gce

# `instance_data` is only present in the result when instances were actually
# created, hence the `default([], true)` guard on both loops below.
- name: Wait for SSH to come up
  wait_for:
    host: '{{ item.public_ip }}'
    port: 22
  with_items: '{{ gce.instance_data | default([], true) }}'

- name: Add new instances to the inventory file
  lineinfile:
    dest: '{{ k8s_inventory_file }}'
    insertafter: "^\\[{{ gce_instance_group }}\\]"
    line: '{{ item.name }} ansible_host={{ item.public_ip }}'
  with_items: '{{ gce.instance_data | default([], true) }}'
--------------------------------------------------------------------------------
/roles/instance/tests/inventory:
--------------------------------------------------------------------------------
localhost

--------------------------------------------------------------------------------
/roles/instance/tests/test.yml:
--------------------------------------------------------------------------------
---
- hosts: localhost
  connection: local
  roles:
    - instance
--------------------------------------------------------------------------------
/roles/instance/vars/main.yml:
--------------------------------------------------------------------------------
---
# vars file for instance
-------------------------------------------------------------------------------- /roles/istio/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/istio/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Istio 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. 
Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/istio/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check istio directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_istio_dir }}' 9 | 10 | - name: Get Istio 11 | unarchive: 12 | src: 'https://github.com/istio/istio/releases/download/{{ istio_version }}/istio-{{ istio_version }}-{{ ansible_system|lower }}.tar.gz' 13 | dest: '{{ tmp_dir }}' 14 | remote_src: true 15 | 16 | - name: Setup istio Control 17 | copy: 18 | src: '{{ tmp_dir }}/istio-{{ istio_version }}/bin/istioctl' 19 | dest: '{{ k8s_bin_dir }}/istioctl' 20 | mode: 0755 21 | remote_src: true 22 | 23 | - name: Istio Ingress Certs Secret 24 | template: 25 | src: ingress-certs-secret.yaml 26 | dest: "{{ k8s_istio_dir }}/ingress-certs-secret.yaml" 27 | when: k8s_services_cert | length > 1000 28 | 29 | - name: Istio 30 | template: 31 | src: "{{ item }}" 32 | dest: "{{ k8s_istio_dir }}/{{ item }}" 33 | with_items: 34 | - config.yaml 35 | - accounts.yaml 36 | - initializer.yaml 37 | - istio.yaml 38 | - zipkin.yaml 39 | - grafana.yaml 40 | - servicegraph.yaml 41 | - prometheus.yaml 42 | 43 | - name: Deploy script for istio 44 | template: 45 | src: deploy-istio.sh 46 | dest: "{{ k8s_istio_dir }}/deploy-istio.sh" 47 | mode: 0755 48 | 49 | - name: Run deploy script for istio 50 | command: "{{ k8s_istio_dir }}/deploy-istio.sh" 51 | when: inventory_hostname in k8s_master_hosts[0] 52 | 
-------------------------------------------------------------------------------- /roles/istio/templates/grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: grafana 7 | namespace: {{ k8s_istio_namespace }} 8 | spec: 9 | ports: 10 | - port: 3000 11 | protocol: TCP 12 | name: grafana 13 | selector: 14 | app: grafana 15 | 16 | --- 17 | 18 | apiVersion: extensions/v1beta1 19 | kind: Deployment 20 | metadata: 21 | name: grafana 22 | namespace: {{ k8s_istio_namespace }} 23 | annotations: 24 | sidecar.istio.io/inject: "false" 25 | spec: 26 | replicas: 1 27 | template: 28 | metadata: 29 | labels: 30 | app: grafana 31 | spec: 32 | containers: 33 | - name: grafana 34 | image: {{ k8s_istio_grafana_image }}:{{ k8s_istio_grafana_image_tag }} 35 | imagePullPolicy: IfNotPresent 36 | ports: 37 | - containerPort: 3000 38 | env: 39 | - name: GRAFANA_PORT 40 | value: "3000" 41 | - name: GF_AUTH_BASIC_ENABLED 42 | value: "false" 43 | - name: GF_AUTH_ANONYMOUS_ENABLED 44 | value: "true" 45 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 46 | value: Admin 47 | - name: GF_PATHS_DATA 48 | value: /data/grafana 49 | volumeMounts: 50 | - mountPath: /data/grafana 51 | name: grafana-data 52 | volumes: 53 | - name: grafana-data 54 | emptyDir: {} 55 | -------------------------------------------------------------------------------- /roles/istio/templates/ingress-certs-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: istio-ingress-certs 7 | namespace: {{ k8s_istio_namespace }} 8 | data: 9 | tls.crt: {{ k8s_services_cert | b64encode }} 10 | tls.key: {{ k8s_services_cert_key | b64encode }} 11 | -------------------------------------------------------------------------------- /roles/istio/templates/servicegraph.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: servicegraph 7 | namespace: {{ k8s_istio_namespace }} 8 | spec: 9 | ports: 10 | - name: http 11 | port: 8088 12 | selector: 13 | app: servicegraph 14 | 15 | --- 16 | 17 | apiVersion: extensions/v1beta1 18 | kind: Deployment 19 | metadata: 20 | name: servicegraph 21 | namespace: {{ k8s_istio_namespace }} 22 | annotations: 23 | sidecar.istio.io/inject: "false" 24 | spec: 25 | replicas: 1 26 | template: 27 | metadata: 28 | labels: 29 | app: servicegraph 30 | spec: 31 | containers: 32 | - name: servicegraph 33 | image: {{ k8s_istio_servicegraph_image }}:{{ k8s_istio_servicegraph_image_tag }} 34 | ports: 35 | - containerPort: 8088 36 | args: 37 | - --prometheusAddr=http://prometheus:9090 38 | -------------------------------------------------------------------------------- /roles/istio/templates/zipkin.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Zipkin tracking service 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: zipkin 8 | namespace: {{ k8s_istio_namespace }} 9 | spec: 10 | ports: 11 | - name: http 12 | port: 9411 13 | selector: 14 | app: zipkin 15 | 16 | --- 17 | 18 | apiVersion: extensions/v1beta1 19 | kind: Deployment 20 | metadata: 21 | name: zipkin 22 | namespace: {{ k8s_istio_namespace }} 23 | annotations: 24 | sidecar.istio.io/inject: "false" 25 | spec: 26 | replicas: 1 27 | template: 28 | metadata: 29 | labels: 30 | app: zipkin 31 | spec: 32 | containers: 33 | - name: zipkin 34 | image: {{ k8s_istio_zipkin_image }}:{{ k8s_istio_zipkin_image_tag }} 35 | ports: 36 | - containerPort: 9411 37 | env: 38 | - name: POD_NAMESPACE 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.namespace 42 | -------------------------------------------------------------------------------- /roles/istio/tests/inventory: 
-------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/istio/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - istio 6 | -------------------------------------------------------------------------------- /roles/istio/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/journald/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Setup configs for journald 2 | ============================ 3 | 4 | This role setups configs for journald on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/openprovider/cloud-setup/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 
12 | 13 | Role Variables 14 | -------------- 15 | 16 | Available variables are listed below, along with default values (see `defaults/main.yml`): 17 | 18 | Example Playbook 19 | ---------------- 20 | 21 | - hosts: master 22 | roles: 23 | - role: journald 24 | 25 | License 26 | ------- 27 | 28 | MIT 29 | 30 | Author Information 31 | ------------------ 32 | 33 | Kubernets Community [k8s-community](https://github.com/k8s-community) 34 | -------------------------------------------------------------------------------- /roles/journald/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for journald 3 | 4 | # Journald configs' path 5 | journald_config_path: /etc/systemd/journald.conf 6 | 7 | # The maximum disk space that can be used by the journal in persistent storage 8 | system_max_use: 512M 9 | 10 | # How large individual journal files can grow to in persistent storage before being rotated 11 | system_max_file_size: 64M 12 | 13 | # Max retention time (in seconds) 14 | max_retention_sec: 604800 15 | 16 | # The maximum disk space that can be used in volatile storage 17 | # (within the /run filesystem) 18 | runtime_max_use: 256M 19 | 20 | # The amount of space that an individual journal file can take up 21 | # in volatile storage (within the /run filesystem) before being rotated 22 | runtime_max_file_size: 64M 23 | -------------------------------------------------------------------------------- /roles/journald/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for journald 3 | 4 | - name: restart journald 5 | service: 6 | name: systemd-journald 7 | state: restarted 8 | -------------------------------------------------------------------------------- /roles/journald/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for journald 3 | 4 | - name: Set 
maximum disk space in persistent storage for journald 5 | lineinfile: 6 | dest: '{{ journald_config_path }}' 7 | regexp: 'SystemMaxUse=' 8 | line: 'SystemMaxUse={{ system_max_use }}' 9 | notify: 10 | - restart journald 11 | 12 | - name: Set max file size for individual journal files in persistent storage 13 | lineinfile: 14 | dest: '{{ journald_config_path }}' 15 | regexp: 'SystemMaxFileSize=' 16 | line: 'SystemMaxFileSize={{ system_max_file_size }}' 17 | notify: 18 | - restart journald 19 | 20 | - name: Set max retention time (in seconds) for journald information 21 | lineinfile: 22 | dest: '{{ journald_config_path }}' 23 | regexp: 'MaxRetentionSec=' 24 | line: 'MaxRetentionSec={{ max_retention_sec }}' 25 | notify: 26 | - restart journald 27 | 28 | - name: Set maximum disk space in volatile storage for journald 29 | lineinfile: 30 | dest: '{{ journald_config_path }}' 31 | regexp: 'RuntimeMaxUse=' 32 | line: 'RuntimeMaxUse={{ runtime_max_use }}' 33 | notify: 34 | - restart journald 35 | 36 | - name: Set max file size for individual journal files in volatile storage 37 | lineinfile: 38 | dest: '{{ journald_config_path }}' 39 | regexp: 'RuntimeMaxFileSize=' 40 | line: 'RuntimeMaxFileSize={{ runtime_max_file_size }}' 41 | notify: 42 | - restart journald 43 | -------------------------------------------------------------------------------- /roles/journald/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/journald/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - journald 6 | -------------------------------------------------------------------------------- /roles/journald/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for journald 
3 | -------------------------------------------------------------------------------- /roles/kubedns/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Kube DNS 2 | ====================== 3 | 4 | This role install Kube DNS on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Log level 0 - debug 20 | ```yaml 21 | k8s_log_level: 3 22 | ``` 23 | 24 | It will be used as the Internal dns domain name if DNS is enabled. 25 | Services will be discoverable under 26 | `..svc..`, e.g. 27 | myservice.default.svc.k8s.cluster 28 | ```yaml 29 | k8s_domain_name: k8s 30 | k8s_cluster_name: cluster 31 | k8s_cluster_domain: '{{ k8s_domain_name }}.{{ k8s_cluster_name }}' 32 | ``` 33 | 34 | IP address of Kube DNS 35 | It should be in range of services subnet 36 | ```yaml 37 | k8s_cluster_dns: 10.254.0.10 38 | ``` 39 | 40 | Kubernetes configs path 41 | ```yaml 42 | k8s_conf_dir: /etc/kubernetes 43 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 44 | ``` 45 | 46 | Master hosts nsmes 47 | ```yaml 48 | k8s_master_hosts: {} 49 | ``` 50 | 51 | Example Playbook 52 | ---------------- 53 | 54 | - hosts: master 55 | roles: 56 | - kubedns 57 | 58 | License 59 | ------- 60 | 61 | MIT 62 | 63 | Author Information 64 | ------------------ 65 | 66 | Kubernets Community [k8s-community](https://github.com/k8s-community) 67 | -------------------------------------------------------------------------------- /roles/kubedns/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Log level 0 - debug 3 | k8s_log_level: 3 4 
| 5 | # It will be used as the Internal dns domain name if DNS is enabled. 6 | # Services will be discoverable under 7 | # ..svc.., e.g. 8 | # myservice.default.svc.k8s.cluster 9 | k8s_domain_name: k8s 10 | k8s_cluster_name: cluster 11 | k8s_cluster_domain: '{{ k8s_domain_name }}.{{ k8s_cluster_name }}' 12 | 13 | # IP address of Kube DNS 14 | # It should be in range of services subnet 15 | k8s_cluster_dns: 10.254.0.10 16 | 17 | # Kubernetes configs path 18 | k8s_conf_dir: /etc/kubernetes 19 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 20 | 21 | # Master hosts nsmes 22 | k8s_master_hosts: {} 23 | 24 | # KubeDNS images 25 | k8s_kubedns_image: gcr.io/google_containers/k8s-dns-kube-dns-amd64 26 | k8s_kubedns_image_tag: 1.14.7 27 | k8s_kubednsmasq_image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64 28 | k8s_kubednsmasq_image_tag: 1.14.7 29 | k8s_kubednssidecar_image: gcr.io/google_containers/k8s-dns-sidecar-amd64 30 | k8s_kubednssidecar_image_tag: 1.14.7 31 | k8s_cluster_autoscaler_image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64 32 | k8s_cluster_autoscaler_image_tag: 1.1.2 33 | -------------------------------------------------------------------------------- /roles/kubedns/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/kubedns/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Kube DNS 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | 
min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 |
--------------------------------------------------------------------------------
/roles/kubedns/tasks/main.yml:
--------------------------------------------------------------------------------
---
# tasks file for kubedns
#
# Renders the KubeDNS manifests and deploy script into the addons directory
# and runs the script once, on the first master host only.

- name: Check addons directories
  file:
    path: '{{ item }}'
    state: directory
    mode: 0755
  with_items:
    - '{{ k8s_addons_dir }}'

- name: Kube DNS
  template:
    src: "{{ item }}"
    dest: "{{ k8s_addons_dir }}/{{ item }}"
  with_items:
    - kubedns.yaml
    - kubedns-autoscaler.yaml

- name: Deploy script for Kube DNS
  template:
    src: deploy-kubedns.sh
    dest: "{{ k8s_addons_dir }}/deploy-kubedns.sh"
    mode: 0755

- name: Run deploy script for Kube DNS
  command: "{{ k8s_addons_dir }}/deploy-kubedns.sh"
  when: inventory_hostname in k8s_master_hosts[0]
--------------------------------------------------------------------------------
/roles/kubedns/templates/deploy-kubedns.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Creates the KubeDNS deployment and its proportional autoscaler in the
# kube-system namespace, skipping each one if it already exists.

# Create the KubeDNS deployment unless one labelled k8s-app=kube-dns exists.
function deploy_kube_dns {
    if ! kubectl get deploy -l k8s-app=kube-dns --namespace=kube-system | grep kube-dns > /dev/null 2>&1; then
        echo "Creating KubeDNS deployment"
        kubectl apply -f {{ k8s_addons_dir }}/kubedns.yaml
    else
        echo "KubeDNS deployment already exists"
    fi

    echo
}

# Create the cluster-proportional autoscaler for KubeDNS unless present.
function deploy_kube_dns_autoscaler {
    if ! kubectl get deploy -l k8s-app=kube-dns-autoscaler --namespace=kube-system | grep kube-dns-autoscaler > /dev/null 2>&1; then
        echo "Creating KubeDNS Autoscaler deployment"
        kubectl apply -f {{ k8s_addons_dir }}/kubedns-autoscaler.yaml
    else
        echo "KubeDNS Autoscaler deployment already exists"
    fi

    echo
}

deploy_kube_dns
deploy_kube_dns_autoscaler
--------------------------------------------------------------------------------
/roles/kubedns/tests/inventory:
-------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/kubedns/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - kubedns 6 | -------------------------------------------------------------------------------- /roles/kubedns/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/kubelego/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Kube Lego 2 | ======================= 3 | 4 | This role install Kube Lego on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 
12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Kubernetes configs path 20 | ```yaml 21 | k8s_conf_dir: /etc/kubernetes 22 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 23 | ``` 24 | 25 | Master hosts nsmes 26 | ```yaml 27 | k8s_master_hosts: {} 28 | ``` 29 | 30 | Example Playbook 31 | ---------------- 32 | 33 | - hosts: master 34 | roles: 35 | - kubelego 36 | 37 | License 38 | ------- 39 | 40 | MIT 41 | 42 | Author Information 43 | ------------------ 44 | 45 | Kubernets Community [k8s-community](https://github.com/k8s-community) 46 | -------------------------------------------------------------------------------- /roles/kubelego/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Kubernetes configs path 3 | k8s_conf_dir: /etc/kubernetes 4 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 5 | 6 | # Master hosts nsmes 7 | k8s_master_hosts: {} 8 | 9 | # Kube Lego image 10 | k8s_kube_lego_image: jetstack/kube-lego 11 | k8s_kube_lego_image_tag: 0.1.5 12 | -------------------------------------------------------------------------------- /roles/kubelego/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/kubelego/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Kube Lego 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: 
MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/kubelego/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Kube Lego 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - kube-lego.yaml 16 | 17 | - name: Deploy script for Kube Lego 18 | template: 19 | src: deploy-kubelego.sh 20 | dest: "{{ k8s_addons_dir }}/deploy-kubelego.sh" 21 | mode: 0755 22 | 23 | - name: Run deploy script for Kube Lego 24 | command: "{{ k8s_addons_dir }}/deploy-kubelego.sh" 25 | when: inventory_hostname in k8s_master_hosts[0] 26 | -------------------------------------------------------------------------------- /roles/kubelego/templates/alert-manager.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8s-community/cluster-deploy/9e82b9ff5755fec04aac83f629ec52da6606e2a3/roles/kubelego/templates/alert-manager.yaml -------------------------------------------------------------------------------- /roles/kubelego/templates/deploy-kubelego.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_kube_lego { 4 | if kubectl get deploy -l app=kube-lego --namespace=kube-lego | grep kube-lego &> /dev/null; then 5 | echo "Kube Lego already exists" 6 | else 7 | echo "Creating Kube Lego" 8 | kubectl apply -f {{ k8s_addons_dir }}/kube-lego.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | deploy_kube_lego 15 | -------------------------------------------------------------------------------- /roles/kubelego/templates/kube-stats.yaml: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/k8s-community/cluster-deploy/9e82b9ff5755fec04aac83f629ec52da6606e2a3/roles/kubelego/templates/kube-stats.yaml -------------------------------------------------------------------------------- /roles/kubelego/templates/node-exporter.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8s-community/cluster-deploy/9e82b9ff5755fec04aac83f629ec52da6606e2a3/roles/kubelego/templates/node-exporter.yaml -------------------------------------------------------------------------------- /roles/kubelego/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/kubelego/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - kubelego 6 | -------------------------------------------------------------------------------- /roles/kubelego/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/kubernetes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload systemd 3 | command: systemctl daemon-reload 4 | notify: 5 | - enable kubelet 6 | 7 | - name: enable kubelet 8 | service: 9 | name: kubelet 10 | enabled: yes 11 | 12 | - name: restart kubelet 13 | service: 14 | name: kubelet 15 | state: restarted 16 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/config: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes system config 3 | # 4 | # The following values are used to configure 
various aspects of all 5 | # kubernetes services, including 6 | # 7 | # kube-apiserver.service 8 | # kube-controller-manager.service 9 | # kube-scheduler.service 10 | # kubelet.service 11 | # kube-proxy.service 12 | # logging to stderr means we get it in the systemd journal 13 | KUBE_LOGTOSTDERR="--logtostderr=true" 14 | 15 | # journal message level, 0 is debug 16 | KUBE_LOG_LEVEL="--v={{ k8s_log_level }}" 17 | 18 | # Should this cluster be allowed to run privileged docker containers 19 | KUBE_ALLOW_PRIV="--allow-privileged=true" 20 | 21 | # How the controller-manager, scheduler, and proxy find the apiserver 22 | KUBE_MASTER="--master={{ k8s_url_scheme }}://{% if inventory_hostname in k8s_master_hosts %}127.0.0.1{% else %}{{ k8s_master_external_ip | default(k8s_master_name) }}{% endif %}:{{ k8s_api_port }}" 23 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/etcd-ep-profile.sh: -------------------------------------------------------------------------------- 1 | export ETCD_ENDPOINTS={{ k8s_etcd_servers }} 2 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/kubelet: -------------------------------------------------------------------------------- 1 | ### 2 | # kubernetes kubelet config 3 | 4 | # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces) 5 | KUBELET_ADDRESS="--address=0.0.0.0" 6 | 7 | # The port for the info server to serve on 8 | # KUBELET_PORT="--port=10250" 9 | 10 | # You may leave this blank to use the actual hostname 11 | KUBELET_HOSTNAME="--hostname-override={{ inventory_hostname }}" 12 | 13 | KUBELET_ARGS="--allow-privileged=true \ 14 | --cgroup-driver=systemd \ 15 | --cgroups-per-qos=true \ 16 | --client-ca-file={{ ssl_dir }}/ca.pem \ 17 | --cni-bin-dir={{ cni_bin_dir }} \ 18 | --cni-conf-dir={{ cni_conf_dir }} \ 19 | --cluster-dns={{ k8s_cluster_dns }} \ 20 | --cluster-domain={{ 
k8s_cluster_domain }} \ 21 | --fail-swap-on=false \ 22 | --kubeconfig={{ k8s_policy_dir }}/kubelet.kubeconfig \ 23 | --network-plugin=cni \ 24 | --node-labels={% if inventory_hostname in k8s_master_hosts %}node-role.kubernetes.io/master=true{% else %}node-role.kubernetes.io/node=true{% endif %} \ 25 | --pod-manifest-path={{ k8s_manifests_dir }} \ 26 | --register-node=true {% if inventory_hostname in k8s_master_hosts %}--register-with-taints=node-role.kubernetes.io/master=true:NoSchedule {% endif %}\ 27 | --require-kubeconfig=true \ 28 | --serialize-image-pulls=false \ 29 | --v={{ k8s_log_level }} \ 30 | --vmodule={{ k8s_log_spec }}" 31 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=docker.service 5 | Requires=docker.service 6 | 7 | [Service] 8 | WorkingDirectory={{ k8s_kubelet_dir }} 9 | EnvironmentFile=-{{ k8s_conf_dir }}/config 10 | EnvironmentFile=-{{ k8s_conf_dir }}/kubelet 11 | ExecStart=/usr/bin/kubelet \ 12 | $KUBE_LOGTOSTDERR \ 13 | $KUBE_LOG_LEVEL \ 14 | $KUBELET_ADDRESS \ 15 | $KUBELET_PORT \ 16 | $KUBELET_HOSTNAME \ 17 | $KUBE_ALLOW_PRIV \ 18 | $KUBELET_POD_INFRA_CONTAINER \ 19 | $KUBELET_ARGS 20 | Restart=on-failure 21 | 22 | [Install] 23 | WantedBy=multi-user.target 24 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/kubernetes-accounting.conf: -------------------------------------------------------------------------------- 1 | [Manager] 2 | DefaultCPUAccounting=yes 3 | DefaultMemoryAccounting=yes 4 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/manifests/kube-controller-manager.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-controller-manager 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-controller-manager 10 | image: {{ k8s_hyperkube_image }}:{{ k8s_hyperkube_image_tag }} 11 | command: 12 | - /hyperkube 13 | - controller-manager 14 | - --allocate-node-cidrs=true 15 | - --cluster-cidr={{ k8s_cluster_cidr }} 16 | - --cluster-signing-cert-file={{ ssl_dir }}/ca.pem 17 | - --cluster-signing-key-file={{ ssl_dir }}/ca-key.pem 18 | - --controllers=* 19 | - --feature-gates=AllAlpha=false 20 | - --master=https://127.0.0.1:{{ k8s_api_port }} 21 | - --kubeconfig={{ k8s_policy_dir }}/controller.kubeconfig 22 | - --leader-elect=true 23 | - --root-ca-file={{ ssl_dir }}/ca.pem 24 | - --service-account-private-key-file={{ ssl_dir }}/master-key.pem 25 | - --service-cluster-ip-range={{ k8s_services_network }} 26 | - --use-service-account-credentials 27 | - --v={{ k8s_log_level }} 28 | - --vmodule={{ k8s_log_spec }} 29 | resources: 30 | requests: 31 | cpu: 200m 32 | livenessProbe: 33 | httpGet: 34 | path: /healthz 35 | host: 127.0.0.1 36 | port: 10252 37 | initialDelaySeconds: 15 38 | timeoutSeconds: 15 39 | volumeMounts: 40 | - name: policy 41 | mountPath: {{ k8s_policy_dir }} 42 | readOnly: true 43 | - name: certificates 44 | mountPath: {{ ssl_dir }} 45 | readOnly: true 46 | volumes: 47 | - name: policy 48 | hostPath: 49 | path: {{ k8s_policy_dir }} 50 | - name: certificates 51 | hostPath: 52 | path: {{ ssl_dir }} 53 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/manifests/kube-proxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-proxy 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-proxy 10 | image: {{ 
k8s_hyperkube_image }}:{{ k8s_hyperkube_image_tag }} 11 | command: 12 | - /hyperkube 13 | - proxy 14 | - --feature-gates=AllAlpha=false 15 | - --master={{ k8s_url_scheme }}://{% if inventory_hostname in k8s_master_hosts %}127.0.0.1{% else %}{{ k8s_master_external_ip | default(k8s_master_name) }}{% endif %}:{{ k8s_api_port }} 16 | - --hostname-override={{ inventory_hostname }} 17 | - --kubeconfig={{ k8s_policy_dir }}/kube-proxy.kubeconfig 18 | - --v={{ k8s_log_level }} 19 | securityContext: 20 | privileged: true 21 | volumeMounts: 22 | - name: policy 23 | mountPath: {{ k8s_policy_dir }} 24 | readOnly: true 25 | - name: certificates 26 | mountPath: {{ ssl_dir }} 27 | readOnly: true 28 | volumes: 29 | - name: policy 30 | hostPath: 31 | path: {{ k8s_policy_dir }} 32 | - name: certificates 33 | hostPath: 34 | path: {{ ssl_dir }} 35 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/manifests/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kube-scheduler 5 | namespace: kube-system 6 | spec: 7 | hostNetwork: true 8 | containers: 9 | - name: kube-scheduler 10 | image: {{ k8s_hyperkube_image }}:{{ k8s_hyperkube_image_tag }} 11 | command: 12 | - /hyperkube 13 | - scheduler 14 | - --feature-gates=AllAlpha=false 15 | - --master=https://127.0.0.1:{{ k8s_api_port }} 16 | - --kubeconfig={{ k8s_policy_dir }}/scheduler.kubeconfig 17 | - --leader-elect=true 18 | - --v={{ k8s_log_level }} 19 | resources: 20 | requests: 21 | cpu: 100m 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | host: 127.0.0.1 26 | port: 10251 27 | initialDelaySeconds: 15 28 | timeoutSeconds: 15 29 | volumeMounts: 30 | - name: policy 31 | mountPath: {{ k8s_policy_dir }} 32 | readOnly: true 33 | - name: certificates 34 | mountPath: {{ ssl_dir }} 35 | readOnly: true 36 | volumes: 37 | - name: policy 38 | hostPath: 39 | path: {{ 
k8s_policy_dir }} 40 | - name: certificates 41 | hostPath: 42 | path: {{ ssl_dir }} 43 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/policy/basic-auth.csv: -------------------------------------------------------------------------------- 1 | {{ k8s_admin_password }},{{ k8s_admin_username }},{{ k8s_admin_username }} 2 | {{ k8s_release_password }},{{ k8s_release_username }},{{ k8s_release_username }} 3 | {{ k8s_guest_password }},{{ k8s_guest_username }},{{ k8s_guest_username }} 4 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/policy/known-tokens.csv: -------------------------------------------------------------------------------- 1 | {{ k8s_admin_token }},{{ k8s_admin_username }},{{ k8s_admin_username }} 2 | {{ k8s_release_token }},{{ k8s_release_username }},{{ k8s_release_username }} 3 | {{ k8s_guest_token }},{{ k8s_guest_username }},{{ k8s_guest_username }} 4 | -------------------------------------------------------------------------------- /roles/kubernetes/templates/policy/kubeconfig.client: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: {{ k8s_cluster_domain }} 5 | cluster: 6 | certificate-authority: {{ ssl_dir }}/ca.pem 7 | server: {{ k8s_url_scheme }}://{% if inventory_hostname in k8s_master_hosts %}127.0.0.1{% else %}{{ k8s_master_external_ip | default(k8s_master_name) }}{% endif %}:{{ k8s_api_port }} 8 | contexts: 9 | - name: client-context 10 | context: 11 | cluster: {{ k8s_cluster_domain }} 12 | user: {{ item }} 13 | current-context: client-context 14 | users: 15 | - name: {{ item }} 16 | user: 17 | client-certificate: {{ ssl_dir }}/client-{{ item }}.pem 18 | client-key: {{ ssl_dir }}/client-{{ item }}-key.pem 19 | -------------------------------------------------------------------------------- /roles/kubernetes/tests/inventory: 
-------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/kubernetes/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - kubernetes 6 | -------------------------------------------------------------------------------- /roles/kubernetes/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for kubernetes 3 | -------------------------------------------------------------------------------- /roles/lb/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for network 3 | -------------------------------------------------------------------------------- /roles/lb/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup GCE Load Balancer 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. 
Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/lb/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/lb/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | roles: 5 | - lb 6 | -------------------------------------------------------------------------------- /roles/lb/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for network 3 | -------------------------------------------------------------------------------- /roles/logrotate/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Setup logrotate 2 | ====================== 3 | 4 | This role installs logrotate on Red Hat linux based systems. 
5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | Role Variables 14 | -------------- 15 | 16 | Available variables are listed below, along with default values (see `defaults/main.yml`): 17 | 18 | Example Playbook 19 | ---------------- 20 | 21 | - hosts: master 22 | roles: 23 | - role: logrotate 24 | rotate_period: weekly 25 | rotate_count: 1 26 | 27 | License 28 | ------- 29 | 30 | MIT 31 | 32 | Author Information 33 | ------------------ 34 | 35 | Kubernets Community [k8s-community](https://github.com/k8s-community) 36 | -------------------------------------------------------------------------------- /roles/logrotate/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for logrotate 3 | 4 | logrotate_config_path: /etc/logrotate.conf 5 | syslog_path: /etc/logrotate.d/syslog 6 | 7 | # Log files are rotated 8 | rotate_period: daily 9 | 10 | # Logs are stored for periods 11 | rotate_count: 7 12 | 13 | # If the log file is missing, go on to the next one without issuing an error message. 14 | missingok: true 15 | 16 | # Do not rotate the log if it is empty 17 | notifempty: true 18 | 19 | # Old versions of log files are compressed with gzip(1) by default. 20 | compress: true 21 | 22 | # Postpone compression of the previous log file to the next rotation cycle. 23 | # This only has effect when used in combination with compress. 24 | # It can be used when some program cannot be told to close its logfile and 25 | # thus might continue writing to the previous log file for some time. 26 | delaycompress: true 27 | 28 | # Log files are rotated only if they grow bigger then size bytes. 29 | # If size is followed by k, the size is assumed to be in kilobytes. 
30 | # If the M is used, the size is in megabytes, and if G is used, the size 31 | # is in gigabytes. So size 100, size 100k, size 100M and size 100G are all valid. 32 | size: 50M 33 | -------------------------------------------------------------------------------- /roles/logrotate/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for logrotate 3 | 4 | - name: apply logrotate configs 5 | command: 'logrotate {{ logrotate_config_path }}' 6 | -------------------------------------------------------------------------------- /roles/logrotate/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Vlad Saveliev 3 | description: Setup logrotate 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 
34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/logrotate/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/logrotate/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - logrotate 6 | -------------------------------------------------------------------------------- /roles/logrotate/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for logrotate 3 | -------------------------------------------------------------------------------- /roles/network/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for network -------------------------------------------------------------------------------- /roles/network/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- 
/roles/network/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | roles: 5 | - network 6 | -------------------------------------------------------------------------------- /roles/network/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for network -------------------------------------------------------------------------------- /roles/policy/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Policy rules 2 | ========================== 3 | 4 | This role install Policy rules on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Additional Kubernetes namespaces 20 | ```yaml 21 | k8s_namespaces: 22 | - dev 23 | ``` 24 | 25 | Users access data 26 | ```yaml 27 | k8s_admin_username: admin 28 | k8s_release_username: release 29 | k8s_guest_username: guest 30 | ``` 31 | 32 | Kubernetes configs path 33 | ```yaml 34 | k8s_conf_dir: /etc/kubernetes 35 | k8s_policy_dir: '{{ k8s_conf_dir }}/policy' 36 | ``` 37 | 38 | Master hosts nsmes 39 | ```yaml 40 | k8s_master_hosts: {} 41 | ``` 42 | 43 | SSL base certificate name 44 | SSL folder and file names will use the same name 45 | ```yaml 46 | ssl_name: kubernetes 47 | ``` 48 | 49 | Path to files with SSL certificates and keys 50 | ```yaml 51 | ssl_dir: /etc/ssl/kubernetes 52 | ``` 53 | 54 | SSL certificate and private key for running user services into Kubernetes 55 | ```yaml 56 | k8s_services_cert: | 57 | ----BEGIN CERTIFICATE---- 
58 | - Your certificate here - 59 | -----END CERTIFICATE----- 60 | k8s_services_cert_key: | 61 | ----BEGIN PRIVATE KEY---- 62 | - Your private key here - 63 | -----END PRIVATE KEY----- 64 | ``` 65 | 66 | 67 | Example Playbook 68 | ---------------- 69 | 70 | - hosts: 71 | - master 72 | roles: 73 | - policy 74 | 75 | License 76 | ------- 77 | 78 | MIT 79 | 80 | Author Information 81 | ------------------ 82 | 83 | Kubernets Community [k8s-community](https://github.com/k8s-community) 84 | -------------------------------------------------------------------------------- /roles/policy/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Using of network storage 3 | # If network storage disabled will use local disk for every requested claim 4 | network_storage: false 5 | 6 | # Kubernetes network persistent disk type, valid values: `gce`, `ceph` 7 | # TODO: AWS persistent disk `aws` 8 | network_storage_type: gce 9 | 10 | # Additional Kubernetes namespaces 11 | k8s_namespaces: 12 | - dev 13 | 14 | # Users access data 15 | k8s_admin_username: admin 16 | k8s_release_username: release 17 | k8s_guest_username: guest 18 | 19 | # Kubernetes configs path 20 | k8s_conf_dir: /etc/kubernetes 21 | k8s_policy_dir: '{{ k8s_conf_dir }}/policy' 22 | 23 | # Master hosts nsmes 24 | k8s_master_hosts: {} 25 | 26 | # SSL base certificate name 27 | # SSL folder and file names will use the same name 28 | ssl_name: kubernetes 29 | 30 | # Path to files with SSL certificates and keys 31 | ssl_dir: /etc/ssl/{{ ssl_name }} 32 | 33 | # Ceph user token 34 | # ceph auth print-key client.admin | base64 35 | ceph_user_token: 'ceph user token here' 36 | 37 | # SSL certificate and private key for running user services into Kubernetes 38 | k8s_services_cert: | 39 | ----BEGIN CERTIFICATE---- 40 | - Your certificate here - 41 | -----END CERTIFICATE----- 42 | k8s_services_cert_key: | 43 | ----BEGIN PRIVATE KEY---- 44 | - Your private key here - 45 | 
-----END PRIVATE KEY----- 46 | -------------------------------------------------------------------------------- /roles/policy/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/policy/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Policy rules 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. 
Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/policy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check kubernetes directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_policy_dir }}' 9 | 10 | - name: Policy files and roles 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_policy_dir }}/{{ item }}" 14 | with_items: 15 | - admin-clusterrolebinding.yaml 16 | - cluster-reader-clusterrole.yaml 17 | - cluster-reader-clusterrolebinding.yaml 18 | - release-role.yaml 19 | - release-rolebinding.yaml 20 | - namespaces.yaml 21 | 22 | - name: TLS Secret 23 | template: 24 | src: tls-secret.yaml 25 | dest: "{{ k8s_policy_dir }}/tls-secret.yaml" 26 | when: k8s_services_cert | length > 1000 27 | 28 | - name: Ceph Secret 29 | template: 30 | src: ceph-secret.yaml 31 | dest: "{{ k8s_policy_dir }}/ceph-secret.yaml" 32 | when: network_storage and network_storage_type == 'ceph' 33 | 34 | - name: Deploy script for policy 35 | template: 36 | src: deploy-policy.sh 37 | dest: "{{ k8s_policy_dir }}/deploy-policy.sh" 38 | mode: 0755 39 | 40 | - name: Awaiting for kubernetes API 41 | wait_for: 42 | host: 127.0.0.1 43 | port: 8080 44 | when: inventory_hostname in k8s_master_hosts[0] 45 | 46 | - name: Run deploy script for policy 47 | command: "{{ k8s_policy_dir }}/deploy-policy.sh" 48 | when: inventory_hostname in k8s_master_hosts[0] 49 | -------------------------------------------------------------------------------- 
/roles/policy/templates/admin-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin 5 | labels: 6 | basic.auth/user: admin 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: cluster-admin 11 | subjects: 12 | - kind: User 13 | name: {{ k8s_admin_username }} 14 | -------------------------------------------------------------------------------- /roles/policy/templates/ceph-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Secret 5 | type: kubernetes.io/rbd 6 | metadata: 7 | name: ceph-secret 8 | namespace: kube-system 9 | data: 10 | key: {{ ceph_user_token }} 11 | 12 | --- 13 | 14 | apiVersion: v1 15 | kind: Secret 16 | type: kubernetes.io/rbd 17 | metadata: 18 | name: ceph-secret 19 | namespace: kube-public 20 | data: 21 | key: {{ ceph_user_token }} 22 | 23 | --- 24 | 25 | apiVersion: v1 26 | kind: Secret 27 | type: kubernetes.io/rbd 28 | metadata: 29 | name: ceph-secret 30 | namespace: default 31 | data: 32 | key: {{ ceph_user_token }} 33 | 34 | {% for namespace in k8s_namespaces %} 35 | --- 36 | 37 | apiVersion: v1 38 | kind: Secret 39 | type: kubernetes.io/rbd 40 | metadata: 41 | name: ceph-secret 42 | namespace: {{ namespace }} 43 | data: 44 | key: {{ ceph_user_token }} 45 | 46 | {% endfor %} 47 | -------------------------------------------------------------------------------- /roles/policy/templates/cluster-reader-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: cluster-reader 5 | labels: 6 | basic.auth/role: cluster-reader 7 | rules: 8 | - apiGroups: 9 | - '*' 10 | resources: 11 | - '*' 12 | verbs: 13 | - 'get' 14 | - 'list' 15 | - 'watch' 16 | - 
nonResourceURLs: 17 | - '*' 18 | verbs: 19 | - 'get' 20 | -------------------------------------------------------------------------------- /roles/policy/templates/cluster-reader-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: cluster-reader 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-reader 9 | subjects: 10 | - kind: User 11 | name: {{ k8s_guest_username }} 12 | - kind: User 13 | name: {{ k8s_release_username }} 14 | -------------------------------------------------------------------------------- /roles/policy/templates/namespaces.yaml: -------------------------------------------------------------------------------- 1 | {% for namespace in k8s_namespaces %} 2 | --- 3 | 4 | apiVersion: v1 5 | kind: Namespace 6 | metadata: 7 | name: {{ namespace }} 8 | 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /roles/policy/templates/release-role.yaml: -------------------------------------------------------------------------------- 1 | {% for namespace in k8s_namespaces %} 2 | --- 3 | 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: Role 6 | metadata: 7 | name: {{ namespace }}-admin 8 | namespace: {{ namespace }} 9 | labels: 10 | basic.auth/role: {{ namespace }}-admin 11 | rules: 12 | - apiGroups: 13 | - '*' 14 | resources: 15 | - '*' 16 | verbs: 17 | - '*' 18 | 19 | {% endfor %} 20 | -------------------------------------------------------------------------------- /roles/policy/templates/release-rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {% for namespace in k8s_namespaces %} 2 | --- 3 | 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: RoleBinding 6 | metadata: 7 | name: {{ namespace }} 8 | namespace: {{ namespace }} 9 | roleRef: 10 | apiGroup: 
rbac.authorization.k8s.io 11 | kind: Role 12 | name: {{ namespace }}-admin 13 | subjects: 14 | - kind: User 15 | name: {{ k8s_release_username }} 16 | 17 | {% endfor %} -------------------------------------------------------------------------------- /roles/policy/templates/tls-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Secret 5 | metadata: 6 | name: tls-secret 7 | namespace: default 8 | data: 9 | tls.crt: {{ k8s_services_cert | b64encode }} 10 | tls.key: {{ k8s_services_cert_key | b64encode }} 11 | 12 | --- 13 | 14 | apiVersion: v1 15 | kind: Secret 16 | metadata: 17 | name: tls-secret 18 | namespace: kube-system 19 | data: 20 | tls.crt: {{ k8s_services_cert | b64encode }} 21 | tls.key: {{ k8s_services_cert_key | b64encode }} 22 | 23 | --- 24 | 25 | apiVersion: v1 26 | kind: Secret 27 | metadata: 28 | name: tls-secret 29 | namespace: kube-public 30 | data: 31 | tls.crt: {{ k8s_services_cert | b64encode }} 32 | tls.key: {{ k8s_services_cert_key | b64encode }} 33 | 34 | {% for namespace in k8s_namespaces %} 35 | --- 36 | 37 | apiVersion: v1 38 | kind: Secret 39 | metadata: 40 | name: tls-secret 41 | namespace: {{ namespace }} 42 | data: 43 | tls.crt: {{ k8s_services_cert | b64encode }} 44 | tls.key: {{ k8s_services_cert_key | b64encode }} 45 | 46 | {% endfor %} 47 | -------------------------------------------------------------------------------- /roles/policy/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/policy/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - policy 6 | -------------------------------------------------------------------------------- /roles/policy/vars/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # vars file for kubernetes 3 | -------------------------------------------------------------------------------- /roles/prometheus/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for prometheus 3 | -------------------------------------------------------------------------------- /roles/prometheus/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Vladislav Saveliev 3 | description: Setup Prometheus 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 
34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/prometheus/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_prometheus_dir }}' 9 | 10 | - name: Prometheus 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_prometheus_dir }}/{{ item }}" 14 | with_items: 15 | - config.yaml 16 | - prometheus.yaml 17 | 18 | - name: Deploy script for Prometheus 19 | template: 20 | src: deploy-prometheus.sh 21 | dest: "{{ k8s_prometheus_dir }}/deploy-prometheus.sh" 22 | mode: 0755 23 | 24 | - name: Run deploy script for Prometheus 25 | command: "{{ k8s_prometheus_dir }}/deploy-prometheus.sh" 26 | when: inventory_hostname in k8s_master_hosts[0] 27 | -------------------------------------------------------------------------------- /roles/prometheus/templates/alerts/all.yml: -------------------------------------------------------------------------------- 1 | {% include 'alerts/common.yml' %} 2 | 3 | groups: 4 | {% include 'alerts/app.yml' %} 5 | 6 | {% include 'alerts/deployment.yml' %} 7 | 8 | {% include 'alerts/pods.yml' %} 9 | 10 | {% 
include 'alerts/node.yml' %} 11 | 12 | {% include 'alerts/ingress.yml' %} 13 | 14 | {% include 'alerts/service.yml' %} 15 | 16 | {% include 'alerts/prometheus.yml' %} 17 | 18 | {% include 'alerts/kubernetes.yml' %} 19 | 20 | {% if k8s_prometheus_scrape_cockroachdb_metrics %} 21 | 22 | {% include 'alerts/cockroachdb.yml' %} 23 | {% endif %} 24 | -------------------------------------------------------------------------------- /roles/prometheus/templates/alerts/app.yml: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | # 3 | # Applications alerts (from /metrics handler) 4 | # 5 | - name: application-alerts 6 | rules: 7 | # 8 | # Alert on each 5XX code of any http service 9 | # 10 | - alert: HttpServiceFatalError 11 | expr: rate(http_requests_total{job="kubernetes-service-endpoints",status=~"5.."}[1m]) > 0 12 | labels: 13 | severity: warning 14 | notify: dev 15 | annotations: 16 | summary: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_pod_name }}: Service returns 5XX error" 17 | description: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_pod_name }}: Service has processed some request incorrectly (code = {{ $labels.status }})" 18 | 19 | # 20 | # Alert for any instance that has a median request latency > 0.5s 21 | # 22 | #- alert: HttpServiceHighRequestLatency 23 | # expr: http_request_duration_seconds_bucket{le="0.5"} > 0.5 24 | # for: 1m 25 | # labels: 26 | # severity: warning 27 | # notify: sre 28 | # annotations: 29 | # summary: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_pod_name }}: High request latency" 30 | # description: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_pod_name }} (instance {{ $labels.instance }}): Service has a median request latency > 0.5s (current value: {{ $value }}s)" 31 | 32 | {% endraw %} 33 | -------------------------------------------------------------------------------- /roles/prometheus/templates/alerts/common.yml:
-------------------------------------------------------------------------------- 1 | # 2 | # Common rules of monitoring: 3 | # 4 | # Severity of problem: 5 | # severity = debug | info | warning | high | disaster 6 | # 7 | # Responsible for reaction on problem: 8 | # notify = sre | dev | support | ... | all 9 | # 10 | # 11 | # Monitored services must have labels like component and other 12 | # TODO: describe it 13 | # 14 | # 15 | 16 | -------------------------------------------------------------------------------- /roles/prometheus/templates/alerts/deployment.yml: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | # 3 | # Deployment alerts (from kube-state-metrics) 4 | # 5 | - name: deployments-alerts 6 | rules: 7 | # 8 | # Alert on deployment has not enough replicas 9 | # 10 | - alert: DeploymentReplicasMismatch 11 | expr: (kube_deployment_spec_replicas != kube_deployment_status_replicas_available) 12 | or (kube_deployment_spec_replicas unless kube_deployment_status_replicas_unavailable) 13 | for: 5m 14 | labels: 15 | severity: warning 16 | notify: sre 17 | annotations: 18 | summary: "{{ $labels.namespace }}/{{ $labels.deployment }}: Deployment is failed" 19 | description: "{{ $labels.namespace }}/{{ $labels.deployment }}: Deployment is failed - observed replicas != intended replicas" 20 | 21 | {% endraw %} 22 | -------------------------------------------------------------------------------- /roles/prometheus/templates/alerts/ingress.yml: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | # 3 | # Ingress alerts (from blackbox-exporter) 4 | # 5 | - name: ingress-alerts 6 | rules: 7 | # 8 | # Alert on host (in ingress) is unreachable > 5 min 9 | # 10 | - alert: IngressIsUnreachable 11 | expr: probe_success{job="kubernetes-ingresses"} == 0 or absent(probe_success{job="kubernetes-ingresses"} == 1) 12 | for: 5m 13 | labels: 14 | severity: warning 15 | notify: sre 16 | 
annotations: 17 | summary: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_ingress_name }}: Ingress is unreachable" 18 | description: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_ingress_name }}: Ingress is unreachable more than 5 minutes (domain is {{ $labels.instance }})" 19 | 20 | # 21 | # Alert on SSL certificate will be expired < 30 days 22 | # 23 | - alert: SSLCertExpiringSoon 24 | expr: probe_ssl_earliest_cert_expiry{job="kubernetes-ingresses"} - time() < 86400 * 30 25 | for: 5m 26 | labels: 27 | severity: warning 28 | notify: sre 29 | annotations: 30 | summary: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_ingress_name }}: SSL certificate will be expired soon" 31 | description: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_ingress_name }}: SSL certificate will be expired less than 30 days (domain is {{ $labels.instance }})" 32 | 33 | {% endraw %} 34 | -------------------------------------------------------------------------------- /roles/prometheus/templates/alerts/prometheus.yml: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | # 3 | # Applications alerts (from /metrics handler) 4 | # 5 | - name: prometheus-alerts 6 | rules: 7 | # 8 | # Alert on failed config reload 9 | # 10 | - alert: FailedReload 11 | expr: prometheus_config_last_reload_successful == 0 12 | for: 10m 13 | labels: 14 | severity: warning 15 | notify: sre 16 | annotations: 17 | description: "Reloading Prometheus' configuration has failed" 18 | summary: "Prometheus configuration reload has failed" 19 | 20 | # 21 | # Alert to check pipeline 22 | # 23 | - alert: DeadMansSwitch 24 | expr: vector(1) 25 | labels: 26 | severity: none 27 | annotations: 28 | summary: "Alerting DeadMansSwitch" 29 | description: "This is a DeadMansSwitch meant to ensure that the entire Alerting pipeline is functional" 30 | {% endraw %} 31 | -------------------------------------------------------------------------------- 
/roles/prometheus/templates/alerts/service.yml: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | # 3 | # Service alerts (from blackbox-exporter) 4 | # 5 | - name: service-alerts 6 | rules: 7 | 8 | # 9 | # Alert on service is unreachable > 5 min 10 | # 11 | - alert: ServiceIsUnreachable 12 | expr: probe_success{job="kubernetes-services"} == 0 or absent(probe_success{job="kubernetes-services"} == 1) 13 | for: 5m 14 | labels: 15 | severity: warning 16 | notify: sre 17 | annotations: 18 | description: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_service_name }}: Service is unreachable" 19 | summary: "{{ $labels.kubernetes_namespace }}/{{ $labels.kubernetes_service_name }}: Service is unreachable more than 5 minutes" 20 | 21 | {% endraw %} 22 | -------------------------------------------------------------------------------- /roles/prometheus/templates/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: {{ k8s_prometheus_namespace }} 7 | 8 | --- 9 | 10 | apiVersion: v1 11 | kind: Secret 12 | metadata: 13 | name: tls-secret 14 | namespace: {{ k8s_prometheus_namespace }} 15 | data: 16 | tls.crt: {{ k8s_services_cert | b64encode }} 17 | tls.key: {{ k8s_services_cert_key | b64encode }} 18 | 19 | --- 20 | 21 | apiVersion: v1 22 | kind: Secret 23 | metadata: 24 | name: registry-pull-secret 25 | namespace: {{ k8s_prometheus_namespace }} 26 | type: kubernetes.io/dockerconfigjson 27 | data: 28 | .dockerconfigjson: {{ k8s_docker_registry_auth_token }} 29 | 30 | --- 31 | 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | kind: Role 34 | metadata: 35 | name: {{ k8s_prometheus_namespace }}-admin 36 | namespace: {{ k8s_prometheus_namespace }} 37 | labels: 38 | basic.auth/role: {{ k8s_prometheus_namespace }}-admin 39 | rules: 40 | - apiGroups: 41 | - '*' 42 | resources: 43 | - '*' 44 | verbs: 45 | - '*' 46 | 
47 | --- 48 | 49 | apiVersion: rbac.authorization.k8s.io/v1 50 | kind: RoleBinding 51 | metadata: 52 | name: {{ k8s_prometheus_namespace }} 53 | namespace: {{ k8s_prometheus_namespace }} 54 | roleRef: 55 | apiGroup: rbac.authorization.k8s.io 56 | kind: Role 57 | name: {{ k8s_prometheus_namespace }}-admin 58 | subjects: 59 | - kind: User 60 | name: {{ k8s_release_username }} 61 | -------------------------------------------------------------------------------- /roles/prometheus/templates/deploy-prometheus.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_prometheus_config { 4 | if kubectl get namespaces | grep {{ k8s_prometheus_namespace }} &> /dev/null; then 5 | echo "Prometheus config already exists" 6 | else 7 | echo "Creating Prometheus config" 8 | kubectl apply -f {{ k8s_prometheus_dir }}/config.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | function deploy_prometheus { 15 | if kubectl get deploy -l app=prometheus -n {{ k8s_prometheus_namespace }} | grep prometheus &> /dev/null; then 16 | echo "Updating Prometheus (config-maps will be auto applied)" 17 | kubectl apply -f {{ k8s_prometheus_dir }}/prometheus.yaml 18 | else 19 | echo "Creating Prometheus" 20 | kubectl create -f {{ k8s_prometheus_dir }}/prometheus.yaml 21 | fi 22 | 23 | echo 24 | } 25 | 26 | deploy_prometheus_config 27 | deploy_prometheus 28 | -------------------------------------------------------------------------------- /roles/prometheus/templates/grafana-dashboards/all.yml: -------------------------------------------------------------------------------- 1 | capacity-planning-dashboard.json: |- 2 | {% include 'grafana-dashboards/capacity-planning.json' %} 3 | 4 | cluster-health-dashboard.json: |- 5 | {% include 'grafana-dashboards/cluster-health.json' %} 6 | 7 | cluster-monitoring-dashboard.json: |- 8 | {% include 'grafana-dashboards/cluster-monitoring.json' %} 9 | 10 | cluster-status-dashboard.json: |- 11 | {% include 
'grafana-dashboards/cluster-status.json' %} 12 | 13 | control-plane-status-dashboard.json: |- 14 | {% include 'grafana-dashboards/control-plane-status.json' %} 15 | 16 | deployment-dashboard.json: |- 17 | {% include 'grafana-dashboards/deployment.json' %} 18 | 19 | nodes-dashboard.json: |- 20 | {% include 'grafana-dashboards/nodes.json' %} 21 | 22 | pods-dashboard.json: |- 23 | {% include 'grafana-dashboards/pods.json' %} 24 | 25 | resource-requests-dashboard.json: |- 26 | {% include 'grafana-dashboards/resource-requests.json' %} 27 | 28 | {% if k8s_prometheus_scrape_cockroachdb_metrics %} 29 | 30 | cockroachdb-replicas-dashboard.json: |- 31 | {% include 'grafana-dashboards/cockroachdb/replicas.json' %} 32 | 33 | cockroachdb-runtime-dashboard.json: |- 34 | {% include 'grafana-dashboards/cockroachdb/runtime.json' %} 35 | 36 | cockroachdb-sql-dashboard.json: |- 37 | {% include 'grafana-dashboards/cockroachdb/sql.json' %} 38 | 39 | cockroachdb-storage-dashboard.json: |- 40 | {% include 'grafana-dashboards/cockroachdb/storage.json' %} 41 | 42 | 43 | 44 | {% endif %} 45 | -------------------------------------------------------------------------------- /roles/prometheus/templates/grafana-dashboards/template.json: -------------------------------------------------------------------------------- 1 | {% raw %} 2 | { 3 | "dashboard": { 4 | ... ... 
5 | }, 6 | "overwrite": true, 7 | "inputs": [ 8 | { 9 | "name": "DS_PROMETHEUS", 10 | "type": "datasource", 11 | "pluginId": "prometheus", 12 | "value": "prometheus" 13 | } 14 | ] 15 | }{% endraw %} 16 | -------------------------------------------------------------------------------- /roles/prometheus/templates/prometheus.yaml: -------------------------------------------------------------------------------- 1 | {% include 'server.yaml' %} 2 | 3 | 4 | {% include 'alert-manager.yaml' %} 5 | 6 | 7 | {% include 'node-exporter.yaml' %} 8 | 9 | 10 | {% include 'push-gateway.yaml' %} 11 | 12 | 13 | {% include 'kube-state-metrics.yaml' %} 14 | 15 | 16 | {% include 'blackbox-exporter.yaml' %} 17 | 18 | 19 | {% include 'grafana.yaml' %} 20 | -------------------------------------------------------------------------------- /roles/prometheus/templates/scrape_configs/all.yml: -------------------------------------------------------------------------------- 1 | {% include 'scrape_configs/prometheus.yml' %} 2 | 3 | {% include 'scrape_configs/kubernetes.yml' %} 4 | 5 | {% if k8s_prometheus_scrape_istio_metrics %} 6 | {% include 'scrape_configs/istio.yml' %} 7 | {% endif %} 8 | 9 | {% if k8s_prometheus_scrape_cockroachdb_metrics %} 10 | {% include 'scrape_configs/cockroachdb.yml' %} 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /roles/prometheus/templates/scrape_configs/cockroachdb.yml: -------------------------------------------------------------------------------- 1 | - job_name: 'cockroach' 2 | 3 | kubernetes_sd_configs: 4 | - role: endpoints 5 | 6 | tls_config: 7 | insecure_skip_verify: true 8 | 9 | relabel_configs: 10 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] 11 | action: keep 12 | regex: true 13 | - source_labels: [__meta_kubernetes_pod_name] 14 | action: keep 15 | regex: cockroachdb-(\d+) 16 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] 17 | action: 
replace 18 | target_label: __scheme__ 19 | regex: (https?) 20 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] 21 | action: replace 22 | target_label: __metrics_path__ 23 | regex: (.+) 24 | - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] 25 | action: replace 26 | target_label: __address__ 27 | regex: (.+)(?::\d+);(\d+) 28 | replacement: $1:$2 29 | - source_labels: [__meta_kubernetes_pod_node_name] 30 | action: replace 31 | target_label: kubernetes_pod_node_name 32 | - action: labelmap 33 | regex: __meta_kubernetes_service_label_(.+) 34 | - source_labels: [__meta_kubernetes_namespace] 35 | action: replace 36 | target_label: kubernetes_namespace 37 | - source_labels: [__meta_kubernetes_pod_name] 38 | action: replace 39 | target_label: kubernetes_pod_name 40 | - source_labels: [__address__] 41 | action: replace 42 | target_label: cluster 43 | regex: (.+) 44 | replacement: cockroachdb-cluster 45 | -------------------------------------------------------------------------------- /roles/prometheus/templates/scrape_configs/istio.yml: -------------------------------------------------------------------------------- 1 | # Scrape configuration for istio services - mesh, envoy, mixer 2 | # 3 | - job_name: 'istio-mesh' 4 | # Override the global default and scrape targets from this job every 5 seconds. 5 | scrape_interval: 5s 6 | # metrics_path defaults to '/metrics' 7 | # scheme defaults to 'http'. 8 | static_configs: 9 | - targets: ['istio-mixer:42422'] 10 | 11 | - job_name: 'envoy' 12 | # Override the global default and scrape targets from this job every 5 seconds. 13 | scrape_interval: 5s 14 | # metrics_path defaults to '/metrics' 15 | # scheme defaults to 'http'. 16 | static_configs: 17 | - targets: ['istio-mixer:9102'] 18 | 19 | - job_name: 'mixer' 20 | # Override the global default and scrape targets from this job every 5 seconds. 
21 | scrape_interval: 5s 22 | # metrics_path defaults to '/metrics' 23 | # scheme defaults to 'http'. 24 | static_configs: 25 | - targets: ['istio-mixer:9093'] 26 | -------------------------------------------------------------------------------- /roles/prometheus/templates/scrape_configs/prometheus.yml: -------------------------------------------------------------------------------- 1 | # A scrape configuration for short-lived jobs (ephemeral and batch jobs) 2 | # 3 | - job_name: prometheus 4 | static_configs: 5 | - targets: 6 | - localhost:9090 7 | 8 | - job_name: 'prometheus-pushgateway' 9 | honor_labels: true 10 | 11 | kubernetes_sd_configs: 12 | - role: service 13 | 14 | relabel_configs: 15 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] 16 | action: keep 17 | regex: pushgateway 18 | -------------------------------------------------------------------------------- /roles/prometheus/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for prometheus 3 | -------------------------------------------------------------------------------- /roles/readiness/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Check readiness 2 | ============================= 3 | 4 | This role install Readiness checking on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 
12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Container Network Interface type, valid values: `calico`, `romana` 20 | ```yaml 21 | cni_type: calico 22 | ``` 23 | 24 | Container Network Interface (CNI) bin path 25 | ```yaml 26 | cni_bin_dir: /opt/cni/bin 27 | ``` 28 | 29 | Example Playbook 30 | ---------------- 31 | 32 | - hosts: addons 33 | roles: 34 | - readiness 35 | 36 | License 37 | ------- 38 | 39 | MIT 40 | 41 | Author Information 42 | ------------------ 43 | 44 | Kubernetes Community [k8s-community](https://github.com/k8s-community) 45 | -------------------------------------------------------------------------------- /roles/readiness/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Container Network Interface type, valid values: `calico`, `romana` 3 | cni_type: calico 4 | 5 | # Container Network Interface (CNI) bin path 6 | cni_bin_dir: /opt/cni/bin 7 | -------------------------------------------------------------------------------- /roles/readiness/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/readiness/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Check readiness 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled
role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/readiness/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Awaiting for CNI loading 3 | wait_for: 4 | path: '{{ cni_bin_dir }}/{{ cni_type }}' 5 | 6 | # It is not possible to wait for the /readiness endpoint here 7 | # TODO: we should replace this with a real URL check, not only TCP 8 | - name: Awaiting for Calico readiness 9 | wait_for: 10 | host: 127.0.0.1 11 | port: 9099 12 | when: cni_type == 'calico' 13 | -------------------------------------------------------------------------------- /roles/readiness/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/readiness/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - readiness 6 | -------------------------------------------------------------------------------- /roles/readiness/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/registry/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for registry 3 | -------------------------------------------------------------------------------- /roles/registry/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Kube registry 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10
| # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/registry/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | - '{{ k8s_kube_registry_dir }}' 10 | 11 | - name: Kube registry 12 | template: 13 | src: "{{ item }}" 14 | dest: "{{ k8s_addons_dir }}/{{ item }}" 15 | with_items: 16 | - kube-registry.yaml 17 | 18 | - name: Deploy script for kube registry 19 | template: 20 | src: deploy-registry.sh 21 | dest: "{{ k8s_addons_dir }}/deploy-registry.sh" 22 | mode: 0755 23 | 24 | - name: Run deploy script for registry 25 | command: "{{ k8s_addons_dir }}/deploy-registry.sh" 26 | when: inventory_hostname in k8s_master_hosts[0] 27 | -------------------------------------------------------------------------------- /roles/registry/templates/deploy-registry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_kube_registry { 4 | if kubectl get deploy -l k8s-app=kube-registry --namespace=kube-system | grep kube-registry &> /dev/null; then 5 | echo "Kube Registry already exists" 6 | else 7 | echo "Creating Kube Registry" 8 | kubectl apply -f {{ k8s_addons_dir }}/kube-registry.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | deploy_kube_registry 15 | -------------------------------------------------------------------------------- /roles/registry/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/registry/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - registry 6 | 
-------------------------------------------------------------------------------- /roles/registry/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for registry 3 | -------------------------------------------------------------------------------- /roles/ssl/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Generate CA certificate only 3 | ssl_ca_only: false 4 | 5 | # SSL certificate name 6 | # The file names will use the same name 7 | ssl_name: kubernetes 8 | 9 | # Path to SSL generators `cfssl` and `cfsssljson` 10 | ssl_bin_dir: /usr/bin 11 | 12 | # Path to files with SSL certificates and keys 13 | ssl_dir: /etc/ssl/kubernetes 14 | 15 | # Country name which used in `C` attribute of certificates (`NL`,`RU`, etc) 16 | ssl_country: country-name 17 | 18 | # City name which used in `L` attribute of certificates 19 | ssl_city: city-name 20 | 21 | # Organization name which used in `O` attribute of certificates 22 | ssl_org: organization-name 23 | 24 | # Organization Unit name which used in `OU` attribute of certificates 25 | ssl_division: organization-unit-name 26 | 27 | # State name which used in `ST` attribute of certificates 28 | ssl_state: state-name 29 | 30 | # Key encoding algorithm 31 | ssl_key_algo: rsa 32 | 33 | # Size of hash for key encoding algorithm 34 | ssl_key_size: 2048 35 | 36 | # How much hours certificate will used before expiration 37 | ssl_expiry_hours: 43800h 38 | 39 | # Host names used in X509v3 Subject Alternative Name field 40 | ssl_hosts: 41 | - '{{ inventory_hostname }}' 42 | 43 | # IP addresses used in X509v3 Subject Alternative Name field 44 | ssl_ips: 45 | - '{{ ansible_default_ipv4.address }}' 46 | 47 | # Additional host names or IP addresses used in X509v3 Subject Alternative Name field 48 | ssl_custom: 49 | - '127.0.0.1' 50 | 51 | # Special certificates for Kube clients. 
The fields `O` and `OU` are used as the user and their group
34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/ssl/templates/config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "{{ ssl_expiry_hours }}" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "expiry": "{{ ssl_expiry_hours }}", 9 | "usages": [ 10 | "signing", 11 | "key encipherment", 12 | "server auth", 13 | "client auth" 14 | ] 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /roles/ssl/templates/csr-ca.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "{{ ssl_org }}", 3 | "key": { 4 | "algo": "{{ ssl_key_algo }}", 5 | "size": {{ ssl_key_size }} 6 | }, 7 | "names": [ 8 | { 9 | "C": "{{ ssl_country }}", 10 | "L": "{{ ssl_city }} ", 11 | "O": "{{ ssl_org }}", 12 | "OU": "{{ ssl_division }}", 13 | "ST": "{{ ssl_state }}" 14 | } 15 | ] 16 | } 17 | 18 | -------------------------------------------------------------------------------- /roles/ssl/templates/csr-client.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "{{ item.cn }}", 3 | "hosts": 
[""], 4 | "key": { 5 | "algo": "{{ ssl_key_algo }}", 6 | "size": {{ ssl_key_size }} 7 | }, 8 | "names": [ 9 | {% for org in item.org %} 10 | { 11 | "O": "{{ org }}" 12 | }{% if not loop.last %},{% endif %} 13 | 14 | {% endfor %} 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /roles/ssl/templates/csr-common.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "{{ inventory_hostname }}", 3 | "hosts": [ 4 | {% for host in ssl_hosts %}"{{ host }}", 5 | {% endfor %} 6 | {% for host in ssl_ips %}"{{ host }}", 7 | {% endfor %} 8 | {% for host in ssl_custom %}"{{ host }}"{% if not loop.last %},{% endif %} 9 | {% endfor %} 10 | ], 11 | "key": { 12 | "algo": "{{ ssl_key_algo }}", 13 | "size": {{ ssl_key_size }} 14 | }, 15 | "names": [ 16 | { 17 | "C": "{{ ssl_country }}", 18 | "L": "{{ ssl_city }} ", 19 | "O": "{{ ssl_org }}", 20 | "OU": "{{ ssl_division }}", 21 | "ST": "{{ ssl_state }}" 22 | } 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /roles/ssl/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/ssl/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ssl 6 | -------------------------------------------------------------------------------- /roles/ssl/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for ssl 3 | -------------------------------------------------------------------------------- /roles/stats/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Statistics services 2 | ================================= 3 | 4 | 
This role installs Statistics services on Red Hat Linux based systems.
k8s_heapster_base_eventer_memory: "190Mi" 23 | k8s_heapster_metrics_memory_per_node: 4 24 | k8s_heapster_metrics_cpu_per_node: 0.5 25 | k8s_heapster_eventer_memory_per_node: 500 26 | k8s_heapster_num_nodes: 3 27 | k8s_heapster_nanny_memory_per_node: 200 28 | k8s_heapster_nanny_memory: '{% if k8s_heapster_num_nodes >= 0 %} {{ (90 * 1024 + k8s_heapster_num_nodes * k8s_heapster_nanny_memory_per_node)|string + "Ki" }} {% else %} "90Mi" {% endif %}' 29 | -------------------------------------------------------------------------------- /roles/stats/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/stats/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup Statistics services 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 
30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 58 | -------------------------------------------------------------------------------- /roles/stats/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Statistics services 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - heapster.yaml 16 | 17 | - name: Deploy script for statistics services 18 | template: 19 | src: deploy-stats.sh 20 | dest: "{{ k8s_addons_dir }}/deploy-stats.sh" 21 | mode: 0755 22 | 23 | - name: Run deploy script for stats 24 | command: "{{ k8s_addons_dir }}/deploy-stats.sh" 25 | when: inventory_hostname in k8s_master_hosts[0] 26 | -------------------------------------------------------------------------------- /roles/stats/templates/deploy-stats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_heapster { 4 | if kubectl get deploy -l k8s-app=heapster 
--namespace=kube-system | grep heapster &> /dev/null; then 5 | echo "Heapster deployment already exists" 6 | else 7 | echo "Creating Heapster deployment" 8 | kubectl apply -f {{ k8s_addons_dir }}/heapster.yaml 9 | fi 10 | 11 | echo 12 | } 13 | 14 | deploy_heapster 15 | -------------------------------------------------------------------------------- /roles/stats/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/stats/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - stats 6 | -------------------------------------------------------------------------------- /roles/stats/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | -------------------------------------------------------------------------------- /roles/storage/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # A service account's credentials include a generated email address that is unique. 3 | # Specify the email address of the user account 4 | # You can create one according to the procedure specified in this reference 5 | # https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount 6 | gce_service_account_email: '...-compute@developer.gserviceaccount.com' 7 | 8 | # The full path of your unique service account credentials file. 
9 | # Details on generating this can be found at 10 | # https://docs.ansible.com/ansible/guide_gce.html#credentials 11 | # You can download json credentials according to the procedure specified in this reference 12 | # https://support.google.com/cloud/answer/6158849?hl=en&ref_topic=6262490#serviceaccounts 13 | gce_credentials_file: '{{ ansible_env.HOME }}/gcloud.json' 14 | 15 | # Specify your project ID which one used from your GCP account 16 | gce_project_id: my-project-id 17 | 18 | # A zone is an isolated location within a region. 19 | # Resources that live in a zone, such as instances, 20 | # are referred to as zonal resources 21 | gce_instances_zone: europe-west1-b 22 | 23 | # Size of network persistent disk in Gb 24 | network_storage_size: 100 25 | 26 | # Name of GCE persistent disk 27 | gce_storage_name: pd-std 28 | 29 | # Type of GCE storage, options: `slow`, `fast` 30 | gce_storage_type: slow 31 | 32 | gce_disk_type: '{% if gce_storage_type == "slow" %}pd-standard{% else %}pd-ssd{% endif %}' 33 | -------------------------------------------------------------------------------- /roles/storage/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for network 3 | -------------------------------------------------------------------------------- /roles/storage/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Igor Dolzhikov 3 | description: Setup GCE Persistent Disk 4 | company: Kubernetes Community 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: MIT 18 | 19 | min_ansible_version: 2.2 20 | 21 | # If this a Container Enabled role, provide the minimum 
Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
58 | -------------------------------------------------------------------------------- /roles/storage/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create Persistent Disk 3 | gce_pd: 4 | name: '{{ gce_storage_name }}' 5 | disk_type: '{{ gce_disk_type }}' 6 | mode: READ_WRITE 7 | size_gb: '{{ network_storage_size }}' 8 | zone: '{{ gce_instances_zone }}' 9 | service_account_email: '{{ gce_service_account_email }}' 10 | credentials_file: '{{ gce_credentials_file }}' 11 | project_id: '{{ gce_project_id }}' 12 | register: pd 13 | 14 | - name: Persistent Disk details 15 | debug: 16 | msg: '{{ pd }}' 17 | -------------------------------------------------------------------------------- /roles/storage/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/storage/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | connection: local 4 | roles: 5 | - lb 6 | -------------------------------------------------------------------------------- /roles/storage/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for network 3 | -------------------------------------------------------------------------------- /roles/toolchain/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Tool Chain 2 | ======================== 3 | 4 | This role install Network & System Tools on Redhat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 
12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Install network tools. 20 | ```yaml 21 | toolchain_install_net_tools: true 22 | ``` 23 | 24 | Install system tools. 25 | ```yaml 26 | toolchain_install_system_tools: true 27 | ``` 28 | 29 | Install docker. 30 | ```yaml 31 | toolchain_install_docker: true 32 | ``` 33 | 34 | Example Playbook 35 | ---------------- 36 | 37 | - hosts: 38 | - master 39 | - node 40 | - build 41 | roles: 42 | - toolchain 43 | 44 | License 45 | ------- 46 | 47 | MIT 48 | 49 | Author Information 50 | ------------------ 51 | 52 | Kubernets Community [k8s-community](https://github.com/k8s-community) 53 | -------------------------------------------------------------------------------- /roles/toolchain/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | toolchain_install_net_tools: true 3 | toolchain_install_system_tools: true 4 | toolchain_install_docker: true 5 | -------------------------------------------------------------------------------- /roles/toolchain/files/docker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo /usr/bin/docker "$@" 4 | -------------------------------------------------------------------------------- /roles/toolchain/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | service: 4 | name: docker 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/toolchain/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install net tools 3 | yum: 4 | name: '{{ item }}' 5 | state: present 6 | with_items: 7 | - lsof 8 | - mtr 9 | - net-tools 10 | - nmap 11 | - tcpdump 12 | - telnet 13 | when: 
toolchain_install_net_tools 14 | 15 | - name: Install system tools 16 | yum: 17 | name: '{{ item }}' 18 | state: present 19 | with_items: 20 | - git 21 | - iotop 22 | - iperf 23 | - rsync 24 | - screen 25 | - strace 26 | - wget 27 | when: toolchain_install_system_tools 28 | 29 | - name: Install docker 30 | yum: 31 | name: docker 32 | state: present 33 | when: toolchain_install_docker 34 | notify: 35 | - restart docker 36 | 37 | - name: Enable docker 38 | service: 39 | name: docker 40 | enabled: yes 41 | when: toolchain_install_docker 42 | notify: 43 | - restart docker 44 | 45 | - name: Install docker wrapper 46 | copy: 47 | src: docker 48 | dest: /usr/local/bin/docker 49 | mode: 0555 50 | when: toolchain_install_docker 51 | -------------------------------------------------------------------------------- /roles/toolchain/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost -------------------------------------------------------------------------------- /roles/toolchain/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - toolchain 6 | -------------------------------------------------------------------------------- /roles/toolchain/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for nettools 3 | -------------------------------------------------------------------------------- /roles/user/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: User credentials 2 | ============================== 3 | 4 | This role install Kubernetes user credentials on Red Hat linux based systems. 
5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | It will be used as the Internal dns domain name if DNS is enabled. 20 | Services will be discoverable under 21 | `..svc..`, e.g. myservice.default.svc.k8s.cluster 22 | ```yaml 23 | k8s_domain_name: k8s 24 | k8s_cluster_name: cluster 25 | k8s_cluster_domain: '{{ k8s_domain_name }}.{{ k8s_cluster_name }}' 26 | ``` 27 | 28 | Kubernetes master and services host names 29 | ```yaml 30 | k8s_master_name: master.your-domain-name 31 | ``` 32 | 33 | SSL base certificate name 34 | SSL folder and file names will use the same name 35 | ```yaml 36 | ssl_name: kubernetes 37 | ``` 38 | 39 | Path to files with SSL certificates and keys 40 | ```yaml 41 | ssl_dir: /etc/ssl/{{ ssl_name }} 42 | ``` 43 | 44 | URL scheme for kubernetes services 45 | ```yaml 46 | k8s_url_scheme: https 47 | ``` 48 | 49 | Kubernetes service API port 50 | ```yaml 51 | k8s_api_port: 443 52 | ``` 53 | 54 | Master hosts nsmes 55 | ```yaml 56 | k8s_master_hosts: {} 57 | ``` 58 | 59 | Example Playbook 60 | ---------------- 61 | 62 | - hosts: 63 | - master 64 | - node 65 | - build 66 | roles: 67 | - user 68 | 69 | License 70 | ------- 71 | 72 | MIT 73 | 74 | Author Information 75 | ------------------ 76 | 77 | Kubernets Community [k8s-community](https://github.com/k8s-community) 78 | -------------------------------------------------------------------------------- /roles/user/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # It will be used as the Internal dns domain name if DNS is enabled. 3 | # Services will be discoverable under 4 | # ..svc.., e.g. 
5 | # myservice.default.svc.k8s.cluster 6 | k8s_domain_name: k8s 7 | k8s_cluster_name: cluster 8 | k8s_cluster_domain: '{{ k8s_domain_name }}.{{ k8s_cluster_name }}' 9 | 10 | # Kubernetes master and services host names 11 | k8s_master_name: master.your-domain-name 12 | 13 | # SSL base certificate name 14 | # SSL folder and file names will use the same name 15 | ssl_name: kubernetes 16 | 17 | # Path to files with SSL certificates and keys 18 | ssl_dir: /etc/ssl/{{ ssl_name }} 19 | 20 | # URL scheme for kubernetes services 21 | k8s_url_scheme: https 22 | # Kubernetes service API port 23 | k8s_api_port: 443 24 | 25 | # Master hosts nsmes 26 | k8s_master_hosts: {} 27 | 28 | -------------------------------------------------------------------------------- /roles/user/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check config directory 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | with_items: 8 | - '{{ ansible_env.HOME }}/.kube' 9 | 10 | - name: Default user config 11 | template: 12 | src: kubeconfig.default 13 | dest: "{{ ansible_env.HOME }}/.kube/config" 14 | -------------------------------------------------------------------------------- /roles/user/templates/kubeconfig.default: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: {{ k8s_cluster_domain }} 5 | cluster: 6 | certificate-authority: {{ ssl_dir }}/ca.pem 7 | server: {{ k8s_url_scheme }}://{% if k8s_master_hosts is defined and inventory_hostname in k8s_master_hosts %}127.0.0.1{% else %}{{ k8s_master_external_ip | default(k8s_master_name) }}{% endif %}:{{ k8s_api_port }} 8 | contexts: 9 | - name: {{ inventory_dir.split('/') 
| last }} 10 | context: 11 | cluster: {{ k8s_cluster_domain }} 12 | user: admin 13 | current-context: {{ inventory_dir.split('/') | last }} 14 | users: 15 | - name: admin 16 | user: 17 | client-certificate: {{ ssl_dir }}/client-admin.pem 18 | client-key: {{ ssl_dir }}/client-admin-key.pem 19 | -------------------------------------------------------------------------------- /roles/user/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/user/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - user 6 | -------------------------------------------------------------------------------- /roles/user/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for kubernetes 3 | -------------------------------------------------------------------------------- /roles/volumes/README.md: -------------------------------------------------------------------------------- 1 | Ansible Role: Services Volumes 2 | ============================== 3 | 4 | This role install Services Volumes on Red Hat linux based systems. 5 | 6 | [![Contributions Welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/k8s-community/cluster-deploy/issues) 7 | 8 | Requirements 9 | ------------ 10 | 11 | No special requirements. 12 | 13 | 14 | Role Variables 15 | -------------- 16 | 17 | Available variables are listed below, along with default values (see `defaults/main.yml`): 18 | 19 | Size of volume in Gb. That volume used by services 20 | ```yaml 21 | k8s_services_volume: 30 22 | ``` 23 | 24 | Size of volume in Gb. 
That volume used for backups 25 | ```yaml 26 | k8s_backup_volume: 30 27 | ``` 28 | 29 | Kubernetes configs path 30 | ```yaml 31 | k8s_conf_dir: /etc/kubernetes 32 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 33 | ``` 34 | 35 | Services volumes host path 36 | ```yaml 37 | k8s_services_dir: /var/lib/data 38 | ``` 39 | 40 | Backup volumes host path 41 | ```yaml 42 | k8s_backup_dir: /var/lib/backup 43 | ``` 44 | 45 | Using of network storage 46 | If network storage disabled will use local disk for every requested claim 47 | ```yaml 48 | network_storage: false 49 | ``` 50 | 51 | Kubernetes network persistent disk type, valid values: `gce`, `ceph` 52 | TODO: AWS persistent disk `aws` 53 | ```yaml 54 | network_storage_type: gce 55 | ``` 56 | 57 | Ceph monitor port 58 | ```yaml 59 | ceph_monitor_port: 6789 60 | ``` 61 | 62 | Storage hosts names 63 | ```yaml 64 | k8s_storage_hosts: {} 65 | ``` 66 | 67 | Example Playbook 68 | ---------------- 69 | 70 | - hosts: addons 71 | roles: 72 | - volumes 73 | 74 | License 75 | ------- 76 | 77 | MIT 78 | 79 | Author Information 80 | ------------------ 81 | 82 | Kubernetes Community [k8s-community](https://github.com/k8s-community) 83 | -------------------------------------------------------------------------------- /roles/volumes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Size of volume in Gb. That volume used by services 3 | k8s_services_volume: 30 4 | 5 | # Size of volume in Gb. 
That volume used for backups 6 | k8s_backup_volume: 30 7 | 8 | # Kubernetes configs path 9 | k8s_conf_dir: /etc/kubernetes 10 | k8s_addons_dir: '{{ k8s_conf_dir }}/addons' 11 | 12 | # Services volumes host path 13 | k8s_services_dir: /var/lib/data 14 | 15 | # Backup volumes host path 16 | k8s_backup_dir: /var/lib/backup 17 | 18 | # Using of network storage 19 | # If network storage disabled will use local disk for every requested claim 20 | network_storage: false 21 | 22 | # Kubernetes network persistent disk type, valid values: `gce`, `ceph` 23 | # TODO: AWS persistent disk `aws` 24 | network_storage_type: gce 25 | 26 | # Storage hosts names 27 | k8s_storage_hosts: {} 28 | 29 | # A zone is an isolated location within a region. 30 | # Resources that live in a zone, such as instances, 31 | # are referred to as zonal resources 32 | gce_instances_zone: europe-west1-b 33 | 34 | # Name of GCE persistent disk 35 | gce_storage_name: pd-std 36 | 37 | # Type of GCE storage, options: `slow`, `fast` 38 | gce_storage_type: slow 39 | 40 | # Ceph monitor port 41 | ceph_monitor_port: 6789 42 | 43 | # Ceph pool name 44 | ceph_pull_name: rbd 45 | 46 | # Ceph RBD image name 47 | ceph_rbd_image_name: rbdstore 48 | 49 | # Ceph user name 50 | ceph_user_name: admin 51 | 52 | # Ceph monitor servers 53 | ceph_monitor_servers: "{% for host in k8s_storage_hosts %}{{ host }}:{{ ceph_monitor_port }}{% if not loop.last %},{% endif %}{% endfor %}" 54 | -------------------------------------------------------------------------------- /roles/volumes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for addons 3 | -------------------------------------------------------------------------------- /roles/volumes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check addons directories 3 | file: 4 | path: '{{ item }}' 5 | state: directory 6 | mode: 0755 7 | 
with_items: 8 | - '{{ k8s_addons_dir }}' 9 | 10 | - name: Service volumes 11 | template: 12 | src: "{{ item }}" 13 | dest: "{{ k8s_addons_dir }}/{{ item }}" 14 | with_items: 15 | - gce-storage.yaml 16 | - ceph-storage.yaml 17 | - local-storage.yaml 18 | - volumes.yaml 19 | 20 | - name: Deploy script for services volumes 21 | template: 22 | src: deploy-volumes.sh 23 | dest: "{{ k8s_addons_dir }}/deploy-volumes.sh" 24 | mode: 0755 25 | 26 | - name: Run deploy script for services volumes 27 | command: "{{ k8s_addons_dir }}/deploy-volumes.sh" 28 | when: inventory_hostname in k8s_storage_hosts[0] 29 | -------------------------------------------------------------------------------- /roles/volumes/templates/ceph-storage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: standard 6 | annotations: 7 | storageclass.beta.kubernetes.io/is-default-class: "true" 8 | labels: 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | provisioner: kubernetes.io/rbd 11 | parameters: 12 | monitors: {{ ceph_monitor_servers }} 13 | adminId: {{ ceph_user_name }} 14 | adminSecretName: ceph-secret 15 | adminSecretNamespace: kube-system 16 | pool: {{ ceph_pull_name }} 17 | userId: {{ ceph_user_name }} 18 | userSecretName: ceph-secret 19 | -------------------------------------------------------------------------------- /roles/volumes/templates/deploy-volumes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function deploy_default_storage { 4 | if kubectl get storageclass | grep standard &> /dev/null; then 5 | echo "Default storage class already exists" 6 | else 7 | echo "Creating default storage class" 8 | {% if network_storage and network_storage_type == 'gce' %} 9 | kubectl create -f {{ k8s_addons_dir }}/gce-storage.yaml 10 | {% endif %} 11 | {% if network_storage and network_storage_type == 'ceph' %} 12 | kubectl 
create -f {{ k8s_addons_dir }}/ceph-storage.yaml 13 | {% endif %} 14 | {% if not network_storage %} 15 | kubectl create -f {{ k8s_addons_dir }}/local-storage.yaml 16 | {% endif %} 17 | fi 18 | } 19 | 20 | 21 | function deploy_volumes { 22 | if kubectl get pv | grep services &> /dev/null; then 23 | echo "Services volumes already exist" 24 | else 25 | echo "Creating services volumes" 26 | kubectl create -f {{ k8s_addons_dir }}/volumes.yaml 27 | fi 28 | 29 | echo 30 | } 31 | 32 | # deploy_default_storage 33 | deploy_volumes 34 | -------------------------------------------------------------------------------- /roles/volumes/templates/gce-storage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: standard 6 | annotations: 7 | storageclass.beta.kubernetes.io/is-default-class: "true" 8 | labels: 9 | kubernetes.io/cluster-service: "true" 10 | addonmanager.kubernetes.io/mode: EnsureExists 11 | provisioner: kubernetes.io/gce-pd 12 | parameters: 13 | {% if gce_storage_type == 'slow' %} 14 | type: pd-standard 15 | {% else %} 16 | type: pd-ssd 17 | {% endif %} 18 | zone: {{ gce_instances_zone }} 19 | -------------------------------------------------------------------------------- /roles/volumes/templates/local-storage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: standard 6 | annotations: 7 | storageclass.beta.kubernetes.io/is-default-class: "true" 8 | labels: 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | provisioner: kubernetes.io/host-path 11 | -------------------------------------------------------------------------------- /roles/volumes/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 
-------------------------------------------------------------------------------- /roles/volumes/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - volumes 6 | -------------------------------------------------------------------------------- /roles/volumes/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for addons 3 | --------------------------------------------------------------------------------