├── LICENSE ├── README.md ├── chapter1 └── ansible │ ├── LICENSE │ ├── README.md │ ├── Vagrantfile │ ├── ansible.cfg │ ├── group_vars │ ├── all.yml │ └── kube-cluster.yml │ ├── hack │ └── setup-vms.sh │ ├── hosts.ini │ ├── reset-site.yaml │ ├── roles │ ├── cni │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── calico-etcd.yml.j2 │ │ │ ├── calico-rbac.yml.j2 │ │ │ ├── calico.yml.j2 │ │ │ ├── flannel-rbac.yml.j2 │ │ │ └── flannel.yml.j2 │ ├── commons │ │ ├── os-checker │ │ │ ├── defaults │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ └── main.yml │ │ └── pre-install │ │ │ ├── meta │ │ │ └── main.yml │ │ │ ├── tasks │ │ │ ├── main.yml │ │ │ └── pkg.yml │ │ │ └── templates │ │ │ └── 20-extra-args.conf.j2 │ ├── docker │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── main.yml │ │ │ └── pkg.yml │ │ └── templates │ │ │ ├── docker.j2 │ │ │ └── docker.service.j2 │ ├── healthcheck │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ ├── helm │ │ ├── files │ │ │ └── rbac-config.yml │ │ └── tasks │ │ │ └── main.yml │ ├── kubernetes │ │ ├── master │ │ │ ├── handlers │ │ │ │ └── main.yml │ │ │ ├── meta │ │ │ │ └── main.yml │ │ │ └── tasks │ │ │ │ ├── init.yml │ │ │ │ └── main.yml │ │ └── node │ │ │ ├── handlers │ │ │ └── main.yml │ │ │ ├── meta │ │ │ └── main.yml │ │ │ └── tasks │ │ │ ├── join.yml │ │ │ └── main.yml │ └── metallb │ │ ├── tasks │ │ └── main.yml │ │ ├── templates │ │ └── metallb-layer-2-config.yml.j2 │ │ └── vars │ │ └── main.yml │ ├── site.yaml │ └── utils │ └── Vagrantfile ├── chapter10 ├── efk │ ├── elastic.yaml │ ├── fluent-bit-values.yaml │ └── kibana.yaml ├── postgres │ ├── cm-postgres.yaml │ ├── postgres.yaml │ ├── pvc-postgres.yaml │ └── svc-postgres.yaml └── telepresence │ └── index.html ├── chapter2 ├── helm │ ├── crb-helm.yaml │ ├── customhelmrepo.yaml │ ├── install-helm.sh │ ├── mychart │ │ ├── Chart.yaml │ │ ├── mychart-0.1.0.tgz │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── service.yaml │ │ │ └── tests │ │ │ │ └── test-connection.yaml │ │ └── values.yaml │ └── sa-helm.yaml ├── kustomize │ ├── nginx │ │ ├── deployment-nginx.yaml │ │ └── kustomization.yaml │ └── registry │ │ ├── base │ │ ├── deployment-registry.yaml │ │ ├── kustomization.yaml │ │ ├── pvc-registry.yaml │ │ └── service-registry.yaml │ │ └── overlays │ │ ├── dev │ │ └── kustomization.yaml │ │ └── prod │ │ └── kustomization.yaml ├── postgres-operator │ └── ui │ │ └── postgres-ui.yaml └── yaml │ └── deployment-nginx.yaml ├── chapter3 ├── aws │ └── buildspec.yaml └── gcp │ ├── sample-app-v2.tgz │ └── spinnaker-config.yaml ├── chapter4 ├── gremlin │ └── nginx.yaml ├── litmus │ ├── ce-container-kill.yaml │ ├── nginx │ │ ├── nginx.yaml │ │ └── rbac.yaml │ ├── prometheus │ │ ├── cr-prometheus.yaml │ │ ├── deployment-prometheus.yaml │ │ ├── ns-prometheus.yaml │ │ ├── prom-config.yaml │ │ └── svc-prometheus.yaml │ └── sa-container-kill.yaml └── stackstorm │ ├── first_rule.yaml │ └── svc-st2.yaml ├── chapter5 ├── aws │ ├── aws-secret.yaml │ ├── csi │ │ ├── cs-aws-csi-ebs.yaml │ │ ├── pod.yaml │ │ ├── pvc-csi-ebs.yaml │ │ └── secret.yaml │ ├── redis-statefulset.yml │ ├── sc-aws-gp2.yaml │ └── sc-aws-io1-slow.yaml ├── azure │ └── redis-statefulset.yml ├── gcp │ ├── cs-gce-pds-ssd.yaml │ ├── pv-gce-disk-1.yaml │ ├── pvc-gce-disk-1.yaml │ └── redis-statefulset.yml ├── openebs │ ├── minio.yaml │ ├── nfs │ │ ├── crb-openebs-nfs.yaml │ │ ├── openebs-nfs.yaml 
│ │ ├── psp-openebs-nfs.yaml │ │ ├── pvc-openebs.nfs.yaml │ │ ├── sc-openebs-nfs.yaml │ │ └── svc-openebs-nfs.yaml │ ├── sc-cstor.yaml │ └── spc-cstor.yaml └── rook │ ├── mysql.yaml │ ├── nfs.yaml │ ├── toolbox.yaml │ └── wordpress.yaml ├── chapter6 ├── kasten │ ├── myapp.yaml │ ├── ns-backup.yaml │ └── pvc-backup.yaml ├── minio │ └── minio.yaml └── velero │ ├── myapp.yaml │ ├── ns-backup-example.yaml │ └── pvc-backup-example.yaml ├── chapter7 ├── autoheal │ └── minio │ │ ├── minio-livenessprobe.yaml │ │ └── minio.yaml ├── bluegreen │ └── blue-percona.yaml ├── charts │ ├── node │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── charts │ │ │ └── mongodb-7.2.10.tgz │ │ ├── requirements.lock │ │ ├── requirements.yaml │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── mongodb-binding.yaml │ │ │ ├── pvc.yaml │ │ │ └── svc.yaml │ │ └── values.yaml │ ├── todo-dev │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── charts │ │ │ └── mongodb-7.2.10.tgz │ │ ├── requirements.lock │ │ ├── requirements.yaml │ │ ├── templates │ │ │ ├── NOTES.txt │ │ │ ├── _helpers.tpl │ │ │ ├── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ ├── mongodb-binding.yaml │ │ │ ├── pvc.yaml │ │ │ └── svc.yaml │ │ └── values.yaml │ └── todo-prod │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── charts │ │ └── mongodb-7.2.10.tgz │ │ ├── requirements.lock │ │ ├── requirements.yaml │ │ ├── templates │ │ ├── NOTES.txt │ │ ├── _helpers.tpl │ │ ├── deployment.yaml │ │ ├── ingress.yaml │ │ ├── mongodb-binding.yaml │ │ ├── pvc.yaml │ │ └── svc.yaml │ │ └── values.yaml ├── hpa-my-ch7-app.yaml ├── hpav2-my-ch7-app.yaml ├── lb │ ├── minio.yaml │ └── svc-minio.yaml └── linkerd │ ├── emojivoto.yml │ └── ingress-nginx.yaml ├── chapter8 ├── cloudwatch │ ├── cwagent-configmap.yaml │ ├── cwagent-serviceaccount.yaml │ └── cwagent.yaml ├── debug │ ├── minio-liveness.yaml │ ├── mongo-image.yaml │ ├── mongo-sc.yaml │ ├── node-problem-detector.yaml │ ├── sc-gp2.yaml │ ├── termination-image.yaml │ └── termination.yaml └── prometheus │ └── custom-values.yaml └── chapter9 ├── cis ├── job-eks.yaml ├── job-iks.yaml ├── job-master.yaml ├── job-node.yaml └── job.yaml ├── devsecops ├── .circleci │ └── config.yml └── .gitlab-ci.yml ├── falco ├── client.yaml ├── custom_rules.yaml ├── dump.php ├── mysql.yaml └── ping.yaml ├── psp ├── aks-privileged-psp.yaml ├── eks-privileged-psp.yaml ├── gce-privileged-psp.yaml ├── restricted-psp.yaml └── restricted-vol-psp.yaml ├── rbac ├── binding-deployer.yaml ├── config-user3445.yaml └── role-deployer.yaml └── vault └── policy.hcl /README.md: -------------------------------------------------------------------------------- 1 | # src -------------------------------------------------------------------------------- /chapter1/ansible/README.md: -------------------------------------------------------------------------------- 1 | # Kubeadm Ansible Playbook 2 | 3 | Credits: This repository is a clone of the original located (https://github.com/kairen/kubeadm-ansible). Please refer to the original for the latest updates. 4 | 5 | 6 | Build a Kubernetes cluster using Ansible with kubeadm. The goal is easily install a Kubernetes cluster on machines running: 7 | 8 | - Ubuntu 16.04 9 | - CentOS 7 10 | - Debian 9 11 | 12 | System requirements: 13 | 14 | - Deployment environment must have Ansible `2.4.0+` 15 | - Master and nodes must have passwordless SSH access 16 | 17 | # Usage 18 | 19 | Add the system information gathered above into a file called `hosts.ini`. 
For example:
20 | ```
21 | [master]
22 | 192.16.35.12
23 | 
24 | [node]
25 | 192.16.35.[10:11]
26 | 
27 | [kube-cluster:children]
28 | master
29 | node
30 | ```
31 | 
32 | If you're working with Ubuntu, add the `ansible_python_interpreter='python3'` property to each host:
33 | ```
34 | [master]
35 | 192.16.35.12 ansible_python_interpreter='python3'
36 | 
37 | [node]
38 | 192.16.35.[10:11] ansible_python_interpreter='python3'
39 | 
40 | [kube-cluster:children]
41 | master
42 | node
43 | 
44 | ```
45 | 
46 | Before continuing, edit `group_vars/all.yml` to match your desired configuration.
47 | 
48 | For example, I chose to run `flannel` instead of Calico, and thus:
49 | 
50 | ```yaml
51 | # Network implementation('flannel', 'calico')
52 | network: flannel
53 | ```
54 | 
55 | **Note:** Depending on your setup, you may need to modify `cni_opts` to an available network interface. By default, `kubeadm-ansible` uses `eth1`. Your default interface may be `eth0`.
56 | 
57 | After going through the setup, run the `site.yaml` playbook:
58 | 
59 | ```sh
60 | $ ansible-playbook site.yaml
61 | ...
62 | ==> master1: TASK [addon : Create Kubernetes dashboard deployment] **************************
63 | ==> master1: changed: [192.16.35.12 -> 192.16.35.12]
64 | ==> master1: 
65 | ==> master1: PLAY RECAP *********************************************************************
66 | ==> master1: 192.16.35.10 : ok=18 changed=14 unreachable=0 failed=0
67 | ==> master1: 192.16.35.11 : ok=18 changed=14 unreachable=0 failed=0
68 | ==> master1: 192.16.35.12 : ok=34 changed=29 unreachable=0 failed=0
69 | ```
70 | 
71 | The playbook will download the `/etc/kubernetes/admin.conf` file to `$HOME/admin.conf`.
72 | 
73 | If that doesn't work, download `admin.conf` from the master node:
74 | 
75 | ```sh
76 | $ scp k8s@k8s-master:/etc/kubernetes/admin.conf .
77 | ```
78 | 
79 | Verify that the cluster is fully up and running using kubectl:
80 | 
81 | ```sh
82 | 
83 | $ export KUBECONFIG=~/admin.conf
84 | $ kubectl get node
85 | NAME      STATUS    AGE       VERSION
86 | master1   Ready     22m       v1.6.3
87 | node1     Ready     20m       v1.6.3
88 | node2     Ready     20m       v1.6.3
89 | 
90 | $ kubectl get po -n kube-system
91 | NAME           READY     STATUS    RESTARTS   AGE
92 | etcd-master1   1/1       Running   0          23m
93 | ...
94 | ```
95 | 
96 | # Resetting the environment
97 | 
98 | Finally, reset all kubeadm-installed state using the `reset-site.yaml` playbook:
99 | 
100 | ```sh
101 | $ ansible-playbook reset-site.yaml
102 | ```
103 | 
104 | # Additional features
105 | These are optional features that you may want to install to make your life easier.
106 | 
107 | Enable/disable these features in `group_vars/all.yml` (all disabled by default):
108 | ```
109 | # Additional feature to install
110 | additional_features:
111 |   helm: false
112 |   metallb: false
113 |   healthcheck: false
114 | ```
115 | 
116 | ## Helm
117 | This will install Helm (https://helm.sh/) in your cluster so you can deploy charts.
118 | 
119 | ## MetalLB
120 | This will install MetalLB (https://metallb.universe.tf/), which is very useful if you deploy the cluster locally and need a load balancer to access the services.
121 | 
122 | ## Healthcheck
123 | This will install k8s-healthcheck (https://github.com/emrekenci/k8s-healthcheck), a small application to report cluster status.
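Once a feature is enabled in `group_vars/all.yml`, you can re-run just that role through the tags defined in `site.yaml` rather than replaying the whole site. A minimal sketch (the verification commands assume the fetched `admin.conf` described above; the `metallb-system` namespace comes from this repository's MetalLB ConfigMap template):

```sh
# Re-run a single additional feature by its tag
$ ansible-playbook site.yaml --tags metallb

# Spot-check the result
$ export KUBECONFIG=~/admin.conf
$ kubectl get pods -n metallb-system   # MetalLB controller/speaker pods
$ helm version                         # if additional_features.helm is enabled
```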
124 | 125 | # Utils 126 | Collection of scripts/utilities 127 | 128 | ## Vagrantfile 129 | This Vagrantfile is taken from https://github.com/ecomm-integration-ballerina/kubernetes-cluster and slightly modified to copy ssh keys inside the cluster (install https://github.com/dotless-de/vagrant-vbguest is highly recommended) 130 | 131 | # Tips & Tricks 132 | If you use vagrant or your remote user is root, add this to `hosts.ini` 133 | ``` 134 | [master] 135 | 192.16.35.12 ansible_user='root' 136 | 137 | [node] 138 | 192.16.35.[10:11] ansible_user='root' 139 | ``` 140 | -------------------------------------------------------------------------------- /chapter1/ansible/Vagrantfile: -------------------------------------------------------------------------------- 1 | Vagrant.require_version ">= 1.7.0" 2 | 3 | $os_image = (ENV['OS_IMAGE'] || "ubuntu16").to_sym 4 | 5 | def set_vbox(vb, config) 6 | vb.gui = false 7 | vb.memory = 2048 8 | vb.cpus = 1 9 | 10 | case $os_image 11 | when :centos7 12 | config.vm.box = "bento/centos-7.2" 13 | when :ubuntu16 14 | config.vm.box = "bento/ubuntu-16.04" 15 | end 16 | end 17 | 18 | Vagrant.configure("2") do |config| 19 | config.vm.provider "virtualbox" 20 | master = 1 21 | node = 2 22 | 23 | private_count = 10 24 | (1..(master + node)).each do |mid| 25 | name = (mid <= node) ? "n" : "m" 26 | id = (mid <= node) ? mid : (mid - node) 27 | 28 | config.vm.define "k8s-#{name}#{id}" do |n| 29 | n.vm.hostname = "k8s-#{name}#{id}" 30 | ip_addr = "192.16.35.#{private_count}" 31 | n.vm.network :private_network, ip: "#{ip_addr}", auto_config: true 32 | 33 | n.vm.provider :virtualbox do |vb, override| 34 | vb.name = "#{n.vm.hostname}" 35 | set_vbox(vb, override) 36 | end 37 | private_count += 1 38 | end 39 | end 40 | 41 | # Install of dependency packages using script 42 | config.vm.provision :shell, path: "./hack/setup-vms.sh" 43 | end 44 | -------------------------------------------------------------------------------- /chapter1/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = ./roles 3 | inventory = ./hosts.ini 4 | 5 | remote_tmp = $HOME/.ansible/tmp 6 | local_tmp = $HOME/.ansible/tmp 7 | pipelining = True 8 | become = True 9 | host_key_checking = False 10 | deprecation_warnings = False 11 | callback_whitelist = profile_tasks 12 | -------------------------------------------------------------------------------- /chapter1/ansible/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | # Ansible 2 | # ansible_user: root 3 | 4 | # Kubernetes 5 | kube_version: v1.14.0 6 | token: b0f7b8.8d1767876297d85c 7 | 8 | # 1.8.x feature: --feature-gates SelfHosting=true 9 | init_opts: "" 10 | 11 | # Any other additional opts you want to add.. 12 | kubeadm_opts: "" 13 | # For example: 14 | # kubeadm_opts: '--apiserver-cert-extra-sans "k8s.domain.com,kubernetes.domain.com"' 15 | 16 | service_cidr: "10.96.0.0/12" 17 | pod_network_cidr: "10.244.0.0/16" 18 | 19 | calico_etcd_service: "10.96.232.136" 20 | 21 | # Network implementation('flannel', 'calico') 22 | network: calico 23 | 24 | # Change this to an appropriate interface, preferably a private network. 25 | # For example, on DigitalOcean, you would use eth1 as that is the default private network interface. 
26 | network_interface: ""
27 | 
28 | enable_dashboard: yes
29 | 
30 | # A list of insecure registries you might need to define
31 | # insecure_registries: []
32 | insecure_registries: ['gcr.io']
33 | 
34 | systemd_dir: /lib/systemd/system
35 | system_env_dir: /etc/sysconfig
36 | network_dir: /etc/kubernetes/network
37 | kubeadmin_config: /etc/kubernetes/admin.conf
38 | kube_addon_dir: /etc/kubernetes/addon
39 | 
40 | # Additional feature to install
41 | additional_features:
42 |   helm: false
43 |   metallb: false
44 |   healthcheck: false
45 | 
46 | # temporary directory used by additional features
47 | tmp_dir: /tmp/kubeadm-ansible-files
48 | 
49 | 
-------------------------------------------------------------------------------- /chapter1/ansible/group_vars/kube-cluster.yml: --------------------------------------------------------------------------------
1 | ---
2 | 
3 | master_ip: "{{ hostvars[groups['master'][0]]['ansible_default_ipv4'].address | default(groups['master'][0]) }}"
-------------------------------------------------------------------------------- /chapter1/ansible/hack/setup-vms.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Program: Initial Vagrant setup.
4 | # History: 2017/1/16 Kyle.b Release
5 | 
6 | 
7 | function set_hosts() {
8 |   cat <<EOF > ~/hosts
9 | 127.0.0.1 localhost
10 | ::1 localhost
11 | 
12 | 192.16.35.10 k8s-n1
13 | 192.16.35.11 k8s-n2
14 | 192.16.35.12 k8s-m1
15 | 
16 | EOF
17 | }
18 | 
19 | set -e
20 | HOST_NAME=$(hostname)
21 | OS_NAME=$(awk -F= '/^NAME/{print $2}' /etc/os-release | grep -o "\w*"| head -n 1)
22 | 
23 | if [ ${HOST_NAME} == "k8s-m1" ]; then
24 |   case "${OS_NAME}" in
25 |   "CentOS")
26 |     sudo yum install -y epel-release
27 |     sudo yum install -y git ansible sshpass python-netaddr openssl-devel
28 |     ;;
29 |   "Ubuntu")
30 |     sudo sed -i 's/us.archive.ubuntu.com/tw.archive.ubuntu.com/g' /etc/apt/sources.list
31 |     sudo apt-add-repository -y ppa:ansible/ansible
32 |     sudo apt-get update && sudo apt-get install -y ansible git sshpass python-netaddr libssl-dev
33 |     ;;
34 |   *)
35 |     echo "${OS_NAME} is not supported ..."; exit 1
36 |   esac
37 | 
38 |   yes "/root/.ssh/id_rsa" | sudo ssh-keygen -t rsa -N ""
39 |   HOSTS="192.16.35.10 192.16.35.11 192.16.35.12"
40 |   for host in ${HOSTS}; do
41 |     sudo sshpass -p "vagrant" ssh -o StrictHostKeyChecking=no vagrant@${host} "sudo mkdir -p /root/.ssh"
42 |     sudo cat /root/.ssh/id_rsa.pub | \
43 |       sudo sshpass -p "vagrant" ssh -o StrictHostKeyChecking=no vagrant@${host} "sudo tee /root/.ssh/authorized_keys"
44 |   done
45 | 
46 |   cd /vagrant
47 |   set_hosts
48 |   sudo cp ~/hosts /etc/
49 |   sudo ansible-playbook -e network_interface=eth1 site.yaml
50 | else
51 |   set_hosts
52 |   sudo cp ~/hosts /etc/
53 | fi
-------------------------------------------------------------------------------- /chapter1/ansible/hosts.ini: --------------------------------------------------------------------------------
1 | [master]
2 | 192.16.35.12
3 | 
4 | [node]
5 | 192.16.35.[10:11]
6 | 
7 | [kube-cluster:children]
8 | master
9 | node
10 | 
11 | 
-------------------------------------------------------------------------------- /chapter1/ansible/reset-site.yaml: --------------------------------------------------------------------------------
1 | ---
2 | 
3 | - hosts: kube-cluster
4 |   gather_facts: no
5 |   become: yes
6 |   tasks:
7 |     - name: Reset Kubernetes component
8 |       shell: "kubeadm reset --force"
9 |       ignore_errors: True
10 | 
11 |     - name: Delete flannel.1 interface
12 |       command: ip link delete flannel.1
13 | 
when: network == "flannel" 14 | ignore_errors: True 15 | 16 | - name: Delete cni0 interface 17 | command: ip link delete cni0 18 | when: network == "flannel" 19 | ignore_errors: True 20 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/cni/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | calico_cni_opts: "interface={{ network_interface }}" 4 | flannel_cni_opts: "--iface={{ network_interface }}" 5 | 6 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/cni/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create Kubernetes addon directory 4 | file: 5 | path: "{{ network_dir }}" 6 | state: directory 7 | 8 | - name: "Copy {{ network }} YAML files" 9 | template: 10 | src: "{{ item }}" 11 | dest: "{{ network_dir }}/{{ item | basename | regex_replace('\\.j2','') }}" 12 | with_fileglob: 13 | - ../templates/{{ network }}*.j2 14 | 15 | - name: "Check {{ network }} daemonset is working" 16 | shell: kubectl --kubeconfig={{ kubeadmin_config }} get ds --all-namespaces | grep {{ network }} 17 | delegate_to: "{{ groups['master'][0] }}" 18 | run_once: true 19 | register: check_net 20 | ignore_errors: true 21 | changed_when: false 22 | 23 | - name: "Create {{ network }} network daemonset" 24 | when: check_net is failed 25 | command: kubectl apply --kubeconfig={{ kubeadmin_config }} -f {{ network_dir }}/ 26 | delegate_to: "{{ groups['master'][0] }}" 27 | run_once: true 28 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/cni/templates/calico-etcd.yml.j2: -------------------------------------------------------------------------------- 1 | # This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet 2 | # to force it to run on the master even when the master isn't schedulable, and uses 3 | # nodeSelector to ensure it only runs on the master. 4 | apiVersion: extensions/v1beta1 5 | kind: DaemonSet 6 | metadata: 7 | name: calico-etcd 8 | namespace: kube-system 9 | labels: 10 | k8s-app: calico-etcd 11 | spec: 12 | template: 13 | metadata: 14 | labels: 15 | k8s-app: calico-etcd 16 | annotations: 17 | # Mark this pod as a critical add-on; when enabled, the critical add-on scheduler 18 | # reserves resources for critical add-on pods so that they can be rescheduled after 19 | # a failure. This annotation works in tandem with the toleration below. 20 | scheduler.alpha.kubernetes.io/critical-pod: '' 21 | spec: 22 | tolerations: 23 | # This taint is set by all kubelets running `--cloud-provider=external` 24 | # so we should tolerate it to schedule the Calico pods 25 | - key: node.cloudprovider.kubernetes.io/uninitialized 26 | value: "true" 27 | effect: NoSchedule 28 | # Allow this pod to run on the master. 29 | - key: node-role.kubernetes.io/master 30 | effect: NoSchedule 31 | # Allow this pod to be rescheduled while the node is in "critical add-ons only" mode. 32 | # This, along with the annotation above marks this pod as a critical add-on. 33 | - key: CriticalAddonsOnly 34 | operator: Exists 35 | # Only run this pod on the master. 
36 | nodeSelector: 37 | node-role.kubernetes.io/master: "" 38 | hostNetwork: true 39 | containers: 40 | - name: calico-etcd 41 | image: quay.io/coreos/etcd:v3.3.9 42 | env: 43 | - name: CALICO_ETCD_IP 44 | valueFrom: 45 | fieldRef: 46 | fieldPath: status.podIP 47 | command: 48 | - /usr/local/bin/etcd 49 | args: 50 | - --name=calico 51 | - --data-dir=/var/etcd/calico-data 52 | - --advertise-client-urls=http://$(CALICO_ETCD_IP):6666 53 | - --listen-client-urls=http://0.0.0.0:6666 54 | - --listen-peer-urls=http://0.0.0.0:6667 55 | - --auto-compaction-retention=1 56 | volumeMounts: 57 | - name: var-etcd 58 | mountPath: /var/etcd 59 | volumes: 60 | - name: var-etcd 61 | hostPath: 62 | path: /var/etcd 63 | 64 | --- 65 | 66 | # This manifest installs the Service which gets traffic to the Calico 67 | # etcd. 68 | apiVersion: v1 69 | kind: Service 70 | metadata: 71 | labels: 72 | k8s-app: calico-etcd 73 | name: calico-etcd 74 | namespace: kube-system 75 | spec: 76 | # Select the calico-etcd pod running on the master. 77 | selector: 78 | k8s-app: calico-etcd 79 | # This ClusterIP needs to be known in advance, since we cannot rely 80 | # on DNS to get access to etcd. 81 | clusterIP: {{ calico_etcd_service }} 82 | ports: 83 | - port: 6666 84 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/cni/templates/calico-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | # Calico Version v3.3.0 2 | # https://docs.projectcalico.org/v3.3/releases#v3.3.0 3 | 4 | --- 5 | 6 | kind: ClusterRole 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | metadata: 9 | name: calico-kube-controllers 10 | rules: 11 | - apiGroups: 12 | - "" 13 | - extensions 14 | resources: 15 | - pods 16 | - namespaces 17 | - networkpolicies 18 | - nodes 19 | - serviceaccounts 20 | verbs: 21 | - watch 22 | - list 23 | - apiGroups: 24 | - networking.k8s.io 25 | resources: 26 | - networkpolicies 27 | verbs: 28 | - watch 29 | - list 30 | --- 31 | kind: ClusterRoleBinding 32 | apiVersion: rbac.authorization.k8s.io/v1beta1 33 | metadata: 34 | name: calico-kube-controllers 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: ClusterRole 38 | name: calico-kube-controllers 39 | subjects: 40 | - kind: ServiceAccount 41 | name: calico-kube-controllers 42 | namespace: kube-system 43 | 44 | --- 45 | 46 | kind: ClusterRole 47 | apiVersion: rbac.authorization.k8s.io/v1beta1 48 | metadata: 49 | name: calico-node 50 | rules: 51 | - apiGroups: [""] 52 | resources: 53 | - pods 54 | - nodes 55 | - namespaces 56 | verbs: 57 | - get 58 | 59 | --- 60 | 61 | apiVersion: rbac.authorization.k8s.io/v1beta1 62 | kind: ClusterRoleBinding 63 | metadata: 64 | name: calico-node 65 | roleRef: 66 | apiGroup: rbac.authorization.k8s.io 67 | kind: ClusterRole 68 | name: calico-node 69 | subjects: 70 | - kind: ServiceAccount 71 | name: calico-node 72 | namespace: kube-system -------------------------------------------------------------------------------- /chapter1/ansible/roles/cni/templates/flannel-rbac.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | metadata: 5 | name: flannel 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - nodes 17 | verbs: 18 | - list 19 | - watch 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - nodes/status 24 | verbs: 25 | - 
patch 26 | --- 27 | kind: ClusterRoleBinding 28 | apiVersion: rbac.authorization.k8s.io/v1beta1 29 | metadata: 30 | name: flannel 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: flannel 35 | subjects: 36 | - kind: ServiceAccount 37 | name: flannel 38 | namespace: kube-system 39 | --- 40 | apiVersion: v1 41 | kind: ServiceAccount 42 | metadata: 43 | name: flannel 44 | namespace: kube-system 45 | --- 46 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/commons/os-checker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | systemd_dir: /lib/systemd/system 4 | system_env_dir: /etc/sysconfig 5 | network_dir: /etc/kubernetes/network 6 | kubeadmin_config: /etc/kubernetes/admin.conf 7 | kube_addon_dir: /etc/kubernetes/addon 8 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/commons/os-checker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Fact os vars 3 | 4 | - name: Get os_version from /etc/os-release 5 | when: ansible_os_family is not defined 6 | raw: "grep '^VERSION_ID=' /etc/os-release | sed s'/VERSION_ID=//'" 7 | register: os_version 8 | changed_when: False 9 | 10 | - name: Get distro name from /etc/os-release 11 | when: ansible_os_family is not defined 12 | raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'" 13 | register: distro 14 | changed_when: False 15 | 16 | - name: Set fact ansible_os_family var to Debian 17 | when: 18 | - ansible_os_family is not defined 19 | - "'Debian' in distro.stdout" 20 | set_fact: 21 | ansible_os_family: Debian 22 | 23 | - name: Set fact ansible_os_family var to Debian 24 | when: 25 | - ansible_os_family is not defined 26 | - "'Ubuntu' in distro.stdout" 27 | set_fact: 28 | ansible_os_family: Debian 29 | 30 | - name: Set fact ansible_os_family var to RedHat 31 | when: 32 | - ansible_os_family is not defined 33 | - "'CentOS' in distro.stdout" 34 | set_fact: 35 | ansible_os_family: RedHat 36 | 37 | - name: Override config file directory for Debian 38 | when: ansible_os_family == "Debian" 39 | set_fact: 40 | system_env_dir: "/etc/default" 41 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/commons/pre-install/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: commons/os-checker } 4 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/commons/pre-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install Kubernetes packages 4 | include_tasks: pkg.yml 5 | 6 | - name: Disable system swap 7 | shell: "swapoff -a" 8 | 9 | - name: Remove current swaps from fstab 10 | lineinfile: 11 | dest: /etc/fstab 12 | regexp: '(?i)^([^#][\S]+\s+(none|swap)\s+swap.*)' 13 | line: '# \1' 14 | backrefs: yes 15 | state: present 16 | 17 | - name: Disable swappiness and pass bridged IPv4 traffic to iptable's chains 18 | sysctl: 19 | name: "{{ item.name }}" 20 | value: "{{ item.value }}" 21 | state: present 22 | with_items: 23 | - { name: 'vm.swappiness', value: '0' } 24 | - { name: 'net.bridge.bridge-nf-call-iptables', value: '1' } 25 | 26 | - name: Create service drop-in directory 27 | file: 28 | path: 
/etc/systemd/system/kubelet.service.d/ 29 | state: directory 30 | owner: "{{ ansible_user | default(ansible_user_id) }}" 31 | group: "{{ ansible_user | default(ansible_user_id) }}" 32 | mode: 0755 33 | 34 | - name: Copy kubeadm conf to drop-in directory 35 | template: src=20-extra-args.conf.j2 dest=/etc/systemd/system/kubelet.service.d/20-extra-args.conf 36 | 37 | - name: Reload kubelet daemon 38 | systemd: 39 | name: kubelet 40 | daemon_reload: yes 41 | enabled: yes 42 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/commons/pre-install/tasks/pkg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Add Kubernetes APT GPG key 4 | when: ansible_os_family == "Debian" 5 | apt_key: 6 | url: https://packages.cloud.google.com/apt/doc/apt-key.gpg 7 | state: present 8 | 9 | - name: Add Kubernetes APT repository 10 | when: ansible_os_family == "Debian" 11 | apt_repository: 12 | repo: deb http://apt.kubernetes.io/ kubernetes-xenial main 13 | state: present 14 | filename: 'kubernetes' 15 | 16 | - name: Add Kubernetes yum repository 17 | when: ansible_os_family == "RedHat" 18 | yum_repository: 19 | name: Kubernetes 20 | description: Kubernetes Repository 21 | file: kubernetes 22 | baseurl: http://yum.kubernetes.io/repos/kubernetes-el7-x86_64 23 | enabled: yes 24 | gpgcheck: no 25 | 26 | - name: Install kubernetes packages (RHEL/CentOS) 27 | when: ansible_os_family == "RedHat" 28 | yum: 29 | name: "{{ item }}-{{ kube_version | replace('v', '') }}" 30 | update_cache: yes 31 | state: installed 32 | with_items: "{{ pkgs }}" 33 | 34 | - name: Install kubernetes packages (Debian/Ubuntu) 35 | when: ansible_os_family == "Debian" 36 | apt: 37 | name: "{{ item }}-{{ kube_version | replace('v', '') }}" 38 | update_cache: yes 39 | state: installed 40 | with_items: "{{ pkgs }}" 41 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/commons/pre-install/templates/20-extra-args.conf.j2: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false" 3 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | docker_version: 17.03 -------------------------------------------------------------------------------- /chapter1/ansible/roles/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: commons/os-checker } 4 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Docker container engine 3 | include_tasks: pkg.yml 4 | 5 | - name: Copy Docker engine service file 6 | register: change_docker 7 | template: 8 | src: "docker.service.j2" 9 | dest: "{{ systemd_dir }}/docker.service" 10 | owner: root 11 | group: root 12 | mode: 0755 13 | 14 | - name: Copy Docker environment config file 15 | template: src=docker.j2 dest={{ system_env_dir }}/docker 16 | 17 | - name: Add any insecure registries to Docker config 18 | when: insecure_registries is defined and insecure_registries | length > 0 19 | lineinfile: dest={{ system_env_dir }}/docker 
regexp=^INSECURE_REGISTRY= line=INSECURE_REGISTRY="{% for reg in insecure_registries %}--insecure-registry={{ reg }} {% endfor %}" 20 | 21 | - name: Add registry to Docker config 22 | when: add_registry is defined and add_registry > 0 23 | lineinfile: dest={{ system_env_dir }}/docker regexp=^ADD_REGISTRY= line=ADD_REGISTRY="{% for reg in add_registry %}--add-registry={{ reg }} {%endfor %}" 24 | 25 | - name: Enable and check Docker service 26 | systemd: 27 | name: docker 28 | daemon_reload: yes 29 | state: started 30 | enabled: yes 31 | register: started_docker 32 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/docker/tasks/pkg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install apt-transport-https 3 | when: ansible_os_family == "Debian" 4 | apt: 5 | name: "apt-transport-https" 6 | state: present 7 | update_cache: yes 8 | 9 | - name: Add Docker APT GPG key 10 | when: ansible_os_family == "Debian" 11 | apt_key: 12 | url: https://download.docker.com/linux/ubuntu/gpg 13 | 14 | - name: Add Docker APT repository 15 | when: ansible_os_family == "Debian" 16 | apt_repository: 17 | repo: deb https://download.docker.com/linux/ubuntu xenial stable 18 | state: present 19 | filename: 'docker' 20 | 21 | - name: Add Docker yum repository 22 | when: ansible_os_family == "RedHat" 23 | yum_repository: 24 | name: Docker 25 | description: Docker Repository 26 | file: docker 27 | baseurl: https://yum.dockerproject.org/repo/main/centos/7/ 28 | enabled: yes 29 | gpgcheck: yes 30 | gpgkey: https://yum.dockerproject.org/gpg 31 | 32 | - name: Install docker engine (RHEL/CentOS) 33 | when: ansible_os_family == "RedHat" 34 | yum: 35 | name: "docker-engine-{{ docker_version }}.*" 36 | state: present 37 | 38 | - name: Install docker engine (Debian/Ubuntu) 39 | when: ansible_os_family == "Debian" 40 | apt: 41 | update_cache: yes 42 | name: "docker-ce={{ docker_version }}*" 43 | state: present 44 | 45 | - name: Hold docker version 46 | when: ansible_os_family == "Debian" 47 | dpkg_selections: 48 | name: docker-ce 49 | selection: hold 50 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/docker/templates/docker.j2: -------------------------------------------------------------------------------- 1 | INSECURE_REGISTRY="" 2 | DOCKER_OPTS="" 3 | {% if ansible_os_family == "RedHat" -%} 4 | DOCKER_STORAGE_OPTIONS="--storage-driver=overlay" 5 | {% endif -%} 6 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/docker/templates/docker.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker Engine 3 | After=network.target 4 | 5 | [Service] 6 | Type=notify 7 | EnvironmentFile=-{{ system_env_dir }}/docker 8 | ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT 9 | ExecStart=/usr/bin/dockerd {% if ansible_os_family == 'Debian' -%} -H fd:// {% endif -%} \ 10 | $OPTIONS \ 11 | $DOCKER_STORAGE_OPTIONS \ 12 | $DOCKER_OPTS \ 13 | $DOCKER_NETWORK_OPTIONS \ 14 | $ADD_REGISTRY \ 15 | $BLOCK_REGISTRY \ 16 | $INSECURE_REGISTRY 17 | 18 | ExecReload=/bin/kill -s HUP $MAINPID 19 | Restart=on-failure 20 | LimitNOFILE=1048576 21 | LimitNPROC=infinity 22 | LimitCORE=infinity 23 | TimeoutStartSec=0 24 | Delegate=yes 25 | KillMode=process 26 | 27 | [Install] 28 | WantedBy=multi-user.target 29 | 
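For orientation: the `docker` role above first renders `docker.j2` into the environment file referenced by the unit's `EnvironmentFile=` line, then patches the `INSECURE_REGISTRY=` line in place with `lineinfile`. With the defaults shipped in `group_vars/all.yml` (`insecure_registries: ['gcr.io']`) on a Debian-family host, where `os-checker` overrides `system_env_dir` to `/etc/default`, the rendered file would look roughly like this sketch:

```sh
# /etc/default/docker (Debian/Ubuntu) or /etc/sysconfig/docker (RHEL/CentOS) -- illustrative
INSECURE_REGISTRY="--insecure-registry=gcr.io "
DOCKER_OPTS=""
# On RedHat hosts, docker.j2 additionally renders:
# DOCKER_STORAGE_OPTIONS="--storage-driver=overlay"
```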
-------------------------------------------------------------------------------- /chapter1/ansible/roles/healthcheck/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Create tmp directory" 3 | file: 4 | path: "{{ tmp_dir }}" 5 | state: directory 6 | mode: 0755 7 | tags: healthcheck 8 | 9 | - name: "Create checkout directory" 10 | file: 11 | path: "{{ tmp_dir }}/healthcheck" 12 | state: directory 13 | mode: 0755 14 | tags: healthcheck 15 | 16 | - name: "Clone git repo" 17 | git: 18 | repo: "{{ healthcheck_git_url }}" 19 | dest: "{{ tmp_dir }}/healthcheck" 20 | tags: healthcheck 21 | 22 | - name: "Install Healthcheck" 23 | shell: "kubectl apply -f {{ tmp_dir }}/healthcheck/kubernetes/" 24 | tags: healthcheck 25 | 26 | - name: "Clean-up" 27 | file: 28 | path: "{{ tmp_dir }}" 29 | state: absent 30 | ignore_errors: yes 31 | tags: healthcheck 32 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/healthcheck/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | healthcheck_git_url: https://github.com/emrekenci/k8s-healthcheck.git 3 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/helm/files/rbac-config.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tiller 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: tiller 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: tiller 18 | namespace: kube-system 19 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/helm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Create tmp directory" 3 | file: 4 | path: "{{ tmp_dir }}" 5 | state: directory 6 | mode: 0755 7 | tags: helm 8 | 9 | - name: "Check if Helm is installed" 10 | shell: command -v helm >/dev/null 2>&1 11 | register: helm_exists 12 | ignore_errors: yes 13 | tags: helm 14 | 15 | - name: "Install Helm" 16 | block: 17 | - name: "Get Helm installer" 18 | get_url: 19 | url: https://raw.githubusercontent.com/helm/helm/master/scripts/get 20 | dest: "{{ tmp_dir }}/get_helm.sh" 21 | mode: 0755 22 | 23 | - name: "Run the installer" 24 | shell: "{{ tmp_dir }}/get_helm.sh" 25 | 26 | when: helm_exists.rc > 0 27 | tags: helm 28 | 29 | - name: "Copy yaml file" 30 | copy: 31 | src: "rbac-config.yml" 32 | dest: "{{ tmp_dir }}/rbac-config.yml" 33 | mode: 0644 34 | tags: helm 35 | 36 | - name: "RBAC configuration" 37 | shell: "kubectl apply -f {{ tmp_dir }}/rbac-config.yml" 38 | tags: helm 39 | 40 | - name: "Init Helm" 41 | shell: "helm init --service-account tiller" 42 | tags: helm 43 | 44 | - name: "Update Helm repo" 45 | shell: "helm repo update" 46 | tags: helm 47 | 48 | - name: "Clean-up" 49 | file: 50 | path: "{{ tmp_dir }}" 51 | state: absent 52 | ignore_errors: yes 53 | tags: helm 54 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/master/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Deploy kubernetes dashboard into cluster 
4 | when: init_cluster and started_kubelet and enable_dashboard 5 | command: | 6 | kubectl --kubeconfig={{ kubeadmin_config }} \ 7 | apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml 8 | register: create_result 9 | until: create_result.rc == 0 10 | retries: 5 11 | delay: 2 12 | ignore_errors: true 13 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/master/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: commons/os-checker } 4 | - { role: commons/pre-install, pkgs: ["kubelet", "kubeadm", "kubectl"] } 5 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/master/tasks/init.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Reset Kubernetes component 4 | shell: "kubeadm reset --force" 5 | register: reset_cluster 6 | 7 | - name: Init Kubernetes cluster 8 | when: reset_cluster is succeeded 9 | shell: | 10 | kubeadm init --service-cidr {{ service_cidr }} \ 11 | --kubernetes-version {{ kube_version }} \ 12 | --pod-network-cidr {{ pod_network_cidr }} \ 13 | --token {{ token }} \ 14 | --apiserver-advertise-address {{ master_ip }} \ 15 | {{ kubeadm_opts }} \ 16 | {{ init_opts }} 17 | register: init_cluster 18 | 19 | - name: Create Kubernetes config directory 20 | file: 21 | path: ".kube/" 22 | state: directory 23 | 24 | - name: Copy admin.conf to Home directory 25 | when: init_cluster is succeeded 26 | copy: 27 | src: "{{ kubeadmin_config }}" 28 | dest: ".kube/config" 29 | owner: "{{ ansible_user | default(ansible_user_id) }}" 30 | group: "{{ ansible_user | default(ansible_user_id) }}" 31 | mode: 0755 32 | remote_src: true 33 | 34 | - name: Deploy kubernetes dashboard into cluster 35 | when: init_cluster is succeeded and enable_dashboard 36 | command: | 37 | kubectl --kubeconfig={{ kubeadmin_config }} \ 38 | apply -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended.yaml 39 | register: create_result 40 | until: create_result.rc == 0 41 | retries: 5 42 | delay: 2 43 | ignore_errors: true 44 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if kubeadm has already run 4 | stat: 5 | path: "/etc/kubernetes/pki/ca.key" 6 | register: kubeadm_ca 7 | 8 | - name: Init cluster if needed 9 | include_tasks: init.yml 10 | when: not kubeadm_ca.stat.exists 11 | run_once: yes 12 | 13 | - name: Enable and check kubelet service 14 | systemd: 15 | name: kubelet 16 | daemon_reload: yes 17 | state: started 18 | enabled: yes 19 | register: started_kubelet 20 | 21 | - name: "Copy config file" 22 | fetch: 23 | src: /etc/kubernetes/admin.conf 24 | dest: "{{ lookup('env', 'HOME') }}/admin.conf" 25 | flat: yes 26 | run_once: yes 27 | ignore_errors: yes 28 | 29 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/node/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Recreate kube-dns 3 | command: kubectl --kubeconfig={{ kubeadmin_config }} -n kube-system delete pods -l k8s-app=kube-dns 4 | delegate_to: "{{ 
groups['master'][0] }}" 5 | run_once: true 6 | ignore_errors: true 7 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/node/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: commons/os-checker } 4 | - { role: commons/pre-install , pkgs: ["kubelet", "kubeadm"] } 5 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/node/tasks/join.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Reset Kubernetes component 4 | shell: "kubeadm reset --force" 5 | register: reset_cluster 6 | 7 | - name: Join to Kubernetes cluster 8 | when: reset_cluster is succeeded 9 | shell: | 10 | kubeadm join --token {{ token }} \ 11 | --discovery-token-unsafe-skip-ca-verification \ 12 | {{ master_ip }}:6443 13 | register: join_cluster 14 | notify: 15 | - Recreate kube-dns 16 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/kubernetes/node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if kubelet.conf exists 4 | stat: 5 | path: "/etc/kubernetes/kubelet.conf" 6 | register: kubelet_conf 7 | 8 | - name: Join to cluster if needed 9 | include_tasks: join.yml 10 | when: not kubelet_conf.stat.exists 11 | 12 | - name: Enable and check kubelet service 13 | systemd: 14 | name: kubelet 15 | daemon_reload: yes 16 | state: started 17 | enabled: yes 18 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/metallb/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Create tmp directory" 3 | file: 4 | path: "{{ tmp_dir }}" 5 | state: directory 6 | mode: 0755 7 | tags: metallb 8 | 9 | - name: "Install MetalLB" 10 | shell: "kubectl apply -f {{ metallb_yaml_url }}" 11 | tags: metallb 12 | 13 | - name: "Create configmap file" 14 | template: 15 | src: metallb-layer-2-config.yml.j2 16 | dest: "{{ tmp_dir }}/metallb-layer-2-config.yml" 17 | tags: metallb 18 | 19 | - name: "Create MetalLB configmap in kubernetes" 20 | shell: "kubectl apply -f {{ tmp_dir }}/metallb-layer-2-config.yml" 21 | tags: metallb 22 | 23 | - name: "Clean-up" 24 | file: 25 | path: "{{ tmp_dir }}" 26 | state: absent 27 | ignore_errors: yes 28 | tags: metallb 29 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/metallb/templates/metallb-layer-2-config.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: metallb-ip-space 10 | protocol: layer2 11 | addresses: 12 | - {{ metallb_address_space }} 13 | -------------------------------------------------------------------------------- /chapter1/ansible/roles/metallb/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | metallb_version: v0.7.3 3 | metallb_yaml_url: "https://raw.githubusercontent.com/google/metallb/{{ metallb_version }}/manifests/metallb.yaml" 4 | metallb_address_space: 192.168.205.200-192.168.205.210 5 | -------------------------------------------------------------------------------- 
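The MetalLB role above renders a Layer 2 address pool from `metallb_address_space` in `roles/metallb/vars/main.yml`. Since this is an ordinary role variable, it can also be overridden at run time with `-e`, which takes the highest precedence in Ansible; a sketch, where the range must be a free block on your local network:

```sh
# Override the announced address pool without editing roles/metallb/vars/main.yml
$ ansible-playbook site.yaml --tags metallb \
    -e metallb_address_space=192.168.1.240-192.168.1.250
```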
/chapter1/ansible/site.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: kube-cluster 3 | gather_facts: yes 4 | become: yes 5 | roles: 6 | - { role: docker, tags: docker } 7 | 8 | - hosts: master 9 | gather_facts: yes 10 | become: yes 11 | roles: 12 | - { role: kubernetes/master, tags: master } 13 | - { role: cni, tags: cni } 14 | 15 | - hosts: node 16 | gather_facts: yes 17 | become: yes 18 | roles: 19 | - { role: kubernetes/node, tags: node } 20 | 21 | - hosts: master 22 | gather_facts: yes 23 | become: yes 24 | tasks: 25 | - name: "Helm role" 26 | include_role: 27 | name: helm 28 | when: "additional_features.helm" 29 | run_once: yes 30 | tags: helm 31 | 32 | - name: "MetalLB role" 33 | include_role: 34 | name: metallb 35 | when: "additional_features.metallb" 36 | run_once: yes 37 | tags: metallb 38 | 39 | - name: "Healthcheck role" 40 | include_role: 41 | name: healthcheck 42 | when: "additional_features.healthcheck" 43 | run_once: yes 44 | tags: healthcheck 45 | 46 | -------------------------------------------------------------------------------- /chapter1/ansible/utils/Vagrantfile: -------------------------------------------------------------------------------- 1 | servers = [ 2 | { 3 | :name => "k8s-nxt-head-1", 4 | :type => "master", 5 | :box => "ubuntu/xenial64", 6 | :box_version => "20180831.0.0", 7 | :eth1 => "192.168.205.16", 8 | :mem => "4096", 9 | :cpu => "2" 10 | }, 11 | { 12 | :name => "k8s-nxt-node-1", 13 | :type => "node", 14 | :box => "ubuntu/xenial64", 15 | :box_version => "20180831.0.0", 16 | :eth1 => "192.168.205.17", 17 | :mem => "4096", 18 | :cpu => "2" 19 | }, 20 | { 21 | :name => "k8s-nxt-node-2", 22 | :type => "node", 23 | :box => "ubuntu/xenial64", 24 | :box_version => "20180831.0.0", 25 | :eth1 => "192.168.205.18", 26 | :mem => "4096", 27 | :cpu => "2" 28 | } 29 | ] 30 | 31 | # This script to install k8s using kubeadm will get executed after a box is provisioned 32 | $configureBox = <<-SCRIPT 33 | # install docker v17.03 34 | # reason for not using docker provision is that it always installs latest version of the docker, but kubeadm requires 17.03 or older 35 | apt-get update 36 | apt-get install -y apt-transport-https ca-certificates curl software-properties-common 37 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - 38 | add-apt-repository "deb https://download.docker.com/linux/$(. 
/etc/os-release; echo "$ID") $(lsb_release -cs) stable"
39 | apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 17.03 | head -1 | awk '{print $3}')
40 | # run docker commands as vagrant user (sudo not required)
41 | usermod -aG docker vagrant
42 | # install kubeadm
43 | apt-get install -y apt-transport-https curl
44 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
45 | cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
46 | deb http://apt.kubernetes.io/ kubernetes-xenial main
47 | EOF
48 | apt-get update
49 | apt-get install -y kubelet kubeadm kubectl
50 | apt-mark hold kubelet kubeadm kubectl
51 | # kubelet requires swap off
52 | swapoff -a
53 | # keep swap off after reboot
54 | sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
55 | # ip of this box
56 | IP_ADDR=`ifconfig enp0s8 | grep Mask | awk '{print $2}'| cut -f2 -d:`
57 | # set node-ip
58 | sudo sed -i "/^[^#]*KUBELET_EXTRA_ARGS=/c\KUBELET_EXTRA_ARGS=--node-ip=$IP_ADDR" /etc/default/kubelet
59 | sudo systemctl restart kubelet
60 | sudo cp /tmp/authorized_keys_root /root/.ssh/authorized_keys
61 | SCRIPT
62 | 
63 | Vagrant.configure("2") do |config|
64 | 
65 |   servers.each do |opts|
66 |     config.vm.define opts[:name] do |config|
67 | 
68 |       config.vm.box = opts[:box]
69 |       config.vm.box_version = opts[:box_version]
70 |       config.vm.hostname = opts[:name]
71 |       config.vm.network :private_network, ip: opts[:eth1]
72 |       config.vm.provision "file", source: "~/.ssh/id_rsa.pub", destination: "/tmp/authorized_keys_root"
73 | 
74 |       config.vm.provider "virtualbox" do |v|
75 | 
76 |         v.name = opts[:name]
77 |         v.customize ["modifyvm", :id, "--groups", "/k8s_nxt"]
78 |         v.customize ["modifyvm", :id, "--memory", opts[:mem]]
79 |         v.customize ["modifyvm", :id, "--cpus", opts[:cpu]]
80 | 
81 |       end
82 | 
83 |       config.vm.provision "shell", inline: $configureBox
84 | 
85 |     end
86 | 
87 |   end
88 | 
89 | end
90 | 
-------------------------------------------------------------------------------- /chapter10/efk/elastic.yaml: --------------------------------------------------------------------------------
1 | apiVersion: elasticsearch.k8s.elastic.co/v1beta1
2 | kind: Elasticsearch
3 | metadata:
4 |   name: elasticsearch
5 |   namespace: logging
6 | spec:
7 |   version: 7.4.2
8 |   nodeSets:
9 |   - name: default
10 |     count: 1
11 |     config:
12 |       node.master: true
13 |       node.data: true
14 |       node.ingest: true
15 |       node.store.allow_mmap: false
16 | 
-------------------------------------------------------------------------------- /chapter10/efk/fluent-bit-values.yaml: --------------------------------------------------------------------------------
1 | 
2 | backend:
3 |   type: es
4 |   es:
5 |     host: elasticsearch-es-http
6 |     port: 9200
7 |     # Optional username credential for Elastic X-Pack access
8 |     http_user: elastic
9 |     # Password for user defined in HTTP_User
10 |     http_passwd: m2zr9fz49zqbkbpksprf4r76
11 |     # Optional TLS encryption to ElasticSearch instance
12 |     tls: "on"
13 |     tls_verify: "off"
14 | 
15 | 
-------------------------------------------------------------------------------- /chapter10/efk/kibana.yaml: --------------------------------------------------------------------------------
1 | apiVersion: kibana.k8s.elastic.co/v1beta1
2 | kind: Kibana
3 | metadata:
4 |   name: mykibana
5 |   namespace: logging
6 | spec:
7 |   version: 7.4.2
8 |   count: 1
9 |   elasticsearchRef:
10 |     name: elasticsearch
11 |   # http:
12 |   #   service:
13 |   #     spec:
14 |   #       type: LoadBalancer
15 | 
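A note on wiring these together: the ECK operator stores the auto-generated password for the built-in `elastic` user in a secret named `<cluster>-es-elastic-user`, which is where a value like the `http_passwd` above comes from. A hedged sketch of retrieving it and deploying Fluent Bit with these values (the `stable/fluent-bit` chart name and Helm 2 style flags reflect the era of this repository and may differ in your environment):

```sh
# Read the password ECK generated for the built-in 'elastic' user
$ kubectl get secret elasticsearch-es-elastic-user -n logging \
    -o go-template='{{.data.elastic | base64decode}}'

# Put it into fluent-bit-values.yaml as http_passwd, then deploy Fluent Bit
$ helm install stable/fluent-bit --name fluent-bit --namespace logging \
    -f fluent-bit-values.yaml
```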
-------------------------------------------------------------------------------- /chapter10/postgres/cm-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: postgres-config 5 | labels: 6 | app: postgres 7 | data: 8 | POSTGRES_DB: postgresdb 9 | POSTGRES_USER: testuser 10 | POSTGRES_PASSWORD: testpassword123 11 | -------------------------------------------------------------------------------- /chapter10/postgres/postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: postgres 5 | spec: 6 | serviceName: "postgres" 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: postgres 11 | template: 12 | metadata: 13 | labels: 14 | app: postgres 15 | spec: 16 | containers: 17 | - name: postgres 18 | image: postgres:latest 19 | envFrom: 20 | - configMapRef: 21 | name: postgres-config 22 | ports: 23 | - containerPort: 5432 24 | name: postgredb 25 | volumeMounts: 26 | - name: postgredb 27 | mountPath: /var/lib/postgresql/data 28 | subPath: postgres 29 | volumeClaimTemplates: 30 | - metadata: 31 | name: postgredb 32 | spec: 33 | accessModes: [ "ReadWriteOnce" ] 34 | storageClassName: openebs-jiva-default 35 | resources: 36 | requests: 37 | storage: 5Gi 38 | -------------------------------------------------------------------------------- /chapter10/postgres/pvc-postgres.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: postgres-pv-claim 5 | labels: 6 | app: postgres 7 | spec: 8 | storageClassName: openebs-jiva-default 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 10G 14 | -------------------------------------------------------------------------------- /chapter10/postgres/svc-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: postgres 5 | labels: 6 | app: postgres 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 5432 11 | selector: 12 | app: postgres 13 | -------------------------------------------------------------------------------- /chapter10/telepresence/index.html: -------------------------------------------------------------------------------- 1 | hello this server runs locally on my laptop 2 | -------------------------------------------------------------------------------- /chapter2/helm/crb-helm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: tiller 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: tiller 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /chapter2/helm/customhelmrepo.yaml: -------------------------------------------------------------------------------- 1 | env: 2 | open: 3 | STORAGE: local 4 | persistence: 5 | enabled: true 6 | accessMode: ReadWriteOnce 7 | size: 10Gi 8 | secret: 9 | BASIC_AUTH_USER: helmcurator 10 | BASIC_AUTH_PASS: myhelmpassword 11 | -------------------------------------------------------------------------------- /chapter2/helm/install-helm.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright The Helm Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # The install script is based off of the MIT-licensed script from glide, 18 | # the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get 19 | 20 | PROJECT_NAME="helm" 21 | TILLER_NAME="tiller" 22 | 23 | : ${USE_SUDO:="true"} 24 | : ${HELM_INSTALL_DIR:="/usr/local/bin"} 25 | 26 | # initArch discovers the architecture for this system. 27 | initArch() { 28 | ARCH=$(uname -m) 29 | case $ARCH in 30 | armv5*) ARCH="armv5";; 31 | armv6*) ARCH="armv6";; 32 | armv7*) ARCH="arm";; 33 | aarch64) ARCH="arm64";; 34 | x86) ARCH="386";; 35 | x86_64) ARCH="amd64";; 36 | i686) ARCH="386";; 37 | i386) ARCH="386";; 38 | esac 39 | } 40 | 41 | # initOS discovers the operating system for this system. 42 | initOS() { 43 | OS=$(echo `uname`|tr '[:upper:]' '[:lower:]') 44 | 45 | case "$OS" in 46 | # Minimalist GNU for Windows 47 | mingw*) OS='windows';; 48 | esac 49 | } 50 | 51 | # runs the given command as root (detects if we are root already) 52 | runAsRoot() { 53 | local CMD="$*" 54 | 55 | if [ $EUID -ne 0 -a $USE_SUDO = "true" ]; then 56 | CMD="sudo $CMD" 57 | fi 58 | 59 | $CMD 60 | } 61 | 62 | # verifySupported checks that the os/arch combination is supported for 63 | # binary builds. 64 | verifySupported() { 65 | local supported="darwin-386\ndarwin-amd64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nwindows-386\nwindows-amd64" 66 | if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then 67 | echo "No prebuilt binary for ${OS}-${ARCH}." 68 | echo "To build from source, go to https://github.com/helm/helm" 69 | exit 1 70 | fi 71 | 72 | if ! type "curl" > /dev/null && ! type "wget" > /dev/null; then 73 | echo "Either curl or wget is required" 74 | exit 1 75 | fi 76 | } 77 | 78 | # checkDesiredVersion checks if the desired version is available. 79 | checkDesiredVersion() { 80 | if [ "x$DESIRED_VERSION" == "x" ]; then 81 | # Get tag from release URL 82 | local latest_release_url="https://github.com/helm/helm/releases/latest" 83 | if type "curl" > /dev/null; then 84 | TAG=$(curl -Ls -o /dev/null -w %{url_effective} $latest_release_url | grep -oE "[^/]+$" ) 85 | elif type "wget" > /dev/null; then 86 | TAG=$(wget $latest_release_url --server-response -O /dev/null 2>&1 | awk '/^ Location: /{DEST=$2} END{ print DEST}' | grep -oE "[^/]+$") 87 | fi 88 | else 89 | TAG=$DESIRED_VERSION 90 | fi 91 | } 92 | 93 | # checkHelmInstalledVersion checks which version of helm is installed and 94 | # if it needs to be changed. 
95 | checkHelmInstalledVersion() { 96 | if [[ -f "${HELM_INSTALL_DIR}/${PROJECT_NAME}" ]]; then 97 | local version=$("${HELM_INSTALL_DIR}/${PROJECT_NAME}" version -c | grep '^Client' | cut -d'"' -f2) 98 | if [[ "$version" == "$TAG" ]]; then 99 | echo "Helm ${version} is already ${DESIRED_VERSION:-latest}" 100 | return 0 101 | else 102 | echo "Helm ${TAG} is available. Changing from version ${version}." 103 | return 1 104 | fi 105 | else 106 | return 1 107 | fi 108 | } 109 | 110 | # downloadFile downloads the latest binary package and also the checksum 111 | # for that binary. 112 | downloadFile() { 113 | HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz" 114 | DOWNLOAD_URL="https://get.helm.sh/$HELM_DIST" 115 | CHECKSUM_URL="$DOWNLOAD_URL.sha256" 116 | HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)" 117 | HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST" 118 | HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256" 119 | echo "Downloading $DOWNLOAD_URL" 120 | if type "curl" > /dev/null; then 121 | curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE" 122 | elif type "wget" > /dev/null; then 123 | wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL" 124 | fi 125 | if type "curl" > /dev/null; then 126 | curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE" 127 | elif type "wget" > /dev/null; then 128 | wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL" 129 | fi 130 | } 131 | 132 | # installFile verifies the SHA256 for the file, then unpacks and 133 | # installs it. 134 | installFile() { 135 | HELM_TMP="$HELM_TMP_ROOT/$PROJECT_NAME" 136 | local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}') 137 | local expected_sum=$(cat ${HELM_SUM_FILE}) 138 | if [ "$sum" != "$expected_sum" ]; then 139 | echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting." 140 | exit 1 141 | fi 142 | 143 | mkdir -p "$HELM_TMP" 144 | tar xf "$HELM_TMP_FILE" -C "$HELM_TMP" 145 | HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/$PROJECT_NAME" 146 | TILLER_TMP_BIN="$HELM_TMP/$OS-$ARCH/$TILLER_NAME" 147 | echo "Preparing to install $PROJECT_NAME and $TILLER_NAME into ${HELM_INSTALL_DIR}" 148 | runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR" 149 | echo "$PROJECT_NAME installed into $HELM_INSTALL_DIR/$PROJECT_NAME" 150 | if [ -x "$TILLER_TMP_BIN" ]; then 151 | runAsRoot cp "$TILLER_TMP_BIN" "$HELM_INSTALL_DIR" 152 | echo "$TILLER_NAME installed into $HELM_INSTALL_DIR/$TILLER_NAME" 153 | else 154 | echo "info: $TILLER_NAME binary was not found in this release; skipping $TILLER_NAME installation" 155 | fi 156 | } 157 | 158 | # fail_trap is executed if an error occurs. 159 | fail_trap() { 160 | result=$? 161 | if [ "$result" != "0" ]; then 162 | if [[ -n "$INPUT_ARGUMENTS" ]]; then 163 | echo "Failed to install $PROJECT_NAME with the arguments provided: $INPUT_ARGUMENTS" 164 | help 165 | else 166 | echo "Failed to install $PROJECT_NAME" 167 | fi 168 | echo -e "\tFor support, go to https://github.com/helm/helm." 169 | fi 170 | cleanup 171 | exit $result 172 | } 173 | 174 | # testVersion tests the installed client to make sure it is working. 175 | testVersion() { 176 | set +e 177 | HELM="$(which $PROJECT_NAME)" 178 | if [ "$?" = "1" ]; then 179 | echo "$PROJECT_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?' 180 | exit 1 181 | fi 182 | set -e 183 | echo "Run '$PROJECT_NAME init' to configure $PROJECT_NAME." 184 | } 185 | 186 | # help provides possible cli installation arguments 187 | help () { 188 | echo "Accepted cli arguments are:" 189 | echo -e "\t[--help|-h ] ->> prints this help" 190 | echo -e "\t[--version|-v <desired_version>] . 
When not defined it defaults to latest" 191 | echo -e "\te.g. --version v2.4.0 or -v latest" 192 | echo -e "\t[--no-sudo] ->> install without sudo" 193 | } 194 | 195 | # cleanup temporary files to avoid https://github.com/helm/helm/issues/2977 196 | cleanup() { 197 | if [[ -d "${HELM_TMP_ROOT:-}" ]]; then 198 | rm -rf "$HELM_TMP_ROOT" 199 | fi 200 | } 201 | 202 | # Execution 203 | 204 | #Stop execution on any error 205 | trap "fail_trap" EXIT 206 | set -e 207 | 208 | # Parsing input arguments (if any) 209 | export INPUT_ARGUMENTS="${@}" 210 | set -u 211 | while [[ $# -gt 0 ]]; do 212 | case $1 in 213 | '--version'|-v) 214 | shift 215 | if [[ $# -ne 0 ]]; then 216 | export DESIRED_VERSION="${1}" 217 | else 218 | echo -e "Please provide the desired version. e.g. --version v2.4.0 or -v latest" 219 | exit 0 220 | fi 221 | ;; 222 | '--no-sudo') 223 | USE_SUDO="false" 224 | ;; 225 | '--help'|-h) 226 | help 227 | exit 0 228 | ;; 229 | *) exit 1 230 | ;; 231 | esac 232 | shift 233 | done 234 | set +u 235 | 236 | initArch 237 | initOS 238 | verifySupported 239 | checkDesiredVersion 240 | if ! checkHelmInstalledVersion; then 241 | downloadFile 242 | installFile 243 | fi 244 | testVersion 245 | cleanup 246 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: mychart 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/mychart-0.1.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8sdevopscookbook/src/3631ebffe7a71e5df5bbae546538ed2128daa5ac/chapter2/helm/mychart/mychart-0.1.0.tgz -------------------------------------------------------------------------------- /chapter2/helm/mychart/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 1. Get the application URL by running these commands: 2 | {{- if .Values.ingress.enabled }} 3 | {{- range $host := .Values.ingress.hosts }} 4 | {{- range .paths }} 5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ . }} 6 | {{- end }} 7 | {{- end }} 8 | {{- else if contains "NodePort" .Values.service.type }} 9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "mychart.fullname" . }}) 10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 11 | echo http://$NODE_IP:$NODE_PORT 12 | {{- else if contains "LoadBalancer" .Values.service.type }} 13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 14 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "mychart.fullname" . }}' 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "mychart.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 16 | echo http://$SERVICE_IP:{{ .Values.service.port }} 17 | {{- else if contains "ClusterIP" .Values.service.type }} 18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "mychart.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") 19 | echo "Visit http://127.0.0.1:8080 to use your application" 20 | kubectl port-forward $POD_NAME 8080:80 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "mychart.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "mychart.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "mychart.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "mychart.labels" -}} 38 | app.kubernetes.io/name: {{ include "mychart.name" . }} 39 | helm.sh/chart: {{ include "mychart.chart" . }} 40 | app.kubernetes.io/instance: {{ .Release.Name }} 41 | {{- if .Chart.AppVersion }} 42 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 43 | {{- end }} 44 | app.kubernetes.io/managed-by: {{ .Release.Service }} 45 | {{- end -}} 46 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "mychart.fullname" . }} 5 | labels: 6 | {{ include "mychart.labels" . | indent 4 }} 7 | spec: 8 | replicas: {{ .Values.replicaCount }} 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: {{ include "mychart.name" . }} 12 | app.kubernetes.io/instance: {{ .Release.Name }} 13 | template: 14 | metadata: 15 | labels: 16 | app.kubernetes.io/name: {{ include "mychart.name" . }} 17 | app.kubernetes.io/instance: {{ .Release.Name }} 18 | spec: 19 | {{- with .Values.imagePullSecrets }} 20 | imagePullSecrets: 21 | {{- toYaml . | nindent 8 }} 22 | {{- end }} 23 | containers: 24 | - name: {{ .Chart.Name }} 25 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 26 | imagePullPolicy: {{ .Values.image.pullPolicy }} 27 | ports: 28 | - name: http 29 | containerPort: 80 30 | protocol: TCP 31 | livenessProbe: 32 | httpGet: 33 | path: / 34 | port: http 35 | readinessProbe: 36 | httpGet: 37 | path: / 38 | port: http 39 | resources: 40 | {{- toYaml .Values.resources | nindent 12 }} 41 | {{- with .Values.nodeSelector }} 42 | nodeSelector: 43 | {{- toYaml . | nindent 8 }} 44 | {{- end }} 45 | {{- with .Values.affinity }} 46 | affinity: 47 | {{- toYaml . 
| nindent 8 }} 48 | {{- end }} 49 | {{- with .Values.tolerations }} 50 | tolerations: 51 | {{- toYaml . | nindent 8 }} 52 | {{- end }} 53 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled -}} 2 | {{- $fullName := include "mychart.fullname" . -}} 3 | apiVersion: extensions/v1beta1 4 | kind: Ingress 5 | metadata: 6 | name: {{ $fullName }} 7 | labels: 8 | {{ include "mychart.labels" . | indent 4 }} 9 | {{- with .Values.ingress.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | spec: 14 | {{- if .Values.ingress.tls }} 15 | tls: 16 | {{- range .Values.ingress.tls }} 17 | - hosts: 18 | {{- range .hosts }} 19 | - {{ . | quote }} 20 | {{- end }} 21 | secretName: {{ .secretName }} 22 | {{- end }} 23 | {{- end }} 24 | rules: 25 | {{- range .Values.ingress.hosts }} 26 | - host: {{ .host | quote }} 27 | http: 28 | paths: 29 | {{- range .paths }} 30 | - path: {{ . }} 31 | backend: 32 | serviceName: {{ $fullName }} 33 | servicePort: http 34 | {{- end }} 35 | {{- end }} 36 | {{- end }} 37 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "mychart.fullname" . }} 5 | labels: 6 | {{ include "mychart.labels" . | indent 4 }} 7 | spec: 8 | type: {{ .Values.service.type }} 9 | ports: 10 | - port: {{ .Values.service.port }} 11 | targetPort: http 12 | protocol: TCP 13 | name: http 14 | selector: 15 | app.kubernetes.io/name: {{ include "mychart.name" . }} 16 | app.kubernetes.io/instance: {{ .Release.Name }} 17 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/templates/tests/test-connection.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: "{{ include "mychart.fullname" . }}-test-connection" 5 | labels: 6 | {{ include "mychart.labels" . | indent 4 }} 7 | annotations: 8 | "helm.sh/hook": test-success 9 | spec: 10 | containers: 11 | - name: wget 12 | image: busybox 13 | command: ['wget'] 14 | args: ['{{ include "mychart.fullname" . }}:{{ .Values.service.port }}'] 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /chapter2/helm/mychart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for mychart. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: nginx 9 | tag: stable 10 | pullPolicy: IfNotPresent 11 | 12 | imagePullSecrets: [] 13 | nameOverride: "" 14 | fullnameOverride: "" 15 | 16 | service: 17 | type: ClusterIP 18 | port: 80 19 | 20 | ingress: 21 | enabled: false 22 | annotations: {} 23 | # kubernetes.io/ingress.class: nginx 24 | # kubernetes.io/tls-acme: "true" 25 | hosts: 26 | - host: chart-example.local 27 | paths: [] 28 | 29 | tls: [] 30 | # - secretName: chart-example-tls 31 | # hosts: 32 | # - chart-example.local 33 | 34 | resources: {} 35 | # We usually recommend not to specify default resources and to leave this as a conscious 36 | # choice for the user. 
This also increases chances charts run on environments with little 37 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 38 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 39 | # limits: 40 | # cpu: 100m 41 | # memory: 128Mi 42 | # requests: 43 | # cpu: 100m 44 | # memory: 128Mi 45 | 46 | nodeSelector: {} 47 | 48 | tolerations: [] 49 | 50 | affinity: {} 51 | -------------------------------------------------------------------------------- /chapter2/helm/sa-helm.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tiller 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /chapter2/kustomize/nginx/deployment-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.7.9 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /chapter2/kustomize/nginx/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - deployment-nginx.yaml 5 | images: 6 | - name: nginx 7 | newName: nginx 8 | newTag: 1.16.0 9 | commonAnnotations: 10 | kubernetes.io/change-cause: "Initial deployment with 1.16.0" 11 | -------------------------------------------------------------------------------- /chapter2/kustomize/registry/base/deployment-registry.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-registry-v1 5 | labels: 6 | app: kube-registry 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: kube-registry 11 | strategy: 12 | type: Recreate 13 | template: 14 | metadata: 15 | labels: 16 | app: kube-registry 17 | spec: 18 | containers: 19 | - image: registry:2 20 | name: registry 21 | volumeMounts: 22 | - name: docker 23 | mountPath: /var/run/docker.sock 24 | - name: registry-storage 25 | mountPath: /var/lib/registry 26 | ports: 27 | - containerPort: 5000 28 | name: registry 29 | - name: registryui 30 | image: hyper/docker-registry-web:latest 31 | ports: 32 | - containerPort: 8080 33 | env: 34 | - name: REGISTRY_URL 35 | value: http://localhost:5000/v2 36 | - name: REGISTRY_NAME 37 | value: cluster-registry 38 | volumes: 39 | - name: docker 40 | hostPath: 41 | path: /var/run/docker.sock 42 | - name: registry-storage 43 | persistentVolumeClaim: 44 | claimName: registry-pvc 45 | -------------------------------------------------------------------------------- /chapter2/kustomize/registry/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - deployment-registry.yaml 5 | - service-registry.yaml 6 | - pvc-registry.yaml 7 | -------------------------------------------------------------------------------- /chapter2/kustomize/registry/base/pvc-registry.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: registry-pvc 5 | labels: 6 | app: kube-registry-pv-claim 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 10G 13 | -------------------------------------------------------------------------------- /chapter2/kustomize/registry/base/service-registry.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-registry 5 | labels: 6 | app: kube-registry 7 | spec: 8 | type: NodePort 9 | ports: 10 | - name: registry 11 | port: 5000 12 | protocol: TCP 13 | nodePort: 30120 14 | - name: registry-ui 15 | port: 80 16 | protocol: TCP 17 | nodePort: 30220 18 | selector: 19 | app: kube-registry 20 | -------------------------------------------------------------------------------- /chapter2/kustomize/registry/overlays/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | bases: 4 | - ../../base 5 | namePrefix: dev- 6 | commonAnnotations: 7 | note: Hello, I am development! 8 | -------------------------------------------------------------------------------- /chapter2/kustomize/registry/overlays/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | bases: 4 | - ../../base 5 | namePrefix: prod- 6 | commonAnnotations: 7 | note: Hello, I am production! 8 | -------------------------------------------------------------------------------- /chapter2/postgres-operator/ui/postgres-ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "apps/v1" 2 | kind: "Deployment" 3 | metadata: 4 | name: "postgres-operator-ui" 5 | namespace: "default" 6 | labels: 7 | application: "postgres-operator-ui" 8 | team: "acid" 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | application: "postgres-operator-ui" 14 | template: 15 | metadata: 16 | labels: 17 | application: "postgres-operator-ui" 18 | team: "acid" 19 | spec: 20 | serviceAccountName: postgres-operator-ui 21 | containers: 22 | - name: "service" 23 | image: muratkarslioglu/postgres-operator-ui:v1.2.0 24 | ports: 25 | - containerPort: 8081 26 | protocol: "TCP" 27 | readinessProbe: 28 | httpGet: 29 | path: "/health" 30 | port: 8081 31 | initialDelaySeconds: 5 32 | timeoutSeconds: 1 33 | resources: 34 | limits: 35 | cpu: "300m" 36 | memory: "3000Mi" 37 | requests: 38 | cpu: "100m" 39 | memory: "100Mi" 40 | env: 41 | - name: "APP_URL" 42 | value: "http://localhost:8081" 43 | - name: "OPERATOR_API_URL" 44 | value: "http://localhost:8080" 45 | - name: "TARGET_NAMESPACE" 46 | value: "default" 47 | - name: "TEAMS" 48 | value: |- 49 | [ 50 | "acid" 51 | ] 52 | - name: "OPERATOR_UI_CONFIG" 53 | value: |- 54 | { 55 | "docs_link":"https://postgres-operator.readthedocs.io/en/latest/", 56 | "dns_format_string": "{1}-{0}.{2}", 57 | "databases_visible": true, 58 | "master_load_balancer_visible": true, 59 | "nat_gateways_visible": false, 60 | "replica_load_balancer_visible": true, 61 | "resources_visible": true, 62 | "users_visible": true, 63 | "postgresql_versions": [ 64 | "11", 65 | "10", 66 | "9.6" 67 | ] 68 | } 69 | --- 70 | apiVersion: "networking.k8s.io/v1beta1" 71 | kind: "Ingress" 72 | 
metadata: 73 | name: "postgres-operator-ui" 74 | namespace: "default" 75 | labels: 76 | application: "postgres-operator-ui" 77 | spec: 78 | rules: 79 | - host: "ui.example.org" 80 | http: 81 | paths: 82 | - backend: 83 | serviceName: "postgres-operator-ui" 84 | servicePort: 80 85 | --- 86 | apiVersion: "v1" 87 | kind: "Service" 88 | metadata: 89 | name: "postgres-operator-ui" 90 | namespace: "default" 91 | labels: 92 | application: "postgres-operator-ui" 93 | spec: 94 | type: "ClusterIP" 95 | selector: 96 | application: "postgres-operator-ui" 97 | ports: 98 | - port: 80 99 | protocol: "TCP" 100 | targetPort: 8081 101 | --- 102 | apiVersion: v1 103 | kind: ServiceAccount 104 | metadata: 105 | name: postgres-operator-ui 106 | namespace: default 107 | 108 | --- 109 | apiVersion: rbac.authorization.k8s.io/v1beta1 110 | kind: ClusterRole 111 | metadata: 112 | name: postgres-operator-ui 113 | rules: 114 | - apiGroups: 115 | - acid.zalan.do 116 | resources: 117 | - postgresqls 118 | verbs: 119 | - create 120 | - delete 121 | - get 122 | - list 123 | - patch 124 | - update 125 | - apiGroups: 126 | - "" 127 | resources: 128 | - pods 129 | verbs: 130 | - get 131 | - list 132 | - watch 133 | - apiGroups: 134 | - "" 135 | resources: 136 | - services 137 | verbs: 138 | - get 139 | - list 140 | - apiGroups: 141 | - apps 142 | resources: 143 | - statefulsets 144 | verbs: 145 | - get 146 | - list 147 | - apiGroups: 148 | - "" 149 | resources: 150 | - namespaces 151 | verbs: 152 | - get 153 | - list 154 | --- 155 | apiVersion: rbac.authorization.k8s.io/v1 156 | kind: ClusterRoleBinding 157 | metadata: 158 | name: postgres-operator-ui 159 | roleRef: 160 | apiGroup: rbac.authorization.k8s.io 161 | kind: ClusterRole 162 | name: postgres-operator-ui 163 | subjects: 164 | - kind: ServiceAccount 165 | # note: the cluster role binding needs to be defined 166 | # for every namespace the operator-ui service account lives in. 167 | name: postgres-operator-ui 168 | namespace: default 169 | 170 | -------------------------------------------------------------------------------- /chapter2/yaml/deployment-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.7.9 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /chapter3/aws/buildspec.yaml: -------------------------------------------------------------------------------- 1 | version: 0.2 2 | phases: 3 | install: 4 | runtime-versions: 5 | docker: 18 6 | pre_build: 7 | commands: 8 | - echo Logging in to Amazon ECR... 9 | - $(aws ecr get-login --no-include-email --region $AWS_DEFAULT_REGION) 10 | build: 11 | commands: 12 | - echo Build started on `date` 13 | - echo Building the Docker image... 14 | - docker build -t $IMAGE_REPO_NAME:$IMAGE_TAG . 15 | - docker tag $IMAGE_REPO_NAME:$IMAGE_TAG $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE_REPO_NAME:$IMAGE_TAG 16 | post_build: 17 | commands: 18 | - echo Build completed on `date` 19 | - echo Pushing the Docker image... 
20 | - docker push $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com/$IMAGE_REPO_NAME:$IMAGE_TAG 21 | -------------------------------------------------------------------------------- /chapter3/gcp/sample-app-v2.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8sdevopscookbook/src/3631ebffe7a71e5df5bbae546538ed2128daa5ac/chapter3/gcp/sample-app-v2.tgz -------------------------------------------------------------------------------- /chapter3/gcp/spinnaker-config.yaml: -------------------------------------------------------------------------------- 1 | gcs: 2 | enabled: true 3 | bucket: devopscookbook-ci-config 4 | project: devopscookbook 5 | jsonKey: '$CICDKEY_JSON' 6 | 7 | dockerRegistries: 8 | - name: gcr 9 | address: https://gcr.io 10 | username: _json_key 11 | password: '$CICDKEY_JSON' 12 | email: youremail@domain.com 13 | 14 | # Disable minio as the default storage backend 15 | minio: 16 | enabled: false 17 | 18 | # Configure Spinnaker to enable GCP services 19 | halyard: 20 | spinnakerVersion: 1.10.2 21 | image: 22 | tag: 1.12.0 23 | additionalScripts: 24 | create: true 25 | data: 26 | enable_gcs_artifacts.sh: |- 27 | \$HAL_COMMAND config artifact gcs account add gcs-devopscookbook --json-path /opt/gcs/key.json 28 | \$HAL_COMMAND config artifact gcs enable 29 | enable_pubsub_triggers.sh: |- 30 | \$HAL_COMMAND config pubsub google enable 31 | \$HAL_COMMAND config pubsub google subscription add gcr-triggers \ 32 | --subscription-name gcr-triggers \ 33 | --json-path /opt/gcs/key.json \ 34 | --project devopscookbook \ 35 | --message-format GCR 36 | -------------------------------------------------------------------------------- /chapter4/gremlin/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: nginx 6 | name: nginx-deployment 7 | namespace: default 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - image: nginx:1.7.9 20 | imagePullPolicy: IfNotPresent 21 | name: nginx 22 | ports: 23 | - containerPort: 80 24 | protocol: TCP 25 | -------------------------------------------------------------------------------- /chapter4/litmus/ce-container-kill.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: litmuschaos.io/v1alpha1 2 | kind: ChaosEngine 3 | metadata: 4 | name: nginx-chaos 5 | namespace: nginx 6 | spec: 7 | annotationCheck: 'true' 8 | engineState: 'active' 9 | appinfo: 10 | appns: 'nginx' 11 | applabel: 'app=nginx' 12 | appkind: 'deployment' 13 | chaosServiceAccount: container-kill-sa 14 | # use retain to keep the job for debug 15 | jobCleanUpPolicy: 'delete' 16 | experiments: 17 | - name: container-kill 18 | spec: 19 | components: 20 | env: 21 | # specify the name of the container to be killed 22 | - name: TARGET_CONTAINER 23 | value: 'nginx' 24 | -------------------------------------------------------------------------------- /chapter4/litmus/nginx/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: nginx 6 | name: nginx-deployment 7 | namespace: default 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | 
containers: 19 | - image: nginx:1.7.9 20 | imagePullPolicy: IfNotPresent 21 | name: nginx 22 | ports: 23 | - containerPort: 80 24 | protocol: TCP 25 | -------------------------------------------------------------------------------- /chapter4/litmus/nginx/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: nginx 6 | labels: 7 | app: nginx 8 | --- 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | metadata: 12 | name: nginx 13 | rules: 14 | - apiGroups: ["", "extensions", "apps", "batch", "litmuschaos.io"] 15 | resources: ["daemonsets", "deployments", "replicasets", "jobs", "pods", "pods/exec", "events", "chaosengines", "chaosexperiments", "chaosresults"] 16 | verbs: ["*"] 17 | --- 18 | kind: ClusterRoleBinding 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | metadata: 21 | name: nginx 22 | subjects: 23 | - kind: ServiceAccount 24 | name: nginx 25 | namespace: default 26 | roleRef: 27 | kind: ClusterRole 28 | name: nginx 29 | apiGroup: rbac.authorization.k8s.io 30 | -------------------------------------------------------------------------------- /chapter4/litmus/prometheus/cr-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: prometheus 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - nodes 9 | - nodes/proxy 10 | - services 11 | - endpoints 12 | - pods 13 | verbs: ["get", "list", "watch"] 14 | - apiGroups: 15 | - extensions 16 | resources: 17 | - ingresses 18 | verbs: ["get", "list", "watch"] 19 | - nonResourceURLs: ["/metrics"] 20 | verbs: ["get"] 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1beta1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: prometheus 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: prometheus 30 | subjects: 31 | - kind: ServiceAccount 32 | name: default 33 | namespace: monitoring 34 | -------------------------------------------------------------------------------- /chapter4/litmus/prometheus/deployment-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: prometheus-deployment 5 | namespace: monitoring 6 | spec: 7 | replicas: 1 8 | template: 9 | metadata: 10 | labels: 11 | app: prometheus-server 12 | spec: 13 | containers: 14 | - name: prometheus 15 | image: prom/prometheus:v2.2.1 16 | args: 17 | - "--config.file=/etc/prometheus/prom-config.yaml" 18 | - "--storage.tsdb.path=/prometheus/" 19 | ports: 20 | - containerPort: 9090 21 | volumeMounts: 22 | - name: prom-config-volume 23 | mountPath: /etc/prometheus/prom-config.yaml 24 | subPath: prom-config.yaml 25 | - name: prometheus-storage-volume 26 | mountPath: /prometheus/ 27 | volumes: 28 | - name: prom-config-volume 29 | configMap: 30 | defaultMode: 420 31 | name: prometheus-config 32 | 33 | - name: prometheus-storage-volume 34 | emptyDir: {} 35 | -------------------------------------------------------------------------------- /chapter4/litmus/prometheus/ns-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | -------------------------------------------------------------------------------- /chapter4/litmus/prometheus/prom-config.yaml: 
-------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 3 | # Attach these labels to any time series or alerts when communicating with 4 | # external systems (federation, remote storage, Alertmanager). 5 | external_labels: 6 | monitor: 'chaos-monitor' 7 | # Scraping Prometheus itself 8 | scrape_configs: 9 | - job_name: 'chaos-metrics' 10 | scrape_interval: 5s 11 | static_configs: 12 | - targets: ['10.23.250.214:8080'] 13 | - job_name: 'kubernetes-service-endpoints' 14 | kubernetes_sd_configs: 15 | - role: endpoints 16 | relabel_configs: 17 | - action: labelmap 18 | regex: __meta_kubernetes_service_label_(.+) 19 | - source_labels: [__meta_kubernetes_namespace] 20 | action: replace 21 | target_label: kubernetes_namespace 22 | - source_labels: [__meta_kubernetes_service_name] 23 | action: replace 24 | target_label: kubernetes_name 25 | -------------------------------------------------------------------------------- /chapter4/litmus/prometheus/svc-prometheus.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-service 5 | namespace: monitoring 6 | annotations: 7 | prometheus.io/scrape: 'true' 8 | prometheus.io/path: / 9 | prometheus.io/port: '8080' 10 | spec: 11 | selector: 12 | app: prometheus-server 13 | type: NodePort 14 | ports: 15 | - port: 8080 16 | targetPort: 9090 17 | nodePort: 30000 18 | -------------------------------------------------------------------------------- /chapter4/litmus/sa-container-kill.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: container-kill-sa 6 | namespace: nginx 7 | labels: 8 | name: container-kill-sa 9 | --- 10 | apiVersion: rbac.authorization.k8s.io/v1beta1 11 | kind: Role 12 | metadata: 13 | name: container-kill-sa 14 | namespace: nginx 15 | labels: 16 | name: container-kill-sa 17 | rules: 18 | - apiGroups: ["","litmuschaos.io","batch","apps"] 19 | resources: ["pods","jobs","daemonsets","pods/exec","pods/log","events","chaosengines","chaosexperiments","chaosresults"] 20 | verbs: ["create","list","get","patch","update","delete"] 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1beta1 23 | kind: RoleBinding 24 | metadata: 25 | name: container-kill-sa 26 | namespace: nginx 27 | labels: 28 | name: container-kill-sa 29 | roleRef: 30 | apiGroup: rbac.authorization.k8s.io 31 | kind: Role 32 | name: container-kill-sa 33 | subjects: 34 | - kind: ServiceAccount 35 | name: container-kill-sa 36 | namespace: nginx 37 | -------------------------------------------------------------------------------- /chapter4/stackstorm/first_rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: "sample_rule_with_webhook" 3 | pack: "examples" 4 | description: "Sample rule dumping webhook payload to a file." 
5 | enabled: true 6 | 7 | trigger: 8 | type: "core.st2.webhook" 9 | parameters: 10 | url: "sample" 11 | 12 | criteria: 13 | trigger.body.name: 14 | pattern: "st2" 15 | type: "equals" 16 | 17 | action: 18 | ref: "core.local" 19 | parameters: 20 | cmd: "echo \"{{trigger.body}}\" >> ~/st2.webhook_sample.out ; sync" 21 | -------------------------------------------------------------------------------- /chapter4/stackstorm/svc-st2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: st2-service 5 | namespace: stackstorm 6 | spec: 7 | type: LoadBalancer 8 | ports: 9 | - port: 80 10 | targetPort: 80 11 | protocol: TCP 12 | selector: 13 | app: st2web 14 | -------------------------------------------------------------------------------- /chapter5/aws/aws-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: aws-secret 5 | namespace: kube-system 6 | stringData: 7 | key_id: "YOUR_KEY_ID_HERE" 8 | access_key: "YOUR_ACCESS_KEY_HERE" 9 | -------------------------------------------------------------------------------- /chapter5/aws/csi/cs-aws-csi-ebs.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: aws-csi-ebs 5 | provisioner: ebs.csi.aws.com 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /chapter5/aws/csi/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mytestapp 5 | spec: 6 | containers: 7 | - name: app 8 | image: centos 9 | command: ["/bin/sh"] 10 | args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"] 11 | volumeMounts: 12 | - name: persistent-storage 13 | mountPath: /data 14 | volumes: 15 | - name: persistent-storage 16 | persistentVolumeClaim: 17 | claimName: csi-ebs-pvc 18 | -------------------------------------------------------------------------------- /chapter5/aws/csi/pvc-csi-ebs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: csi-ebs-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | storageClassName: aws-csi-ebs 9 | resources: 10 | requests: 11 | storage: 4Gi 12 | -------------------------------------------------------------------------------- /chapter5/aws/csi/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: aws-secret 5 | namespace: kube-system 6 | stringData: 7 | key_id: "" 8 | access_key: "" 9 | -------------------------------------------------------------------------------- /chapter5/aws/redis-statefulset.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: rd 5 | labels: 6 | app: redis 7 | spec: 8 | serviceName: "redis" 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: redis 13 | template: 14 | metadata: 15 | labels: 16 | app: redis 17 | spec: 18 | initContainers: 19 | - name: install 20 | image: gcr.io/google_containers/redis-install-3.2.0:e2e 21 | imagePullPolicy: Always 22 | args: 23 | - "--install-into=/opt" 24 | - "--work-dir=/work-dir" 25 | 
volumeMounts: 26 | - name: opt 27 | mountPath: "/opt" 28 | - name: workdir 29 | mountPath: "/work-dir" 30 | - name: bootstrap 31 | image: debian:jessie 32 | command: 33 | - "/work-dir/peer-finder" 34 | args: 35 | - -on-start="/work-dir/on-start.sh" 36 | - "-service=redis" 37 | env: 38 | - name: POD_NAMESPACE 39 | valueFrom: 40 | fieldRef: 41 | apiVersion: v1 42 | fieldPath: metadata.namespace 43 | volumeMounts: 44 | - name: opt 45 | mountPath: "/opt" 46 | - name: workdir 47 | mountPath: "/work-dir" 48 | containers: 49 | - name: redis 50 | image: debian:jessie 51 | ports: 52 | - containerPort: 6379 53 | name: peer 54 | command: 55 | - /opt/redis/redis-server 56 | args: 57 | - /opt/redis/redis.conf 58 | readinessProbe: 59 | exec: 60 | command: 61 | - sh 62 | - -c 63 | - "/opt/redis/redis-cli -h $(hostname) ping" 64 | initialDelaySeconds: 15 65 | timeoutSeconds: 5 66 | volumeMounts: 67 | - name: datadir 68 | mountPath: /data 69 | - name: opt 70 | mountPath: /opt 71 | volumes: 72 | - name: opt 73 | emptyDir: {} 74 | - name: workdir 75 | emptyDir: {} 76 | volumeClaimTemplates: 77 | - metadata: 78 | name: datadir 79 | annotations: 80 | volume.beta.kubernetes.io/storage-class: aws-gp2 81 | spec: 82 | accessModes: [ "ReadWriteOnce" ] 83 | resources: 84 | requests: 85 | storage: 1G 86 | --- 87 | # A headless service to create DNS records 88 | apiVersion: v1 89 | kind: Service 90 | metadata: 91 | annotations: 92 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 93 | name: redis 94 | labels: 95 | app: redis 96 | spec: 97 | ports: 98 | - port: 6379 99 | name: peer 100 | # *.redis.default.svc.cluster.local 101 | clusterIP: None 102 | selector: 103 | app: redis 104 | -------------------------------------------------------------------------------- /chapter5/aws/sc-aws-gp2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: aws-gp2 5 | provisioner: kubernetes.io/aws-ebs 6 | parameters: 7 | type: gp2 8 | fsType: ext4 9 | reclaimPolicy: Retain 10 | allowVolumeExpansion: true 11 | mountOptions: 12 | - debug 13 | volumeBindingMode: Immediate 14 | -------------------------------------------------------------------------------- /chapter5/aws/sc-aws-io1-slow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: aws-io1-slow 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | provisioner: kubernetes.io/aws-ebs 8 | parameters: 9 | type: io1 10 | iopsPerGB: "10" 11 | fsType: ext4 12 | reclaimPolicy: Retain 13 | allowVolumeExpansion: true 14 | -------------------------------------------------------------------------------- /chapter5/azure/redis-statefulset.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: rd 5 | spec: 6 | serviceName: "redis" 7 | replicas: 3 8 | template: 9 | metadata: 10 | labels: 11 | app: redis 12 | spec: 13 | initContainers: 14 | - name: install 15 | image: gcr.io/google_containers/redis-install-3.2.0:e2e 16 | imagePullPolicy: Always 17 | args: 18 | - "--install-into=/opt" 19 | - "--work-dir=/work-dir" 20 | volumeMounts: 21 | - name: opt 22 | mountPath: "/opt" 23 | - name: workdir 24 | mountPath: "/work-dir" 25 | - name: bootstrap 26 | image: debian:jessie 27 | command: 28 | - "/work-dir/peer-finder" 29 | args: 30 | - 
-on-start="/work-dir/on-start.sh" 31 | - "-service=redis" 32 | env: 33 | - name: POD_NAMESPACE 34 | valueFrom: 35 | fieldRef: 36 | apiVersion: v1 37 | fieldPath: metadata.namespace 38 | volumeMounts: 39 | - name: opt 40 | mountPath: "/opt" 41 | - name: workdir 42 | mountPath: "/work-dir" 43 | containers: 44 | - name: redis 45 | image: debian:jessie 46 | ports: 47 | - containerPort: 6379 48 | name: peer 49 | command: 50 | - /opt/redis/redis-server 51 | args: 52 | - /opt/redis/redis.conf 53 | readinessProbe: 54 | exec: 55 | command: 56 | - sh 57 | - -c 58 | - "/opt/redis/redis-cli -h $(hostname) ping" 59 | initialDelaySeconds: 15 60 | timeoutSeconds: 5 61 | volumeMounts: 62 | - name: datadir 63 | mountPath: /data 64 | - name: opt 65 | mountPath: /opt 66 | volumes: 67 | - name: opt 68 | emptyDir: {} 69 | - name: workdir 70 | emptyDir: {} 71 | volumeClaimTemplates: 72 | - metadata: 73 | name: datadir 74 | annotations: 75 | volume.beta.kubernetes.io/storage-class: azure-zrs 76 | spec: 77 | accessModes: [ "ReadWriteOnce" ] 78 | resources: 79 | requests: 80 | storage: 1G 81 | --- 82 | # A headless service to create DNS records 83 | apiVersion: v1 84 | kind: Service 85 | metadata: 86 | annotations: 87 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 88 | name: redis 89 | labels: 90 | app: redis 91 | spec: 92 | ports: 93 | - port: 6379 94 | name: peer 95 | # *.redis.default.svc.cluster.local 96 | clusterIP: None 97 | selector: 98 | app: redis 99 | -------------------------------------------------------------------------------- /chapter5/gcp/cs-gce-pds-ssd.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: gce-pd-ssd 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | provisioner: kubernetes.io/gce-pd 8 | parameters: 9 | type: pd-ssd 10 | reclaimPolicy: Retain 11 | allowVolumeExpansion: true 12 | volumeBindingMode: WaitForFirstConsumer 13 | allowedTopologies: 14 | - matchLabelExpressions: 15 | - key: failure-domain.beta.kubernetes.io/zone 16 | values: 17 | - us-central1-a 18 | - us-central1-b 19 | -------------------------------------------------------------------------------- /chapter5/gcp/pv-gce-disk-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: gce-disk-1 5 | spec: 6 | storageClassName: "" 7 | capacity: 8 | storage: 500G 9 | accessModes: 10 | - ReadWriteOnce 11 | gcePersistentDisk: 12 | pdName: gce-disk-1 13 | fsType: ext4 14 | -------------------------------------------------------------------------------- /chapter5/gcp/pvc-gce-disk-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-gcedisk1 5 | spec: 6 | storageClassName: "" 7 | volumeName: gce-disk-1 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 500G 13 | -------------------------------------------------------------------------------- /chapter5/gcp/redis-statefulset.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: rd 5 | spec: 6 | serviceName: "redis" 7 | replicas: 3 8 | template: 9 | metadata: 10 | labels: 11 | app: redis 12 | spec: 13 | initContainers: 14 | - name: install 15 | image: 
gcr.io/google_containers/redis-install-3.2.0:e2e 16 | imagePullPolicy: Always 17 | args: 18 | - "--install-into=/opt" 19 | - "--work-dir=/work-dir" 20 | volumeMounts: 21 | - name: opt 22 | mountPath: "/opt" 23 | - name: workdir 24 | mountPath: "/work-dir" 25 | - name: bootstrap 26 | image: debian:jessie 27 | command: 28 | - "/work-dir/peer-finder" 29 | args: 30 | - -on-start="/work-dir/on-start.sh" 31 | - "-service=redis" 32 | env: 33 | - name: POD_NAMESPACE 34 | valueFrom: 35 | fieldRef: 36 | apiVersion: v1 37 | fieldPath: metadata.namespace 38 | volumeMounts: 39 | - name: opt 40 | mountPath: "/opt" 41 | - name: workdir 42 | mountPath: "/work-dir" 43 | containers: 44 | - name: redis 45 | image: debian:jessie 46 | ports: 47 | - containerPort: 6379 48 | name: peer 49 | command: 50 | - /opt/redis/redis-server 51 | args: 52 | - /opt/redis/redis.conf 53 | readinessProbe: 54 | exec: 55 | command: 56 | - sh 57 | - -c 58 | - "/opt/redis/redis-cli -h $(hostname) ping" 59 | initialDelaySeconds: 15 60 | timeoutSeconds: 5 61 | volumeMounts: 62 | - name: datadir 63 | mountPath: /data 64 | - name: opt 65 | mountPath: /opt 66 | volumes: 67 | - name: opt 68 | emptyDir: {} 69 | - name: workdir 70 | emptyDir: {} 71 | volumeClaimTemplates: 72 | - metadata: 73 | name: datadir 74 | annotations: 75 | volume.beta.kubernetes.io/storage-class: gce-pd 76 | spec: 77 | accessModes: [ "ReadWriteOnce" ] 78 | resources: 79 | requests: 80 | storage: 1G 81 | --- 82 | # A headless service to create DNS records 83 | apiVersion: v1 84 | kind: Service 85 | metadata: 86 | annotations: 87 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 88 | name: redis 89 | labels: 90 | app: redis 91 | spec: 92 | ports: 93 | - port: 6379 94 | name: peer 95 | # *.redis.default.svc.cluster.local 96 | clusterIP: None 97 | selector: 98 | app: redis 99 | -------------------------------------------------------------------------------- /chapter5/openebs/minio.yaml: -------------------------------------------------------------------------------- 1 | # For k8s versions before 1.9.0 use apps/v1beta2 and before 1.8.0 use extensions/v1beta1 2 | apiVersion: apps/v1beta2 3 | kind: Deployment 4 | metadata: 5 | # This name uniquely identifies the Deployment 6 | name: minio-deployment 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: minio 11 | strategy: 12 | type: Recreate 13 | template: 14 | metadata: 15 | labels: 16 | # Label is used as selector in the service. 
17 | app: minio 18 | spec: 19 | # Refer to the PVC 20 | volumes: 21 | - name: storage 22 | persistentVolumeClaim: 23 | # Name of the PVC created earlier 24 | claimName: minio-pv-claim 25 | containers: 26 | - name: minio 27 | # Pulls the default Minio image from Docker Hub 28 | image: minio/minio:latest 29 | args: 30 | - server 31 | - /storage 32 | env: 33 | # Minio access key and secret key 34 | - name: MINIO_ACCESS_KEY 35 | value: "minio" 36 | - name: MINIO_SECRET_KEY 37 | value: "minio123" 38 | ports: 39 | - containerPort: 9000 40 | hostPort: 9000 41 | # Mount the volume into the pod 42 | volumeMounts: 43 | - name: storage # must match the volume name, above 44 | mountPath: "/storage" 45 | --- 46 | apiVersion: v1 47 | kind: PersistentVolumeClaim 48 | metadata: 49 | name: minio-pv-claim 50 | labels: 51 | app: minio-storage-claim 52 | spec: 53 | storageClassName: openebs-cstor-default 54 | accessModes: 55 | - ReadWriteOnce 56 | resources: 57 | requests: 58 | storage: 10G 59 | --- 60 | apiVersion: v1 61 | kind: Service 62 | metadata: 63 | name: minio-service 64 | spec: 65 | type: LoadBalancer 66 | ports: 67 | - port: 9000 68 | nodePort: 32701 69 | protocol: TCP 70 | selector: 71 | app: minio 72 | sessionAffinity: None 73 | -------------------------------------------------------------------------------- /chapter5/openebs/nfs/crb-openebs-nfs.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: openebs-nfs-provisioner-runner 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["persistentvolumes"] 8 | verbs: ["get", "list", "watch", "create", "delete"] 9 | - apiGroups: [""] 10 | resources: ["persistentvolumeclaims"] 11 | verbs: ["get", "list", "watch", "update"] 12 | - apiGroups: ["storage.k8s.io"] 13 | resources: ["storageclasses"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: [""] 16 | resources: ["events"] 17 | verbs: ["create", "update", "patch"] 18 | - apiGroups: [""] 19 | resources: ["services", "endpoints"] 20 | verbs: ["get"] 21 | - apiGroups: ["extensions"] 22 | resources: ["podsecuritypolicies"] 23 | resourceNames: ["openebs-nfs-provisioner"] 24 | verbs: ["use"] 25 | --- 26 | kind: ClusterRoleBinding 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | metadata: 29 | name: openebs-run-nfs-provisioner 30 | subjects: 31 | - kind: ServiceAccount 32 | name: openebs-nfs-provisioner 33 | # replace with namespace where provisioner is deployed 34 | namespace: default 35 | roleRef: 36 | kind: ClusterRole 37 | name: openebs-nfs-provisioner-runner 38 | apiGroup: rbac.authorization.k8s.io 39 | --- 40 | kind: Role 41 | apiVersion: rbac.authorization.k8s.io/v1 42 | metadata: 43 | name: openebs-leader-locking-nfs-provisioner 44 | rules: 45 | - apiGroups: [""] 46 | resources: ["endpoints"] 47 | verbs: ["get", "list", "watch", "create", "update", "patch"] 48 | --- 49 | kind: RoleBinding 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | metadata: 52 | name: openebs-leader-locking-nfs-provisioner 53 | subjects: 54 | - kind: ServiceAccount 55 | name: openebs-nfs-provisioner 56 | # replace with namespace where provisioner is deployed 57 | namespace: default 58 | roleRef: 59 | kind: Role 60 | name: openebs-leader-locking-nfs-provisioner 61 | apiGroup: rbac.authorization.k8s.io 62 | -------------------------------------------------------------------------------- /chapter5/openebs/nfs/openebs-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | 
kind: Deployment # Creating deployment for openebs-nfs-provisoner 3 | metadata: 4 | name: openebs-nfs-provisioner 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: openebs-nfs-provisioner 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | app: openebs-nfs-provisioner 16 | spec: 17 | serviceAccount: openebs-nfs-provisioner 18 | containers: 19 | - name: openebs-nfs-provisioner 20 | image: quay.io/kubernetes_incubator/nfs-provisioner:latest 21 | ports: 22 | - name: nfs 23 | containerPort: 2049 24 | - name: mountd 25 | containerPort: 20048 26 | - name: rpcbind 27 | containerPort: 111 28 | - name: rpcbind-udp 29 | containerPort: 111 30 | protocol: UDP 31 | securityContext: 32 | capabilities: 33 | add: 34 | - DAC_READ_SEARCH 35 | - SYS_RESOURCE 36 | args: 37 | - "-provisioner=openebs.io/nfs" # Name of the provisioner 38 | env: 39 | - name: POD_IP 40 | valueFrom: 41 | fieldRef: 42 | fieldPath: status.podIP 43 | - name: SERVICE_NAME 44 | value: openebs-nfs-provisioner 45 | - name: POD_NAMESPACE 46 | valueFrom: 47 | fieldRef: 48 | fieldPath: metadata.namespace 49 | imagePullPolicy: "IfNotPresent" 50 | volumeMounts: 51 | - name: export-volume 52 | mountPath: /export 53 | volumes: 54 | - name: export-volume 55 | persistentVolumeClaim: 56 | claimName: openebspvc 57 | -------------------------------------------------------------------------------- /chapter5/openebs/nfs/psp-openebs-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | name: openebs-nfs-provisioner 5 | spec: 6 | fsGroup: 7 | rule: RunAsAny 8 | allowedCapabilities: 9 | - DAC_READ_SEARCH 10 | - SYS_RESOURCE 11 | runAsUser: 12 | rule: RunAsAny 13 | seLinux: 14 | rule: RunAsAny 15 | supplementalGroups: 16 | rule: RunAsAny 17 | volumes: 18 | - configMap 19 | - downwardAPI 20 | - emptyDir 21 | - persistentVolumeClaim 22 | - secret 23 | - hostPath 24 | -------------------------------------------------------------------------------- /chapter5/openebs/nfs/pvc-openebs.nfs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim # Creating PVC for openebs-nfs-provisoner to mount on it 4 | metadata: 5 | name: openebspvc 6 | spec: 7 | storageClassName: openebs-jiva-default 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: "110G" 13 | -------------------------------------------------------------------------------- /chapter5/openebs/nfs/sc-openebs-nfs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass # Creating storage class for applications to point to openebs-nfs-provisioner 4 | metadata: 5 | name: openebs-nfs 6 | provisioner: openebs.io/nfs 7 | parameters: 8 | mountOptions: "vers=4.1" # TODO: reconcile with StorageClass.mountOptions 9 | -------------------------------------------------------------------------------- /chapter5/openebs/nfs/svc-openebs-nfs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount # Creating a service account for openebs-nfs-provisioner 4 | metadata: 5 | name: openebs-nfs-provisioner 6 | --- 7 | apiVersion: v1 8 | kind: Service # Creating a service for openebs-nfs-provisioner 9 | metadata: 10 | name: openebs-nfs-provisioner 11 | labels: 12 | app: 
openebs-nfs-provisioner 13 | spec: 14 | ports: 15 | - name: nfs 16 | port: 2049 17 | - name: mountd 18 | port: 20048 19 | - name: rpcbind 20 | port: 111 21 | - name: rpcbind-udp 22 | port: 111 23 | protocol: UDP 24 | selector: 25 | app: openebs-nfs-provisioner 26 | -------------------------------------------------------------------------------- /chapter5/openebs/sc-cstor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: openebs-cstor-default 5 | annotations: 6 | openebs.io/cas-type: cstor 7 | cas.openebs.io/config: | 8 | - name: StoragePoolClaim 9 | value: "cstor-disk-pool" 10 | - name: ReplicaCount 11 | value: "3" 12 | provisioner: openebs.io/provisioner-iscsi 13 | -------------------------------------------------------------------------------- /chapter5/openebs/spc-cstor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: openebs.io/v1alpha1 2 | kind: StoragePoolClaim 3 | metadata: 4 | name: cstor-disk-pool 5 | annotations: 6 | cas.openebs.io/config: | 7 | - name: PoolResourceRequests 8 | value: |- 9 | memory: 2Gi 10 | - name: PoolResourceLimits 11 | value: |- 12 | memory: 4Gi 13 | spec: 14 | name: cstor-disk-pool 15 | type: disk 16 | poolSpec: 17 | poolType: striped 18 | blockDevices: 19 | blockDeviceList: 20 | - blockdevice-1c10eb1bb14c94f02a00373f2fa09b93 21 | - blockdevice-77f834edba45b03318d9de5b79af0734 22 | - blockdevice-936911c5c9b0218ed59e64009cc83c8f 23 | -------------------------------------------------------------------------------- /chapter5/rook/mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress-mysql 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 3306 10 | selector: 11 | app: wordpress 12 | tier: mysql 13 | clusterIP: None 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: mysql-pv-claim 19 | labels: 20 | app: wordpress 21 | spec: 22 | storageClassName: rook-ceph-block 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 20Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: wordpress-mysql 33 | labels: 34 | app: wordpress 35 | tier: mysql 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: wordpress 40 | tier: mysql 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: wordpress 47 | tier: mysql 48 | spec: 49 | containers: 50 | - image: mysql:5.6 51 | name: mysql 52 | env: 53 | - name: MYSQL_ROOT_PASSWORD 54 | value: changeme 55 | ports: 56 | - containerPort: 3306 57 | name: mysql 58 | volumeMounts: 59 | - name: mysql-persistent-storage 60 | mountPath: /var/lib/mysql 61 | volumes: 62 | - name: mysql-persistent-storage 63 | persistentVolumeClaim: 64 | claimName: mysql-pv-claim 65 | -------------------------------------------------------------------------------- /chapter5/rook/nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: rook-nfs 5 | --- 6 | # A default storageclass must be present 7 | apiVersion: v1 8 | kind: PersistentVolumeClaim 9 | metadata: 10 | name: nfs-default-claim 11 | namespace: rook-nfs 12 | spec: 13 | accessModes: 14 | - ReadWriteMany 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | --- 19 | apiVersion: 
nfs.rook.io/v1alpha1 20 | kind: NFSServer 21 | metadata: 22 | name: rook-nfs 23 | namespace: rook-nfs 24 | spec: 25 | serviceAccountName: rook-nfs 26 | replicas: 1 27 | exports: 28 | - name: share1 29 | server: 30 | accessMode: ReadWrite 31 | squash: "none" 32 | # A Persistent Volume Claim must be created before creating NFS CRD instance. 33 | persistentVolumeClaim: 34 | claimName: nfs-default-claim 35 | # A key/value list of annotations 36 | annotations: 37 | # key: value 38 | -------------------------------------------------------------------------------- /chapter5/rook/toolbox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rook-ceph-tools 5 | namespace: rook-ceph 6 | labels: 7 | app: rook-ceph-tools 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: rook-ceph-tools 13 | template: 14 | metadata: 15 | labels: 16 | app: rook-ceph-tools 17 | spec: 18 | dnsPolicy: ClusterFirstWithHostNet 19 | containers: 20 | - name: rook-ceph-tools 21 | image: rook/ceph:master 22 | command: ["/tini"] 23 | args: ["-g", "--", "/usr/local/bin/toolbox.sh"] 24 | imagePullPolicy: IfNotPresent 25 | env: 26 | - name: ROOK_ADMIN_SECRET 27 | valueFrom: 28 | secretKeyRef: 29 | name: rook-ceph-mon 30 | key: admin-secret 31 | securityContext: 32 | privileged: true 33 | volumeMounts: 34 | - mountPath: /dev 35 | name: dev 36 | - mountPath: /sys/bus 37 | name: sysbus 38 | - mountPath: /lib/modules 39 | name: libmodules 40 | - name: mon-endpoint-volume 41 | mountPath: /etc/rook 42 | # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021 43 | hostNetwork: true 44 | volumes: 45 | - name: dev 46 | hostPath: 47 | path: /dev 48 | - name: sysbus 49 | hostPath: 50 | path: /sys/bus 51 | - name: libmodules 52 | hostPath: 53 | path: /lib/modules 54 | - name: mon-endpoint-volume 55 | configMap: 56 | name: rook-ceph-mon-endpoints 57 | items: 58 | - key: data 59 | path: mon-endpoints 60 | -------------------------------------------------------------------------------- /chapter5/rook/wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | spec: 8 | ports: 9 | - port: 80 10 | selector: 11 | app: wordpress 12 | tier: frontend 13 | type: LoadBalancer 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: wp-pv-claim 19 | labels: 20 | app: wordpress 21 | spec: 22 | storageClassName: rook-ceph-block 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 20Gi 28 | --- 29 | apiVersion: apps/v1 30 | kind: Deployment 31 | metadata: 32 | name: wordpress 33 | labels: 34 | app: wordpress 35 | tier: frontend 36 | spec: 37 | selector: 38 | matchLabels: 39 | app: wordpress 40 | tier: frontend 41 | strategy: 42 | type: Recreate 43 | template: 44 | metadata: 45 | labels: 46 | app: wordpress 47 | tier: frontend 48 | spec: 49 | containers: 50 | - image: wordpress:4.6.1-apache 51 | name: wordpress 52 | env: 53 | - name: WORDPRESS_DB_HOST 54 | value: wordpress-mysql 55 | - name: WORDPRESS_DB_PASSWORD 56 | value: changeme 57 | ports: 58 | - containerPort: 80 59 | name: wordpress 60 | volumeMounts: 61 | - name: wordpress-persistent-storage 62 | mountPath: /var/www/html 63 | volumes: 64 | - name: wordpress-persistent-storage 65 | persistentVolumeClaim: 66 | claimName: wp-pv-claim 67 | 
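Note: the PersistentVolumeClaims in chapter5/rook/mysql.yaml and chapter5/rook/wordpress.yaml both reference storageClassName: rook-ceph-block, which is not defined anywhere in this repository; it must exist before those manifests are applied. Below is a minimal sketch of the backing pool and StorageClass, following the Rook-Ceph flex-driver examples from the same era as these manifests; the pool name replicapool and the fstype parameter are illustrative assumptions, not part of this repo:

apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool          # assumed pool name; any CephBlockPool name works
  namespace: rook-ceph       # namespace where the Rook-Ceph cluster runs
spec:
  failureDomain: host        # spread replicas across nodes
  replicated:
    size: 3                  # three-way replication
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block      # the name the mysql/wordpress PVCs expect
provisioner: ceph.rook.io/block
parameters:
  blockPool: replicapool     # must match the CephBlockPool above
  clusterNamespace: rook-ceph
  fstype: ext4

Once this StorageClass exists, mysql.yaml and wordpress.yaml can be applied and their PVCs bind as soon as Rook provisions the volumes. Newer Rook releases provision RBD volumes through the rook-ceph.rbd.csi.ceph.com CSI driver instead of the ceph.rook.io/block flex provisioner, with a different parameter set.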
-------------------------------------------------------------------------------- /chapter6/kasten/myapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp 5 | namespace: backup-example 6 | labels: 7 | app: app2backup 8 | spec: 9 | containers: 10 | - name: app 11 | image: centos 12 | command: ["/bin/sh"] 13 | args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"] 14 | volumeMounts: 15 | - name: persistent-storage 16 | mountPath: /data 17 | - name: kanister-sidecar 18 | image: kanisterio/kanister-tools:0.20.0 19 | command: ["bash", "-c"] 20 | args: 21 | - "tail -f /dev/null" 22 | volumeMounts: 23 | - name: persistent-storage 24 | mountPath: /data 25 | volumes: 26 | - name: persistent-storage 27 | persistentVolumeClaim: 28 | claimName: pvc2backup 29 | -------------------------------------------------------------------------------- /chapter6/kasten/ns-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: backup-example 5 | labels: 6 | app: app2backup 7 | -------------------------------------------------------------------------------- /chapter6/kasten/pvc-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc2backup 5 | namespace: backup-example 6 | labels: 7 | app: app2backup 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: openebs-cstor-default 12 | resources: 13 | requests: 14 | storage: 4Gi 15 | -------------------------------------------------------------------------------- /chapter6/minio/minio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: minio 5 | labels: 6 | app: minio 7 | spec: 8 | clusterIP: None 9 | ports: 10 | - port: 9000 11 | name: minio 12 | selector: 13 | app: minio 14 | --- 15 | apiVersion: apps/v1 16 | kind: StatefulSet 17 | metadata: 18 | name: minio 19 | spec: 20 | selector: 21 | matchLabels: 22 | app: minio 23 | serviceName: minio 24 | replicas: 4 25 | template: 26 | metadata: 27 | labels: 28 | app: minio 29 | spec: 30 | containers: 31 | - name: minio 32 | env: 33 | - name: MINIO_ACCESS_KEY 34 | value: "minio" 35 | - name: MINIO_SECRET_KEY 36 | value: "minio123" 37 | image: minio/minio 38 | args: 39 | - server 40 | - http://minio-{0...3}.minio.default.svc.cluster.local/data 41 | ports: 42 | - containerPort: 9000 43 | # These volume mounts are persistent. Each pod in the PetSet 44 | # gets a volume mounted based on this field. 45 | volumeMounts: 46 | - name: data 47 | mountPath: /data 48 | # These are converted to volume claims by the controller 49 | # and mounted at the paths mentioned above. 50 | volumeClaimTemplates: 51 | - metadata: 52 | name: data 53 | spec: 54 | accessModes: 55 | - ReadWriteOnce 56 | resources: 57 | requests: 58 | storage: 10Gi 59 | # Uncomment and add storageClass specific to your requirements below. 
Read more https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 60 | #storageClassName: 61 | --- 62 | apiVersion: v1 63 | kind: Service 64 | metadata: 65 | name: minio-service 66 | spec: 67 | type: LoadBalancer 68 | ports: 69 | - port: 9000 70 | targetPort: 9000 71 | protocol: TCP 72 | selector: 73 | app: minio 74 | -------------------------------------------------------------------------------- /chapter6/velero/myapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: myapp 5 | namespace: backup-example 6 | labels: 7 | app: app2backup 8 | spec: 9 | containers: 10 | - name: app 11 | image: centos 12 | command: ["/bin/sh"] 13 | args: ["-c", "while true; do echo $(date -u) >> /data/out.txt; sleep 5; done"] 14 | volumeMounts: 15 | - name: persistent-storage 16 | mountPath: /data 17 | volumes: 18 | - name: persistent-storage 19 | persistentVolumeClaim: 20 | claimName: pvc2backup 21 | -------------------------------------------------------------------------------- /chapter6/velero/ns-backup-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: backup-example 5 | labels: 6 | app: app2backup 7 | -------------------------------------------------------------------------------- /chapter6/velero/pvc-backup-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc2backup 5 | namespace: backup-example 6 | labels: 7 | app: app2backup 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | storageClassName: aws-csi-ebs 12 | resources: 13 | requests: 14 | storage: 4Gi 15 | -------------------------------------------------------------------------------- /chapter7/autoheal/minio/minio-livenessprobe.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: minio 5 | labels: 6 | app: minio 7 | spec: 8 | clusterIP: None 9 | ports: 10 | - port: 9000 11 | name: minio 12 | selector: 13 | app: minio 14 | --- 15 | apiVersion: apps/v1beta1 16 | kind: StatefulSet 17 | metadata: 18 | name: minio 19 | spec: 20 | serviceName: minio 21 | replicas: 4 22 | template: 23 | metadata: 24 | labels: 25 | app: minio 26 | spec: 27 | containers: 28 | - name: minio 29 | env: 30 | - name: MINIO_ACCESS_KEY 31 | value: "minio" 32 | - name: MINIO_SECRET_KEY 33 | value: "minio123" 34 | image: minio/minio 35 | args: 36 | - server 37 | - http://minio-{0...3}.minio.default.svc.cluster.local/data 38 | ports: 39 | - containerPort: 9000 40 | # These volume mounts are persistent. Each pod in the PetSet 41 | # gets a volume mounted based on this field. 42 | volumeMounts: 43 | - name: data 44 | mountPath: /data 45 | livenessProbe: 46 | httpGet: 47 | path: /minio/health/live 48 | port: 9000 49 | initialDelaySeconds: 120 50 | periodSeconds: 20 51 | # These are converted to volume claims by the controller 52 | # and mounted at the paths mentioned above. 53 | volumeClaimTemplates: 54 | - metadata: 55 | name: data 56 | spec: 57 | accessModes: 58 | - ReadWriteOnce 59 | resources: 60 | requests: 61 | storage: 10G 62 | # Uncomment and add storageClass specific to your requirements below. 
Read more https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 63 | #storageClassName: 64 | --- 65 | apiVersion: v1 66 | kind: Service 67 | metadata: 68 | name: minio-service 69 | spec: 70 | type: LoadBalancer 71 | ports: 72 | - port: 9000 73 | targetPort: 9000 74 | protocol: TCP 75 | selector: 76 | app: minio 77 | -------------------------------------------------------------------------------- /chapter7/autoheal/minio/minio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: minio 5 | labels: 6 | app: minio 7 | spec: 8 | clusterIP: None 9 | ports: 10 | - port: 9000 11 | name: minio 12 | selector: 13 | app: minio 14 | --- 15 | apiVersion: apps/v1beta1 16 | kind: StatefulSet 17 | metadata: 18 | name: minio 19 | spec: 20 | serviceName: minio 21 | replicas: 4 22 | template: 23 | metadata: 24 | labels: 25 | app: minio 26 | spec: 27 | containers: 28 | - name: minio 29 | env: 30 | - name: MINIO_ACCESS_KEY 31 | value: "minio" 32 | - name: MINIO_SECRET_KEY 33 | value: "minio123" 34 | image: minio/minio 35 | args: 36 | - server 37 | - http://minio-{0...3}.minio.default.svc.cluster.local/data 38 | ports: 39 | - containerPort: 9000 40 | # These volume mounts are persistent. Each pod in the PetSet 41 | # gets a volume mounted based on this field. 42 | volumeMounts: 43 | - name: data 44 | mountPath: /data 45 | # These are converted to volume claims by the controller 46 | # and mounted at the paths mentioned above. 47 | volumeClaimTemplates: 48 | - metadata: 49 | name: data 50 | spec: 51 | accessModes: 52 | - ReadWriteOnce 53 | resources: 54 | requests: 55 | storage: 10G 56 | # Uncomment and add storageClass specific to your requirements below. Read more https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 57 | #storageClassName: 58 | --- 59 | apiVersion: v1 60 | kind: Service 61 | metadata: 62 | name: minio-service 63 | spec: 64 | type: LoadBalancer 65 | ports: 66 | - port: 9000 67 | targetPort: 9000 68 | protocol: TCP 69 | selector: 70 | app: minio 71 | -------------------------------------------------------------------------------- /chapter7/bluegreen/blue-percona.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: blue 6 | labels: 7 | name: blue 8 | app: blue 9 | spec: 10 | containers: 11 | - resources: 12 | limits: 13 | cpu: 0.5 14 | name: blue 15 | image: percona 16 | args: 17 | - "--ignore-db-dir" 18 | - "lost+found" 19 | env: 20 | - name: MYSQL_ROOT_PASSWORD 21 | value: k8sDem0 22 | ports: 23 | - containerPort: 3306 24 | name: mysql 25 | volumeMounts: 26 | - mountPath: /var/lib/mysql 27 | name: demo-vol1 28 | volumes: 29 | - name: demo-vol1 30 | persistentVolumeClaim: 31 | claimName: demo-vol1-claim 32 | --- 33 | apiVersion: v1 34 | kind: PersistentVolumeClaim 35 | metadata: 36 | name: demo-vol1-claim 37 | spec: 38 | storageClassName: openebs-cstor-default 39 | accessModes: 40 | - ReadWriteOnce 41 | resources: 42 | requests: 43 | storage: 5G 44 | -------------------------------------------------------------------------------- /chapter7/charts/node/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: node 3 | version: 11.2.3 4 | appVersion: 10.16.3 5 | description: Event-driven I/O server-side JavaScript environment based on V8 6 | keywords: 7 | - node 8 | - javascript 9 | - nodejs 10 | - git 11 | 
home: http://nodejs.org/ 12 | sources: 13 | - https://github.com/bitnami/bitnami-docker-node 14 | maintainers: 15 | - name: Bitnami 16 | email: containers@bitnami.com 17 | engine: gotpl 18 | icon: https://bitnami.com/assets/stacks/nodejs/img/nodejs-stack-110x117.png 19 | -------------------------------------------------------------------------------- /chapter7/charts/node/charts/mongodb-7.2.10.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8sdevopscookbook/src/3631ebffe7a71e5df5bbae546538ed2128daa5ac/chapter7/charts/node/charts/mongodb-7.2.10.tgz -------------------------------------------------------------------------------- /chapter7/charts/node/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mongodb 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.2.10 5 | digest: sha256:97b996c785336242f22d81d40e05e4a1ed92969e6052125b4138c87ab7e5ac44 6 | generated: "2019-10-02T23:46:10.841330443Z" 7 | -------------------------------------------------------------------------------- /chapter7/charts/node/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mongodb 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.x.x 5 | condition: mongodb.install 6 | -------------------------------------------------------------------------------- /chapter7/charts/node/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 2 | 1. Get the URL of your Node app by running: 3 | 4 | {{- if contains "NodePort" .Values.service.type }} 5 | 6 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "node.fullname" . }}) 7 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 8 | echo "Node app URL: http://$NODE_IP:$NODE_PORT/" 9 | 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | 12 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 13 | Watch the status with: 'kubectl get svc -w {{ template "node.fullname" . }} --namespace {{ .Release.Namespace }}' 14 | 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "node.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo "Node app URL: http://$SERVICE_IP/" 17 | 18 | {{- else if contains "ClusterIP" .Values.service.type }} 19 | 20 | kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "node.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} 21 | echo "Node app URL: http://127.0.0.1:{{ .Values.service.port }}/" 22 | 23 | {{- end }} 24 | 25 | {{ include "node.checkRollingTags" . }} 26 | -------------------------------------------------------------------------------- /chapter7/charts/node/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "node.fullname" . }} 5 | labels: 6 | app: {{ template "node.name" . }} 7 | chart: {{ template "node.chart" . 
}} 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "node.name" . }} 14 | release: "{{ .Release.Name }}" 15 | replicas: {{ .Values.replicas }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ template "node.name" . }} 20 | chart: {{ template "node.chart" . }} 21 | release: {{ .Release.Name | quote }} 22 | heritage: "{{ .Release.Service }}" 23 | spec: 24 | {{- if .Values.securityContext.enabled }} 25 | securityContext: 26 | fsGroup: {{ .Values.securityContext.fsGroup }} 27 | runAsUser: {{ .Values.securityContext.runAsUser }} 28 | {{- end }} 29 | {{- include "node.imagePullSecrets" . | indent 6 }} 30 | initContainers: 31 | - name: git-clone-repository 32 | image: "{{ template "git.image" . }}" 33 | imagePullPolicy: {{ .Values.git.pullPolicy | quote }} 34 | command: [ '/bin/sh', '-c' , 'git clone {{ .Values.repository }} /app && cd /app && git checkout {{ .Values.revision }}'] 35 | volumeMounts: 36 | - name: app 37 | mountPath: /app 38 | - name: npm-install 39 | image: "{{ template "node.image" . }}" 40 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 41 | workingDir: /app 42 | command: ['/bin/bash', '-c', 'npm install'] 43 | env: 44 | - name: HOME 45 | value: /tmp 46 | volumeMounts: 47 | - name: app 48 | mountPath: /app 49 | {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} 50 | - name: volume-permissions 51 | image: "{{ template "node.volumePermissions.image" . }}" 52 | imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} 53 | command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.path }}"] 54 | securityContext: 55 | runAsUser: 0 56 | resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }} 57 | volumeMounts: 58 | - name: data 59 | mountPath: {{ .Values.persistence.path }} 60 | {{- end }} 61 | containers: 62 | - name: {{ template "node.fullname" . }} 63 | image: "{{ template "node.image" . }}" 64 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 65 | env: 66 | {{- if .Values.mongodb.install }} 67 | - name: DATABASE_HOST 68 | value: {{ template "node.mongodb.fullname" . }} 69 | - name: DATABASE_PORT 70 | value: "27017" 71 | - name: DATABASE_USER 72 | value: {{ .Values.mongodb.mongodbUsername | quote }} 73 | - name: DATABASE_PASSWORD 74 | valueFrom: 75 | secretKeyRef: 76 | name: {{ template "node.mongodb.fullname" . }} 77 | key: mongodb-password 78 | - name: DATABASE_NAME 79 | value: {{ .Values.mongodb.mongodbDatabase | quote }} 80 | - name: DATABASE_CONNECTION_OPTIONS 81 | value: "" 82 | {{ else }} 83 | {{- $type := dict "type" .Values.externaldb.type }} 84 | - name: DATABASE_HOST 85 | valueFrom: 86 | secretKeyRef: 87 | name: {{ template "node.secretName" . }} 88 | key: {{ template "externaldb.host" $type }} 89 | {{- if not .Values.externaldb.broker.serviceInstanceName }} 90 | - name: DATABASE_NAME 91 | valueFrom: 92 | secretKeyRef: 93 | name: {{ template "node.secretName" . }} 94 | key: database 95 | {{ else }} 96 | - name: DATABASE_NAME 97 | value: "" 98 | {{- end }} 99 | - name: DATABASE_PORT 100 | valueFrom: 101 | secretKeyRef: 102 | name: {{ template "node.secretName" . }} 103 | key: {{ template "externaldb.port" $type }} 104 | - name: DATABASE_USER 105 | valueFrom: 106 | secretKeyRef: 107 | name: {{ template "node.secretName" . 
}} 108 | key: {{ template "externaldb.username" $type }} 109 | - name: DATABASE_PASSWORD 110 | valueFrom: 111 | secretKeyRef: 112 | name: {{ template "node.secretName" . }} 113 | key: {{ template "externaldb.password" $type }} 114 | {{- if .Values.externaldb.ssl }} 115 | - name: DATABASE_CONNECTION_OPTIONS 116 | value: "ssl=true" 117 | {{ else }} 118 | - name: DATABASE_CONNECTION_OPTIONS 119 | value: "" 120 | {{- end }} 121 | {{- end }} 122 | - name: DATA_FOLDER 123 | value: "/app" 124 | {{- if .Values.extraEnv }} 125 | {{ toYaml .Values.extraEnv | indent 8 }} 126 | {{- end }} 127 | workingDir: /app 128 | command: ['/bin/bash', '-c', 'npm start'] 129 | ports: 130 | - name: http 131 | containerPort: {{ .Values.applicationPort }} 132 | livenessProbe: 133 | httpGet: 134 | path: / 135 | port: http 136 | initialDelaySeconds: 60 137 | timeoutSeconds: 5 138 | failureThreshold: 6 139 | readinessProbe: 140 | httpGet: 141 | path: / 142 | port: http 143 | initialDelaySeconds: 10 144 | timeoutSeconds: 3 145 | periodSeconds: 5 146 | resources: 147 | {{ toYaml .Values.resources | indent 10 }} 148 | volumeMounts: 149 | - name: app 150 | mountPath: /app 151 | - name: data 152 | mountPath: {{ .Values.persistence.path }} 153 | volumes: 154 | - name: app 155 | emptyDir: {} 156 | - name: data 157 | {{- if .Values.persistence.enabled }} 158 | persistentVolumeClaim: 159 | claimName: {{ template "node.fullname" . }} 160 | {{- else }} 161 | emptyDir: {} 162 | {{- end }} 163 | {{- with .Values.affinity }} 164 | affinity: 165 | {{ toYaml . | indent 8 }} 166 | {{- end }} 167 | -------------------------------------------------------------------------------- /chapter7/charts/node/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled }} 2 | {{- range .Values.ingress.hosts }} 3 | apiVersion: extensions/v1beta1 4 | kind: Ingress 5 | metadata: 6 | name: {{ template "node.fullname" $ }} 7 | labels: 8 | app: {{ template "node.name" $ }} 9 | chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" 10 | release: "{{ $.Release.Name }}" 11 | heritage: "{{ $.Release.Service }}" 12 | annotations: 13 | {{- if .certManager }} 14 | kubernetes.io/tls-acme: "true" 15 | {{- end }} 16 | {{- range $key, $value := .annotations }} 17 | {{ $key }}: {{ $value | quote }} 18 | {{- end }} 19 | spec: 20 | rules: 21 | - host: {{ .name }} 22 | http: 23 | paths: 24 | - path: {{ default "/" .path }} 25 | backend: 26 | serviceName: {{ template "node.fullname" $ }} 27 | servicePort: 80 28 | {{- if .tls }} 29 | tls: 30 | - hosts: 31 | - {{ .name }} 32 | secretName: {{ .tlsSecret }} 33 | {{- end }} 34 | --- 35 | {{- end }} 36 | {{- end }} 37 | 38 | -------------------------------------------------------------------------------- /chapter7/charts/node/templates/mongodb-binding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.externaldb.broker.serviceInstanceName }} 2 | apiVersion: servicecatalog.k8s.io/v1beta1 3 | kind: ServiceBinding 4 | metadata: 5 | name: {{ template "node.mongodb.fullname" . }}-binding 6 | labels: 7 | app: {{ template "node.name" . }} 8 | chart: {{ template "node.chart" . }} 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | spec: 12 | instanceRef: 13 | name: {{ .Values.externaldb.broker.serviceInstanceName }} 14 | secretName: {{ template "node.secretName" . 
}} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /chapter7/charts/node/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.persistence.enabled }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ template "node.fullname" . }} 6 | labels: 7 | app: {{ template "node.name" . }} 8 | chart: {{ template "node.chart" . }} 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | annotations: 12 | volume.alpha.kubernetes.io/storage-class: {{ ternary "default" (include "node.storageClass" .) (empty (include "node.storageClass" .)) }} 13 | spec: 14 | accessModes: 15 | - {{ .Values.persistence.accessMode | quote }} 16 | resources: 17 | requests: 18 | storage: {{ .Values.persistence.size | quote }} 19 | {{ include "node.storageClass" . }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /chapter7/charts/node/templates/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "node.fullname" . }} 5 | labels: 6 | app: {{ template "node.name" . }} 7 | chart: {{ template "node.chart" . }} 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | annotations: 11 | {{- if .Values.service.annotations }} 12 | {{ toYaml .Values.service.annotations | indent 4 }} 13 | {{- end }} 14 | spec: 15 | type: {{ .Values.service.type }} 16 | {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} 17 | loadBalancerIP: {{ .Values.service.loadBalancerIP }} 18 | {{- end }} 19 | ports: 20 | - name: http 21 | port: {{ .Values.service.port }} 22 | targetPort: http 23 | {{- if .Values.service.nodePort }} 24 | nodePort: {{ .Values.service.nodePort }} 25 | {{- end }} 26 | selector: 27 | app: {{ template "node.name" . }} 28 | release: "{{ .Release.Name }}" 29 | -------------------------------------------------------------------------------- /chapter7/charts/node/values.yaml: -------------------------------------------------------------------------------- 1 | ## Global Docker image parameters 2 | ## Please, note that this will override the image parameters, including dependencies, configured to use the global value 3 | ## Current available global Docker image parameters: imageRegistry and imagePullSecrets 4 | ## 5 | # global: 6 | # imageRegistry: myRegistryName 7 | # imagePullSecrets: 8 | # - myRegistryKeySecretName 9 | # storageClass: myStorageClass 10 | 11 | ## Bitnami node image version 12 | ## ref: https://hub.docker.com/r/bitnami/node/tags/ 13 | ## 14 | image: 15 | registry: docker.io 16 | repository: bitnami/node 17 | tag: 10.16.3-debian-9-r27 18 | ## Specify a imagePullPolicy 19 | ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' 20 | ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images 21 | ## 22 | pullPolicy: IfNotPresent 23 | ## Optionally specify an array of imagePullSecrets. 24 | ## Secrets must be manually created in the namespace. 
25 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 26 | ## 27 | # pullSecrets: 28 | # - myRegistryKeySecretName 29 | 30 | ## String to partially override node.fullname template (will maintain the release name) 31 | ## 32 | # nameOverride: 33 | 34 | ## String to fully override node.fullname template 35 | ## 36 | # fullnameOverride: 37 | 38 | ## Init containers parameters: 39 | ## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. 40 | ## 41 | volumePermissions: 42 | enabled: false 43 | image: 44 | registry: docker.io 45 | repository: bitnami/minideb 46 | tag: stretch 47 | pullPolicy: Always 48 | ## Optionally specify an array of imagePullSecrets. 49 | ## Secrets must be manually created in the namespace. 50 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 51 | ## 52 | # pullSecrets: 53 | # - myRegistryKeySecretName 54 | resources: {} 55 | 56 | ## Bitnami git image version 57 | ## ref: https://hub.docker.com/r/bitnami/git/tags/ 58 | ## 59 | git: 60 | registry: docker.io 61 | repository: bitnami/git 62 | tag: 2.23.0-debian-9-r24 63 | pullPolicy: IfNotPresent 64 | ## Optionally specify an array of imagePullSecrets. 65 | ## Secrets must be manually created in the namespace. 66 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 67 | ## 68 | # pullSecrets: 69 | # - myRegistryKeySecretName 70 | 71 | ## Git repository http/https 72 | ## 73 | repository: https://github.com/bitnami/sample-mean.git 74 | 75 | ## Git repository revision to checkout 76 | ## 77 | revision: master 78 | 79 | ## Specify the number of replicas for the application 80 | ## 81 | replicas: 1 82 | 83 | ## Specify the port where your application will be running 84 | ## 85 | applicationPort: 3000 86 | 87 | # Define custom environment variables to pass to the image here 88 | extraEnv: {} 89 | 90 | ## Kubernetes Service Configuration 91 | service: 92 | ## For minikube, set this to NodePort, elsewhere use LoadBalancer 93 | ## 94 | type: LoadBalancer 95 | port: 80 96 | ## Specify the nodePort value for the LoadBalancer and NodePort service types. 97 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport 98 | ## 99 | # nodePort: 100 | ## Provide any additional annotations which may be required. This can be used to 101 | ## set the LoadBalancer service type to internal only. 
102 | ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer 103 | ## 104 | annotations: {} 105 | # loadBalancerIP: 106 | 107 | ## Enable persistence using Persistent Volume Claims 108 | ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ 109 | ## 110 | persistence: 111 | enabled: false 112 | path: /app/data 113 | ## If defined, volume.beta.kubernetes.io/storage-class: 114 | ## Default: volume.alpha.kubernetes.io/storage-class: default 115 | ## 116 | # storageClass: 117 | accessMode: ReadWriteOnce 118 | size: 1Gi 119 | 120 | ## Configure resource requests and limits 121 | ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ 122 | ## 123 | resources: {} 124 | # limits: 125 | # cpu: 500m 126 | # memory: 512Mi 127 | # requests: 128 | # cpu: 500m 129 | # memory: 512Mi 130 | 131 | ## 132 | ## MongoDB chart configuration 133 | ## 134 | ## https://github.com/helm/charts/blob/master/stable/mongodb/values.yaml 135 | ## 136 | mongodb: 137 | ## Whether to deploy a mongodb server to satisfy the applications database requirements. 138 | ## To use an external database set this to false and configure the externaldb parameters 139 | install: true # Check mongodb chart for configuration values 140 | ## MongoDB custom user and database 141 | ## ref: https://github.com/bitnami/bitnami-docker-mongodb/blob/master/README.md#creating-a-user-and-database-on-first-run 142 | ## 143 | mongodbUsername: user 144 | mongodbDatabase: test_db 145 | mongodbPassword: secret_password 146 | 147 | ## Provision an external database (Only if mongodb.install is false) 148 | ## You can: 149 | ## 1) Pass an already existing Secret with your database credentials 150 | ## 2) Pass an already existing ServiceInstance name and specify the service catalog broker to automatically create a ServiceBinding for your application. 151 | externaldb: 152 | # Set to true if your external database has ssl enabled 153 | ssl: false 154 | # You can use an existing secret containing your database credentials 155 | # Please refer to the respective section in the README to know the details about this secret. 156 | secretName: 157 | # Only if using Kubernetes Service Catalog you can specify the kind of broker used. Available options are osba|gce|aws 158 | type: osba 159 | # If you provide the serviceInstanceName, the chart will create a ServiceBinding for that ServiceInstance 160 | broker: 161 | serviceInstanceName: 162 | 163 | ## Pod Security Context 164 | ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ 165 | ## 166 | securityContext: 167 | enabled: true 168 | fsGroup: 1001 169 | runAsUser: 1001 170 | 171 | ## Configure ingress resource that allow you to access the application. 172 | ## ref: http://kubernetes.io/docs/user-guide/ingress/ 173 | ## 174 | ingress: 175 | ## Set to true to enable ingress record generation 176 | enabled: false 177 | 178 | ## The list of hostnames to be covered with this ingress record. 
179 | ## Most likely this will be just one host, but in the event more hosts are needed, this is an array 180 | hosts: 181 | - name: node.local 182 | 183 | ## Set this to true in order to enable TLS on the ingress record 184 | tls: false 185 | 186 | ## Set this to true in order to add the corresponding annotations for cert-manager 187 | certManager: false 188 | 189 | ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS 190 | tlsSecret: node.local-tls 191 | 192 | ## Ingress annotations done as key:value pairs 193 | ## For a full list of possible ingress annotations, please see 194 | ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/annotations.md 195 | ## 196 | ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set 197 | annotations: 198 | # kubernetes.io/ingress.class: nginx 199 | 200 | secrets: 201 | ## If you're providing your own certificates, please use this to add the certificates as secrets 202 | ## key and certificate should start with -----BEGIN CERTIFICATE----- or 203 | ## -----BEGIN RSA PRIVATE KEY----- 204 | ## 205 | ## name should line up with a tlsSecret set further up 206 | ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set 207 | ## 208 | ## It is also possible to create and manage the certificates outside of this helm chart 209 | ## Please see README.md for more information 210 | # - name: node.local-tls 211 | # key: 212 | # certificate: 213 | 214 | ## Affinity for pod assignment 215 | ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity 216 | ## 217 | affinity: {} 218 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: todo-dev 3 | version: 11.2.3 4 | appVersion: 10.16.3 5 | description: Event-driven I/O server-side JavaScript environment based on V8 6 | keywords: 7 | - node 8 | - javascript 9 | - nodejs 10 | - git 11 | home: http://nodejs.org/ 12 | sources: 13 | - https://github.com/bitnami/bitnami-docker-node 14 | maintainers: 15 | - name: Bitnami 16 | email: containers@bitnami.com 17 | engine: gotpl 18 | icon: https://bitnami.com/assets/stacks/nodejs/img/nodejs-stack-110x117.png 19 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/charts/mongodb-7.2.10.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8sdevopscookbook/src/3631ebffe7a71e5df5bbae546538ed2128daa5ac/chapter7/charts/todo-dev/charts/mongodb-7.2.10.tgz -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mongodb 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.2.10 5 | digest: sha256:97b996c785336242f22d81d40e05e4a1ed92969e6052125b4138c87ab7e5ac44 6 | generated: "2019-10-02T23:46:10.841330443Z" 7 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mongodb 3 | repository: 
https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.x.x 5 | condition: mongodb.install 6 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 2 | 1. Get the URL of your Node app by running: 3 | 4 | {{- if contains "NodePort" .Values.service.type }} 5 | 6 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "node.fullname" . }}) 7 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 8 | echo "Node app URL: http://$NODE_IP:$NODE_PORT/" 9 | 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | 12 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 13 | Watch the status with: 'kubectl get svc -w {{ template "node.fullname" . }} --namespace {{ .Release.Namespace }}' 14 | 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "node.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo "Node app URL: http://$SERVICE_IP/" 17 | 18 | {{- else if contains "ClusterIP" .Values.service.type }} 19 | 20 | kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "node.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} 21 | echo "Node app URL: http://127.0.0.1:{{ .Values.service.port }}/" 22 | 23 | {{- end }} 24 | 25 | {{ include "node.checkRollingTags" . }} 26 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "node.fullname" . }} 5 | labels: 6 | app: {{ template "node.name" . }} 7 | chart: {{ template "node.chart" . }} 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "node.name" . }} 14 | release: "{{ .Release.Name }}" 15 | replicas: {{ .Values.replicas }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ template "node.name" . }} 20 | chart: {{ template "node.chart" . }} 21 | release: {{ .Release.Name | quote }} 22 | heritage: "{{ .Release.Service }}" 23 | spec: 24 | {{- if .Values.securityContext.enabled }} 25 | securityContext: 26 | fsGroup: {{ .Values.securityContext.fsGroup }} 27 | runAsUser: {{ .Values.securityContext.runAsUser }} 28 | {{- end }} 29 | {{- include "node.imagePullSecrets" . | indent 6 }} 30 | initContainers: 31 | - name: git-clone-repository 32 | image: "{{ template "git.image" . }}" 33 | imagePullPolicy: {{ .Values.git.pullPolicy | quote }} 34 | command: [ '/bin/sh', '-c' , 'git clone {{ .Values.repository }} /app && cd /app && git checkout {{ .Values.revision }}'] 35 | volumeMounts: 36 | - name: app 37 | mountPath: /app 38 | - name: npm-install 39 | image: "{{ template "node.image" . 
}}" 40 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 41 | workingDir: /app 42 | command: ['/bin/bash', '-c', 'npm install'] 43 | env: 44 | - name: HOME 45 | value: /tmp 46 | volumeMounts: 47 | - name: app 48 | mountPath: /app 49 | {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} 50 | - name: volume-permissions 51 | image: "{{ template "node.volumePermissions.image" . }}" 52 | imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} 53 | command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.path }}"] 54 | securityContext: 55 | runAsUser: 0 56 | resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }} 57 | volumeMounts: 58 | - name: data 59 | mountPath: {{ .Values.persistence.path }} 60 | {{- end }} 61 | nodeSelector: 62 | environment: "{{ .Values.environment }}" 63 | containers: 64 | - name: {{ template "node.fullname" . }} 65 | image: "{{ template "node.image" . }}" 66 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 67 | env: 68 | {{- if .Values.mongodb.install }} 69 | - name: DATABASE_HOST 70 | value: {{ template "node.mongodb.fullname" . }} 71 | - name: DATABASE_PORT 72 | value: "27017" 73 | - name: DATABASE_USER 74 | value: {{ .Values.mongodb.mongodbUsername | quote }} 75 | - name: DATABASE_PASSWORD 76 | valueFrom: 77 | secretKeyRef: 78 | name: {{ template "node.mongodb.fullname" . }} 79 | key: mongodb-password 80 | - name: DATABASE_NAME 81 | value: {{ .Values.mongodb.mongodbDatabase | quote }} 82 | - name: DATABASE_CONNECTION_OPTIONS 83 | value: "" 84 | {{ else }} 85 | {{- $type := dict "type" .Values.externaldb.type }} 86 | - name: DATABASE_HOST 87 | valueFrom: 88 | secretKeyRef: 89 | name: {{ template "node.secretName" . }} 90 | key: {{ template "externaldb.host" $type }} 91 | {{- if not .Values.externaldb.broker.serviceInstanceName }} 92 | - name: DATABASE_NAME 93 | valueFrom: 94 | secretKeyRef: 95 | name: {{ template "node.secretName" . }} 96 | key: database 97 | {{ else }} 98 | - name: DATABASE_NAME 99 | value: "" 100 | {{- end }} 101 | - name: DATABASE_PORT 102 | valueFrom: 103 | secretKeyRef: 104 | name: {{ template "node.secretName" . }} 105 | key: {{ template "externaldb.port" $type }} 106 | - name: DATABASE_USER 107 | valueFrom: 108 | secretKeyRef: 109 | name: {{ template "node.secretName" . }} 110 | key: {{ template "externaldb.username" $type }} 111 | - name: DATABASE_PASSWORD 112 | valueFrom: 113 | secretKeyRef: 114 | name: {{ template "node.secretName" . 
}} 115 | key: {{ template "externaldb.password" $type }} 116 | {{- if .Values.externaldb.ssl }} 117 | - name: DATABASE_CONNECTION_OPTIONS 118 | value: "ssl=true" 119 | {{ else }} 120 | - name: DATABASE_CONNECTION_OPTIONS 121 | value: "" 122 | {{- end }} 123 | {{- end }} 124 | - name: DATA_FOLDER 125 | value: "/app" 126 | {{- if .Values.extraEnv }} 127 | {{ toYaml .Values.extraEnv | indent 8 }} 128 | {{- end }} 129 | workingDir: /app 130 | command: ['/bin/bash', '-c', 'npm start'] 131 | ports: 132 | - name: http 133 | containerPort: {{ .Values.applicationPort }} 134 | livenessProbe: 135 | httpGet: 136 | path: / 137 | port: http 138 | initialDelaySeconds: 60 139 | timeoutSeconds: 5 140 | failureThreshold: 6 141 | readinessProbe: 142 | httpGet: 143 | path: / 144 | port: http 145 | initialDelaySeconds: 10 146 | timeoutSeconds: 3 147 | periodSeconds: 5 148 | resources: 149 | {{ toYaml .Values.resources | indent 10 }} 150 | volumeMounts: 151 | - name: app 152 | mountPath: /app 153 | - name: data 154 | mountPath: {{ .Values.persistence.path }} 155 | volumes: 156 | - name: app 157 | emptyDir: {} 158 | - name: data 159 | {{- if .Values.persistence.enabled }} 160 | persistentVolumeClaim: 161 | claimName: {{ template "node.fullname" . }} 162 | {{- else }} 163 | emptyDir: {} 164 | {{- end }} 165 | {{- with .Values.affinity }} 166 | affinity: 167 | {{ toYaml . | indent 8 }} 168 | {{- end }} 169 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled }} 2 | {{- range .Values.ingress.hosts }} 3 | apiVersion: extensions/v1beta1 4 | kind: Ingress 5 | metadata: 6 | name: {{ template "node.fullname" $ }} 7 | labels: 8 | app: {{ template "node.name" $ }} 9 | chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" 10 | release: "{{ $.Release.Name }}" 11 | heritage: "{{ $.Release.Service }}" 12 | annotations: 13 | {{- if .certManager }} 14 | kubernetes.io/tls-acme: "true" 15 | {{- end }} 16 | {{- range $key, $value := .annotations }} 17 | {{ $key }}: {{ $value | quote }} 18 | {{- end }} 19 | spec: 20 | rules: 21 | - host: {{ .name }} 22 | http: 23 | paths: 24 | - path: {{ default "/" .path }} 25 | backend: 26 | serviceName: {{ template "node.fullname" $ }} 27 | servicePort: 80 28 | {{- if .tls }} 29 | tls: 30 | - hosts: 31 | - {{ .name }} 32 | secretName: {{ .tlsSecret }} 33 | {{- end }} 34 | --- 35 | {{- end }} 36 | {{- end }} 37 | 38 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/templates/mongodb-binding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.externaldb.broker.serviceInstanceName }} 2 | apiVersion: servicecatalog.k8s.io/v1beta1 3 | kind: ServiceBinding 4 | metadata: 5 | name: {{ template "node.mongodb.fullname" . }}-binding 6 | labels: 7 | app: {{ template "node.name" . }} 8 | chart: {{ template "node.chart" . }} 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | spec: 12 | instanceRef: 13 | name: {{ .Values.externaldb.broker.serviceInstanceName }} 14 | secretName: {{ template "node.secretName" . 
}} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.persistence.enabled }} 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: {{ template "node.fullname" . }} 6 | labels: 7 | app: {{ template "node.name" . }} 8 | chart: {{ template "node.chart" . }} 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | annotations: 12 | volume.alpha.kubernetes.io/storage-class: {{ ternary "default" (include "node.storageClass" .) (empty (include "node.storageClass" .)) }} 13 | spec: 14 | accessModes: 15 | - {{ .Values.persistence.accessMode | quote }} 16 | resources: 17 | requests: 18 | storage: {{ .Values.persistence.size | quote }} 19 | {{ include "node.storageClass" . }} 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /chapter7/charts/todo-dev/templates/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ template "node.fullname" . }} 5 | labels: 6 | app: {{ template "node.name" . }} 7 | chart: {{ template "node.chart" . }} 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | annotations: 11 | {{- if .Values.service.annotations }} 12 | {{ toYaml .Values.service.annotations | indent 4 }} 13 | {{- end }} 14 | spec: 15 | type: {{ .Values.service.type }} 16 | {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }} 17 | loadBalancerIP: {{ .Values.service.loadBalancerIP }} 18 | {{- end }} 19 | ports: 20 | - name: http 21 | port: {{ .Values.service.port }} 22 | targetPort: http 23 | {{- if .Values.service.nodePort }} 24 | nodePort: {{ .Values.service.nodePort }} 25 | {{- end }} 26 | selector: 27 | app: {{ template "node.name" . 
}} 28 | release: "{{ .Release.Name }}" 29 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: todo-prod 3 | version: 11.2.3 4 | appVersion: 10.16.3 5 | description: Event-driven I/O server-side JavaScript environment based on V8 6 | keywords: 7 | - node 8 | - javascript 9 | - nodejs 10 | - git 11 | home: http://nodejs.org/ 12 | sources: 13 | - https://github.com/bitnami/bitnami-docker-node 14 | maintainers: 15 | - name: Bitnami 16 | email: containers@bitnami.com 17 | engine: gotpl 18 | icon: https://bitnami.com/assets/stacks/nodejs/img/nodejs-stack-110x117.png 19 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/charts/mongodb-7.2.10.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/k8sdevopscookbook/src/3631ebffe7a71e5df5bbae546538ed2128daa5ac/chapter7/charts/todo-prod/charts/mongodb-7.2.10.tgz -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mongodb 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.2.10 5 | digest: sha256:97b996c785336242f22d81d40e05e4a1ed92969e6052125b4138c87ab7e5ac44 6 | generated: "2019-10-02T23:46:10.841330443Z" 7 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: mongodb 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.x.x 5 | condition: mongodb.install 6 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | 2 | 1. Get the URL of your Node app by running: 3 | 4 | {{- if contains "NodePort" .Values.service.type }} 5 | 6 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "node.fullname" . }}) 7 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") 8 | echo "Node app URL: http://$NODE_IP:$NODE_PORT/" 9 | 10 | {{- else if contains "LoadBalancer" .Values.service.type }} 11 | 12 | NOTE: It may take a few minutes for the LoadBalancer IP to be available. 13 | Watch the status with: 'kubectl get svc -w {{ template "node.fullname" . }} --namespace {{ .Release.Namespace }}' 14 | 15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "node.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") 16 | echo "Node app URL: http://$SERVICE_IP/" 17 | 18 | {{- else if contains "ClusterIP" .Values.service.type }} 19 | 20 | kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "node.fullname" . }} {{ .Values.service.port }}:{{ .Values.service.port }} 21 | echo "Node app URL: http://127.0.0.1:{{ .Values.service.port }}/" 22 | 23 | {{- end }} 24 | 25 | {{ include "node.checkRollingTags" . 
}} 26 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "node.fullname" . }} 5 | labels: 6 | app: {{ template "node.name" . }} 7 | chart: {{ template "node.chart" . }} 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | spec: 11 | selector: 12 | matchLabels: 13 | app: {{ template "node.name" . }} 14 | release: "{{ .Release.Name }}" 15 | replicas: {{ .Values.replicas }} 16 | template: 17 | metadata: 18 | labels: 19 | app: {{ template "node.name" . }} 20 | chart: {{ template "node.chart" . }} 21 | release: {{ .Release.Name | quote }} 22 | heritage: "{{ .Release.Service }}" 23 | spec: 24 | {{- if .Values.securityContext.enabled }} 25 | securityContext: 26 | fsGroup: {{ .Values.securityContext.fsGroup }} 27 | runAsUser: {{ .Values.securityContext.runAsUser }} 28 | {{- end }} 29 | {{- include "node.imagePullSecrets" . | indent 6 }} 30 | initContainers: 31 | - name: git-clone-repository 32 | image: "{{ template "git.image" . }}" 33 | imagePullPolicy: {{ .Values.git.pullPolicy | quote }} 34 | command: [ '/bin/sh', '-c' , 'git clone {{ .Values.repository }} /app && cd /app && git checkout {{ .Values.revision }}'] 35 | volumeMounts: 36 | - name: app 37 | mountPath: /app 38 | - name: npm-install 39 | image: "{{ template "node.image" . }}" 40 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 41 | workingDir: /app 42 | command: ['/bin/bash', '-c', 'npm install'] 43 | env: 44 | - name: HOME 45 | value: /tmp 46 | volumeMounts: 47 | - name: app 48 | mountPath: /app 49 | {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} 50 | - name: volume-permissions 51 | image: "{{ template "node.volumePermissions.image" . }}" 52 | imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} 53 | command: ["chown", "-R", "{{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}", "{{ .Values.persistence.path }}"] 54 | securityContext: 55 | runAsUser: 0 56 | resources: {{ toYaml .Values.volumePermissions.resources | nindent 10 }} 57 | volumeMounts: 58 | - name: data 59 | mountPath: {{ .Values.persistence.path }} 60 | {{- end }} 61 | containers: 62 | - name: {{ template "node.fullname" . }} 63 | image: "{{ template "node.image" . }}" 64 | imagePullPolicy: {{ .Values.image.pullPolicy | quote }} 65 | env: 66 | {{- if .Values.mongodb.install }} 67 | - name: DATABASE_HOST 68 | value: {{ template "node.mongodb.fullname" . }} 69 | - name: DATABASE_PORT 70 | value: "27017" 71 | - name: DATABASE_USER 72 | value: {{ .Values.mongodb.mongodbUsername | quote }} 73 | - name: DATABASE_PASSWORD 74 | valueFrom: 75 | secretKeyRef: 76 | name: {{ template "node.mongodb.fullname" . }} 77 | key: mongodb-password 78 | - name: DATABASE_NAME 79 | value: {{ .Values.mongodb.mongodbDatabase | quote }} 80 | - name: DATABASE_CONNECTION_OPTIONS 81 | value: "" 82 | {{ else }} 83 | {{- $type := dict "type" .Values.externaldb.type }} 84 | - name: DATABASE_HOST 85 | valueFrom: 86 | secretKeyRef: 87 | name: {{ template "node.secretName" . }} 88 | key: {{ template "externaldb.host" $type }} 89 | {{- if not .Values.externaldb.broker.serviceInstanceName }} 90 | - name: DATABASE_NAME 91 | valueFrom: 92 | secretKeyRef: 93 | name: {{ template "node.secretName" . 
}} 94 | key: database 95 | {{ else }} 96 | - name: DATABASE_NAME 97 | value: "" 98 | {{- end }} 99 | - name: DATABASE_PORT 100 | valueFrom: 101 | secretKeyRef: 102 | name: {{ template "node.secretName" . }} 103 | key: {{ template "externaldb.port" $type }} 104 | - name: DATABASE_USER 105 | valueFrom: 106 | secretKeyRef: 107 | name: {{ template "node.secretName" . }} 108 | key: {{ template "externaldb.username" $type }} 109 | - name: DATABASE_PASSWORD 110 | valueFrom: 111 | secretKeyRef: 112 | name: {{ template "node.secretName" . }} 113 | key: {{ template "externaldb.password" $type }} 114 | {{- if .Values.externaldb.ssl }} 115 | - name: DATABASE_CONNECTION_OPTIONS 116 | value: "ssl=true" 117 | {{ else }} 118 | - name: DATABASE_CONNECTION_OPTIONS 119 | value: "" 120 | {{- end }} 121 | {{- end }} 122 | - name: DATA_FOLDER 123 | value: "/app" 124 | {{- if .Values.extraEnv }} 125 | {{ toYaml .Values.extraEnv | indent 8 }} 126 | {{- end }} 127 | workingDir: /app 128 | command: ['/bin/bash', '-c', 'npm start'] 129 | ports: 130 | - name: http 131 | containerPort: {{ .Values.applicationPort }} 132 | livenessProbe: 133 | httpGet: 134 | path: / 135 | port: http 136 | initialDelaySeconds: 60 137 | timeoutSeconds: 5 138 | failureThreshold: 6 139 | readinessProbe: 140 | httpGet: 141 | path: / 142 | port: http 143 | initialDelaySeconds: 10 144 | timeoutSeconds: 3 145 | periodSeconds: 5 146 | resources: 147 | {{ toYaml .Values.resources | indent 10 }} 148 | volumeMounts: 149 | - name: app 150 | mountPath: /app 151 | - name: data 152 | mountPath: {{ .Values.persistence.path }} 153 | volumes: 154 | - name: app 155 | emptyDir: {} 156 | - name: data 157 | {{- if .Values.persistence.enabled }} 158 | persistentVolumeClaim: 159 | claimName: {{ template "node.fullname" . }} 160 | {{- else }} 161 | emptyDir: {} 162 | {{- end }} 163 | {{- with .Values.affinity }} 164 | affinity: 165 | {{ toYaml . | indent 8 }} 166 | {{- end }} 167 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.ingress.enabled }} 2 | {{- range .Values.ingress.hosts }} 3 | apiVersion: extensions/v1beta1 4 | kind: Ingress 5 | metadata: 6 | name: {{ template "node.fullname" $ }} 7 | labels: 8 | app: {{ template "node.name" $ }} 9 | chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}" 10 | release: "{{ $.Release.Name }}" 11 | heritage: "{{ $.Release.Service }}" 12 | annotations: 13 | {{- if .certManager }} 14 | kubernetes.io/tls-acme: "true" 15 | {{- end }} 16 | {{- range $key, $value := .annotations }} 17 | {{ $key }}: {{ $value | quote }} 18 | {{- end }} 19 | spec: 20 | rules: 21 | - host: {{ .name }} 22 | http: 23 | paths: 24 | - path: {{ default "/" .path }} 25 | backend: 26 | serviceName: {{ template "node.fullname" $ }} 27 | servicePort: 80 28 | {{- if .tls }} 29 | tls: 30 | - hosts: 31 | - {{ .name }} 32 | secretName: {{ .tlsSecret }} 33 | {{- end }} 34 | --- 35 | {{- end }} 36 | {{- end }} 37 | 38 | -------------------------------------------------------------------------------- /chapter7/charts/todo-prod/templates/mongodb-binding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.externaldb.broker.serviceInstanceName }} 2 | apiVersion: servicecatalog.k8s.io/v1beta1 3 | kind: ServiceBinding 4 | metadata: 5 | name: {{ template "node.mongodb.fullname" . }}-binding 6 | labels: 7 | app: {{ template "node.name" . 
--------------------------------------------------------------------------------
/chapter7/charts/todo-prod/templates/pvc.yaml:
--------------------------------------------------------------------------------
{{- if .Values.persistence.enabled }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: {{ template "node.fullname" . }}
  labels:
    app: {{ template "node.name" . }}
    chart: {{ template "node.chart" . }}
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  annotations:
    volume.alpha.kubernetes.io/storage-class: {{ ternary "default" (include "node.storageClass" .) (empty (include "node.storageClass" .)) }}
spec:
  accessModes:
    - {{ .Values.persistence.accessMode | quote }}
  resources:
    requests:
      storage: {{ .Values.persistence.size | quote }}
  {{ include "node.storageClass" . }}
{{- end }}
--------------------------------------------------------------------------------
/chapter7/charts/todo-prod/templates/svc.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: {{ template "node.fullname" . }}
  labels:
    app: {{ template "node.name" . }}
    chart: {{ template "node.chart" . }}
    release: "{{ .Release.Name }}"
    heritage: "{{ .Release.Service }}"
  annotations:
  {{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4 }}
  {{- end }}
spec:
  type: {{ .Values.service.type }}
  {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerIP }}
  loadBalancerIP: {{ .Values.service.loadBalancerIP }}
  {{- end }}
  ports:
  - name: http
    port: {{ .Values.service.port }}
    targetPort: http
    {{- if .Values.service.nodePort }}
    nodePort: {{ .Values.service.nodePort }}
    {{- end }}
  selector:
    app: {{ template "node.name" . }}
    release: "{{ .Release.Name }}"
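A sketch of installing the chart and checking the rendered Service; Helm 2 syntax to match the book's era. The release name my-ch7-app is chosen so the fullname is expected to render as my-ch7-app-node, which lines up with the HPA manifests that follow:

$ helm install --name my-ch7-app ./chapter7/charts/todo-prod --set service.type=LoadBalancer
$ kubectl get svc my-ch7-app-node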
--------------------------------------------------------------------------------
/chapter7/hpa-my-ch7-app.yaml:
--------------------------------------------------------------------------------
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: my-ch7-app-node
  namespace: default
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: my-ch7-app-node
  minReplicas: 1
  maxReplicas: 10
  targetCPUUtilizationPercentage: 50
--------------------------------------------------------------------------------
/chapter7/hpav2-my-ch7-app.yaml:
--------------------------------------------------------------------------------
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
  name: my-ch7-app-node
  namespace: default
spec:
  maxReplicas: 5
  metrics:
  - resource:
      name: cpu
      target:
        averageUtilization: 50
        type: Utilization
    type: Resource
  minReplicas: 1
  scaleTargetRef:
    # apps/v1 (rather than the deprecated extensions/v1beta1) matches the
    # target reference used in the v1 autoscaler above
    apiVersion: apps/v1
    kind: Deployment
    name: my-ch7-app-node
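Both autoscalers can be applied as-is with kubectl; for comparison, the v1 manifest above is equivalent to this imperative form:

$ kubectl autoscale deployment my-ch7-app-node --cpu-percent=50 --min=1 --max=10
$ kubectl get hpa my-ch7-app-node -w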
--------------------------------------------------------------------------------
/chapter7/lb/minio.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
  - port: 9000
    name: minio
  selector:
    app: minio
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: minio
spec:
  serviceName: minio
  replicas: 4
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
      - name: minio
        env:
        - name: MINIO_ACCESS_KEY
          value: "minio"
        - name: MINIO_SECRET_KEY
          value: "minio123"
        image: minio/minio
        args:
        - server
        - http://minio-{0...3}.minio.default.svc.cluster.local/data
        ports:
        - containerPort: 9000
        # These volume mounts are persistent. Each pod in the StatefulSet
        # gets a volume mounted based on this field.
        volumeMounts:
        - name: data
          mountPath: /data
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10G
      # Uncomment and add a storageClass specific to your requirements below.
      # Read more: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
      #storageClassName:
--------------------------------------------------------------------------------
/chapter7/lb/svc-minio.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: minio-service
spec:
  type: LoadBalancer
  ports:
  - port: 9000
    targetPort: 9000
    protocol: TCP
  selector:
    app: minio
--------------------------------------------------------------------------------
/chapter7/linkerd/emojivoto.yml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Namespace
metadata:
  name: emojivoto
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: emoji
  namespace: emojivoto
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: voting
  namespace: emojivoto
---
kind: ServiceAccount
apiVersion: v1
metadata:
  name: web
  namespace: emojivoto
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  name: emoji
  namespace: emojivoto
spec:
  replicas: 1
  selector:
    matchLabels:
      app: emoji-svc
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: emoji-svc
    spec:
      serviceAccountName: emoji
      containers:
      - env:
        - name: GRPC_PORT
          value: "8080"
        image: buoyantio/emojivoto-emoji-svc:v8
        name: emoji-svc
        ports:
        - containerPort: 8080
          name: grpc
        resources:
          requests:
            cpu: 100m
status: {}
---
apiVersion: v1
kind: Service
metadata:
  name: emoji-svc
  namespace: emojivoto
spec:
  selector:
    app: emoji-svc
  clusterIP: None
  ports:
  - name: grpc
    port: 8080
    targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  name: voting
  namespace: emojivoto
spec:
  replicas: 1
  selector:
    matchLabels:
      app: voting-svc
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: voting-svc
    spec:
      serviceAccountName: voting
      containers:
      - env:
        - name: GRPC_PORT
          value: "8080"
        image: buoyantio/emojivoto-voting-svc:v8
        name: voting-svc
        ports:
        - containerPort: 8080
          name: grpc
        resources:
          requests:
            cpu: 100m
status: {}
---
apiVersion: v1
kind: Service
metadata:
  name: voting-svc
  namespace: emojivoto
spec:
  selector:
    app: voting-svc
  clusterIP: None
  ports:
  - name: grpc
    port: 8080
    targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  name: web
  namespace: emojivoto
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web-svc
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web-svc
    spec:
      serviceAccountName: web
      containers:
      - env:
        - name: WEB_PORT
          value: "8080"
        - name: EMOJISVC_HOST
          value: emoji-svc.emojivoto:8080
        - name: VOTINGSVC_HOST
          value: voting-svc.emojivoto:8080
        - name: INDEX_BUNDLE
          value: dist/index_bundle.js
        image: buoyantio/emojivoto-web:v8
        name: web-svc
        ports:
        - containerPort: 8080
          name: http
        resources:
          requests:
            cpu: 100m
status: {}
---
apiVersion: v1
kind: Service
metadata:
  name: web-svc
  namespace: emojivoto
spec:
  type: LoadBalancer
  selector:
    app: web-svc
  ports:
  - name: http
    port: 80
    targetPort: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  name: vote-bot
  namespace: emojivoto
spec:
  replicas: 1
  selector:
    matchLabels:
      app: vote-bot
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: vote-bot
    spec:
      containers:
      - command:
        - emojivoto-vote-bot
        env:
        - name: WEB_HOST
          value: web-svc.emojivoto:80
        image: buoyantio/emojivoto-web:v8
        name: vote-bot
        resources:
          requests:
            cpu: 10m
status: {}
---
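emojivoto is Linkerd's standard demo application; a typical way to mesh it after applying the manifest is to pipe the live deployments through linkerd inject (assumes the linkerd CLI is installed and the control plane is already running):

$ kubectl apply -f chapter7/linkerd/emojivoto.yml
$ kubectl get -n emojivoto deploy -o yaml | linkerd inject - | kubectl apply -f -
$ linkerd -n emojivoto stat deploy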
--------------------------------------------------------------------------------
/chapter7/linkerd/ingress-nginx.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: web-ingress-auth
  namespace: linkerd
data:
  auth: YWRtaW46JGFwcjEkbjdDdTZnSGwkRTQ3b2dmN0NPOE5SWWpFakJPa1dNLgoK
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: web-ingress
  namespace: linkerd
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      proxy_set_header l5d-dst-override $service_name.$namespace.svc.cluster.local:8084;
      proxy_set_header Origin "";
      proxy_hide_header l5d-remote-ip;
      proxy_hide_header l5d-server-id;
    nginx.ingress.kubernetes.io/auth-type: basic
    nginx.ingress.kubernetes.io/auth-secret: web-ingress-auth
    nginx.ingress.kubernetes.io/auth-realm: "Authentication Required"
spec:
  rules:
  - host: dashboard.example.com
    http:
      paths:
      - backend:
          serviceName: linkerd-web
          servicePort: 8084
--------------------------------------------------------------------------------
/chapter8/cloudwatch/cwagent-configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: cwagentconfig
  namespace: amazon-cloudwatch
data:
  cwagentconfig.json: |
    {
      "logs": {
        "metrics_collected": {
          "kubernetes": {
            "cluster_name": "{{cluster_name}}",
            "metrics_collection_interval": 60
          }
        },
        "force_flush_interval": 5
      }
    }
--------------------------------------------------------------------------------
/chapter8/cloudwatch/cwagent-serviceaccount.yaml:
--------------------------------------------------------------------------------
# create cwagent service account and role binding
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cloudwatch-agent
  namespace: amazon-cloudwatch

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cloudwatch-agent-role
rules:
  - apiGroups: [""]
    resources: ["pods", "nodes", "endpoints"]
    verbs: ["list", "watch"]
  - apiGroups: ["apps"]
    resources: ["replicasets"]
    verbs: ["list", "watch"]
  - apiGroups: ["batch"]
    resources: ["jobs"]
    verbs: ["list", "watch"]
  - apiGroups: [""]
    resources: ["nodes/proxy"]
    verbs: ["get"]
  - apiGroups: [""]
    resources: ["nodes/stats", "configmaps", "events"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["cwagent-clusterleader"]
    verbs: ["get", "update"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: cloudwatch-agent-role-binding
subjects:
  - kind: ServiceAccount
    name: cloudwatch-agent
    namespace: amazon-cloudwatch
roleRef:
  kind: ClusterRole
  name: cloudwatch-agent-role
  apiGroup: rbac.authorization.k8s.io
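Before applying the CloudWatch manifests, the {{cluster_name}} placeholder in the ConfigMap needs the real cluster name. A sketch with a hypothetical cluster name:

$ kubectl create namespace amazon-cloudwatch
$ sed -i 's/{{cluster_name}}/my-eks-cluster/' chapter8/cloudwatch/cwagent-configmap.yaml
$ kubectl apply -f chapter8/cloudwatch/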
--------------------------------------------------------------------------------
/chapter8/cloudwatch/cwagent.yaml:
--------------------------------------------------------------------------------
# deploy cwagent as daemonset
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: cloudwatch-agent
  namespace: amazon-cloudwatch
spec:
  selector:
    matchLabels:
      name: cloudwatch-agent
  template:
    metadata:
      labels:
        name: cloudwatch-agent
    spec:
      containers:
        - name: cloudwatch-agent
          image: amazon/cloudwatch-agent:1.226589.0
          ports:
            - containerPort: 8125
              hostPort: 8125
              protocol: UDP
          resources:
            limits:
              cpu: 200m
              memory: 200Mi
            requests:
              cpu: 200m
              memory: 200Mi
          # Please don't change below envs
          env:
            - name: HOST_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
            - name: HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            - name: K8S_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: CI_VERSION
              value: "k8s/1.0.0"
          # Please don't change the mountPath
          volumeMounts:
            - name: cwagentconfig
              mountPath: /etc/cwagentconfig
            - name: rootfs
              mountPath: /rootfs
              readOnly: true
            - name: dockersock
              mountPath: /var/run/docker.sock
              readOnly: true
            - name: varlibdocker
              mountPath: /var/lib/docker
              readOnly: true
            - name: sys
              mountPath: /sys
              readOnly: true
            - name: devdisk
              mountPath: /dev/disk
              readOnly: true
      volumes:
        - name: cwagentconfig
          configMap:
            name: cwagentconfig
        - name: rootfs
          hostPath:
            path: /
        - name: dockersock
          hostPath:
            path: /var/run/docker.sock
        - name: varlibdocker
          hostPath:
            path: /var/lib/docker
        - name: sys
          hostPath:
            path: /sys
        - name: devdisk
          hostPath:
            path: /dev/disk/
      terminationGracePeriodSeconds: 60
      serviceAccountName: cloudwatch-agent
--------------------------------------------------------------------------------
/chapter8/debug/minio-liveness.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
  - port: 9000
    name: minio
  selector:
    app: minio
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: minio
spec:
  serviceName: minio
  replicas: 4
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
      - name: minio
        env:
        - name: MINIO_ACCESS_KEY
          value: "minio"
        - name: MINIO_SECRET_KEY
          value: "minio123"
        image: minio/minio
        args:
        - server
        - http://minio-{0...3}.minio.default.svc.cluster.local/data
        ports:
        - containerPort: 9000
        # These volume mounts are persistent. Each pod in the StatefulSet
        # gets a volume mounted based on this field.
        volumeMounts:
        - name: data
          mountPath: /data
        livenessProbe:
          httpGet:
            path: /minio/health/live
            port: 9000
          initialDelaySeconds: 10
          periodSeconds: 5
  # These are converted to volume claims by the controller
  # and mounted at the paths mentioned above.
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 10G
      # A storageClass is set for this example; adjust to your requirements.
      # Read more: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1
      storageClassName: gp2
---
apiVersion: v1
kind: Service
metadata:
  name: minio-service
spec:
  type: LoadBalancer
  ports:
  - port: 9000
    targetPort: 9000
    protocol: TCP
  selector:
    app: minio
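One way to watch the probe above in action is to describe the pod and hit the health endpoint through a port-forward (the pod name assumes the default ordinal naming):

$ kubectl describe pod minio-0 | grep -i liveness
$ kubectl port-forward minio-0 9000:9000 &
$ curl -I http://localhost:9000/minio/health/live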
--------------------------------------------------------------------------------
/chapter8/debug/mongo-image.yaml:
--------------------------------------------------------------------------------
# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  ports:
  - port: 27017
    targetPort: 27017
  clusterIP: None
  selector:
    role: mongo
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  serviceName: "mongo"
  replicas: 3
  template:
    metadata:
      labels:
        app: mongo
        role: mongo
        environment: test
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: mongo
        # 'mongi' is a broken image reference; this manifest is the faulty
        # variant used in the image-debugging exercise (compare mongo-sc.yaml).
        image: mongi
        command:
        # - mongod
        # - "--replSet"
        # - rs0
        # - "--smallfiles"
        # - "--noprealloc"
        # - "--bind_ip_all"
        ports:
        - containerPort: 27017
        volumeMounts:
        - name: mongo-pvc
          mountPath: /data/db
      - name: mongo-sidecar
        image: cvallance/mongo-k8s-sidecar
        env:
        - name: MONGO_SIDECAR_POD_LABELS
          value: "role=mongo,environment=test"
  volumeClaimTemplates:
  - metadata:
      name: mongo-pvc
    spec:
      storageClassName: storageclass
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 5G
--------------------------------------------------------------------------------
/chapter8/debug/mongo-sc.yaml:
--------------------------------------------------------------------------------
# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  ports:
  - port: 27017
    targetPort: 27017
  clusterIP: None
  selector:
    role: mongo
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  serviceName: "mongo"
  replicas: 3
  template:
    metadata:
      labels:
        app: mongo
        role: mongo
        environment: test
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: mongo
        image: mongo
        command:
        # - mongod
        # - "--replSet"
        # - rs0
        # - "--smallfiles"
        # - "--noprealloc"
        # - "--bind_ip_all"
        ports:
        - containerPort: 27017
        volumeMounts:
        - name: mongo-pvc
          mountPath: /data/db
      - name: mongo-sidecar
        image: cvallance/mongo-k8s-sidecar
        env:
        - name: MONGO_SIDECAR_POD_LABELS
          value: "role=mongo,environment=test"
  volumeClaimTemplates:
  - metadata:
      name: mongo-pvc
    spec:
      storageClassName: storageclass
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 5G
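The misspelled image in mongo-image.yaml surfaces as ErrImagePull/ImagePullBackOff; the usual triage, ending with patching the image in place, looks like:

$ kubectl get pods -l app=mongo
$ kubectl describe pod mongo-0
$ kubectl set image statefulset/mongo mongo=mongo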
--------------------------------------------------------------------------------
/chapter8/debug/node-problem-detector.yaml:
--------------------------------------------------------------------------------
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-problem-detector-v0.1
  namespace: kube-system
  labels:
    k8s-app: node-problem-detector
    version: v0.1
    kubernetes.io/cluster-service: "true"
spec:
  selector:
    matchLabels:
      k8s-app: node-problem-detector
      version: v0.1
      kubernetes.io/cluster-service: "true"
  template:
    metadata:
      labels:
        k8s-app: node-problem-detector
        version: v0.1
        kubernetes.io/cluster-service: "true"
    spec:
      hostNetwork: true
      containers:
      - name: node-problem-detector
        image: k8s.gcr.io/node-problem-detector:v0.1
        securityContext:
          privileged: true
        resources:
          limits:
            cpu: "200m"
            memory: "100Mi"
          requests:
            cpu: "20m"
            memory: "20Mi"
        volumeMounts:
        - name: log
          mountPath: /log
          readOnly: true
      volumes:
      - name: log
        hostPath:
          path: /var/log/
--------------------------------------------------------------------------------
/chapter8/debug/sc-gp2.yaml:
--------------------------------------------------------------------------------
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: storageclass
parameters:
  type: gp2
provisioner: kubernetes.io/aws-ebs
reclaimPolicy: Delete
volumeBindingMode: Immediate
--------------------------------------------------------------------------------
/chapter8/debug/termination-image.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: termination-demo
spec:
  containers:
  - name: termination-demo-container
    # 'debiann' is a broken image reference; this is the faulty variant used
    # in the image-debugging exercise.
    image: debiann
    command: ["/bin/sh"]
    args: ["-c", "sleep 10 && echo Sleep expired > /dev/termination-log"]
--------------------------------------------------------------------------------
/chapter8/debug/termination.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: termination-demo
spec:
  containers:
  - name: termination-demo-container
    # working variant of termination-image.yaml: the image name is spelled
    # correctly here
    image: debian
    command: ["/bin/sh"]
    args: ["-c", "sleep 10 && echo Sleep expired > /dev/termination-log"]
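Once the demo pod above has exited, the message written to /dev/termination-log can be read back from the container status:

$ kubectl apply -f chapter8/debug/termination.yaml
$ kubectl get pod termination-demo -o go-template='{{range .status.containerStatuses}}{{.lastState.terminated.message}}{{end}}'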
--------------------------------------------------------------------------------
/chapter8/prometheus/custom-values.yaml:
--------------------------------------------------------------------------------
# Define persistent storage for Prometheus (PVC)
prometheus:
  prometheusSpec:
    storageSpec:
      volumeClaimTemplate:
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: gp2
          resources:
            requests:
              storage: 5Gi

# Define persistent storage for Grafana (PVC)
grafana:
  # Set password for Grafana admin user
  adminPassword: your_admin_password
  persistence:
    enabled: true
    storageClassName: gp2
    accessModes: ["ReadWriteOnce"]
    size: 5Gi

# Define persistent storage for Alertmanager (PVC)
alertmanager:
  alertmanagerSpec:
    storage:
      volumeClaimTemplate:
        spec:
          accessModes: ["ReadWriteOnce"]
          storageClassName: gp2
          resources:
            requests:
              storage: 5Gi

# Change default node-exporter port
prometheus-node-exporter:
  service:
    port: 30206
    targetPort: 30206

# Disable Etcd metrics
kubeEtcd:
  enabled: false

# Disable Controller metrics
kubeControllerManager:
  enabled: false

# Disable Scheduler metrics
kubeScheduler:
  enabled: false
--------------------------------------------------------------------------------
/chapter9/cis/job-eks.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench
spec:
  template:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          # Push the image to your ECR and then refer to it here
          image:
          command: ["kube-bench", "--version", "1.11"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
--------------------------------------------------------------------------------
/chapter9/cis/job-iks.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench
spec:
  template:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "--version", "1.13", "node"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/lib/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"
--------------------------------------------------------------------------------
/chapter9/cis/job-master.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench-master
spec:
  template:
    spec:
      hostPID: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
        - key: node-role.kubernetes.io/master
          operator: Exists
          effect: NoSchedule
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "master"]
          volumeMounts:
            - name: var-lib-etcd
              mountPath: /var/lib/etcd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-etcd
          hostPath:
            path: "/var/lib/etcd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"
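A sketch of running the master benchmark and reading its report; assumes a kubeadm-style cluster where control-plane nodes carry the node-role.kubernetes.io/master label used by the nodeSelector above:

$ kubectl apply -f chapter9/cis/job-master.yaml
$ kubectl logs job/kube-bench-master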
--------------------------------------------------------------------------------
/chapter9/cis/job-node.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench-node
spec:
  template:
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "node"]
          volumeMounts:
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"
--------------------------------------------------------------------------------
/chapter9/cis/job.yaml:
--------------------------------------------------------------------------------
apiVersion: batch/v1
kind: Job
metadata:
  name: kube-bench
spec:
  template:
    metadata:
      labels:
        app: kube-bench
    spec:
      hostPID: true
      containers:
        - name: kube-bench
          image: aquasec/kube-bench:latest
          command: ["kube-bench", "--version", "1.14"]
          volumeMounts:
            - name: var-lib-etcd
              mountPath: /var/lib/etcd
            - name: var-lib-kubelet
              mountPath: /var/lib/kubelet
            - name: etc-systemd
              mountPath: /etc/systemd
            - name: etc-kubernetes
              mountPath: /etc/kubernetes
            # /usr/bin is mounted to access kubectl / kubelet, for auto-detecting the Kubernetes version.
            # You can omit this mount if you specify --version as part of the command.
            - name: usr-bin
              mountPath: /usr/bin
      restartPolicy: Never
      volumes:
        - name: var-lib-etcd
          hostPath:
            path: "/var/lib/etcd"
        - name: var-lib-kubelet
          hostPath:
            path: "/var/lib/kubelet"
        - name: etc-systemd
          hostPath:
            path: "/etc/systemd"
        - name: etc-kubernetes
          hostPath:
            path: "/etc/kubernetes"
        - name: usr-bin
          hostPath:
            path: "/usr/bin"
--------------------------------------------------------------------------------
/chapter9/devsecops/.circleci/config.yml:
--------------------------------------------------------------------------------
jobs:
  build:
    docker:
      - image: docker:18.09-git
    steps:
      - checkout
      - setup_remote_docker
      - restore_cache:
          key: vulnerability-db
      - run:
          name: Build image
          command: docker build -t trivy-ci-test:${CIRCLE_SHA1} .
      - run:
          name: Install trivy
          command: |
            apk add --update curl
            VERSION=$(
                curl --silent "https://api.github.com/repos/aquasecurity/trivy/releases/latest" | \
                grep '"tag_name":' | \
                sed -E 's/.*"v([^"]+)".*/\1/'
            )

            wget https://github.com/aquasecurity/trivy/releases/download/v${VERSION}/trivy_${VERSION}_Linux-64bit.tar.gz
            tar zxvf trivy_${VERSION}_Linux-64bit.tar.gz
            mv trivy /usr/local/bin
      - run:
          name: Scan the local image with trivy
          command: trivy --exit-code 0 --no-progress --auto-refresh trivy-ci-test:${CIRCLE_SHA1}
      - save_cache:
          key: vulnerability-db
          paths:
            - $HOME/.cache/trivy
workflows:
  version: 2
  release:
    jobs:
      - build
--------------------------------------------------------------------------------
/chapter9/devsecops/.gitlab-ci.yml:
--------------------------------------------------------------------------------
stages:
  - vulTest

trivy:
  stage: vulTest
  image: docker:stable-git
  before_script:
    - docker build -t trivy-ci-test:${CI_COMMIT_REF_NAME} .
    - export VERSION=$(curl --silent "https://api.github.com/repos/aquasecurity/trivy/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/')
    - wget https://github.com/aquasecurity/trivy/releases/download/v${VERSION}/trivy_${VERSION}_Linux-64bit.tar.gz
    - tar zxvf trivy_${VERSION}_Linux-64bit.tar.gz
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:stable-dind
  script:
    - ./trivy --exit-code 0 --severity HIGH --no-progress --auto-refresh trivy-ci-test:${CI_COMMIT_REF_NAME}
    - ./trivy --exit-code 1 --severity CRITICAL --no-progress --auto-refresh trivy-ci-test:${CI_COMMIT_REF_NAME}
  cache:
    # GitLab CI caches use 'paths' ('directories' is Travis CI syntax)
    paths:
      - $HOME/.cache/trivy
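The scan both pipelines run can be reproduced locally with the same (older-era) trivy flags; the image tag here is arbitrary:

$ docker build -t trivy-ci-test:local .
$ trivy --exit-code 1 --severity CRITICAL --no-progress trivy-ci-test:local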
--------------------------------------------------------------------------------
/chapter9/falco/client.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Pod
metadata:
  name: client
  namespace: falcotest
spec:
  containers:
  - args:
    - sh
    - -c
    - while true; do curl www.google.com; sleep 30; done
    image: tutum/curl
    name: client
--------------------------------------------------------------------------------
/chapter9/falco/custom_rules.yaml:
--------------------------------------------------------------------------------
customRules:
  falco_rules.local.yaml: |-
    - macro: ping_allowed_dirs
      condition: evt.arg[1] startswith /var/log/apache2 or evt.arg[1] startswith /var/lib/apache2 or evt.arg[1] startswith /dev/tty

    - rule: Unauthorized process
      desc: There is a running process not described in the base template
      condition: spawned_process and container and k8s.ns.name=ping and k8s.deployment.name=ping and not proc.name in (apache2, sh, ping)
      output: Unauthorized process (%proc.cmdline) running in (%container.id)
      priority: ERROR
      tags: [process]

    - rule: Apache writing to non allowed directory
      desc: Attempt to write to directories that should be immutable
      condition: open_write and container and k8s.ns.name=ping and k8s.deployment.name=ping and not (ping_allowed_dirs and proc.name in (apache2))
      output: "Writing to forbidden directory (user=%user.name command=%proc.cmdline file=%fd.name)"
      priority: ERROR
      tags: [filesystem]

    - rule: Forbidden network outbound connection
      desc: A non-whitelisted process is trying to reach the Internet
      condition: outbound and container and k8s.ns.name=ping and k8s.deployment.name=ping and not proc.name in (ping, apache2)
      output: Forbidden outbound connection (user=%user.name command=%proc.cmdline connection=%fd.name)
      priority: ERROR
      tags: [network]
--------------------------------------------------------------------------------
/chapter9/falco/dump.php:
--------------------------------------------------------------------------------
<?php
/* The opening lines (the MySQL connection and the query that populates $re)
   were lost when this file was extracted; only the dump loop survives. */
echo "<pre>";
while ($row = mysqli_fetch_assoc($re)) {
    var_dump($row);
}
echo "</pre>";
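custom_rules.yaml is shaped as Helm chart values (the top-level customRules key), so one way to load it is at Falco install time; the chart repo URL and release name are assumptions:

$ helm repo add falcosecurity https://falcosecurity.github.io/charts
$ helm install falco falcosecurity/falco -f chapter9/falco/custom_rules.yaml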
--------------------------------------------------------------------------------
/chapter9/falco/mysql.yaml:
--------------------------------------------------------------------------------
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: mysql
  namespace: falcotest
  labels:
    name: mysql-deployment
    app: demo
spec:
  replicas: 1
  selector:
    matchLabels:
      name: mysql
      role: mysqldb
      app: demo
  template:
    spec:
      containers:
      - name: mysql
        image: bencer/workshop-forensics-1-mysql
        ports:
        - containerPort: 3306
          name: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: foobar
        - name: MYSQL_DATABASE
          value: employees
        - name: SYSDIG_AGENT_CONF
          value: 'app_checks: [{name: mysql, check_module: mysql, pattern: {comm: mysqld}, conf: { server: 127.0.0.1, user: root, pass: foobar }}]'
    metadata:
      labels:
        name: mysql
        role: mysqldb
        app: demo
---
apiVersion: v1
kind: Service
metadata:
  labels:
    name: mysql
  name: mysql
  namespace: falcotest
spec:
  clusterIP: "None"
  ports:
  - port: 3306
    targetPort: 3306
  selector:
    name: mysql
    app: demo
    role: mysqldb
---
--------------------------------------------------------------------------------
/chapter9/falco/ping.yaml:
--------------------------------------------------------------------------------
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: ping
  namespace: falcotest
  labels:
    name: ping-deployment
    app: demo
spec:
  replicas: 1
  selector:
    matchLabels:
      name: ping
      role: frontend
      app: demo
  template:
    spec:
      containers:
      - name: phpping
        image: bencer/workshop-forensics-1-phpping
        env:
        - name: DB_HOST
          value: mysql.ping.svc.cluster.local
        ports:
        - containerPort: 80
          name: phpping
    metadata:
      labels:
        name: ping
        role: frontend
        app: demo
---
apiVersion: v1
kind: Service
metadata:
  labels:
    name: ping
  name: ping
  namespace: falcotest
spec:
  type: NodePort
  ports:
  - name: http
    nodePort: 31337
    port: 80
    protocol: TCP
    targetPort: 80
  selector:
    name: ping
    app: demo
    role: frontend
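With Falco and the demo running, spawning a binary outside the whitelist should trip the Unauthorized process rule. Note the rules above key on k8s.ns.name=ping while the demo manifests deploy into falcotest, so either deploy into a ping namespace or adjust the rules first; the Falco log selector also depends on how Falco was installed:

$ POD=$(kubectl get pod -n falcotest -l name=ping -o jsonpath='{.items[0].metadata.name}')
$ kubectl exec -n falcotest $POD -- uname -a
$ kubectl logs -l app=falco | grep 'Unauthorized process'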
--------------------------------------------------------------------------------
/chapter9/psp/aks-privileged-psp.yaml:
--------------------------------------------------------------------------------
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  # NOTE: this AKS variant reuses the gce.* policy and role names verbatim
  # from the GCE manifest.
  name: gce.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gce:podsecuritypolicy:authenticated
  annotations:
    kubernetes.io/description: 'Allow all authenticated users to create privileged pods.'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:authenticated
--------------------------------------------------------------------------------
/chapter9/psp/eks-privileged-psp.yaml:
--------------------------------------------------------------------------------
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: eks.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    eks.amazonaws.com/component: pod-security-policy
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: eks:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    eks.amazonaws.com/component: pod-security-policy
rules:
- apiGroups:
  - policy
  resourceNames:
  - eks.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: eks:podsecuritypolicy:authenticated
  annotations:
    kubernetes.io/description: 'Allow all authenticated users to create privileged pods.'
  labels:
    kubernetes.io/cluster-service: "true"
    eks.amazonaws.com/component: pod-security-policy
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: eks:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:authenticated
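On EKS this policy and binding ship by default; their presence and usability can be verified with:

$ kubectl get psp eks.privileged
$ kubectl auth can-i use podsecuritypolicy/eks.privileged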
--------------------------------------------------------------------------------
/chapter9/psp/gce-privileged-psp.yaml:
--------------------------------------------------------------------------------
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: gce:podsecuritypolicy:authenticated
  annotations:
    kubernetes.io/description: 'Allow all authenticated users to create privileged pods.'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:authenticated
--------------------------------------------------------------------------------
/chapter9/psp/restricted-psp.yaml:
--------------------------------------------------------------------------------
# policy/v1beta1 replaces the long-deprecated extensions/v1beta1 group,
# matching the other PSP manifests in this chapter
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-psp
spec:
  privileged: false
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - '*'
--------------------------------------------------------------------------------
/chapter9/psp/restricted-vol-psp.yaml:
--------------------------------------------------------------------------------
# the apiVersion line was missing from the original manifest
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted-vol-psp
spec:
  privileged: false
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - 'nfs'
--------------------------------------------------------------------------------
/chapter9/rbac/binding-deployer.yaml:
--------------------------------------------------------------------------------
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: deployer-binding
  namespace: secureapp
subjects:
- kind: User
  name: john.geek
  # User subjects and roleRefs require the RBAC API group;
  # an empty apiGroup is rejected by validation
  apiGroup: rbac.authorization.k8s.io
roleRef:
  kind: Role
  name: deployer
  apiGroup: rbac.authorization.k8s.io
--------------------------------------------------------------------------------
/chapter9/rbac/config-user3445.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: BASE64_ENCODED_CA_CERTIFICATE
    server: https://api.containerized.me
  name: local
contexts:
- context:
    cluster: local
    namespace: secureapp
    user: user3445
  name: user3445-context
current-context: user3445-context
kind: Config
preferences: {}
users:
- name: user3445
  user:
    # the *-data keys take base64-encoded material inline;
    # client-certificate/client-key expect file paths instead
    client-certificate-data: BASE64_ENCODED_CLIENT_CERTIFICATE
    client-key-data: BASE64_ENCODED_CLIENT_KEY
--------------------------------------------------------------------------------
/chapter9/rbac/role-deployer.yaml:
--------------------------------------------------------------------------------
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  namespace: secureapp
  name: deployer
rules:
- apiGroups: ["", "extensions", "apps"]
  resources: ["deployments", "replicasets", "pods"]
  verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
--------------------------------------------------------------------------------
/chapter9/vault/policy.hcl:
--------------------------------------------------------------------------------
path "secret/foo" {
  policy = "write"
}

path "secret/bar/*" {
  capabilities = ["create", "read", "update"]
}
--------------------------------------------------------------------------------
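Loading and inspecting the policy with the Vault CLI; the policy name is arbitrary, and a reachable, unsealed Vault is assumed:

$ vault policy write secureapp chapter9/vault/policy.hcl
$ vault policy read secureapp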