├── .gitignore
├── LICENSE
├── README.md
├── chapter-01
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── group_vars
│   │   └── remote.yaml
│   ├── playbook.yaml
│   ├── test.yaml
│   └── tests
│       ├── basic.bats
│       ├── k3s.bats
│       └── nginx.bats
├── chapter-02
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── extra.yaml
│   ├── files
│   │   ├── container.yaml
│   │   ├── crio-ver
│   │   └── pod.yaml
│   ├── group_vars
│   │   └── remote.yaml
│   ├── playbook.yaml
│   ├── test.yaml
│   └── tests
│       ├── basic.bats
│       ├── containerd.bats
│       └── crio.bats
├── chapter-03
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── co-clim.yaml
│   │   ├── co-mlim.yaml
│   │   ├── co-nolim.yaml
│   │   ├── po-clim.yaml
│   │   ├── po-mlim.yaml
│   │   └── po-nolim.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── servers.yaml
│   ├── playbook.yaml
│   ├── test.yaml
│   └── tests
│       ├── basic.bats
│       ├── crio-cpu-limited.bats
│       ├── crio-mem-limited.bats
│       ├── crio-unlimited.bats
│       └── iperf.bats
├── chapter-04
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── busybox-container.yaml
│   │   ├── busybox-pod.yaml
│   │   ├── busybox.sh
│   │   ├── nginx.sh
│   │   ├── nginx1-container.yaml
│   │   ├── nginx1-pod.yaml
│   │   ├── nginx2-container.yaml
│   │   ├── nginx2-pod.yaml
│   │   ├── stop-busybox.sh
│   │   └── stop-nginx.sh
│   ├── group_vars
│   │   └── remote.yaml
│   ├── playbook.yaml
│   ├── test.yaml
│   └── tests
│       ├── basic.bats
│       ├── busybox.bats
│       ├── netns.bats
│       └── nginx.bats
├── chapter-05
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── busybox-container.yaml
│   │   ├── busybox-pod.yaml
│   │   ├── busybox.sh
│   │   ├── hello
│   │   │   └── Dockerfile
│   │   └── stop-busybox.sh
│   ├── group_vars
│   │   └── remote.yaml
│   ├── playbook.yaml
│   ├── test.yaml
│   └── tests
│       ├── basic.bats
│       ├── build.bats
│       ├── nginx.bats
│       ├── overlay.bats
│       └── registry.bats
├── chapter-06
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── extra.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-07
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── nginx-deploy.yaml
│   │   ├── nginx-pod.yaml
│   │   ├── nginx-scaler.yaml
│   │   ├── sleep-cronjob.yaml
│   │   ├── sleep-job.yaml
│   │   └── sleep-set.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-08
│   ├── README.md
│   ├── calico
│   │   ├── README.md
│   │   ├── Vagrantfile
│   │   ├── ansible.cfg
│   │   ├── aws-setup.yaml
│   │   ├── aws-teardown.yaml
│   │   ├── files
│   │   │   ├── local-pods.yaml
│   │   │   ├── multus-daemonset.yaml
│   │   │   ├── netattach.yaml
│   │   │   ├── pod.yaml
│   │   │   └── two-pods.yaml
│   │   ├── group_vars
│   │   │   ├── remote.yaml
│   │   │   └── vagrant.yaml
│   │   └── playbook.yaml
│   └── weavenet
│       ├── README.md
│       ├── Vagrantfile
│       ├── ansible.cfg
│       ├── aws-setup.yaml
│       ├── aws-teardown.yaml
│       ├── files
│       │   └── two-pods.yaml
│       ├── group_vars
│       │   ├── remote.yaml
│       │   └── vagrant.yaml
│       └── playbook.yaml
├── chapter-09
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── nginx-deploy.yaml
│   │   ├── nginx-ingress.yaml
│   │   ├── nginx-nodeport.yaml
│   │   ├── nginx-service.yaml
│   │   └── pod.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-10
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── crasher-deploy.yaml
│   │   ├── crasher.c
│   │   ├── nginx-selector.yaml
│   │   ├── nginx-typo.yaml
│   │   ├── postgres-fixed.yaml
│   │   ├── postgres-misconfig.yaml
│   │   ├── sleep-multiple.yaml
│   │   └── sleep-sensible.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-11
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── edit-bind.yaml
│   │   ├── pod-reader.yaml
│   │   ├── read-pods-bind.yaml
│   │   ├── read-pods-deploy.yaml
│   │   └── read-pods-sa.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-12
│   ├── README.md
│   ├── containerd
│   │   ├── README.md
│   │   ├── Vagrantfile
│   │   ├── ansible.cfg
│   │   ├── aws-setup.yaml
│   │   ├── aws-teardown.yaml
│   │   ├── files
│   │   │   ├── deploy.yaml
│   │   │   ├── node-evict.yaml
│   │   │   └── pod.yaml
│   │   ├── group_vars
│   │   │   ├── remote.yaml
│   │   │   └── vagrant.yaml
│   │   └── playbook.yaml
│   └── crio
│       ├── README.md
│       ├── Vagrantfile
│       ├── ansible.cfg
│       ├── aws-setup.yaml
│       ├── aws-teardown.yaml
│       ├── files
│       │   ├── deploy.yaml
│       │   ├── node-evict.yaml
│       │   └── pod.yaml
│       ├── group_vars
│       │   ├── remote.yaml
│       │   └── vagrant.yaml
│       └── playbook.yaml
├── chapter-13
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── nginx-404.yaml
│   │   ├── nginx-exec.yaml
│   │   ├── nginx-http.yaml
│   │   ├── nginx-ready.yaml
│   │   └── postgres-tcp.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-14
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── cgroup-info
│   │   ├── edit-bind.yaml
│   │   ├── iperf-limit.yaml
│   │   ├── iperf-server.yaml
│   │   ├── iperf.yaml
│   │   ├── nginx-limit.yaml
│   │   ├── quota.yaml
│   │   └── sleep.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-15
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── index.html
│   │   ├── nginx.yaml
│   │   ├── pgsql-set.yaml
│   │   ├── pv.yaml
│   │   ├── pvc-man.yaml
│   │   ├── pvc-rwx.yaml
│   │   └── pvc.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-16
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── etcd-env
│   │   ├── nginx-cm.yaml
│   │   ├── nginx-deploy.yaml
│   │   ├── pgsql-cm.yaml
│   │   ├── pgsql-ext-cfg.yaml
│   │   ├── pgsql-ext-sec.yaml
│   │   ├── pgsql-secret-2.yaml
│   │   ├── pgsql-secret.yaml
│   │   └── pgsql.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-17
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── crd.yaml
│   │   ├── pgsql.yaml
│   │   ├── sa.yaml
│   │   ├── sample-reader.yaml
│   │   ├── somedata.yaml
│   │   ├── watch.py
│   │   └── watch.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-18
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── add-hw.sh
│   │   ├── hw.yaml
│   │   ├── hw3.yaml
│   │   ├── ipf-client.yaml
│   │   ├── ipf-server.yaml
│   │   └── ipf-svc.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-19
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── best-effort.yaml
│   │   ├── burstable.yaml
│   │   ├── cgroup-info
│   │   ├── essential.yaml
│   │   ├── guaranteed.yaml
│   │   ├── lots.yaml
│   │   ├── needed.yaml
│   │   └── oom-info
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
├── chapter-20
│   ├── README.md
│   ├── Vagrantfile
│   ├── ansible.cfg
│   ├── aws-setup.yaml
│   ├── aws-teardown.yaml
│   ├── files
│   │   ├── api-metrics.sh
│   │   ├── api-server-metrics.sh
│   │   ├── install-kube-prometheus.sh
│   │   ├── rbac.yaml
│   │   └── svc-mon.yaml
│   ├── group_vars
│   │   ├── remote.yaml
│   │   └── vagrant.yaml
│   └── playbook.yaml
└── setup
    ├── README.md
    ├── ansible.cfg
    ├── aws-delete.yaml
    ├── aws-teardown.yaml
    ├── collections
    │   ├── .gitignore
    │   └── requirements.yaml
    ├── ec2-inventory
    │   ├── aws_ec2.yaml
    │   └── inventory
    ├── requirements.txt
    └── roles
        ├── aws-instances
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── tasks
        │   │   ├── delete.yaml
        │   │   ├── main.yaml
        │   │   ├── setup.yaml
        │   │   └── teardown.yaml
        │   └── templates
        │       └── aws-ssh.sh.j2
        ├── containerd
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── handlers
        │   │   └── main.yaml
        │   └── tasks
        │       └── main.yaml
        ├── cri-o
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── files
        │   │   └── 100-crio-bridge.conf
        │   └── tasks
        │       └── main.yaml
        ├── crictl
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── tasks
        │   │   └── main.yaml
        │   └── templates
        │       └── crictl.yaml.j2
        ├── docker-ce
        │   └── tasks
        │       └── main.yaml
        ├── docker-registry
        │   ├── files
        │   │   └── docker-registry.service
        │   └── tasks
        │       └── main.yaml
        ├── haproxy
        │   ├── handlers
        │   │   └── main.yaml
        │   ├── tasks
        │   │   └── main.yaml
        │   └── templates
        │       └── haproxy.cfg.j2
        ├── iperf
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── files
        │   │   └── iperf.service
        │   └── tasks
        │       └── main.yaml
        ├── k3s
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── files
        │   │   └── k3s.service
        │   └── tasks
        │       └── main.yaml
        ├── k8s
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── tasks
        │   │   ├── install.yaml
        │   │   ├── main.yaml
        │   │   └── prep.yaml
        │   └── templates
        │       ├── calico-custom-resources.yaml.j2
        │       ├── flannel.yaml.j2
        │       ├── ingress-patch.yaml.j2
        │       ├── k8s-all.j2
        │       ├── k8s-ver.j2
        │       ├── kube-vip.yaml.j2
        │       ├── kubeadm-init.yaml.j2
        │       ├── kubeadm-join.yaml.j2
        │       ├── postgres
        │       │   ├── operator-service-account-rbac.yaml.j2
        │       │   ├── operatorconfiguration.crd.yaml.j2
        │       │   ├── postgres-operator.yaml.j2
        │       │   └── postgresql-operator-default-configuration.yaml.j2
        │       └── weave.yaml.j2
        ├── keepalived
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── handlers
        │   │   └── main.yaml
        │   ├── tasks
        │   │   └── main.yaml
        │   └── templates
        │       └── keepalived.conf.j2
        ├── test
        │   ├── defaults
        │   │   └── main.yaml
        │   └── tasks
        │       └── main.yaml
        ├── todo
        │   ├── defaults
        │   │   └── main.yaml
        │   ├── tasks
        │   │   └── main.yaml
        │   └── templates
        │       ├── application.yaml.j2
        │       ├── database-deploy.yaml.j2
        │       ├── database-secret.yaml.j2
        │       ├── database-service.yaml.j2
        │       ├── database.yaml.j2
        │       ├── ingress.yaml.j2
        │       ├── pvc.yaml.j2
        │       ├── scaler.yaml.j2
        │       └── service.yaml.j2
        └── tools
            └── tasks
                └── main.yaml
/.gitignore:
--------------------------------------------------------------------------------
1 | debug/
2 | sshkeys/
3 | aws-ssh.sh
4 | .vagrant/
5 | *.log
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright 2021-2022 Alan Hohn
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy
4 | of this software and associated documentation files (the "Software"), to deal
5 | in the Software without restriction, including without limitation the rights
6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 | copies of the Software, and to permit persons to whom the Software is
8 | furnished to do so, subject to the following conditions:
9 |
10 | The above copyright notice and this permission notice shall be included in
11 | all copies or substantial portions of the Software.
12 |
13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 | SOFTWARE.
20 |
--------------------------------------------------------------------------------
/chapter-01/README.md:
--------------------------------------------------------------------------------
1 | # Why Containers Matter
2 |
3 | This folder provides the examples for the chapter "Why Containers Matter".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to the instance and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to the instance and become root by running:
47 |
48 | ```
49 | vagrant ssh
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VM:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-01/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048 }
5 | }
6 | groups = {
7 | "remote" => ["host01"],
8 | "vagrant" => ["host01"]
9 | }
10 |
11 | Vagrant.configure("2") do |config|
12 | config.vm.box = "ubuntu/focal64"
13 | cluster.each do |name, data|
14 | config.vm.define name do |host|
15 | host.vm.hostname = name
16 | host.vm.network "forwarded_port", guest: 80, host: 48080
17 | host.vm.network "private_network", ip: "#{data[:ip]}"
18 | host.vm.provider :virtualbox do |vb, override|
19 | vb.cpus = data[:cpus]
20 | vb.memory = data[:mem]
21 | end
22 | end
23 | end
24 | config.vm.provision "ansible" do |ansible|
25 | ansible.playbook = "playbook.yaml"
26 | ansible.groups = groups
27 | end
28 | config.vm.provision "test", type: "ansible", run: "never" do |ansible|
29 | ansible.playbook = "test.yaml"
30 | ansible.groups = groups
31 | end
32 | end
33 |
--------------------------------------------------------------------------------
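Note that the "test" provisioner above is registered with `run: "never"`, so `vagrant up` skips it. A minimal sketch of running it on demand, using the same `--provision-with` mechanism the chapter 2 README documents for its extra playbook (assuming the VM is already up):

```
vagrant provision --provision-with=test
```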
/chapter-01/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-01/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_instances:
7 | host01:
8 | type: master
9 | ip: 192.168.61.11
10 | instance_type: t3.medium
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-01/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | roles:
8 | - aws-instances
9 |
--------------------------------------------------------------------------------
/chapter-01/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_initial_master: 192.168.61.11
3 | kubeconfig: /etc/rancher/k3s/k3s.yaml
4 | todo_kubeconfig: "{{ kubeconfig }}"
5 | todo_namespace: default
--------------------------------------------------------------------------------
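The `kubeconfig` path here is where K3s writes its admin credentials on install, and the `todo_*` variables point the todo application role at that file and namespace. As a quick check once the playbook has run (on the host, as root, mirroring the chapter's `k3s.bats` test):

```
k3s kubectl get nodes
k3s kubectl get pods
```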
/chapter-01/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | roles:
5 | - tools
6 | - docker-ce
7 | - k3s
8 | - todo
9 | - hosts: aws_ec2
10 | tasks:
11 | - debug:
12 | msg: "Once your Kubernetes cluster is up, you can access the todo application at http://{{ inventory_hostname }}/todo/"
13 | - hosts: vagrant
14 | tasks:
15 | - debug:
16 | msg: "Once your Kubernetes cluster is up, you can access the todo application at http://localhost:48080/todo/"
17 |
--------------------------------------------------------------------------------
/chapter-01/test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | pre_tasks:
5 | - name: install tests
6 | ansible.builtin.copy:
7 | src: tests
8 | dest: /opt
9 | owner: root
10 | group: root
11 | roles:
12 | - test
13 |
--------------------------------------------------------------------------------
/chapter-01/tests/basic.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'docker run rocky' {
11 | run -0 docker run --rm rockylinux:8 /bin/sh -c \
12 | 'cat /etc/os-release; yum install -y procps iproute; ps -ef; ip addr; uname -v'
13 | assert_output --partial 'Rocky Linux'
14 | assert_output --partial 'UID'
15 | assert_output --partial 'inet'
16 | assert_output --partial 'Ubuntu'
17 | }
18 |
19 | @test 'docker run alpine' {
20 | run -0 docker pull alpine:3
21 | run -0 docker run --rm -v /:/host -e hello=world alpine:3 /bin/sh -c \
22 | 'cat /etc/os-release; cat /host/etc/os-release; echo $hello'
23 | assert_output --partial 'Alpine'
24 | assert_output --partial 'Ubuntu'
25 | assert_output --partial 'world'
26 | }
27 |
--------------------------------------------------------------------------------
/chapter-01/tests/k3s.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'run kubectl' {
11 | run -0 k3s kubectl version
12 | assert_output --partial 'Server Version'
13 | run -0 k3s kubectl get nodes
14 | assert_output --partial host01
15 | }
16 |
17 | @test 'check todo' {
18 | run -0 k3s kubectl get pods
19 | assert_output --partial todo-db
20 | run -0 k3s kubectl describe svc todo
21 | assert_output --partial todo
22 | }
--------------------------------------------------------------------------------
/chapter-01/tests/nginx.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'run nginx' {
11 | run -0 docker run -d -p 8080:80 --name nginx nginx
12 | run -0 docker ps
13 | assert_output --partial nginx
14 | run -0 curl http://localhost:8080
15 | assert_output --partial 'Welcome to nginx!'
16 | run -0 ps -ef
17 | assert_output --partial 'nginx'
18 | }
19 |
20 | teardown() {
21 | docker rm -f nginx
22 | }
--------------------------------------------------------------------------------
/chapter-02/README.md:
--------------------------------------------------------------------------------
1 | # Process Isolation
2 |
3 | This folder provides the examples for the chapter "Process Isolation".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
 23 | This chapter has an optional extra playbook that performs some of the chapter's install steps for you, letting you skip them.
24 | You can use it by running:
25 |
26 | ```
27 | ansible-playbook extra.yaml
28 | ```
29 |
30 | You can SSH to the instance and become root by running:
31 |
32 | ```
33 | ./aws-ssh.sh host01
34 | sudo su -
35 | ```
36 |
37 | When finished, don't forget to clean up:
38 |
39 | ```
40 | ansible-playbook aws-teardown.yaml
41 | ```
42 |
43 | ## Running in Vagrant
44 |
45 | To start:
46 |
47 | ```
48 | vagrant up
49 | ```
50 |
51 | This will also run the main Ansible playbook.
52 |
 53 | This chapter has an optional extra playbook that performs some of the chapter's install steps for you, letting you skip them.
54 | You can use it by running:
55 |
56 | ```
57 | vagrant provision --provision-with=extra
58 | ```
59 |
60 | You can SSH to the instance and become root by running:
61 |
62 | ```
63 | vagrant ssh
64 | sudo su -
65 | ```
66 |
67 | When finished, you can clean up the VM:
68 |
69 | ```
70 | vagrant destroy
71 | ```
72 |
--------------------------------------------------------------------------------
/chapter-02/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048 }
5 | }
6 | groups = {
7 | "remote" => ["host01"]
8 | }
9 |
10 | Vagrant.configure("2") do |config|
11 | config.vm.box = "ubuntu/focal64"
12 | cluster.each do |name, data|
13 | config.vm.define name do |host|
14 | host.vm.hostname = name
15 | host.vm.network "private_network", ip: "#{data[:ip]}"
16 | host.vm.provider :virtualbox do |vb, override|
17 | vb.cpus = data[:cpus]
18 | vb.memory = data[:mem]
19 | end
20 | end
21 | end
22 | config.vm.provision "ansible" do |ansible|
23 | ansible.playbook = "playbook.yaml"
24 | ansible.groups = groups
25 | end
26 | config.vm.provision "extra", type: "ansible", run: "never" do |ansible|
27 | ansible.playbook = "extra.yaml"
28 | ansible.groups = groups
29 | end
30 | config.vm.provision "test", type: "ansible", run: "never" do |ansible|
31 | ansible.playbook = "test.yaml"
32 | ansible.groups = groups
33 | end
34 | end
35 |
--------------------------------------------------------------------------------
/chapter-02/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-02/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_instances:
7 | host01:
8 | type: master
9 | ip: 192.168.61.11
10 | instance_type: t3.medium
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-02/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | roles:
8 | - aws-instances
9 |
--------------------------------------------------------------------------------
/chapter-02/extra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | roles:
5 | - containerd
6 | - cri-o
7 | - crictl
8 |
--------------------------------------------------------------------------------
/chapter-02/files/container.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: busybox
4 | image:
5 | image: docker.io/library/busybox:latest
6 | args:
7 | - "/bin/sleep"
8 | - "36000"
9 |
--------------------------------------------------------------------------------
/chapter-02/files/crio-ver:
--------------------------------------------------------------------------------
1 | export VERSION=1.20
2 | export OS=xUbuntu_20.04
3 | export ROOT=https://download.opensuse.org/repositories
4 | export REPO=$ROOT/devel:/kubic:/libcontainers:/stable
5 |
6 | export CRICTL_URL=https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.20.0/crictl-v1.20.0-linux-amd64.tar.gz
7 |
--------------------------------------------------------------------------------
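These variables feed the CRI-O install steps in the chapter text. A hedged sketch of how they would be consumed after the playbook copies the file to `/opt` (the exact apt repository lines come from the chapter, so treat the URL composition as illustrative):

```
source /opt/crio-ver
echo "$REPO/$OS/"      # base of the apt repository for CRI-O $VERSION
echo "$CRICTL_URL"     # matching crictl release tarball
```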
/chapter-02/files/pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: busybox
4 | namespace: crio
5 | linux:
6 | security_context:
7 | namespace_options:
8 | network: 2
9 |
--------------------------------------------------------------------------------
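Together, `pod.yaml` and `container.yaml` are the two specs `crictl` needs: first a pod sandbox, then a container inside it. A minimal run-through, the same sequence the chapter's `crio.bats` test scripts (assuming the files have been copied to `/opt` as the playbook does):

```
crictl pull docker.io/library/busybox:latest
POD_ID=$(crictl runp /opt/pod.yaml)
CONTAINER_ID=$(crictl create $POD_ID /opt/container.yaml /opt/pod.yaml)
crictl start $CONTAINER_ID
crictl ps
```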
/chapter-02/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crictl_container_engine: crio
3 |
--------------------------------------------------------------------------------
/chapter-02/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | roles:
5 | - tools
6 | - hosts: remote
7 | become: yes
8 | tasks:
9 | - name: files
10 | ansible.builtin.copy:
11 | src: "{{ item }}"
12 | dest: /opt/{{ item }}
13 | owner: root
14 | group: root
15 | mode: '0644'
16 | with_list:
17 | - container.yaml
18 | - crio-ver
19 | - pod.yaml
20 |
--------------------------------------------------------------------------------
/chapter-02/test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | pre_tasks:
5 | - name: install tests
6 | ansible.builtin.copy:
7 | src: tests
8 | dest: /opt
9 | owner: root
10 | group: root
11 | roles:
12 | - test
13 |
--------------------------------------------------------------------------------
/chapter-02/tests/basic.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'available commands' {
11 | run -0 which lsns
12 | run -0 which unshare
13 | }
14 |
--------------------------------------------------------------------------------
/chapter-02/tests/containerd.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'containerd busybox' {
11 | run -0 ctr image pull docker.io/library/busybox:latest
12 | run -0 ctr images ls
13 | assert_output --partial docker.io/library/busybox:latest
14 | run -0 ctr run --rm docker.io/library/busybox:latest v1 /bin/sh -c \
15 | 'ip a; ps'
16 | assert_output --partial 'inet'
17 | assert_output --partial 'PID'
18 | }
19 |
--------------------------------------------------------------------------------
/chapter-02/tests/crio.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'crio container' {
11 | run -0 crictl pull docker.io/library/busybox:latest
12 | POD_ID=$(crictl runp /opt/pod.yaml)
13 | CONTAINER_ID=$(crictl create $POD_ID /opt/container.yaml /opt/pod.yaml)
14 | run -0 crictl start $CONTAINER_ID
15 | run -0 crictl ps
16 | assert_output --partial Running
17 | run -0 crictl exec $CONTAINER_ID /bin/sh -c \
18 | 'ip a; ps'
19 | assert_output --partial 'inet'
20 | assert_output --partial 'PID'
21 | }
22 |
23 | teardown() {
24 | crictl rm -a -f
25 | crictl rmp -a -f
26 | }
27 |
--------------------------------------------------------------------------------
/chapter-03/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048 },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 1, :mem => 1024 }
6 | }
7 | groups = {
8 | "remote" => ["host01","host02"],
9 | "clients" => ["host01"],
10 | "servers" => ["host02"]
11 | }
12 |
13 | Vagrant.configure("2") do |config|
14 | config.vm.box = "ubuntu/focal64"
15 | cluster.each do |name, data|
16 | config.vm.define name do |host|
17 | host.vm.hostname = name
18 | host.vm.network "private_network", ip: "#{data[:ip]}"
19 | host.vm.provider :virtualbox do |vb, override|
20 | vb.cpus = data[:cpus]
21 | vb.memory = data[:mem]
22 | end
23 | end
24 | end
25 | config.vm.provision "ansible" do |ansible|
26 | ansible.playbook = "playbook.yaml"
27 | ansible.groups = groups
28 | end
29 | config.vm.provision "test", type: "ansible", run: "never" do |ansible|
30 | ansible.playbook = "test.yaml"
31 | ansible.groups = groups
32 | end
33 | end
34 |
--------------------------------------------------------------------------------
/chapter-03/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-03/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_instances:
7 | host01:
8 | type: client
9 | ip: 192.168.61.11
10 | instance_type: t3.medium
11 | host02:
12 | type: server
13 | ip: 192.168.61.12
14 | instance_type: t3.small
15 | roles:
16 | - aws-instances
17 |
--------------------------------------------------------------------------------
/chapter-03/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | roles:
8 | - aws-instances
9 |
--------------------------------------------------------------------------------
/chapter-03/files/co-clim.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: stress1
4 | image:
5 | image: docker.io/bookofkubernetes/stress:stable
6 | args:
7 | - "--cpu"
8 | - "1"
9 | - "-v"
10 | linux:
11 | resources:
12 | cpu_period: 100000
13 | cpu_quota: 10000
14 |
--------------------------------------------------------------------------------
/chapter-03/files/co-mlim.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: stress2
4 | image:
5 | image: docker.io/bookofkubernetes/stress:stable
6 | args:
7 | - "--vm"
8 | - "1"
9 | - "--vm-bytes"
10 | - "512M"
11 | - "-v"
12 | linux:
13 | resources:
14 | memory_limit_in_bytes: 268435456
15 | cpu_period: 100000
16 | cpu_quota: 10000
17 |
--------------------------------------------------------------------------------
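The `memory_limit_in_bytes` of 268435456 is 256MiB, while the container asks `stress` to allocate 512MB, so the kernel's OOM killer should terminate the worker; `crio-mem-limited.bats` below waits for exactly that. A quick sanity check of the arithmetic and the expected kernel log entry:

```
echo $((256 * 1024 * 1024))   # 268435456, the memory_limit_in_bytes above
dmesg | grep -i oom           # expect oom_reaper messages once stress is killed
```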
/chapter-03/files/co-nolim.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: stress
4 | image:
5 | image: docker.io/bookofkubernetes/stress:stable
6 | args:
7 | - "--cpu"
8 | - "1"
9 | - "-v"
10 |
--------------------------------------------------------------------------------
/chapter-03/files/po-clim.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: stress1
4 | namespace: crio
5 | linux:
6 | cgroup_parent: pod.slice
7 | security_context:
8 | namespace_options:
9 | network: 2
--------------------------------------------------------------------------------
/chapter-03/files/po-mlim.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: stress2
4 | namespace: crio
5 | linux:
6 | cgroup_parent: pod.slice
7 | security_context:
8 | namespace_options:
9 | network: 2
10 |
--------------------------------------------------------------------------------
/chapter-03/files/po-nolim.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: stress
4 | namespace: crio
5 | linux:
6 | security_context:
7 | namespace_options:
8 | network: 2
9 |
--------------------------------------------------------------------------------
/chapter-03/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crictl_container_engine: crio
3 |
--------------------------------------------------------------------------------
/chapter-03/group_vars/servers.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | iperf_server: true
3 |
--------------------------------------------------------------------------------
/chapter-03/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | roles:
5 | - tools
6 | - iperf
7 | - hosts: clients
8 | become: yes
9 | roles:
10 | - cri-o
11 | - crictl
12 | - hosts: remote
13 | become: yes
14 | tasks:
15 | - name: files
16 | ansible.builtin.copy:
17 | src: "{{ item }}"
18 | dest: /opt/{{ item }}
19 | owner: root
20 | group: root
21 | mode: '0644'
22 | loop:
23 | - co-clim.yaml
24 | - co-nolim.yaml
25 | - co-mlim.yaml
26 | - po-clim.yaml
27 | - po-nolim.yaml
28 | - po-mlim.yaml
29 |
--------------------------------------------------------------------------------
/chapter-03/test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | pre_tasks:
5 | - name: install tests
6 | ansible.builtin.copy:
7 | src: tests
8 | dest: /opt
9 | owner: root
10 | group: root
11 | roles:
12 | - test
13 |
--------------------------------------------------------------------------------
/chapter-03/tests/basic.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'available commands' {
11 | run -0 which iperf3
12 | }
13 |
--------------------------------------------------------------------------------
/chapter-03/tests/crio-cpu-limited.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'crio container cpu limited' {
11 | run -0 crictl pull docker.io/bookofkubernetes/stress:stable
12 | PCL_ID=$(crictl runp /opt/po-clim.yaml)
13 | CCL_ID=$(crictl create $PCL_ID /opt/co-clim.yaml /opt/po-clim.yaml)
14 | run -0 crictl start $CCL_ID
15 | run -0 crictl ps
16 | assert_output --partial stress
17 | assert_output --partial Running
18 | run -0 /bin/sh -ec "\
19 | cd /sys/fs/cgroup/cpu/pod.slice/crio-${CCL_ID}.scope
20 | cat cpu.cfs_quota_us"
21 | assert_output --partial '10000'
22 | }
23 |
24 | teardown() {
25 | crictl rm -a -f
26 | crictl rmp -a -f
27 | }
28 |
--------------------------------------------------------------------------------
/chapter-03/tests/crio-mem-limited.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'crio container memory limited' {
11 | run -0 crictl pull docker.io/bookofkubernetes/stress:stable
12 | PML_ID=$(crictl runp /opt/po-mlim.yaml)
13 | CML_ID=$(crictl create $PML_ID /opt/co-mlim.yaml /opt/po-mlim.yaml)
14 | run -0 crictl start $CML_ID
15 | run -0 crictl ps
16 | assert_output --partial stress
17 | assert_output --partial Running
18 | retry -d 1 -t 30 -- /bin/bash -c "crictl logs $CML_ID |& grep -q OOM"
19 | run -0 crictl logs $CML_ID
20 | assert_output --partial 'SIGKILL'
21 | assert_output --partial 'OOM killer'
22 | run -0 dmesg
23 | assert_output --partial 'oom_reaper'
24 | }
25 |
26 | teardown() {
27 | crictl rm -a -f
28 | crictl rmp -a -f
29 | }
30 |
--------------------------------------------------------------------------------
/chapter-03/tests/crio-unlimited.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'crio container manually limited' {
11 | run -0 crictl pull docker.io/bookofkubernetes/stress:stable
12 | PUL_ID=$(crictl runp /opt/po-nolim.yaml)
13 | CUL_ID=$(crictl create $PUL_ID /opt/co-nolim.yaml /opt/po-nolim.yaml)
14 | run -0 crictl start $CUL_ID
15 | run -0 crictl ps
16 | assert_output --partial stress
17 | assert_output --partial Running
18 | STRESS_PIDS=$(pgrep -d , stress)
19 | run -0 top -b -n 1 -p "${STRESS_PIDS}"
20 | assert_output --partial 'stress'
21 | run -0 renice -n 19 -p $(pgrep -d ' ' stress)
22 | run -0 /bin/sh -ec "\
23 | cd /sys/fs/cgroup/cpu/system.slice/runc-${CUL_ID}.scope
24 | cat cgroup.procs
25 | cat cpu.cfs_quota_us
26 | echo '50000' > cpu.cfs_quota_us"
27 | assert_output --partial '-1'
28 | run -0 top -b -n 1 -p "${STRESS_PIDS}"
29 | assert_output --partial 'stress'
30 | }
31 |
32 | teardown() {
33 | crictl rm -a -f
34 | crictl rmp -a -f
35 | }
36 |
--------------------------------------------------------------------------------
/chapter-03/tests/iperf.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'iperf server listening' {
11 | iperf3 -c 192.168.61.12
12 | }
13 |
--------------------------------------------------------------------------------
/chapter-04/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048 },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 1, :mem => 1024 }
6 | }
7 | last = "host02"
8 | groups = {
9 | "remote" => ["host01","host02"]
10 | }
11 |
12 | Vagrant.configure("2") do |config|
13 | config.vm.box = "ubuntu/focal64"
14 | cluster.each do |name, data|
15 | config.vm.define name do |host|
16 | host.vm.hostname = name
17 | host.vm.network "private_network", ip: "#{data[:ip]}"
18 | host.vm.provider :virtualbox do |vb, override|
19 | vb.cpus = data[:cpus]
20 | vb.memory = data[:mem]
21 | end
22 | # Provision all hosts at once for efficiency
23 | if name == last
24 | host.vm.provision :ansible do |ansible|
25 | ansible.limit = "all"
26 | ansible.playbook = "playbook.yaml"
27 | ansible.groups = groups
28 | end
29 | host.vm.provision "test", type: "ansible", run: "never" do |ansible|
30 | ansible.limit = "all"
31 | ansible.playbook = "test.yaml"
32 | ansible.groups = groups
33 | end
34 | end
35 | end
36 | end
37 | end
38 |
--------------------------------------------------------------------------------
/chapter-04/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-04/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_instances:
7 | host01:
8 | type: master
9 | ip: 192.168.61.11
10 | instance_type: t3.medium
11 | host02:
12 | type: master
13 | ip: 192.168.61.12
14 | instance_type: t3.micro
15 | roles:
16 | - aws-instances
17 |
--------------------------------------------------------------------------------
/chapter-04/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | roles:
8 | - aws-instances
9 |
--------------------------------------------------------------------------------
/chapter-04/files/busybox-container.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: busybox
4 | image:
5 | image: docker.io/library/busybox:latest
6 | args:
7 | - "/bin/sleep"
8 | - "36000"
9 |
--------------------------------------------------------------------------------
/chapter-04/files/busybox-pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: busybox
4 | namespace: crio
5 |
--------------------------------------------------------------------------------
/chapter-04/files/busybox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | crictl pull docker.io/library/busybox:latest
3 | B1P_ID=$(crictl runp busybox-pod.yaml)
4 | B1C_ID=$(crictl create $B1P_ID busybox-container.yaml busybox-pod.yaml)
5 | crictl start $B1C_ID
6 |
7 | export B1P_ID B1C_ID
8 |
--------------------------------------------------------------------------------
/chapter-04/files/nginx.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | crictl pull docker.io/library/nginx:latest
3 | N1P_ID=$(crictl runp nginx1-pod.yaml)
4 | N1C_ID=$(crictl create $N1P_ID nginx1-container.yaml nginx1-pod.yaml)
5 | crictl start $N1C_ID
6 |
7 | N2P_ID=$(crictl runp nginx2-pod.yaml)
8 | N2C_ID=$(crictl create $N2P_ID nginx2-container.yaml nginx2-pod.yaml)
9 | crictl start $N2C_ID
10 |
11 | export N1P_ID N1C_ID N2P_ID N2C_ID
12 |
--------------------------------------------------------------------------------
/chapter-04/files/nginx1-container.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: nginx1
4 | image:
5 | image: docker.io/library/nginx:latest
6 |
--------------------------------------------------------------------------------
/chapter-04/files/nginx1-pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: nginx1
4 | namespace: crio
5 |
--------------------------------------------------------------------------------
/chapter-04/files/nginx2-container.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: nginx2
4 | image:
5 | image: docker.io/library/nginx:latest
6 |
--------------------------------------------------------------------------------
/chapter-04/files/nginx2-pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: nginx2
4 | namespace: crio
5 |
--------------------------------------------------------------------------------
/chapter-04/files/stop-busybox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | crictl stop $B1C_ID
3 | crictl rm $B1C_ID
4 | crictl stopp $B1P_ID
5 | crictl rmp $B1P_ID
6 |
--------------------------------------------------------------------------------
/chapter-04/files/stop-nginx.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | crictl stop $N1C_ID $N2C_ID
3 | crictl rm $N1C_ID $N2C_ID
4 | crictl stopp $N1P_ID $N2P_ID
5 | crictl rmp $N1P_ID $N2P_ID
6 |
--------------------------------------------------------------------------------
/chapter-04/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crio_cni: true
3 | crictl_container_engine: crio
4 |
--------------------------------------------------------------------------------
/chapter-04/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | roles:
5 | - tools
6 | - cri-o
7 | - crictl
8 | - hosts: remote
9 | become: yes
10 | tasks:
11 | - name: bridge utils
12 | ansible.builtin.apt:
13 | name: bridge-utils
14 | update_cache: yes
15 | - name: files
16 | ansible.builtin.copy:
17 | src: "{{ item }}"
18 | dest: /opt/{{ item }}
19 | owner: root
20 | group: root
21 | mode: '0644'
22 | with_list:
23 | - nginx1-pod.yaml
24 | - nginx1-container.yaml
25 | - nginx2-pod.yaml
26 | - nginx2-container.yaml
27 | - busybox-pod.yaml
28 | - busybox-container.yaml
29 | - name: scripts
30 | ansible.builtin.copy:
31 | src: "{{ item }}"
32 | dest: /opt/{{ item }}
33 | owner: root
34 | group: root
35 | mode: '0755'
36 | with_list:
37 | - nginx.sh
38 | - stop-nginx.sh
39 | - busybox.sh
40 | - stop-busybox.sh
41 |
--------------------------------------------------------------------------------
/chapter-04/test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | pre_tasks:
5 | - name: install tests
6 | ansible.builtin.copy:
7 | src: tests
8 | dest: /opt
9 | owner: root
10 | group: root
11 | roles:
12 | - test
13 |
--------------------------------------------------------------------------------
/chapter-04/tests/basic.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'available commands' {
11 | run -0 which lsns
12 | run -0 which brctl
13 | }
14 |
--------------------------------------------------------------------------------
/chapter-04/tests/busybox.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'busybox container' {
11 | run -0 /bin/bash -ec '\
12 | cd /opt
13 | source busybox.sh
14 | crictl ps
15 | crictl exec $B1C_ID /bin/sh -c "ip addr"
16 | crictl exec $B1C_ID /bin/sh -c "ping -c 1 192.168.61.11"
17 | crictl exec $B1C_ID /bin/sh -c "ip route"
18 | JQ_PATH=".info.runtimeSpec.linux.namespaces[]|select(.type==\"network\").path"
19 | NETNS_PATH=$(crictl inspectp $B1P_ID | jq -r $JQ_PATH)
20 | echo $NETNS_PATH
21 | NETNS=$(basename $NETNS_PATH)
22 | ip netns exec $NETNS ip addr'
23 | assert_output --partial 'busybox'
24 | assert_output --partial 'inet 10.85.0'
25 | assert_output --partial '64 bytes from 192.168.61.11'
26 | assert_output --partial 'default via 10.85.0.1'
27 | assert_output --partial '/var/run/netns'
28 | run -0 lsns -t net
29 | assert_output --partial '/pause'
30 | }
31 |
32 | teardown() {
33 | crictl rm -a -f
34 | crictl rmp -a -f
35 | }
36 |
--------------------------------------------------------------------------------
/chapter-04/tests/nginx.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'nginx containers' {
11 | run -0 /bin/bash -ec '\
12 | cd /opt
13 | source nginx.sh
14 | crictl ps
15 | crictl exec $N1C_ID cat /proc/net/tcp
16 | crictl exec $N2C_ID cat /proc/net/tcp'
17 | assert_output --partial 'nginx1'
18 | assert_output --partial 'nginx2'
19 | assert_output --partial '0050'
20 | }
21 |
22 | teardown() {
23 | crictl rm -a -f
24 | crictl rmp -a -f
25 | }
26 |
--------------------------------------------------------------------------------
/chapter-05/README.md:
--------------------------------------------------------------------------------
1 | # Container Images and Runtime Layers
2 |
3 | This folder provides the examples for the chapter "Container Images and Runtime Layers".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VM:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-05/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048 }
5 | }
6 | last = "host01"
7 | groups = {
8 | "remote" => ["host01"]
9 | }
10 |
11 | Vagrant.configure("2") do |config|
12 | config.vm.box = "ubuntu/focal64"
13 | cluster.each do |name, data|
14 | config.vm.define name do |host|
15 | host.vm.hostname = name
16 | host.vm.network "private_network", ip: "#{data[:ip]}"
17 | host.vm.provider :virtualbox do |vb, override|
18 | vb.cpus = data[:cpus]
19 | vb.memory = data[:mem]
20 | end
21 | # Provision all hosts at once for efficiency
22 | if name == last
23 | host.vm.provision :ansible do |ansible|
24 | ansible.limit = "all"
25 | ansible.playbook = "playbook.yaml"
26 | ansible.groups = groups
27 | end
28 | host.vm.provision "test", type: "ansible", run: "never" do |ansible|
29 | ansible.limit = "all"
30 | ansible.playbook = "test.yaml"
31 | ansible.groups = groups
32 | end
33 | end
34 | end
35 | end
36 | end
37 |
--------------------------------------------------------------------------------
/chapter-05/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-05/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_instances:
7 | host01:
8 | type: master
9 | ip: 192.168.61.11
10 | instance_type: t3.medium
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-05/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | roles:
8 | - aws-instances
9 |
--------------------------------------------------------------------------------
/chapter-05/files/busybox-container.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: busybox
4 | image:
5 | image: registry.local/busybox:latest
6 | args:
7 | - "/bin/sleep"
8 | - "36000"
9 |
--------------------------------------------------------------------------------
/chapter-05/files/busybox-pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metadata:
3 | name: busybox
4 | namespace: crio
5 | linux:
6 | security_context:
7 | namespace_options:
8 | network: 2
9 |
--------------------------------------------------------------------------------
/chapter-05/files/busybox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | crictl pull docker.io/library/busybox:latest
3 | B1P_ID=$(crictl runp busybox-pod.yaml)
4 | B1C_ID=$(crictl create $B1P_ID busybox-container.yaml busybox-pod.yaml)
5 | crictl start $B1C_ID
6 |
7 | export B1P_ID B1C_ID
8 |
--------------------------------------------------------------------------------
/chapter-05/files/hello/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx
2 |
3 | # Add index.html
 4 | RUN echo "Hello World!" \
5 | >/usr/share/nginx/html/index.html
6 |
--------------------------------------------------------------------------------
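This image simply replaces the default NGINX index page. Building and testing it follows the usual Docker flow, the same steps the chapter's `build.bats` test runs (host port 8080 is an arbitrary choice):

```
cd /opt/hello
docker build -t hello .
docker run --name hello -d -p 8080:80 hello
curl http://localhost:8080/   # expect: Hello World!
```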
/chapter-05/files/stop-busybox.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | crictl stop $B1C_ID
3 | crictl rm $B1C_ID
4 | crictl stopp $B1P_ID
5 | crictl rmp $B1P_ID
6 |
--------------------------------------------------------------------------------
/chapter-05/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crio_cni: true
3 | crio_skopeo: true
4 | crictl_container_engine: crio
5 |
--------------------------------------------------------------------------------
/chapter-05/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | roles:
5 | - tools
6 | - docker-ce
7 | - docker-registry
8 | - cri-o
9 | - crictl
10 | - hosts: remote
11 | become: yes
12 | tasks:
13 | - name: files
14 | ansible.builtin.copy:
15 | src: "{{ item }}"
16 | dest: /opt
17 | owner: root
18 | group: root
19 | mode: '0644'
20 | directory_mode: '0755'
21 | with_list:
22 | - busybox-container.yaml
23 | - busybox-pod.yaml
24 | - hello
25 | - name: scripts
26 | ansible.builtin.copy:
27 | src: "{{ item }}"
28 | dest: /opt
29 | owner: root
30 | group: root
31 | mode: '0755'
32 | with_list:
33 | - busybox.sh
34 | - stop-busybox.sh
35 |
--------------------------------------------------------------------------------
/chapter-05/test.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: remote
3 | become: yes
4 | pre_tasks:
5 | - name: install tests
6 | ansible.builtin.copy:
7 | src: tests
8 | dest: /opt
9 | owner: root
10 | group: root
11 | roles:
12 | - test
13 |
--------------------------------------------------------------------------------
/chapter-05/tests/basic.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'available commands' {
11 | run -0 which docker
12 | run -0 which skopeo
13 | }
14 |
--------------------------------------------------------------------------------
/chapter-05/tests/build.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'build docker image' {
11 | run -0 /bin/bash -c '\
12 | cd /opt/hello
13 | docker build -t hello .'
14 | run -0 docker images
15 | assert_output --partial 'hello'
16 | docker run --name hello -d -p 8080:80 hello
17 | retry -d 1 -t 30 -- curl http://localhost:8080/
18 | run -0 curl http://localhost:8080/
19 | assert_output --partial 'Hello World!'
20 | }
21 |
22 | teardown() {
23 | docker rm --force hello
24 | docker rmi --force hello
25 | }
26 |
--------------------------------------------------------------------------------
/chapter-05/tests/nginx.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'nginx' {
11 | docker pull nginx
12 | run -0 docker images
13 | assert_output --partial 'nginx'
14 | docker run --name nginx -d nginx
15 | run -0 docker exec nginx /bin/sh -c 'ldd $(which nginx)'
16 | assert_output --partial 'libc'
17 | JQ_QUERY='.[0].SizeRw'
18 | run -0 /bin/bash -c "docker inspect -s nginx | jq ${JQ_QUERY}"
19 | assert_output --regexp '^[0-9]+$'
20 | JQ_QUERY='.[0].GraphDriver.Data.MergedDir'
21 | ROOT=$(docker inspect nginx | jq -r "${JQ_QUERY}")
22 | ls ${ROOT}
23 | run -0 mount
24 | assert_output --partial 'merged'
25 | }
26 |
27 | teardown() {
28 | docker rm --force nginx
29 | }
30 |
--------------------------------------------------------------------------------
/chapter-05/tests/overlay.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'overlay filesystem' {
11 | mkdir /tmp/{lower,upper,work,mount}
12 | echo "hello1" > /tmp/lower/hello1
13 | echo "hello2" > /tmp/upper/hello2
14 | mount -t overlay -o rw,lowerdir=/tmp/lower,upperdir=/tmp/upper,workdir=/tmp/work overlay /tmp/mount
15 | run -0 cat /tmp/mount/hello1
16 | assert_output --partial 'hello1'
17 | run -0 cat /tmp/mount/hello2
18 | assert_output --partial 'hello2'
19 | echo "hello3" > /tmp/mount/hello3
20 | run -0 ls /tmp/lower
21 | refute_output --partial 'hello3'
22 | run -0 ls /tmp/upper
23 | assert_output --partial 'hello3'
24 | }
25 |
26 | teardown() {
27 | umount -f /tmp/mount
28 | rm -fr /tmp/{lower,upper,work,mount}
29 | }
30 |
--------------------------------------------------------------------------------
/chapter-05/tests/registry.bats:
--------------------------------------------------------------------------------
1 | # bats file_tags=host01
2 |
3 | setup() {
4 | BATS_LIB_PATH=/usr/local/lib/node_modules
5 | bats_load_library bats-support
6 | bats_load_library bats-assert
7 | bats_require_minimum_version 1.5.0
8 | }
9 |
10 | @test 'local registry' {
11 | docker pull busybox
12 | docker tag busybox registry.local/busybox
13 | docker push registry.local/busybox
14 | docker pull registry.local/busybox
15 | }
16 |
17 | teardown() {
18 | docker rmi --force busybox
19 | docker rmi --force registry.local/busybox
20 | }
21 |
--------------------------------------------------------------------------------
/chapter-06/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-06/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | host04:
29 | type: node
30 | ip: 192.168.61.14
31 | instance_type: t3.small
32 | roles:
33 | - aws-instances
34 |
--------------------------------------------------------------------------------
/chapter-06/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-06/extra.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters:nodes
3 | become: yes
4 | roles:
5 | - k8s
6 |
--------------------------------------------------------------------------------
/chapter-06/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 | - 192.168.61.14
10 |
11 | k8s_allow_scheduling_masters: true
12 |
13 | # NOTE: In a production system, you should keep these in an encrypted store
14 | # such as Ansible Vault.
15 | k8s_join_token: 1d8fb1.2875d52d62a3282d
16 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
17 |
--------------------------------------------------------------------------------
/chapter-06/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-06/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters:nodes
3 | become: yes
4 | vars:
5 | k8s_install: false
6 | roles:
7 | - tools
8 | - containerd
9 | - crictl
10 | - k8s
11 |
--------------------------------------------------------------------------------
/chapter-07/README.md:
--------------------------------------------------------------------------------
1 | # Deploying Containers to Kubernetes
2 |
3 | This folder provides the examples for the chapter "Deploying Containers to Kubernetes".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
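59 | ## Using the Example Files
60 | 
61 | The main playbook copies this chapter's manifests to `/opt` on each host.
62 | As an illustrative smoke test, assuming `kubectl` is configured for the
63 | root user, you can create the example Deployment and watch its pods start:
64 | 
65 | ```
66 | kubectl apply -f /opt/nginx-deploy.yaml
67 | kubectl get pods -o wide
68 | ```
69 | 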
--------------------------------------------------------------------------------
/chapter-07/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-07/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-07/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-07/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-07/files/nginx-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 | resources:
20 | requests:
21 | cpu: "100m"
22 |
23 |
--------------------------------------------------------------------------------
/chapter-07/files/nginx-pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: nginx
6 | spec:
7 | containers:
8 | - name: nginx
9 | image: nginx
10 |
--------------------------------------------------------------------------------
/chapter-07/files/nginx-scaler.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: autoscaling/v2
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 | name: nginx
6 | labels:
7 | app: nginx
8 | spec:
9 | scaleTargetRef:
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | name: nginx
13 | minReplicas: 1
14 | maxReplicas: 10
15 | metrics:
16 | - type: Resource
17 | resource:
18 | name: cpu
19 | target:
20 | type: Utilization
21 | averageUtilization: 50
22 |
--------------------------------------------------------------------------------
/chapter-07/files/sleep-cronjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: sleep
5 | spec:
6 | schedule: "0 3 * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: sleep
13 | image: busybox
14 | command:
15 | - "/bin/sleep"
16 | - "30"
17 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/chapter-07/files/sleep-job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: sleep
5 | spec:
6 | template:
7 | spec:
8 | containers:
9 | - name: sleep
10 | image: busybox
11 | command:
12 | - "/bin/sleep"
13 | - "30"
14 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/chapter-07/files/sleep-set.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: sleep
5 | spec:
6 | clusterIP: None
7 | selector:
8 | app: sleep
9 | ---
10 | apiVersion: apps/v1
11 | kind: StatefulSet
12 | metadata:
13 | name: sleep
14 | spec:
15 | serviceName: sleep
16 | replicas: 2
17 | selector:
18 | matchLabels:
19 | app: sleep
20 | template:
21 | metadata:
22 | labels:
23 | app: sleep
24 | spec:
25 | containers:
26 | - name: sleep
27 | image: busybox
28 | command:
29 | - "/bin/sleep"
30 | - "3600"
31 | volumeMounts:
32 | - name: sleep-volume
33 | mountPath: /storagedir
34 | volumeClaimTemplates:
35 | - metadata:
36 | name: sleep-volume
37 | spec:
38 | storageClassName: longhorn
39 | accessModes:
40 | - ReadWriteOnce
41 | resources:
42 | requests:
43 | storage: 100Mi
44 |
--------------------------------------------------------------------------------
/chapter-07/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-07/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-07/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - nginx-pod.yaml
21 | - nginx-deploy.yaml
22 | - nginx-scaler.yaml
23 | - sleep-job.yaml
24 | - sleep-cronjob.yaml
25 | - sleep-set.yaml
26 |
--------------------------------------------------------------------------------
/chapter-08/README.md:
--------------------------------------------------------------------------------
1 | # Overlay Networks
2 |
3 | This folder provides the examples for the chapter "Overlay Networks".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Organization
10 |
11 | This chapter's examples are divided into two directories, `calico` and
12 | `weavenet`. Each directory contains a full example along with a `README.md`
13 | file with instructions.
14 |
--------------------------------------------------------------------------------
/chapter-08/calico/README.md:
--------------------------------------------------------------------------------
1 | # Overlay Networks - Calico
2 |
3 | This folder provides the Calico examples for the chapter "Overlay Networks".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
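59 | ## Using the Example Files
60 | 
61 | The main playbook copies this chapter's manifests to `/opt` on each host.
62 | As an illustrative check, assuming `kubectl` is configured for the root
63 | user, you can start the two test pods (pinned to `host01` and `host02`)
64 | and note the addresses they receive:
65 | 
66 | ```
67 | kubectl apply -f /opt/two-pods.yaml
68 | kubectl get pods -o wide
69 | ```
70 | 
71 | Substituting the reported address of `pod2`, a cross-node ping such as
72 | `kubectl exec pod1 -- ping -c 3 <pod2-ip>` should succeed once both pods
73 | are running.
74 | 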
--------------------------------------------------------------------------------
/chapter-08/calico/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-08/calico/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../../setup/collections
3 | inventory = ../../setup/ec2-inventory
4 | roles_path = ../../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../../sshkeys/known_hosts_aws_ansible_k8s"
8 |
--------------------------------------------------------------------------------
/chapter-08/calico/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | aws_k8s_ssh_dir: "../../sshkeys"
6 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
7 | aws_lb:
8 | ip: 192.168.61.10
9 | ports:
10 | - name: ingress
11 | source: 80
12 | target: 80
13 | - name: api
14 | source: 6443
15 | target: 6443
16 | aws_instances:
17 | host01:
18 | type: master
19 | ip: 192.168.61.11
20 | instance_type: t3.medium
21 | host02:
22 | type: master
23 | ip: 192.168.61.12
24 | instance_type: t3.medium
25 | host03:
26 | type: master
27 | ip: 192.168.61.13
28 | instance_type: t3.medium
29 | roles:
30 | - aws-instances
31 |
--------------------------------------------------------------------------------
/chapter-08/calico/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-08/calico/files/local-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod1
5 | annotations:
6 | k8s.v1.cni.cncf.io/networks: macvlan-conf
7 | spec:
8 | containers:
9 | - name: pod1
10 | image: busybox
11 | command:
12 | - "sleep"
13 | - "infinity"
14 | nodeName: host01
15 | ---
16 | apiVersion: v1
17 | kind: Pod
18 | metadata:
19 | name: pod2
20 | annotations:
21 | k8s.v1.cni.cncf.io/networks: macvlan-conf
22 | spec:
23 | containers:
24 | - name: pod2
25 | image: busybox
26 | command:
27 | - "sleep"
28 | - "infinity"
29 | nodeName: host01
30 |
--------------------------------------------------------------------------------
/chapter-08/calico/files/netattach.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: k8s.cni.cncf.io/v1
3 | kind: NetworkAttachmentDefinition
4 | metadata:
5 | name: macvlan-conf
6 | spec:
7 | config: '{
8 | "cniVersion": "0.3.0",
9 | "type": "macvlan",
10 | "mode": "bridge",
11 | "ipam": {
12 | "type": "host-local",
13 | "subnet": "10.244.0.0/24",
14 | "rangeStart": "10.244.0.1",
15 | "rangeEnd": "10.244.0.254"
16 | }
17 | }'
18 |
--------------------------------------------------------------------------------
/chapter-08/calico/files/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod
5 | spec:
6 | containers:
7 | - name: pod
8 | image: busybox
9 | command:
10 | - "sleep"
11 | - "infinity"
12 | nodeName: host01
13 |
--------------------------------------------------------------------------------
/chapter-08/calico/files/two-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod1
5 | spec:
6 | containers:
7 | - name: pod1
8 | image: busybox
9 | command:
10 | - "sleep"
11 | - "infinity"
12 | nodeName: host01
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: pod2
18 | spec:
19 | containers:
20 | - name: pod2
21 | image: busybox
22 | command:
23 | - "sleep"
24 | - "infinity"
25 | nodeName: host02
26 |
--------------------------------------------------------------------------------
/chapter-08/calico/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-08/calico/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-08/calico/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - local-pods.yaml
21 | - multus-daemonset.yaml
22 | - netattach.yaml
23 | - pod.yaml
24 | - two-pods.yaml
25 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/README.md:
--------------------------------------------------------------------------------
1 | # Overlay Networks - WeaveNet
2 |
3 | This folder provides the WeaveNet examples for the chapter "Overlay Networks".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
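59 | ## Using the Example Files
60 | 
61 | The main playbook copies `two-pods.yaml` to `/opt` on each host. As an
62 | illustrative check, assuming `kubectl` is configured for the root user,
63 | you can start the two pods (pinned to `host01` and `host02`) and verify
64 | they receive addresses from the WeaveNet overlay:
65 | 
66 | ```
67 | kubectl apply -f /opt/two-pods.yaml
68 | kubectl get pods -o wide
69 | ```
70 | 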
--------------------------------------------------------------------------------
/chapter-08/weavenet/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../../setup/collections
3 | inventory = ../../setup/ec2-inventory
4 | roles_path = ../../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../../sshkeys/known_hosts_aws_ansible_k8s"
8 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | aws_k8s_ssh_dir: "../../sshkeys"
6 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
7 | aws_lb:
8 | ip: 192.168.61.10
9 | ports:
10 | - name: ingress
11 | source: 80
12 | target: 80
13 | - name: api
14 | source: 6443
15 | target: 6443
16 | aws_instances:
17 | host01:
18 | type: master
19 | ip: 192.168.61.11
20 | instance_type: t3.medium
21 | host02:
22 | type: master
23 | ip: 192.168.61.12
24 | instance_type: t3.medium
25 | host03:
26 | type: master
27 | ip: 192.168.61.13
28 | instance_type: t3.medium
29 | roles:
30 | - aws-instances
31 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/files/two-pods.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod1
5 | spec:
6 | containers:
7 | - name: pod1
8 | image: busybox
9 | command:
10 | - "sleep"
11 | - "infinity"
12 | nodeName: host01
13 | ---
14 | apiVersion: v1
15 | kind: Pod
16 | metadata:
17 | name: pod2
18 | spec:
19 | containers:
20 | - name: pod2
21 | image: busybox
22 | command:
23 | - "sleep"
24 | - "infinity"
25 | nodeName: host02
26 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 | k8s_network: weave
10 |
11 | k8s_allow_scheduling_masters: true
12 |
13 | # NOTE: In a production system, you should keep these in an encrypted store
14 | # such as Ansible Vault.
15 | k8s_join_token: 1d8fb1.2875d52d62a3282d
16 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
17 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 | k8s_flannel_iface: enp0s8
4 |
--------------------------------------------------------------------------------
/chapter-08/weavenet/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - two-pods.yaml
21 |
--------------------------------------------------------------------------------
/chapter-09/README.md:
--------------------------------------------------------------------------------
1 | # Service and Ingress Networks
2 |
3 | This folder provides the examples for the chapter "Service and Ingress Networks".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
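59 | ## Using the Example Files
60 | 
61 | The main playbook copies this chapter's manifests to `/opt` on each host.
62 | As an illustrative check, assuming `kubectl` is configured for the root
63 | user, you can create the Deployment and its Service, then list the pod
64 | endpoints the Service has selected:
65 | 
66 | ```
67 | kubectl apply -f /opt/nginx-deploy.yaml
68 | kubectl apply -f /opt/nginx-service.yaml
69 | kubectl get endpoints nginx
70 | ```
71 | 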
--------------------------------------------------------------------------------
/chapter-09/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-09/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-09/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-09/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-09/files/nginx-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 5
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 |
--------------------------------------------------------------------------------
/chapter-09/files/nginx-ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 | name: web01
6 | spec:
7 | rules:
8 | - host: web01
9 | http:
10 | paths:
11 | - path: /
12 | pathType: Prefix
13 | backend:
14 | service:
15 | name: nginx
16 | port:
17 | number: 80
18 |
--------------------------------------------------------------------------------
/chapter-09/files/nginx-nodeport.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | spec:
3 | type: NodePort
4 |
--------------------------------------------------------------------------------
/chapter-09/files/nginx-service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Service
3 | apiVersion: v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | selector:
8 | app: nginx
9 | ports:
10 | - protocol: TCP
11 | port: 80
12 | targetPort: 80
--------------------------------------------------------------------------------
/chapter-09/files/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod
5 | spec:
6 | containers:
7 | - name: pod
8 | image: alpine
9 | command:
10 | - "sleep"
11 | - "infinity"
12 |
--------------------------------------------------------------------------------
/chapter-09/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-09/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-09/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - nginx-deploy.yaml
21 | - nginx-ingress.yaml
22 | - nginx-nodeport.yaml
23 | - nginx-service.yaml
24 | - pod.yaml
25 |
--------------------------------------------------------------------------------
/chapter-10/README.md:
--------------------------------------------------------------------------------
1 | # When Things Go Wrong
2 |
3 | This folder provides the examples for the chapter "When Things Go Wrong".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
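59 | ## Using the Example Files
60 | 
61 | The main playbook copies this chapter's manifests to `/opt` on each host.
62 | As an illustrative starting point, assuming `kubectl` is configured for
63 | the root user, you can create the misconfigured PostgreSQL pod and then
64 | inspect why it fails:
65 | 
66 | ```
67 | kubectl apply -f /opt/postgres-misconfig.yaml
68 | kubectl get pods
69 | kubectl logs postgres
70 | ```
71 | 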
--------------------------------------------------------------------------------
/chapter-10/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-10/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-10/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-10/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-10/files/crasher-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: crasher
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: crasher
11 | template:
12 | metadata:
13 | labels:
14 | app: crasher
15 | spec:
16 | containers:
17 | - name: crasher
18 | image: bookofkubernetes/crasher:stable
19 |
20 |
--------------------------------------------------------------------------------
/chapter-10/files/crasher.c:
--------------------------------------------------------------------------------
1 | int main() {
2 |   char *s = "12";  /* string literal, stored in read-only memory */
3 |   s[2] = '3';      /* writing to it triggers a segmentation fault */
4 | return 0;
5 | }
6 |
--------------------------------------------------------------------------------
/chapter-10/files/nginx-selector.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: nginx
6 | spec:
7 | containers:
8 | - name: nginx
9 | image: nginx
10 | nodeSelector:
11 | purpose: special
12 |
--------------------------------------------------------------------------------
/chapter-10/files/nginx-typo.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: nginx
6 | spec:
7 | containers:
8 | - name: nginx
9 | image: nginz
10 |
--------------------------------------------------------------------------------
/chapter-10/files/postgres-fixed.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: postgres
6 | spec:
7 | containers:
8 | - name: postgres
9 | image: postgres
10 | env:
11 | - name: POSTGRES_PASSWORD
12 | value: "supersecret"
13 |
--------------------------------------------------------------------------------
/chapter-10/files/postgres-misconfig.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: postgres
6 | spec:
7 | containers:
8 | - name: postgres
9 | image: postgres
10 |
--------------------------------------------------------------------------------
/chapter-10/files/sleep-multiple.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: sleep
6 | spec:
7 | containers:
8 | - name: sleep
9 | image: busybox
10 | command:
11 | - "/bin/sleep"
12 | - "3600"
13 | resources:
14 | requests:
15 | cpu: "2"
16 | - name: sleep2
17 | image: busybox
18 | command:
19 | - "/bin/sleep"
20 | - "3600"
21 | resources:
22 | requests:
23 | cpu: "2"
24 |
--------------------------------------------------------------------------------
/chapter-10/files/sleep-sensible.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: sleep
6 | spec:
7 | containers:
8 | - name: sleep
9 | image: busybox
10 | command:
11 | - "/bin/sleep"
12 | - "3600"
13 | resources:
14 | requests:
15 | cpu: "100m"
16 | - name: sleep2
17 | image: busybox
18 | command:
19 | - "/bin/sleep"
20 | - "3600"
21 | resources:
22 | requests:
23 | cpu: "100m"
24 |
--------------------------------------------------------------------------------
/chapter-10/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-10/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-10/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: debugger
13 | ansible.builtin.apt:
14 | name: gdb
15 | update_cache: yes
16 | - name: files
17 | ansible.builtin.copy:
18 | src: "{{ item }}"
19 | dest: /opt/{{ item }}
20 | owner: root
21 | group: root
22 | mode: '0644'
23 | with_list:
24 | - crasher.c
25 | - crasher-deploy.yaml
26 | - nginx-selector.yaml
27 | - nginx-typo.yaml
28 | - postgres-fixed.yaml
29 | - postgres-misconfig.yaml
30 | - sleep-multiple.yaml
31 | - sleep-sensible.yaml
32 |
--------------------------------------------------------------------------------
/chapter-11/README.md:
--------------------------------------------------------------------------------
1 | # Control Plane and Access Control
2 |
3 | This folder provides the examples for the chapter "Control Plane and Access Control".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
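59 | ## Using the Example Files
60 | 
61 | The main playbook copies this chapter's RBAC manifests to `/opt` on each
62 | host. As an illustrative sequence, assuming `kubectl` is configured for
63 | the root user and that you first create the `sample` namespace the
64 | manifests expect:
65 | 
66 | ```
67 | kubectl create namespace sample
68 | kubectl apply -f /opt/pod-reader.yaml
69 | kubectl apply -f /opt/read-pods-sa.yaml
70 | kubectl apply -f /opt/read-pods-bind.yaml
71 | kubectl apply -f /opt/read-pods-deploy.yaml
72 | kubectl auth can-i list pods -n sample \
73 |   --as=system:serviceaccount:sample:read-pods
74 | ```
75 | 
76 | The final command should report `yes` once the binding is in place.
77 | 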
--------------------------------------------------------------------------------
/chapter-11/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-11/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-11/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-11/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-11/files/edit-bind.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: editor
5 | namespace: sample
6 | subjects:
7 | - kind: User
8 | name: me
9 | apiGroup: rbac.authorization.k8s.io
10 | roleRef:
11 | kind: ClusterRole
12 | name: edit
13 | apiGroup: rbac.authorization.k8s.io
14 |
--------------------------------------------------------------------------------
/chapter-11/files/pod-reader.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: pod-reader
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["pods"]
8 | verbs: ["get", "watch", "list"]
9 |
--------------------------------------------------------------------------------
/chapter-11/files/read-pods-bind.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: RoleBinding
4 | metadata:
5 | name: read-pods
6 | namespace: sample
7 | subjects:
8 | - kind: ServiceAccount
9 | name: read-pods
10 | namespace: sample
11 | roleRef:
12 | kind: ClusterRole
13 | name: pod-reader
14 | apiGroup: rbac.authorization.k8s.io
15 |
--------------------------------------------------------------------------------
/chapter-11/files/read-pods-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: read-pods
6 | namespace: sample
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: read-pods
12 | template:
13 | metadata:
14 | labels:
15 | app: read-pods
16 | spec:
17 | containers:
18 | - name: read-pods
19 | image: alpine
20 | command: ["/bin/sleep", "infinity"]
21 | serviceAccountName: read-pods
22 |
--------------------------------------------------------------------------------
/chapter-11/files/read-pods-sa.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: read-pods
6 | namespace: sample
--------------------------------------------------------------------------------
/chapter-11/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-11/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-11/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - edit-bind.yaml
21 | - pod-reader.yaml
22 | - read-pods-bind.yaml
23 | - read-pods-deploy.yaml
24 | - read-pods-sa.yaml
25 |
--------------------------------------------------------------------------------
/chapter-12/README.md:
--------------------------------------------------------------------------------
1 | # Container Runtime
2 |
3 | This folder provides the examples for the chapter "Container Runtime".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Organization
10 |
11 | This chapter's examples are divided into two directories, `containerd` and
12 | `crio`. Each directory contains a full example along with a `README.md` file
13 | with instructions.
14 |
--------------------------------------------------------------------------------
/chapter-12/containerd/README.md:
--------------------------------------------------------------------------------
1 | # Container Runtime - Containerd
2 |
3 | This folder provides the `containerd` examples for the chapter "Container Runtime".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-12/containerd/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../../setup/collections
3 | inventory = ../../setup/ec2-inventory
4 | roles_path = ../../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-12/containerd/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | aws_k8s_ssh_dir: "../../sshkeys"
6 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
7 | aws_lb:
8 | ip: 192.168.61.10
9 | ports:
10 | - name: ingress
11 | source: 80
12 | target: 80
13 | - name: api
14 | source: 6443
15 | target: 6443
16 | aws_instances:
17 | host01:
18 | type: master
19 | ip: 192.168.61.11
20 | instance_type: t3.medium
21 | host02:
22 | type: master
23 | ip: 192.168.61.12
24 | instance_type: t3.medium
25 | host03:
26 | type: master
27 | ip: 192.168.61.13
28 | instance_type: t3.medium
29 | host04:
30 | type: node
31 | ip: 192.168.61.14
32 | instance_type: t3.small
33 | roles:
34 | - aws-instances
35 |
--------------------------------------------------------------------------------
/chapter-12/containerd/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-12/containerd/files/deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: debug
6 | spec:
7 | replicas: 8
8 | selector:
9 | matchLabels:
10 | app: debug
11 | template:
12 | metadata:
13 | labels:
14 | app: debug
15 | spec:
16 | containers:
17 | - name: debug
18 | image: busybox
19 | command:
20 | - "/bin/sleep"
21 | - "infinity"
22 | resources:
23 | requests:
24 | memory: "64Mi"
25 | cpu: "100m"
26 | limits:
27 | memory: "128Mi"
28 | cpu: "200m"
29 |
30 |
--------------------------------------------------------------------------------
/chapter-12/containerd/files/node-evict.yaml:
--------------------------------------------------------------------------------
1 | evictionHard:
2 | memory.available: "1900Mi"
3 |
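4 | # Kubelet configuration snippet: a hard eviction threshold of 1900Mi free
5 | # memory is high enough that the kubelet starts evicting pods almost as
6 | # soon as memory pressure appears on these small hosts.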
--------------------------------------------------------------------------------
/chapter-12/containerd/files/pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Pod
3 | apiVersion: v1
4 | metadata:
5 | name: debug
6 | spec:
7 | containers:
8 | - name: debug
9 | image: alpine
10 | command:
11 | - "/bin/sleep"
12 | - "infinity"
13 |
--------------------------------------------------------------------------------
/chapter-12/containerd/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 | - 192.168.61.14
10 |
11 | k8s_allow_scheduling_masters: true
12 |
13 | # NOTE: In a production system, you should keep these in an encrypted store
14 | # such as Ansible Vault.
15 | k8s_join_token: 1d8fb1.2875d52d62a3282d
16 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
17 |
--------------------------------------------------------------------------------
/chapter-12/containerd/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-12/containerd/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters:nodes
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - pod.yaml
21 | - deploy.yaml
22 | - node-evict.yaml
23 |
--------------------------------------------------------------------------------
/chapter-12/crio/README.md:
--------------------------------------------------------------------------------
1 | # Container Runtime - CRI-O
2 |
3 | This folder provides the `crio` examples for the chapter "Container Runtime".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-12/crio/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../../setup/collections
3 | inventory = ../../setup/ec2-inventory
4 | roles_path = ../../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-12/crio/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | aws_k8s_ssh_dir: "../../sshkeys"
6 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
7 | aws_lb:
8 | ip: 192.168.61.10
9 | ports:
10 | - name: ingress
11 | source: 80
12 | target: 80
13 | - name: api
14 | source: 6443
15 | target: 6443
16 | aws_instances:
17 | host01:
18 | type: master
19 | ip: 192.168.61.11
20 | instance_type: t3.medium
21 | host02:
22 | type: master
23 | ip: 192.168.61.12
24 | instance_type: t3.medium
25 | host03:
26 | type: master
27 | ip: 192.168.61.13
28 | instance_type: t3.medium
29 | host04:
30 | type: node
31 | ip: 192.168.61.14
32 | instance_type: t3.small
33 | roles:
34 | - aws-instances
35 |
--------------------------------------------------------------------------------
/chapter-12/crio/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-12/crio/files/deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: debug
6 | spec:
7 | replicas: 8
8 | selector:
9 | matchLabels:
10 | app: debug
11 | template:
12 | metadata:
13 | labels:
14 | app: debug
15 | spec:
16 | containers:
17 | - name: debug
18 | image: busybox
19 | command:
20 | - "/bin/sleep"
21 | - "infinity"
22 |
--------------------------------------------------------------------------------
/chapter-12/crio/files/node-evict.yaml:
--------------------------------------------------------------------------------
1 | evictionHard:
2 | memory.available: "1900Mi"
3 |
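4 | # Kubelet configuration snippet: a hard eviction threshold of 1900Mi free
5 | # memory is high enough that the kubelet starts evicting pods almost as
6 | # soon as memory pressure appears on these small hosts.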
--------------------------------------------------------------------------------
/chapter-12/crio/files/pod.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Pod
3 | apiVersion: v1
4 | metadata:
5 | name: debug
6 | spec:
7 | containers:
8 | - name: debug
9 | image: alpine
10 | command:
11 | - "/bin/sleep"
12 | - "infinity"
13 |
--------------------------------------------------------------------------------
/chapter-12/crio/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crictl_container_engine: crio
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 | - 192.168.61.14
10 |
11 | k8s_allow_scheduling_masters: true
12 |
13 | # NOTE: In a production system, you should keep these in an encrypted store
14 | # such as Ansible Vault.
15 | k8s_join_token: 1d8fb1.2875d52d62a3282d
16 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
17 |
--------------------------------------------------------------------------------
/chapter-12/crio/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-12/crio/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters:nodes
3 | become: yes
4 | roles:
5 | - tools
6 | - cri-o
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - pod.yaml
21 | - deploy.yaml
22 | - node-evict.yaml
23 |
--------------------------------------------------------------------------------
/chapter-13/README.md:
--------------------------------------------------------------------------------
1 | # Health Probes
2 |
3 | This folder provides the examples for the chapter "Health Probes".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-13/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-13/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-13/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-13/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-13/files/nginx-404.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | spec:
3 | template:
4 | spec:
5 | containers:
6 | - name: nginx
7 | livenessProbe:
8 | exec:
9 | command: ["/usr/bin/curl", "-fq", "http://localhost/missing"]
10 |
11 |
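12 | # Partial Deployment spec meant to be applied as a patch; nginx serves no
13 | # /missing page, so the patched liveness probe fails and the container is
14 | # restarted, e.g.:
15 | #   kubectl patch deployment nginx --patch "$(cat /opt/nginx-404.yaml)"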
--------------------------------------------------------------------------------
/chapter-13/files/nginx-exec.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 | livenessProbe:
20 | exec:
21 | command: ["/usr/bin/curl", "-fq", "http://localhost"]
22 | initialDelaySeconds: 10
23 | periodSeconds: 5
24 |
25 |
--------------------------------------------------------------------------------
/chapter-13/files/nginx-http.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 | livenessProbe:
20 | httpGet:
21 | path: /
22 | port: 80
23 |
--------------------------------------------------------------------------------
/chapter-13/files/nginx-ready.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 | livenessProbe:
20 | httpGet:
21 | path: /
22 | port: 80
23 | readinessProbe:
24 | httpGet:
25 | path: /ready
26 | port: 80
27 | ---
28 | kind: Service
29 | apiVersion: v1
30 | metadata:
31 | name: nginx
32 | spec:
33 | selector:
34 | app: nginx
35 | ports:
36 | - protocol: TCP
37 | port: 80
38 | targetPort: 80
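39 |
40 | # Stock nginx has no /ready page, so the readiness probe fails and these
41 | # pods stay out of the service's endpoints until a /ready file is created
42 | # in each pod's document root.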
--------------------------------------------------------------------------------
/chapter-13/files/postgres-tcp.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: postgres
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: postgres
11 | template:
12 | metadata:
13 | labels:
14 | app: postgres
15 | spec:
16 | containers:
17 | - name: postgres
18 | image: postgres
19 | env:
20 | - name: POSTGRES_PASSWORD
21 | value: "supersecret"
22 | livenessProbe:
23 | tcpSocket:
24 | port: 5432
25 |
--------------------------------------------------------------------------------
/chapter-13/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-13/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-13/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - nginx-404.yaml
21 | - nginx-exec.yaml
22 | - nginx-http.yaml
23 | - nginx-ready.yaml
24 | - postgres-tcp.yaml
25 |
--------------------------------------------------------------------------------
/chapter-14/README.md:
--------------------------------------------------------------------------------
1 | # Limits and Quotas
2 |
3 | This folder provides the examples for the chapter "Limits and Quotas".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-14/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-14/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-14/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-14/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-14/files/cgroup-info:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | POD=$1
3 | if [ -z "${POD}" ]
4 | then
5 | echo "Usage: cgroup-info "
6 | exit 1
7 | fi
8 |
9 | # Look up the pod ID from the pod name. -q ensures we just return the ID.
10 | POD_ID=$(crictl pods --name ${POD} -q)
11 | if [ -z "${POD_ID}" ]
12 | then
13 | echo "Pod ${POD} not found"
14 | exit 2
15 | fi
16 |
17 | # Get the pod data in JSON format and pull out one field, the cgroup dir
18 | cgp_field='.info.config.linux.cgroup_parent'
19 | CGP=$(crictl inspectp $POD_ID | jq -r "$cgp_field")
20 |
21 | # The same cgroup dir is used for both CPU and memory cgroups
22 | CPU=/sys/fs/cgroup/cpu/$CGP
23 | MEM=/sys/fs/cgroup/memory/$CGP
24 |
25 | # Print our findings
26 | echo ""
27 | echo "Container Runtime"
28 | echo "-----------------"
29 | echo Pod ID: $POD_ID
30 | echo Cgroup path: $CGP
31 | echo ""
32 |
33 | echo "CPU Settings"
34 | echo "------------"
35 | echo "CPU Shares:" $(cat $CPU/cpu.shares)
36 | echo "CPU Quota (us):" $(cat $CPU/cpu.cfs_quota_us) "per" $(cat $CPU/cpu.cfs_period_us)
37 | echo ""
38 |
39 | echo "Memory Settings"
40 | echo "---------------"
41 | echo "Limit (bytes):" $(cat $MEM/memory.limit_in_bytes)
42 | echo ""
43 |
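44 | # Example (as root on the node where the pod is running):
45 | #   /opt/cgroup-info iperf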
--------------------------------------------------------------------------------
/chapter-14/files/edit-bind.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: editor
5 | namespace: sample
6 | subjects:
7 | - kind: User
8 | name: me
9 | apiGroup: rbac.authorization.k8s.io
10 | roleRef:
11 | kind: ClusterRole
12 | name: edit
13 | apiGroup: rbac.authorization.k8s.io
14 |
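15 | # Grants the built-in "edit" ClusterRole to the user "me", but only inside
16 | # the "sample" namespace, because the grant is made with a RoleBinding.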
--------------------------------------------------------------------------------
/chapter-14/files/iperf-limit.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Pod
3 | apiVersion: v1
4 | metadata:
5 | name: iperf-limit
6 | annotations:
7 | kubernetes.io/ingress-bandwidth: 1M
8 | kubernetes.io/egress-bandwidth: 1M
9 | spec:
10 | containers:
11 | - name: iperf
12 | image: bookofkubernetes/iperf3:stable
13 | resources:
14 | requests:
15 | memory: "64Mi"
16 | cpu: "250m"
17 | limits:
18 | memory: "128Mi"
19 | cpu: "500m"
20 | nodeName: host01
21 |
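22 | # The bandwidth annotations are honored where the CNI bandwidth plugin is
23 | # enabled, capping this pod's traffic at roughly 1Mbit/s each way for
24 | # comparison with the unthrottled pod in iperf.yaml.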
--------------------------------------------------------------------------------
/chapter-14/files/iperf-server.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: iperf-server
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: iperf-server
11 | template:
12 | metadata:
13 | labels:
14 | app: iperf-server
15 | spec:
16 | containers:
17 | - name: iperf
18 | image: bookofkubernetes/iperf3:stable
19 | env:
20 | - name: IPERF_SERVER
21 | value: "1"
22 | resources:
23 | requests:
24 | memory: "64Mi"
25 | cpu: "250m"
26 | limits:
27 | memory: "128Mi"
28 | cpu: "500m"
29 | ---
30 | kind: Service
31 | apiVersion: v1
32 | metadata:
33 | name: iperf-server
34 | spec:
35 | selector:
36 | app: iperf-server
37 | ports:
38 | - protocol: TCP
39 | port: 5201
40 | targetPort: 5201
41 |
--------------------------------------------------------------------------------
/chapter-14/files/iperf.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Pod
3 | apiVersion: v1
4 | metadata:
5 | name: iperf
6 | spec:
7 | containers:
8 | - name: iperf
9 | image: bookofkubernetes/iperf3:stable
10 | resources:
11 | requests:
12 | memory: "64Mi"
13 | cpu: "250m"
14 | limits:
15 | memory: "128Mi"
16 | cpu: "500m"
17 |
--------------------------------------------------------------------------------
/chapter-14/files/nginx-limit.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 | resources:
20 | requests:
21 | memory: "64Mi"
22 | cpu: "250m"
23 | limits:
24 | memory: "128Mi"
25 | cpu: "500m"
26 | nodeName: host01
27 |
--------------------------------------------------------------------------------
/chapter-14/files/quota.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ResourceQuota
4 | metadata:
5 | name: sample-quota
6 | namespace: sample
7 | spec:
8 | hard:
9 | requests.cpu: "1"
10 | requests.memory: 256Mi
11 | limits.cpu: "2"
12 | limits.memory: 512Mi
13 |
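14 | # Once this quota is applied, every pod in the "sample" namespace must
15 | # declare CPU and memory requests and limits, and the namespace totals
16 | # cannot exceed the hard values above.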
--------------------------------------------------------------------------------
/chapter-14/files/sleep.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: sleep
6 | namespace: sample
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: sleep
12 | template:
13 | metadata:
14 | labels:
15 | app: sleep
16 | spec:
17 | containers:
18 | - name: sleep
19 | image: busybox
20 | command:
21 | - "/bin/sleep"
22 | - "3600"
23 | resources:
24 | requests:
25 | memory: "64Mi"
26 | cpu: "250m"
27 | limits:
28 | memory: "128Mi"
29 | cpu: "512m"
30 |
--------------------------------------------------------------------------------
/chapter-14/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-14/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-14/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: resource files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - edit-bind.yaml
21 | - iperf-limit.yaml
22 | - iperf-server.yaml
23 | - iperf.yaml
24 | - nginx-limit.yaml
25 | - sleep.yaml
26 | - quota.yaml
27 | - name: scripts
28 | ansible.builtin.copy:
29 | src: "{{ item }}"
30 | dest: /opt/{{ item }}
31 | owner: root
32 | group: root
33 | mode: '0755'
34 | with_list:
35 | - cgroup-info
36 |
--------------------------------------------------------------------------------
/chapter-15/README.md:
--------------------------------------------------------------------------------
1 | # Persistent Storage
2 |
3 | This folder provides the examples for the chapter "Persistent Storage".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-15/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-15/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-15/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-15/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-15/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <title>Hello, World</title>
4 | </head>
5 | <body>
6 | <h1>Hello, World!</h1>
7 | </body>
8 | </html>
9 |
--------------------------------------------------------------------------------
/chapter-15/files/nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: nginx
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx
18 | volumeMounts:
19 | - name: html
20 | mountPath: /usr/share/nginx/html
21 | securityContext:
22 | fsGroup: 101
23 | volumes:
24 | - name: html
25 | persistentVolumeClaim:
26 | claimName: nginx-storage
27 |
--------------------------------------------------------------------------------
/chapter-15/files/pgsql-set.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: postgres
5 | spec:
6 | clusterIP: None
7 | selector:
8 | app: postgres
9 | ---
10 | apiVersion: apps/v1
11 | kind: StatefulSet
12 | metadata:
13 | name: postgres
14 | spec:
15 | serviceName: postgres
16 | replicas: 2
17 | selector:
18 | matchLabels:
19 | app: postgres
20 | template:
21 | metadata:
22 | labels:
23 | app: postgres
24 | spec:
25 | containers:
26 | - name: postgres
27 | image: postgres
28 | env:
29 | - name: POSTGRES_PASSWORD
30 | value: "supersecret"
31 | - name: PGDATA
32 | value: /data/pgdata
33 | volumeMounts:
34 | - name: postgres-volume
35 | mountPath: /data
36 | volumeClaimTemplates:
37 | - metadata:
38 | name: postgres-volume
39 | spec:
40 | storageClassName: longhorn
41 | accessModes:
42 | - ReadWriteOnce
43 | resources:
44 | requests:
45 | storage: 1Gi
46 |
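47 | # clusterIP: None makes the service headless, so each replica gets a stable
48 | # DNS name (postgres-0.postgres, postgres-1.postgres), and the volume claim
49 | # template gives each replica its own Longhorn-backed PVC.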
--------------------------------------------------------------------------------
/chapter-15/files/pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: manual
5 | spec:
6 | claimRef:
7 | name: manual
8 | namespace: default
9 | accessModes:
10 | - ReadWriteOnce
11 | capacity:
12 | storage: 100Mi
13 | csi:
14 | driver: driver.longhorn.io
15 | volumeHandle: manual
16 |
17 |
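18 | # The claimRef pre-binds this PersistentVolume to the "manual" claim in
19 | # pvc-man.yaml, preventing any other PersistentVolumeClaim from taking it.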
--------------------------------------------------------------------------------
/chapter-15/files/pvc-man.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: manual
5 | spec:
6 | storageClassName: manual
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 100Mi
12 |
--------------------------------------------------------------------------------
/chapter-15/files/pvc-rwx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: nginx-storage
5 | spec:
6 | storageClassName: longhorn
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 100Mi
12 |
--------------------------------------------------------------------------------
/chapter-15/files/pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: nginx-storage
5 | spec:
6 | storageClassName: longhorn
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 100Mi
12 |
--------------------------------------------------------------------------------
/chapter-15/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-15/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-15/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - index.html
21 | - nginx.yaml
22 | - pgsql-set.yaml
23 | - pvc-man.yaml
24 | - pvc-rwx.yaml
25 | - pvc.yaml
26 | - pv.yaml
27 |
--------------------------------------------------------------------------------
/chapter-16/README.md:
--------------------------------------------------------------------------------
1 | # Configuration and Secrets
2 |
3 | This folder provides the examples for the chapter "Configuration and Secrets".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-16/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-16/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-16/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-16/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-16/files/etcd-env:
--------------------------------------------------------------------------------
1 | export ETCDCTL_API=3
2 | export ETCDCTL_CERT=/etc/kubernetes/pki/apiserver-etcd-client.crt
3 | export ETCDCTL_CACERT=/etc/kubernetes/pki/etcd/ca.crt
4 | export ETCDCTL_KEY=/etc/kubernetes/pki/apiserver-etcd-client.key
5 | export ETCDCTL_ENDPOINTS=https://192.168.61.11:2379
6 |
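7 | # Source this file on a control plane node to point etcdctl at the cluster,
8 | # then inspect raw keys, e.g.:
9 | #   source /opt/etcd-env
10 | #   etcdctl get /registry/secrets/default/pgsql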
--------------------------------------------------------------------------------
/chapter-16/files/nginx-cm.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: nginx
6 | data:
7 | index.html: |
8 |     <html>
9 |     <head>
10 |     <title>Hello World</title>
11 |     </head>
12 |     <body>
13 |     <h1>Hello, World from a ConfigMap!</h1>
14 |     </body>
15 |     </html>
16 |
--------------------------------------------------------------------------------
/chapter-16/files/nginx-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: nginx
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: nginx
11 | template:
12 | metadata:
13 | labels:
14 | app: nginx
15 | spec:
16 | containers:
17 | - name: nginx
18 | image: nginx
19 | volumeMounts:
20 | - name: nginx-files
21 | mountPath: /usr/share/nginx/html
22 | volumes:
23 | - name: nginx-files
24 | configMap:
25 | name: nginx
26 |
--------------------------------------------------------------------------------
/chapter-16/files/pgsql-cm.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: ConfigMap
3 | apiVersion: v1
4 | metadata:
5 | name: pgsql
6 | data:
7 | POSTGRES_PASSWORD: "supersecret"
8 |
--------------------------------------------------------------------------------
/chapter-16/files/pgsql-ext-cfg.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: postgres
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: postgres
11 | template:
12 | metadata:
13 | labels:
14 | app: postgres
15 | spec:
16 | containers:
17 | - name: postgres
18 | image: postgres
19 | envFrom:
20 | - configMapRef:
21 | name: pgsql
22 |
--------------------------------------------------------------------------------
/chapter-16/files/pgsql-ext-sec.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: postgres
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: postgres
11 | template:
12 | metadata:
13 | labels:
14 | app: postgres
15 | spec:
16 | containers:
17 | - name: postgres
18 | image: postgres
19 | envFrom:
20 | - secretRef:
21 | name: pgsql
22 |
--------------------------------------------------------------------------------
/chapter-16/files/pgsql-secret-2.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Secret
3 | apiVersion: v1
4 | metadata:
5 | name: pgsql
6 | data:
7 | POSTGRES_PASSWORD: c3VwZXJzZWNyZXQ=
8 |
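9 | # "c3VwZXJzZWNyZXQ=" is just the base64 encoding of "supersecret"; compare
10 | # pgsql-secret.yaml, which uses stringData to let the API server encode it.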
--------------------------------------------------------------------------------
/chapter-16/files/pgsql-secret.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Secret
3 | apiVersion: v1
4 | metadata:
5 | name: pgsql
6 | stringData:
7 | POSTGRES_PASSWORD: "supersecret"
8 |
--------------------------------------------------------------------------------
/chapter-16/files/pgsql.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: postgres
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: postgres
11 | template:
12 | metadata:
13 | labels:
14 | app: postgres
15 | spec:
16 | containers:
17 | - name: postgres
18 | image: postgres
19 | env:
20 | - name: POSTGRES_PASSWORD
21 | value: "supersecret"
22 |
--------------------------------------------------------------------------------
/chapter-16/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-16/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-16/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - etcd-env
21 | - nginx-cm.yaml
22 | - nginx-deploy.yaml
23 | - pgsql-cm.yaml
24 | - pgsql-ext-cfg.yaml
25 | - pgsql-ext-sec.yaml
26 | - pgsql-secret-2.yaml
27 | - pgsql-secret.yaml
28 | - pgsql.yaml
29 |
--------------------------------------------------------------------------------
/chapter-17/README.md:
--------------------------------------------------------------------------------
1 | # Custom Resources and Operators
2 |
3 | This folder provides the examples for the chapter "Custom Resources and Operators".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-17/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-17/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-17/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-17/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-17/files/crd.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | name: samples.bookofkubernetes.com
6 | spec:
7 | group: bookofkubernetes.com
8 | versions:
9 | - name: v1
10 | served: true
11 | storage: true
12 | schema:
13 | openAPIV3Schema:
14 | type: object
15 | properties:
16 | spec:
17 | type: object
18 | properties:
19 | value:
20 | type: integer
21 | scope: Namespaced
22 | names:
23 | plural: samples
24 | singular: sample
25 | kind: Sample
26 | shortNames:
27 | - sam
28 |
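29 | # Once this CRD is applied, Sample resources such as somedata.yaml can be
30 | # managed like built-in objects, including via the short name, e.g.
31 | #   kubectl get sam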
--------------------------------------------------------------------------------
/chapter-17/files/pgsql.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "acid.zalan.do/v1"
2 | kind: postgresql
3 | metadata:
4 | name: pgsql-cluster
5 | namespace: default
6 | spec:
7 | teamId: "pgsql"
8 | volume:
9 | size: 1Gi
10 | storageClass: longhorn
11 | numberOfInstances: 3
12 | users:
13 | dbuser:
14 | - superuser
15 | - createdb
16 | databases:
17 | defaultdb: dbuser
18 | postgresql:
19 | version: "14"
20 |
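21 | # Custom resource for the Zalando postgres-operator (acid.zalan.do/v1); the
22 | # operator reacts by creating a three-instance PostgreSQL cluster backed by
23 | # Longhorn volumes.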
--------------------------------------------------------------------------------
/chapter-17/files/sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: watcher
5 | namespace: default
6 | ---
7 | kind: RoleBinding
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | metadata:
10 | name: viewer
11 | namespace: default
12 | subjects:
13 | - kind: ServiceAccount
14 | name: watcher
15 | namespace: default
16 | roleRef:
17 | kind: ClusterRole
18 | name: view
19 | apiGroup: rbac.authorization.k8s.io
20 |
21 |
--------------------------------------------------------------------------------
/chapter-17/files/sample-reader.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: sample-reader
5 | labels:
6 | rbac.authorization.k8s.io/aggregate-to-view: "true"
7 | rules:
8 | - apiGroups: ["bookofkubernetes.com"]
9 | resources: ["samples"]
10 | verbs: ["get", "watch", "list"]
11 |
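The aggregate-to-view label asks Kubernetes to fold these rules into the built-in `view` ClusterRole, which is the role the `watcher` ServiceAccount is bound to in sa.yaml. A quick check of the combined effect, as a sketch:

```
kubectl apply -f /opt/crd.yaml -f /opt/sample-reader.yaml -f /opt/sa.yaml
# Should print "yes" once aggregation has propagated:
kubectl auth can-i list samples --as=system:serviceaccount:default:watcher
```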
--------------------------------------------------------------------------------
/chapter-17/files/somedata.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: bookofkubernetes.com/v1
3 | kind: Sample
4 | metadata:
5 | namespace: default
6 | name: somedata
7 | spec:
8 | value: 123
9 |
--------------------------------------------------------------------------------
/chapter-17/files/watch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from kubernetes import client, config, watch
3 | import json, os, sys
4 |
5 | try:
6 | config.load_incluster_config()
7 | except config.ConfigException:
8 | print("In cluster config failed, falling back to file", file=sys.stderr)
9 | config.load_kube_config()
10 |
11 | group = os.environ.get('WATCH_GROUP', 'bookofkubernetes.com')
12 | version = os.environ.get('WATCH_VERSION', 'v1')
13 | namespace = os.environ.get('WATCH_NAMESPACE', 'default')
14 | resource = os.environ.get('WATCH_RESOURCE', 'samples')
15 |
16 | api = client.CustomObjectsApi()
17 |
18 | w = watch.Watch()
19 | for event in w.stream(api.list_namespaced_custom_object,
20 | group=group, version=version, namespace=namespace, plural=resource):
21 | json.dump(event, sys.stdout, indent=2)
22 | sys.stdout.flush()
23 |
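A sketch of running this watcher by hand, outside the Deployment in watch.yaml, assuming the Python client library is installed and a kubeconfig is available for the fallback path:

```
pip3 install kubernetes
WATCH_NAMESPACE=default python3 /opt/watch.py &
# In another shell, trigger an ADDED event:
kubectl apply -f /opt/somedata.yaml
```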
--------------------------------------------------------------------------------
/chapter-17/files/watch.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: watch
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: watch
11 | template:
12 | metadata:
13 | labels:
14 | app: watch
15 | spec:
16 | containers:
17 | - name: watch
18 | image: bookofkubernetes/crdwatcher:1.0.1
19 | serviceAccountName: watcher
20 |
--------------------------------------------------------------------------------
/chapter-17/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 | k8s_postgresql_operator: prep
10 |
11 | k8s_allow_scheduling_masters: true
12 |
13 | # NOTE: In a production system, you should keep these in an encrypted store
14 | # such as Ansible Vault.
15 | k8s_join_token: 1d8fb1.2875d52d62a3282d
16 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
17 |
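The note above is straightforward to act on with Ansible Vault; a minimal sketch (the playbook then needs the vault password at run time):

```
ansible-vault encrypt group_vars/remote.yaml
ansible-playbook playbook.yaml --ask-vault-pass
```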
--------------------------------------------------------------------------------
/chapter-17/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-17/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - crd.yaml
21 | - pgsql.yaml
22 | - sample-reader.yaml
23 | - sa.yaml
24 | - somedata.yaml
25 | - watch.py
26 | - watch.yaml
27 |
--------------------------------------------------------------------------------
/chapter-18/README.md:
--------------------------------------------------------------------------------
1 | # Affinity and Devices
2 |
3 | This folder provides the examples for the chapter "Affinity and Devices".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-18/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-18/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-18/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-18/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-18/files/add-hw.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | conf=/etc/kubernetes/admin.conf
3 | cert=/etc/kubernetes/pki/admin.crt
4 | key=/etc/kubernetes/pki/admin.key
5 | ca=/etc/kubernetes/pki/ca.crt
6 |
7 | grep client-key-data $conf | cut -d" " -f 6 | base64 -d > $key
8 | grep client-cert $conf | cut -d" " -f 6 | base64 -d > $cert
9 |
10 | patch='
11 | [
12 | {
13 | "op": "add",
14 | "path": "/status/capacity/bookofkubernetes.com~1special-hw",
15 | "value": "3"
16 | }
17 | ]
18 | '
19 |
20 | curl --cacert $ca --cert $cert --key $key \
21 | -H "Content-Type: application/json-patch+json" \
22 | -X PATCH -d "$patch" \
23 | https://192.168.61.10:6443/api/v1/nodes/host02/status
24 | echo ""
25 |
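If the PATCH succeeds, host02 advertises three units of the extended resource. One way to confirm it, as a sketch (the backslash-escaped dots are required jsonpath syntax for keys that contain dots):

```
kubectl get node host02 \
  -o jsonpath='{.status.capacity.bookofkubernetes\.com/special-hw}'
```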
--------------------------------------------------------------------------------
/chapter-18/files/hw.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: sleep
6 | spec:
7 | containers:
8 | - name: sleep
9 | image: busybox
10 | command: ["/bin/sleep", "infinity"]
11 | resources:
12 | limits:
13 | bookofkubernetes.com/special-hw: 1
14 |
--------------------------------------------------------------------------------
/chapter-18/files/hw3.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: sleep3
6 | spec:
7 | containers:
8 | - name: sleep
9 | image: busybox
10 | command: ["/bin/sleep", "infinity"]
11 | resources:
12 | limits:
13 | bookofkubernetes.com/special-hw: 3
14 |
--------------------------------------------------------------------------------
/chapter-18/files/ipf-client.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: iperf
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: iperf
11 | template:
12 | metadata:
13 | labels:
14 | app: iperf
15 | spec:
16 | affinity:
17 | podAntiAffinity:
18 | requiredDuringSchedulingIgnoredDuringExecution:
19 | - labelSelector:
20 | matchExpressions:
21 | - key: app
22 | operator: In
23 | values:
24 | - iperf
25 | topologyKey: "kubernetes.io/hostname"
26 | podAffinity:
27 | requiredDuringSchedulingIgnoredDuringExecution:
28 | - labelSelector:
29 | matchExpressions:
30 | - key: app
31 | operator: In
32 | values:
33 | - iperf-server
34 | topologyKey: "kubernetes.io/hostname"
35 | containers:
36 | - name: iperf
37 | image: bookofkubernetes/iperf3:1.0.0
38 |
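The anti-affinity rule spreads the three clients across distinct nodes, while the affinity rule co-locates each client with an iperf-server pod. A sketch to verify placement once both Deployments are applied:

```
kubectl apply -f /opt/ipf-server.yaml -f /opt/ipf-client.yaml
# Each node should show one client paired with one server:
kubectl get pods -o wide --sort-by=.spec.nodeName
```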
--------------------------------------------------------------------------------
/chapter-18/files/ipf-server.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: iperf-server
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: iperf-server
11 | template:
12 | metadata:
13 | labels:
14 | app: iperf-server
15 | spec:
16 | affinity:
17 | podAntiAffinity:
18 | requiredDuringSchedulingIgnoredDuringExecution:
19 | - labelSelector:
20 | matchExpressions:
21 | - key: app
22 | operator: In
23 | values:
24 | - iperf-server
25 | topologyKey: "kubernetes.io/hostname"
26 | containers:
27 | - name: iperf
28 | image: bookofkubernetes/iperf3:1.0.0
29 | env:
30 | - name: IPERF_SERVER
31 | value: "1"
32 |
--------------------------------------------------------------------------------
/chapter-18/files/ipf-svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Service
3 | apiVersion: v1
4 | metadata:
5 | name: iperf-server
6 | spec:
7 | selector:
8 | app: iperf-server
9 | ports:
10 | - protocol: TCP
11 | port: 5201
12 | targetPort: 5201
13 |
--------------------------------------------------------------------------------
/chapter-18/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-18/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-18/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - hw.yaml
21 | - hw3.yaml
22 | - ipf-client.yaml
23 | - ipf-server.yaml
24 | - ipf-svc.yaml
25 | - name: scripts
26 | ansible.builtin.copy:
27 | src: "{{ item }}"
28 | dest: /opt/{{ item }}
29 | owner: root
30 | group: root
31 | mode: '0755'
32 | with_list:
33 | - add-hw.sh
34 |
--------------------------------------------------------------------------------
/chapter-19/README.md:
--------------------------------------------------------------------------------
1 | # Tuning Quality of Service
2 |
3 | This folder provides the examples for the chapter "Tuning Quality of Service".
4 |
5 | ## Prerequisites
6 |
7 | Be sure to start by following the instructions in the `setup` folder.
8 |
9 | ## Running in AWS
10 |
11 | Start by provisioning:
12 |
13 | ```
14 | ansible-playbook aws-setup.yaml
15 | ```
16 |
17 | Then, run the main playbook:
18 |
19 | ```
20 | ansible-playbook playbook.yaml
21 | ```
22 |
23 | You can SSH to `host01` and become root by running:
24 |
25 | ```
26 | ./aws-ssh.sh host01
27 | sudo su -
28 | ```
29 |
30 | When finished, don't forget to clean up:
31 |
32 | ```
33 | ansible-playbook aws-teardown.yaml
34 | ```
35 |
36 | ## Running in Vagrant
37 |
38 | To start:
39 |
40 | ```
41 | vagrant up
42 | ```
43 |
44 | This will also run the main Ansible playbook.
45 |
46 | You can SSH to `host01` and become root by running:
47 |
48 | ```
49 | vagrant ssh host01
50 | sudo su -
51 | ```
52 |
53 | When finished, you can clean up the VMs:
54 |
55 | ```
56 | vagrant destroy
57 | ```
58 |
--------------------------------------------------------------------------------
/chapter-19/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 | cluster = {
4 | "host01" => { :ip => "192.168.61.11", :cpus => 2, :mem => 2048, :ports => [80, 48080] },
5 | "host02" => { :ip => "192.168.61.12", :cpus => 2, :mem => 2048 },
6 | "host03" => { :ip => "192.168.61.13", :cpus => 2, :mem => 2048 }
7 | }
8 | last = "host03"
9 | groups = {
10 | "vagrant" => ["host01","host02","host03"],
11 | "remote" => ["host01","host02","host03"],
12 | "masters" => ["host01","host02","host03"]
13 | }
14 |
15 | Vagrant.configure("2") do |config|
16 | config.vm.box = "ubuntu/focal64"
17 | cluster.each do |name, data|
18 | config.vm.define name do |host|
19 | host.vm.hostname = name
20 | if data.key?(:ports)
21 | host.vm.network "forwarded_port", guest: data[:ports][0], host: data[:ports][1]
22 | end
23 | host.vm.network "private_network", ip: "#{data[:ip]}"
24 | host.vm.provider :virtualbox do |vb, override|
25 | vb.cpus = data[:cpus]
26 | vb.memory = data[:mem]
27 | end
28 | # Provision all hosts at once for efficiency
29 | if name == last
30 | host.vm.provision :ansible do |ansible|
31 | ansible.limit = "all"
32 | ansible.playbook = "playbook.yaml"
33 | ansible.groups = groups
34 | end
35 | end
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/chapter-19/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-19/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | roles:
29 | - aws-instances
30 |
--------------------------------------------------------------------------------
/chapter-19/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-19/files/best-effort.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: best-effort
6 | spec:
7 | containers:
8 | - name: best-effort
9 | image: busybox
10 | command: ["/bin/sleep", "infinity"]
11 | nodeName: host01
12 |
--------------------------------------------------------------------------------
/chapter-19/files/burstable.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: burstable
6 | spec:
7 | containers:
8 | - name: burstable
9 | image: busybox
10 | command: ["/bin/sleep", "infinity"]
11 | resources:
12 | requests:
13 | memory: "64Mi"
14 | cpu: "50m"
15 | limits:
16 | memory: "128Mi"
17 | cpu: "100m"
18 | nodeName: host01
19 |
--------------------------------------------------------------------------------
/chapter-19/files/cgroup-info:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | POD=$1
3 | if [ -z "${POD}" ]
4 | then
5 | echo "Usage: cgroup-info "
6 | exit 1
7 | fi
8 |
9 | # Look up the pod ID from the POD. -q ensures we just return the ID.
10 | POD_ID=$(crictl pods --name ${POD} -q)
11 | if [ -z "${POD_ID}" ]
12 | then
13 | echo "Pod ${POD} not found"
14 | exit 2
15 | fi
16 |
17 | # Get the pod data in JSON format and pull out one field, the cgroup dir
18 | cgp_field='.info.config.linux.cgroup_parent'
19 | CGP=$(crictl inspectp $POD_ID | jq -r "$cgp_field")
20 |
21 | # The same cgroup dir is used for both CPU and memory cgroups
22 | CPU=/sys/fs/cgroup/cpu/$CGP
23 | MEM=/sys/fs/cgroup/memory/$CGP
24 |
25 | # Print our findings
26 | echo ""
27 | echo "Container Runtime"
28 | echo "-----------------"
29 | echo Pod ID: $POD_ID
30 | echo Cgroup path: $CGP
31 | echo ""
32 |
33 | echo "CPU Settings"
34 | echo "------------"
35 | echo "CPU Shares:" $(cat $CPU/cpu.shares)
36 | echo "CPU Quota (us):" $(cat $CPU/cpu.cfs_quota_us) "per" $(cat $CPU/cpu.cfs_period_us)
37 | echo ""
38 |
39 | echo "Memory Settings"
40 | echo "---------------"
41 | echo "Limit (bytes):" $(cat $MEM/memory.limit_in_bytes)
42 | echo ""
43 |
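These paths assume the cgroup v1 hierarchy used on the Ubuntu 20.04 hosts in this setup. On a host running the unified cgroup v2 hierarchy the equivalents live in different files; a rough sketch of the counterparts, using the same $CGP value:

```
# cgroup v2 keeps CPU and memory controls in one unified tree
V2=/sys/fs/cgroup/$CGP
cat $V2/cpu.weight   # replaces cpu.shares (note: a different scale)
cat $V2/cpu.max      # replaces cfs_quota_us/cfs_period_us; "max 100000" means unlimited
cat $V2/memory.max   # replaces memory.limit_in_bytes
```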
--------------------------------------------------------------------------------
/chapter-19/files/essential.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: scheduling.k8s.io/v1
3 | kind: PriorityClass
4 | metadata:
5 | name: essential
6 | value: 999999
7 |
--------------------------------------------------------------------------------
/chapter-19/files/guaranteed.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: guaranteed
6 | spec:
7 | containers:
8 | - name: guaranteed
9 | image: busybox
10 | command: ["/bin/sleep", "infinity"]
11 | resources:
12 | limits:
13 | memory: "64Mi"
14 | cpu: "50m"
15 | nodeName: host01
16 |
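Kubernetes records the QoS class it derives from these resource settings in each pod's status. With all three example pods applied, a sketch to compare them:

```
kubectl apply -f /opt/best-effort.yaml -f /opt/burstable.yaml -f /opt/guaranteed.yaml
kubectl get pods -o custom-columns=NAME:.metadata.name,QOS:.status.qosClass
```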
--------------------------------------------------------------------------------
/chapter-19/files/lots.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Deployment
3 | apiVersion: apps/v1
4 | metadata:
5 | name: lots
6 | spec:
7 | replicas: 1000
8 | selector:
9 | matchLabels:
10 | app: lots
11 | template:
12 | metadata:
13 | labels:
14 | app: lots
15 | spec:
16 | containers:
17 | - name: sleep
18 | image: busybox
19 | command: ["/bin/sleep", "infinity"]
20 | resources:
21 | limits:
22 | memory: "64Mi"
23 | cpu: "250m"
24 |
--------------------------------------------------------------------------------
/chapter-19/files/needed.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Pod
4 | metadata:
5 | name: needed
6 | spec:
7 | containers:
8 | - name: needed
9 | image: busybox
10 | command: ["/bin/sleep", "infinity"]
11 | resources:
12 | limits:
13 | memory: "64Mi"
14 | cpu: "250m"
15 | priorityClassName: essential
16 |
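Because this pod references the `essential` PriorityClass, the scheduler may preempt lower-priority pods to make room for it. A sketch of the intended experiment with the other files in this directory:

```
kubectl apply -f /opt/essential.yaml
kubectl apply -f /opt/lots.yaml    # saturate the cluster with default-priority pods
kubectl apply -f /opt/needed.yaml
kubectl get pod needed             # should be scheduled after a victim is evicted
```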
--------------------------------------------------------------------------------
/chapter-19/files/oom-info:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | POD=$1
3 | if [ -z "${POD}" ]
4 | then
5 | echo "Usage: oom-info "
6 | exit 1
7 | fi
8 |
9 | CNT_ID=$(crictl ps --name ${POD} -q)
10 | if [ -z "${CNT_ID}" ]
11 | then
12 | echo "Pod ${POD} not found"
13 | exit 2
14 | fi
15 |
16 | pid_field='.info.pid'
17 | PID=$(crictl inspect $CNT_ID | jq -r "$pid_field")
18 | echo -n "OOM Score Adjustment: "
19 | cat /proc/$PID/oom_score_adj
20 |
--------------------------------------------------------------------------------
/chapter-19/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 |
10 | k8s_allow_scheduling_masters: true
11 |
12 | # NOTE: In a production system, you should keep these in an encrypted store
13 | # such as Ansible Vault.
14 | k8s_join_token: 1d8fb1.2875d52d62a3282d
15 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
16 |
--------------------------------------------------------------------------------
/chapter-19/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/chapter-19/playbook.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: masters
3 | become: yes
4 | roles:
5 | - tools
6 | - containerd
7 | - crictl
8 | - k8s
9 | - hosts: remote
10 | become: yes
11 | tasks:
12 | - name: files
13 | ansible.builtin.copy:
14 | src: "{{ item }}"
15 | dest: /opt/{{ item }}
16 | owner: root
17 | group: root
18 | mode: '0644'
19 | with_list:
20 | - best-effort.yaml
21 | - burstable.yaml
22 | - essential.yaml
23 | - guaranteed.yaml
24 | - lots.yaml
25 | - needed.yaml
26 | - name: scripts
27 | ansible.builtin.copy:
28 | src: "{{ item }}"
29 | dest: /opt/{{ item }}
30 | owner: root
31 | group: root
32 | mode: '0755'
33 | with_list:
34 | - cgroup-info
35 | - oom-info
36 |
--------------------------------------------------------------------------------
/chapter-20/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ../setup/collections
3 | inventory = ../setup/ec2-inventory
4 | roles_path = ../setup/roles
5 |
6 | [ssh_connection]
7 | ssh_args="-o UserKnownHostsFile=../sshkeys/known_hosts_aws_ansible_k8s"
--------------------------------------------------------------------------------
/chapter-20/aws-setup.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_lb:
7 | ip: 192.168.61.10
8 | ports:
9 | - name: ingress
10 | source: 80
11 | target: 80
12 | - name: api
13 | source: 6443
14 | target: 6443
15 | aws_instances:
16 | host01:
17 | type: master
18 | ip: 192.168.61.11
19 | instance_type: t3.medium
20 | host02:
21 | type: master
22 | ip: 192.168.61.12
23 | instance_type: t3.medium
24 | host03:
25 | type: master
26 | ip: 192.168.61.13
27 | instance_type: t3.medium
28 | host04:
29 | type: node
30 | ip: 192.168.61.14
31 | instance_type: t3.medium
32 | host05:
33 | type: node
34 | ip: 192.168.61.15
35 | instance_type: t3.medium
36 | host06:
37 | type: node
38 | ip: 192.168.61.16
39 | instance_type: t3.medium
40 | roles:
41 | - aws-instances
42 |
--------------------------------------------------------------------------------
/chapter-20/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_lb:
8 | ports:
9 | - name: ingress
10 | - name: api
11 | roles:
12 | - aws-instances
13 |
--------------------------------------------------------------------------------
/chapter-20/files/api-metrics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | conf=/etc/kubernetes/admin.conf
3 | cert=/etc/kubernetes/pki/admin.crt
4 | key=/etc/kubernetes/pki/admin.key
5 | ca=/etc/kubernetes/pki/ca.crt
6 |
7 | grep client-key-data $conf | cut -d" " -f 6 | base64 -d > $key
8 | grep client-cert $conf | cut -d" " -f 6 | base64 -d > $cert
9 |
10 | curl --cacert $ca --cert $cert --key $key https://192.168.61.10:6443/metrics
11 | echo ""
12 |
--------------------------------------------------------------------------------
/chapter-20/files/api-server-metrics.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | conf=/etc/kubernetes/admin.conf
3 | cert=/etc/kubernetes/pki/admin.crt
4 | key=/etc/kubernetes/pki/admin.key
5 | ca=/etc/kubernetes/pki/ca.crt
6 |
7 | grep client-key-data $conf | cut -d" " -f 6 | base64 -d > $key
8 | grep client-cert $conf | cut -d" " -f 6 | base64 -d > $cert
9 |
10 | curl --cacert $ca --cert $cert --key $key https://192.168.61.10:6443/metrics
11 | echo ""
12 |
--------------------------------------------------------------------------------
/chapter-20/files/install-kube-prometheus.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -e
2 | source /opt/k8sver
3 | cd /etc/kubernetes
4 | curl -Lo kube-prom.zip $prometheus_url
5 | unzip -qqo kube-prom.zip '*/manifests/*'
6 | rm -fr /etc/kubernetes/prometheus
7 | mv kube-prometheus-release-* prometheus
8 | kubectl apply --server-side -f /etc/kubernetes/prometheus/manifests/setup
9 | until kubectl get servicemonitors --all-namespaces ; do sleep 1; done
10 | kubectl apply -f /etc/kubernetes/prometheus/manifests
11 | kubectl patch -n monitoring svc/grafana -p \
12 | '{"spec":{"type":"NodePort","ports":[{"port": 3000, "nodePort": 3000}]}}'
13 | kubectl patch -n monitoring svc/prometheus-k8s -p \
14 | '{"spec":{"type":"NodePort","ports":[{"port": 9090, "nodePort": 9090}]}}'
15 |
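The two patches pin Grafana and Prometheus to fixed node ports (3000 and 9090), which works here because kubeadm-init.yaml.j2 widens service-node-port-range to start at 80. A quick check once the script finishes, as a sketch:

```
kubectl get svc -n monitoring grafana prometheus-k8s
```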
--------------------------------------------------------------------------------
/chapter-20/files/rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: prometheus
6 | app.kubernetes.io/instance: k8s
7 | app.kubernetes.io/name: prometheus
8 | app.kubernetes.io/part-of: kube-prometheus
9 | app.kubernetes.io/version: 2.32.1
10 | name: prometheus-k8s
11 | rules:
12 | - apiGroups:
13 | - ""
14 | resources:
15 | - services
16 | - endpoints
17 | - pods
18 | verbs:
19 | - get
20 | - list
21 | - watch
22 | - apiGroups:
23 | - extensions
24 | resources:
25 | - ingresses
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 | - apiGroups:
31 | - networking.k8s.io
32 | resources:
33 | - ingresses
34 | verbs:
35 | - get
36 | - list
37 | - watch
38 | ---
39 | apiVersion: rbac.authorization.k8s.io/v1
40 | kind: RoleBinding
41 | metadata:
42 | labels:
43 | app.kubernetes.io/component: prometheus
44 | app.kubernetes.io/instance: k8s
45 | app.kubernetes.io/name: prometheus
46 | app.kubernetes.io/part-of: kube-prometheus
47 | app.kubernetes.io/version: 2.32.1
48 | name: prometheus-k8s
49 | roleRef:
50 | apiGroup: rbac.authorization.k8s.io
51 | kind: Role
52 | name: prometheus-k8s
53 | subjects:
54 | - kind: ServiceAccount
55 | name: prometheus-k8s
56 | namespace: monitoring
57 |
--------------------------------------------------------------------------------
/chapter-20/files/svc-mon.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: todo
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: todo
9 | endpoints:
10 | - port: web
11 |
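This ServiceMonitor tells the Prometheus Operator to scrape the port named `web` on any Service labeled app=todo; the Role and RoleBinding in rbac.yaml grant the prometheus-k8s ServiceAccount the access it needs in that namespace. A sketch, assuming the todo application runs in the `todo` namespace as group_vars/remote.yaml configures:

```
kubectl apply -n todo -f rbac.yaml -f svc-mon.yaml
kubectl get servicemonitors -n todo
```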
--------------------------------------------------------------------------------
/chapter-20/group_vars/remote.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: true
3 | k8s_control_plane_endpoint: 192.168.61.10
4 | k8s_initial_master: 192.168.61.11
5 | k8s_cluster_ips:
6 | - 192.168.61.11
7 | - 192.168.61.12
8 | - 192.168.61.13
9 | - 192.168.61.14
10 | - 192.168.61.15
11 | - 192.168.61.16
12 |
13 | k8s_allow_scheduling_masters: true
14 | k8s_prometheus: true
15 | k8s_postgresql_operator: install
16 | k8s_postgresql_operator_namespace: todo
17 |
18 | todo_prefix: /
19 | todo_postgres_operator: true
20 | todo_ingress: false
21 | todo_stage: true
22 | todo_scaler: true
23 |
24 | # NOTE: In a production system, you should keep these in an encrypted store
25 | # such as Ansible Vault.
26 | k8s_join_token: 1d8fb1.2875d52d62a3282d
27 | k8s_certificate_key: 5a7e07816958efb97635e9a66256adb1
28 |
--------------------------------------------------------------------------------
/chapter-20/group_vars/vagrant.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_kube_vip: true
3 |
--------------------------------------------------------------------------------
/setup/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | collections_path = ./collections
3 | inventory = ./ec2-inventory
4 |
--------------------------------------------------------------------------------
/setup/aws-delete.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | aws_k8s_delete: true
8 | roles:
9 | - aws-instances
10 |
--------------------------------------------------------------------------------
/setup/aws-teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | connection: local
4 | vars:
5 | ansible_python_interpreter: "{{ ansible_playbook_python }}"
6 | aws_k8s_teardown: true
7 | roles:
8 | - aws-instances
9 |
--------------------------------------------------------------------------------
/setup/collections/.gitignore:
--------------------------------------------------------------------------------
1 | *
2 | !requirements.yaml
3 | !.gitignore
4 |
--------------------------------------------------------------------------------
/setup/collections/requirements.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | collections:
3 | - name: amazon.aws
4 | - name: ansible.posix
5 | - name: cloud.common
6 | - name: community.aws
7 | - name: community.crypto
8 | - name: community.general
9 | - name: kubernetes.core
10 |
--------------------------------------------------------------------------------
/setup/ec2-inventory/aws_ec2.yaml:
--------------------------------------------------------------------------------
1 | plugin: amazon.aws.aws_ec2
2 | regions:
3 | - us-east-1
4 | filters:
5 | tag:env: aws_ansible_k8s
6 | keyed_groups:
7 | - key: tags.type
8 | prefix: tag_type
9 | hostnames:
10 | - ip-address
11 |
--------------------------------------------------------------------------------
/setup/ec2-inventory/inventory:
--------------------------------------------------------------------------------
1 | # This file exists to turn the dynamic AWS groups into simplified names for
2 | # the playbook.yaml and the group_vars dir.
3 |
4 | # Add explicit localhost to get rid of the warning when there are no
5 | # AWS EC2 hosts yet.
6 | localhost
7 |
8 | # Add an empty vagrant group to get rid of a different warning.
9 | [vagrant]
10 |
11 | # Declare the dynamic groups so Ansible knows they exist.
12 | [tag_type_master]
13 |
14 | [tag_type_node]
15 |
16 | [tag_type_client]
17 |
18 | [tag_type_server]
19 |
20 | # Give those dynamic groups a friendlier name by creating parent groups around
21 | # them.
22 | [masters:children]
23 | tag_type_master
24 |
25 | [nodes:children]
26 | tag_type_node
27 |
28 | [clients:children]
29 | tag_type_client
30 |
31 | [servers:children]
32 | tag_type_server
33 |
34 | # Configure SSH for AWS servers.
35 | [aws_ec2:vars]
36 | ansible_python_interpreter=/usr/bin/python3
37 | ansible_user=ubuntu
38 | ansible_ssh_private_key_file={{ inventory_dir }}/../../sshkeys/id_rsa_aws_ansible_k8s
39 |
40 | # Specify the "remote" group that is shared by AWS and Vagrant.
41 | [remote:children]
42 | aws_ec2
43 |
--------------------------------------------------------------------------------
/setup/requirements.txt:
--------------------------------------------------------------------------------
1 | awscli>=1.19.16
--------------------------------------------------------------------------------
/setup/roles/aws-instances/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | aws_region: us-east-1
3 | aws_subnet_az: us-east-1d
4 | aws_private_cidr: 192.168.61.0/24
5 | aws_k8s_pod_cidr: 172.31.0.0/16
6 | aws_k8s_svc_cidr: 10.96.0.0/12
7 | aws_key: aws_ansible_k8s
8 | aws_ubuntu_owner: "099720109477"
9 | aws_ubuntu_image: "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*"
10 | aws_k8s_teardown: false
11 | aws_k8s_delete: false
12 | aws_k8s_ssh_dir: "../sshkeys"
13 |
--------------------------------------------------------------------------------
/setup/roles/aws-instances/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.include_tasks: setup.yaml
3 | when: not aws_k8s_teardown
4 |
5 | - ansible.builtin.include_tasks: teardown.yaml
6 | when: aws_k8s_teardown
7 |
8 | - ansible.builtin.include_tasks: delete.yaml
9 | when: aws_k8s_teardown and aws_k8s_delete
10 |
--------------------------------------------------------------------------------
/setup/roles/aws-instances/tasks/teardown.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: terminate instances
3 | community.aws.ec2_instance:
4 | region: "{{ aws_region }}"
5 | state: absent
6 | filters:
7 | "tag:env": "aws_ansible_k8s"
8 |
9 | - name: delete load balancer
10 | community.aws.elb_network_lb:
11 | region: "{{ aws_region }}"
12 | name: ansible-k8s
13 | state: absent
14 | when: aws_lb is defined
15 |
16 | - name: delete target groups
17 | community.aws.elb_target_group:
18 | name: "ansible-k8s-{{ item.name }}"
19 | state: absent
20 | with_list: "{{ aws_lb.ports }}"
21 | when: aws_lb is defined
22 |
--------------------------------------------------------------------------------
/setup/roles/aws-instances/templates/aws-ssh.sh.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | HOST=$1
3 | case $HOST in
4 | {% for instance in aws_ec2_instance_data %}
5 |
6 | {{ instance[0] }})
7 | IP={{ instance[1] }}
8 | ;;
9 | {% endfor %}
10 |
11 | *)
12 | echo "Usage: ./aws-ssh.sh "
13 | echo " Valid hosts: {{ aws_instances.keys() | join(', ') }}"
14 | exit 1
15 | ;;
16 |
17 | esac
18 |
19 | ssh_args="-o UserKnownHostsFile={{ aws_k8s_ssh_dir }}/known_hosts_aws_ansible_k8s"
20 | ssh_args+=" -i {{ aws_k8s_ssh_dir }}/id_rsa_aws_ansible_k8s"
21 | ssh $ssh_args ubuntu@$IP
22 |
--------------------------------------------------------------------------------
/setup/roles/containerd/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | containerd_cri: false
--------------------------------------------------------------------------------
/setup/roles/containerd/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart containerd
3 | ansible.builtin.systemd:
4 | name: containerd
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/setup/roles/containerd/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: prerequisites
3 | ansible.builtin.apt:
4 | name: apt-transport-https
5 | update_cache: yes
6 |
7 | - name: gpg key
8 | ansible.builtin.apt_key:
9 | url: https://download.docker.com/linux/ubuntu/gpg
10 | state: present
11 |
12 | - name: repository
13 | ansible.builtin.apt_repository:
14 | repo: deb https://download.docker.com/linux/ubuntu focal stable
15 | state: present
16 | filename: docker
17 |
18 | - name: install
19 | ansible.builtin.apt:
20 | name: containerd.io
21 | update_cache: yes
22 |
23 | - name: enable cri
24 | ansible.builtin.lineinfile:
25 | path: /etc/containerd/config.toml
26 | regexp: '^disabled_plugins'
27 | line: "disabled_plugins = []"
28 | when: containerd_cri
29 | notify:
30 | - restart containerd
31 |
32 | - meta: flush_handlers
33 |
--------------------------------------------------------------------------------
/setup/roles/cri-o/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crio_version: "1.20"
3 | crio_os: xUbuntu_20.04
4 | crio_repo: https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable
5 | crio_skopeo: false
6 | crio_cni: false
7 | cni_plugin_version: v0.9.1
8 | cni_plugin_url: https://github.com/containernetworking/plugins/releases/download/{{ cni_plugin_version }}/cni-plugins-linux-amd64-{{ cni_plugin_version }}.tgz
9 |
--------------------------------------------------------------------------------
/setup/roles/cri-o/files/100-crio-bridge.conf:
--------------------------------------------------------------------------------
1 | {
2 | "cniVersion": "0.3.1",
3 | "name": "crio",
4 | "type": "bridge",
5 | "bridge": "cni0",
6 | "isGateway": true,
7 | "ipMasq": true,
8 | "hairpinMode": true,
9 | "ipam": {
10 | "type": "host-local",
11 | "routes": [
12 | { "dst": "0.0.0.0/0" }
13 | ],
14 | "ranges": [
15 | [{ "subnet": "10.85.0.0/16" }]
16 | ]
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/setup/roles/cri-o/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: gpg key
3 | ansible.builtin.apt_key:
4 | url: "{{ crio_repo }}/{{ crio_os }}/Release.key"
5 | state: present
6 |
7 | - name: kubic repository
8 | ansible.builtin.apt_repository:
9 | repo: "deb {{ crio_repo }}/{{ crio_os }}/ /"
10 | state: present
11 | filename: kubic
12 |
13 | - name: cri-o repository
14 | ansible.builtin.apt_repository:
15 | repo: "deb {{ crio_repo }}:/cri-o:/{{ crio_version }}/{{ crio_os }}/ /"
16 | state: present
17 | filename: kubic.cri-o
18 |
19 | - name: install
20 | ansible.builtin.apt:
21 | name:
22 | - cri-o
23 | - cri-o-runc
24 | update_cache: yes
25 |
26 | - name: skopeo
27 | ansible.builtin.apt:
28 | name: skopeo
29 | when: crio_skopeo
30 |
31 | - name: service
32 | ansible.builtin.service:
33 | name: crio
34 | state: started
35 | enabled: yes
36 |
37 | - name: cni plugins
38 | ansible.builtin.unarchive:
39 | src: "{{ cni_plugin_url }}"
40 | dest: /opt/cni/bin
41 | remote_src: yes
42 | when: crio_cni
43 |
44 | - name: cni config
45 | ansible.builtin.copy:
46 | src: 100-crio-bridge.conf
47 | dest: /etc/cni/net.d/100-crio-bridge.conf
48 | owner: root
49 | group: root
50 | mode: '0644'
51 | when: crio_cni
52 |
--------------------------------------------------------------------------------
/setup/roles/crictl/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | crictl_container_engine: containerd
3 | crictl_version: v1.20.0
4 | crictl_url: https://github.com/kubernetes-sigs/cri-tools/releases/download/{{ crictl_version }}/crictl-{{ crictl_version }}-linux-amd64.tar.gz
5 |
--------------------------------------------------------------------------------
/setup/roles/crictl/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install
3 | ansible.builtin.unarchive:
4 | src: "{{ crictl_url }}"
5 | dest: /usr/local/bin
6 | remote_src: yes
7 |
8 | - name: configure
9 | ansible.builtin.template:
10 | src: crictl.yaml.j2
11 | dest: /etc/crictl.yaml
12 | mode: '0644'
13 |
--------------------------------------------------------------------------------
/setup/roles/crictl/templates/crictl.yaml.j2:
--------------------------------------------------------------------------------
1 | {% if crictl_container_engine == 'crio' %}
2 | runtime-endpoint: unix:///var/run/crio/crio.sock
3 | image-endpoint: unix:///var/run/crio/crio.sock
4 | {% else %}
5 | runtime-endpoint: unix:///var/run/containerd/containerd.sock
6 | image-endpoint: unix:///var/run/containerd/containerd.sock
7 | {% endif %}
8 | timeout: 10
9 |
--------------------------------------------------------------------------------
/setup/roles/docker-ce/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: prerequisites
3 | apt:
4 | name: apt-transport-https
5 | update_cache: yes
6 |
7 | - name: gpg key
8 | ansible.builtin.apt_key:
9 | url: https://download.docker.com/linux/ubuntu/gpg
10 | state: present
11 |
12 | - name: repository
13 | ansible.builtin.apt_repository:
14 | repo: deb https://download.docker.com/linux/ubuntu focal stable
15 | state: present
16 | filename: docker
17 |
18 | - name: install
19 | apt:
20 | name: docker-ce
21 | update_cache: yes
22 |
--------------------------------------------------------------------------------
/setup/roles/docker-registry/files/docker-registry.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Docker Registry
3 | After=docker.service
4 | StartLimitIntervalSec=0
5 |
6 | [Service]
7 | Type=simple
8 | Restart=always
9 | RestartSec=5
10 | User=root
11 | ExecStart=docker run --name registry \
12 | -v /etc/ssl:/certs \
13 | -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \
14 | -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/certs/registry.crt \
15 | -e REGISTRY_HTTP_TLS_KEY=/certs/private/registry.pem \
16 | -p 443:443 \
17 | registry:2
18 | ExecStop=docker rm -f registry
19 |
20 | [Install]
21 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/setup/roles/haproxy/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart haproxy
3 | ansible.builtin.systemd:
4 | name: haproxy
5 | state: restarted
6 |
--------------------------------------------------------------------------------
/setup/roles/haproxy/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install
3 | ansible.builtin.apt:
4 | name: haproxy
5 | update_cache: yes
6 |
7 | - name: configure
8 | ansible.builtin.template:
9 | src: haproxy.cfg.j2
10 | dest: /etc/haproxy/haproxy.cfg
11 | mode: '0644'
12 | notify: restart haproxy
13 |
14 | - name: service
15 | ansible.builtin.systemd:
16 | name: haproxy
17 | state: started
18 | enabled: yes
19 |
20 | - meta: flush_handlers
21 |
--------------------------------------------------------------------------------
/setup/roles/haproxy/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | defaults
2 | timeout connect 10s
3 | timeout client 30s
4 | timeout server 30s
5 | log global
6 |
7 | {% for name, config in haproxy_config.items() %}
8 | frontend {{ name }}-front
9 | bind *:{{ config['port'] }}
10 | option tcplog
11 | mode tcp
12 | default_backend {{ name }}-back
13 |
14 | backend {{ name }}-back
15 | mode tcp
16 | balance roundrobin
17 | {% for ip in config['backends'] %}
18 | server {{ ip }} {{ ip }}:{{ config['targetPort'] }} check
19 | {% endfor %}
20 |
21 | {% endfor %}
22 |
--------------------------------------------------------------------------------
/setup/roles/iperf/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | iperf_server: false
--------------------------------------------------------------------------------
/setup/roles/iperf/files/iperf.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=iperf server
3 | After=network.target
4 | StartLimitIntervalSec=0
5 |
6 | [Service]
7 | Type=simple
8 | Restart=always
9 | RestartSec=5
10 | User=root
11 | ExecStart=/usr/bin/iperf3 -s
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/setup/roles/iperf/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install
3 | apt:
4 | name: iperf3
5 | update_cache: yes
6 |
7 | - name: service file
8 | ansible.builtin.copy:
9 | src: iperf.service
10 | dest: /etc/systemd/system/iperf.service
11 | owner: root
12 | group: root
13 | mode: '0644'
14 | when: iperf_server
15 |
16 | - name: service
17 | ansible.builtin.systemd:
18 | name: iperf
19 | state: started
20 | enabled: yes
21 | daemon_reload: yes
22 | when: iperf_server
23 |
--------------------------------------------------------------------------------
/setup/roles/k3s/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k3s_version: "v1.23.4+k3s1"
3 | k3s_url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s
--------------------------------------------------------------------------------
/setup/roles/k3s/files/k3s.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=k3s
3 | After=network.target
4 | StartLimitIntervalSec=0
5 |
6 | [Service]
7 | Type=simple
8 | Restart=always
9 | RestartSec=5
10 | User=root
11 | ExecStart=/usr/local/bin/k3s server
12 |
13 | [Install]
14 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/setup/roles/k3s/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: download
3 | ansible.builtin.get_url:
4 | url: "{{ k3s_url }}"
5 | dest: /usr/local/bin/k3s
6 | mode: '0755'
7 |
8 | - name: service file
9 | ansible.builtin.copy:
10 | src: k3s.service
11 | dest: /etc/systemd/system/k3s.service
12 | owner: root
13 | group: root
14 | mode: '0644'
15 |
16 | - name: service
17 | ansible.builtin.systemd:
18 | name: k3s
19 | state: started
20 | enabled: yes
21 | daemon_reload: yes
22 |
--------------------------------------------------------------------------------
/setup/roles/k8s/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | k8s_install: true
3 |
4 | k8s_pod_cidr: 172.31.0.0/16
5 | k8s_allow_scheduling_masters: false
6 | k8s_kube_vip: false
7 | k8s_network: calico
8 | k8s_prometheus: false
9 | k8s_postgresql_operator: none
10 | k8s_postgresql_operator_namespace: default
11 |
12 | k8s_version: 1.28.0
13 | calico_version: 3.27.2
14 | k8s_longhorn_version: 1.6.0
15 | metrics_server_version: 0.7.0
16 | ingress_version: 1.10.0
17 | k8s_prometheus_version: "0.10"
18 |
--------------------------------------------------------------------------------
/setup/roles/k8s/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.include_tasks: prep.yaml
3 |
4 | - ansible.builtin.include_tasks: install.yaml
5 | when: k8s_install
6 |
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/calico-custom-resources.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: operator.tigera.io/v1
2 | kind: Installation
3 | metadata:
4 | name: default
5 | spec:
6 | # Configures Calico networking.
7 | calicoNetwork:
8 | ipPools:
9 | - blockSize: 26
10 | cidr: {{ k8s_pod_cidr }}
11 | encapsulation: VXLANCrossSubnet
12 | natOutgoing: Enabled
13 | nodeSelector: all()
14 | nodeAddressAutodetectionV4:
15 | canReach: {{ k8s_initial_master }}
16 | {% if k8s_prometheus %}
17 | ---
18 | apiVersion: crd.projectcalico.org/v1
19 | kind: FelixConfiguration
20 | metadata:
21 | name: default
22 | spec:
23 | prometheusMetricsEnabled: true
24 | {% endif %}
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/ingress-patch.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: ingress-nginx-controller
6 | namespace: ingress-nginx
7 | spec:
8 | ports:
9 | - port: 80
10 | nodePort: 80
11 | - port: 443
12 | nodePort: 443
13 | ---
14 | apiVersion: networking.k8s.io/v1
15 | kind: IngressClass
16 | metadata:
17 | name: nginx
18 |   annotations:
19 |     ingressclass.kubernetes.io/is-default-class: "true"
20 |
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/k8s-all.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | echo "Running command locally"
3 | eval $@
4 | for host in {{ k8s_cluster_ips | reject("eq", k8s_initial_master) | join(' ') }}
5 | do
6 | echo "Running command on $host"
7 | ssh -T -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa_k8s_all root@${host} $@
8 | done
9 |
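The rendered script runs a command locally and then over SSH on every other cluster member. A usage sketch, assuming the k8s role installs the rendered script on root's PATH (this template alone does not show where it lands):

```
k8s-all uname -r
k8s-all 'systemctl restart kubelet'
```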
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/k8s-ver.j2:
--------------------------------------------------------------------------------
1 | export K8SV="{{ k8s_version }}-*"
2 | export calico_url="https://raw.githubusercontent.com/projectcalico/calico/v{{ calico_version }}/manifests/tigera-operator.yaml"
3 | export longhorn_url="https://raw.githubusercontent.com/longhorn/longhorn/v{{ k8s_longhorn_version }}/deploy/longhorn.yaml"
4 | export metrics_url="https://github.com/kubernetes-sigs/metrics-server/releases/download/v{{ metrics_server_version }}/components.yaml"
5 | export ingress_url="https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v{{ ingress_version }}/deploy/static/provider/cloud/deploy.yaml"
6 | export prometheus_url="https://github.com/prometheus-operator/kube-prometheus/archive/refs/heads/release-{{ k8s_prometheus_version }}.zip"
7 |
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/kubeadm-init.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kubeadm.k8s.io/v1beta3
3 | kind: InitConfiguration
4 | bootstrapTokens:
5 | - groups:
6 | - system:bootstrappers:kubeadm:default-node-token
7 | token: {{ k8s_join_token }}
8 | ttl: 2h0m0s
9 | usages:
10 | - signing
11 | - authentication
12 | nodeRegistration:
13 | kubeletExtraArgs:
14 | node-ip: {{ k8s_initial_master }}
15 | {% if k8s_allow_scheduling_masters %}
16 | taints: []
17 | {% endif %}
18 | localAPIEndpoint:
19 | advertiseAddress: {{ k8s_initial_master }}
20 | certificateKey: "{{ k8s_certificate_key }}"
21 | ---
22 | apiVersion: kubeadm.k8s.io/v1beta3
23 | kind: ClusterConfiguration
24 | kubernetesVersion: {{ k8s_version }}
25 | apiServer:
26 | extraArgs:
27 | service-node-port-range: 80-32767
28 | networking:
29 | podSubnet: "{{ k8s_pod_cidr }}"
30 | controlPlaneEndpoint: "{{ k8s_control_plane_endpoint }}:6443"
31 | ---
32 | apiVersion: kubelet.config.k8s.io/v1beta1
33 | kind: KubeletConfiguration
34 | serverTLSBootstrap: true
35 |
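With serverTLSBootstrap enabled, each kubelet requests its serving certificate from the cluster CA rather than self-signing one, and those CSRs are not approved automatically. A sketch of the manual step (the CSR name is a placeholder):

```
kubectl get csr
kubectl certificate approve <csr-name>
```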
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/kubeadm-join.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: kubeadm.k8s.io/v1beta3
2 | kind: JoinConfiguration
3 | discovery:
4 | bootstrapToken:
5 | apiServerEndpoint: {{ k8s_control_plane_endpoint }}:6443
6 | token: {{ k8s_join_token }}
7 | unsafeSkipCAVerification: true
8 | timeout: 5m0s
9 | nodeRegistration:
10 | kubeletExtraArgs:
11 | node-ip: {{ ansible_facts['all_ipv4_addresses'] | intersect(k8s_cluster_ips) | join('') }}
12 | {% if k8s_allow_scheduling_masters %}
13 | taints: []
14 | {% endif %}
15 | {% if k8s_kube_vip %}
16 | ignorePreflightErrors:
17 | - DirAvailable--etc-kubernetes-manifests
18 | {% endif %}
19 | {% if 'masters' in group_names %}
20 | controlPlane:
21 | localAPIEndpoint:
22 | advertiseAddress: {{ ansible_facts['all_ipv4_addresses'] | intersect(k8s_cluster_ips) | join('') }}
23 | certificateKey: "{{ k8s_certificate_key }}"
24 | {% endif %}
25 |
--------------------------------------------------------------------------------
/setup/roles/k8s/templates/postgres/postgres-operator.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: postgres-operator
5 | namespace: {{ k8s_postgresql_operator_namespace }}
6 | labels:
7 | application: postgres-operator
8 | spec:
9 | replicas: 1
10 | strategy:
11 | type: "Recreate"
12 | selector:
13 | matchLabels:
14 | name: postgres-operator
15 | template:
16 | metadata:
17 | labels:
18 | name: postgres-operator
19 | spec:
20 | serviceAccountName: postgres-operator
21 | containers:
22 | - name: postgres-operator
23 | image: registry.opensource.zalan.do/acid/postgres-operator:v1.7.1
24 | imagePullPolicy: IfNotPresent
25 | resources:
26 | requests:
27 | cpu: 100m
28 | memory: 250Mi
29 | limits:
30 | cpu: 500m
31 | memory: 500Mi
32 | securityContext:
33 | runAsUser: 1000
34 | runAsNonRoot: true
35 | readOnlyRootFilesystem: true
36 | allowPrivilegeEscalation: false
37 | env:
38 | - name: POSTGRES_OPERATOR_CONFIGURATION_OBJECT
39 | value: postgresql-operator-default-configuration
40 |
--------------------------------------------------------------------------------
/setup/roles/keepalived/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | keepalived_router_id: 51
--------------------------------------------------------------------------------
/setup/roles/keepalived/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: restart keepalived
3 |   ansible.builtin.systemd:
4 |     name: keepalived
5 |     state: restarted
6 |
--------------------------------------------------------------------------------
/setup/roles/keepalived/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install
3 |   ansible.builtin.apt:
4 |     name: keepalived
5 |     update_cache: yes
6 |
7 | - name: configure
8 |   ansible.builtin.template:
9 |     src: keepalived.conf.j2
10 |     dest: /etc/keepalived/keepalived.conf
11 |     mode: '0644'
12 |   notify: restart keepalived
13 |
14 | - name: service
15 |   ansible.builtin.systemd:
16 |     name: keepalived
17 |     state: started
18 |     enabled: yes
19 |
20 | # Run any pending restart now, so the VIP is live before later roles need it.
21 | - ansible.builtin.meta: flush_handlers
22 |
--------------------------------------------------------------------------------
/setup/roles/keepalived/templates/keepalived.conf.j2:
--------------------------------------------------------------------------------
1 | {% for ifacename in ansible_interfaces %}
2 | {% set ifacevar = 'ansible_' + ifacename %}
3 | {% if 'ipv4' in hostvars[inventory_hostname][ifacevar] %}
4 | {% set addr = hostvars[inventory_hostname][ifacevar]['ipv4']['address'] %}
5 | {% for host in keepalived_hosts %}
6 | {% if addr == host['ip'] %}
7 | vrrp_instance VI_1 {
8 |   state MASTER
9 |   interface {{ ifacename }}
10 |   virtual_router_id {{ keepalived_router_id }}
11 |   priority {{ host['priority'] }}
12 |   advert_int 1
13 |   virtual_ipaddress {
14 |     {{ keepalived_vip }}
15 |   }
16 |   unicast_src_ip {{ addr }}
17 |   unicast_peer {
18 | {% for ip in keepalived_hosts %}
19 | {% if ip['ip'] != addr %}
20 |     {{ ip['ip'] }}
21 | {% endif %}
22 | {% endfor %}
23 |   }
24 | }
25 | {% endif %}
26 | {% endfor %}
27 | {% endif %}
28 | {% endfor %}
29 |
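Each host renders at most one vrrp_instance: the outer loops walk the host's own interfaces until they find the address that also appears in keepalived_hosts, and the inner loop lists every other host as a unicast peer. Every node declares state MASTER, so the initial election is settled purely by priority; the highest-priority node keeps the VIP and the rest back off once VRRP adverts are exchanged. The variables are presumably defined in group_vars; a sketch of the shape the template expects, with made-up addresses and priorities:

    keepalived_vip: 192.168.61.10
    keepalived_hosts:
    - ip: 192.168.61.11
      priority: 150
    - ip: 192.168.61.12
      priority: 100
    - ip: 192.168.61.13
      priority: 50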
--------------------------------------------------------------------------------
/setup/roles/test/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | test_options: "-r --filter-tags {{ ansible_hostname }}"
3 | test_path: /opt/tests
--------------------------------------------------------------------------------
/setup/roles/test/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install retry
3 |   ansible.builtin.apt:
4 |     name:
5 |     - retry
6 |     update_cache: yes
7 |
8 | - name: install node
9 |   ansible.builtin.apt:
10 |     name:
11 |     - nodejs
12 |     - npm
13 |
14 | - name: install bats
15 |   ansible.builtin.shell: >
16 |     npm install -g
17 |     bats
18 |     bats-assert
19 |     bats-support@git+https://github.com/ztombol/bats-support.git#v0.2.0
20 |
21 | - name: run tests
22 |   ansible.builtin.shell: bats {{ test_options }} {{ test_path }}
23 |
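Installing bats from npm rather than apt matters here because --filter-tags requires bats-core 1.8 or newer, likely newer than the distribution package. With the default test_options, each host runs only the tests tagged with its own hostname; overriding the variable changes the selection. A sketch of a group_vars override (the tag value is an example):

    # Run the whole suite, ignoring per-host tags:
    test_options: "-r"
    # Or target a specific host's tests:
    # test_options: "-r --filter-tags host-a-1"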
--------------------------------------------------------------------------------
/setup/roles/todo/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | todo_kubeconfig: /etc/kubernetes/admin.conf
3 | todo_version: 1.0.2
4 | todo_replicas: 3
5 | todo_prefix: /todo
6 | todo_namespace: todo
7 | todo_postgres_operator: false
8 | todo_ingress: true
9 | todo_stage: false
10 | todo_scaler: false
11 |
--------------------------------------------------------------------------------
/setup/roles/todo/templates/database-deploy.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: todo-db
5 |   labels:
6 |     app: todo-db
7 | spec:
8 |   replicas: 1
9 |   selector:
10 |     matchLabels:
11 |       app: todo-db
12 |   template:
13 |     metadata:
14 |       labels:
15 |         app: todo-db
16 |     spec:
17 |       containers:
18 |       - name: todo-db
19 |         image: postgres:14
20 |         envFrom:
21 |         - secretRef:
22 |             name: todo-db
23 |         env:
24 |         - name: PGDATA
25 |           value: "/data/pgdata"
26 |         volumeMounts:
27 |         - mountPath: /data
28 |           name: todo-data
29 |       volumes:
30 |       - name: todo-data
31 |         persistentVolumeClaim:
32 |           claimName: todo-data
--------------------------------------------------------------------------------
/setup/roles/todo/templates/database-secret.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Secret
3 | apiVersion: v1
4 | metadata:
5 |   name: todo-db
6 | stringData:
7 |   POSTGRES_USER: todo
8 |   POSTGRES_PASSWORD: todopw
9 |
--------------------------------------------------------------------------------
/setup/roles/todo/templates/database-service.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Service
3 | apiVersion: v1
4 | metadata:
5 |   name: todo-db
6 | spec:
7 |   selector:
8 |     app: todo-db
9 |   ports:
10 |   - protocol: TCP
11 |     port: 5432
--------------------------------------------------------------------------------
/setup/roles/todo/templates/database.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: "acid.zalan.do/v1"
2 | kind: postgresql
3 | metadata:
4 | name: todo-db
5 | spec:
6 | teamId: todo
7 | volume:
8 | size: 1Gi
9 | storageClass: longhorn
10 | numberOfInstances: 3
11 | users:
12 | todo:
13 | - superuser
14 | - createdb
15 | databases:
16 | todo: todo
17 | postgresql:
18 | version: "14"
19 |
--------------------------------------------------------------------------------
/setup/roles/todo/templates/ingress.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1
3 | kind: Ingress
4 | metadata:
5 |   name: todo-ingress
6 | spec:
7 |   rules:
8 |   - http:
9 |       paths:
10 |       - path: /todo
11 |         pathType: Prefix
12 |         backend:
13 |           service:
14 |             name: todo
15 |             port:
16 |               number: 5000
17 |
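The path here is hardcoded as /todo even though the role defaults define todo_prefix: /todo. If the prefix ever needed to vary, the rule could reference the variable instead; a sketch of that variant (a possible change, not the template as shipped):

    - path: {{ todo_prefix }}
      pathType: Prefix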
--------------------------------------------------------------------------------
/setup/roles/todo/templates/pvc.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: PersistentVolumeClaim
3 | apiVersion: v1
4 | metadata:
5 |   name: todo-data
6 | spec:
7 | {% if todo_storageclass is defined %}
8 |   storageClassName: {{ todo_storageclass }}
9 | {% endif %}
10 |   accessModes:
11 |   - ReadWriteOnce
12 |   resources:
13 |     requests:
14 |       storage: 1Gi
--------------------------------------------------------------------------------
/setup/roles/todo/templates/scaler.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: autoscaling/v2
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 |   name: todo
6 |   labels:
7 |     app: todo
8 | spec:
9 |   scaleTargetRef:
10 |     apiVersion: apps/v1
11 |     kind: Deployment
12 |     name: todo
13 |   minReplicas: 3
14 |   maxReplicas: 10
15 |   metrics:
16 |   - type: Resource
17 |     resource:
18 |       name: cpu
19 |       target:
20 |         type: Utilization
21 |         averageUtilization: 50
22 |
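For this autoscaler to do anything, the cluster needs a metrics pipeline (typically metrics-server) and the todo Deployment must declare CPU requests, because averageUtilization is a percentage of the requested CPU. A sketch of the kind of resources stanza the target containers would need (values are illustrative):

    resources:
      requests:
        cpu: 100m    # with averageUtilization: 50, scaling targets ~50m of actual use per pod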
--------------------------------------------------------------------------------
/setup/roles/todo/templates/service.yaml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | kind: Service
3 | apiVersion: v1
4 | metadata:
5 |   name: todo
6 |   labels:
7 |     app: todo
8 | spec:
9 | {% if not todo_ingress %}
10 |   type: NodePort
11 | {% endif %}
12 |   selector:
13 |     app: todo
14 |   ports:
15 |   - name: web
16 |     protocol: TCP
17 |     port: 5000
18 | {% if todo_ingress %}
19 |     targetPort: 5000
20 | {% else %}
21 |     nodePort: 5000
22 | {% endif %}
23 |
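nodePort: 5000 is below the default NodePort range of 30000-32767; it works here only because the ClusterConfiguration in this role's kubeadm-init.yaml.j2 widens the range:

    apiServer:
      extraArgs:
        service-node-port-range: 80-32767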
--------------------------------------------------------------------------------
/setup/roles/tools/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: install
3 |   ansible.builtin.apt:
4 |     name:
5 |     - jq
6 |     - tmux
7 |     - unzip
8 |     - ca-certificates
9 |     state: latest
10 |     update_cache: yes
11 |
--------------------------------------------------------------------------------