├── .vscode
│   └── settings.json
├── 00-Intro.md
├── 01-PKI.md
├── 02-Build-Binaries.md
├── 03-Prepare-Config.md
├── 04-Etcd.md
├── 05-Control-Plane.md
├── 06-Worker-Nodes.md
├── 07-Remote-Access.md
├── 08-Network-Routes.md
├── 09-Core-DNS.md
├── 10-Smoke-Test.md
├── 11-References.md
├── 12-Ingress-Controller.md
├── 13-TLS-Termination.md
├── 14-Metrics-Server.md
├── LICENSE
├── README.md
├── img
│   ├── kubernetes-running.png
│   └── rasbperrypi-clusterhat.jpg
└── pki_scripts
    ├── ca-config.json
    ├── cert_data.json
    └── pki.sh
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "cSpell.words": [
3 | "ALPN",
4 | "Apath",
5 | "CNAT",
6 | "Cgroups",
7 | "Corefile",
8 | "Flare's",
9 | "GOARCH",
10 | "GOARM",
11 | "GOFLAGS",
12 | "GOOS",
13 | "Hightower",
14 | "Kubelet",
15 | "Kubenetes",
16 | "LDFLAGS",
17 | "Masq",
18 | "NOFILE",
19 | "NPROC",
20 | "Tahoma",
21 | "WORKDIR",
22 | "Werror",
23 | "abelperezok",
24 | "aescbc",
25 | "algo",
26 | "apiextensions",
27 | "apiserver",
28 | "armel",
29 | "armhf",
30 | "armv",
31 | "autoregister",
32 | "autoregistration",
33 | "autoupdate",
34 | "binfmt",
35 | "btrfs",
36 | "busybox",
37 | "cacert",
38 | "cafile",
39 | "cert's",
40 | "certfile",
41 | "cfssl",
42 | "cfssljson",
43 | "cgroup",
44 | "chmod",
45 | "clusterctrl",
46 | "clusterrole",
47 | "clusterrolebinding",
48 | "cnio",
49 | "componentstatuses",
50 | "configmap",
51 | "configmaps",
52 | "conntrack",
53 | "containerd",
54 | "coredns",
55 | "cpuacct",
56 | "cpuset",
57 | "crictl",
58 | "customresourcedefinition",
59 | "daemonset",
60 | "dnsutils",
61 | "dphys",
62 | "entrypoint",
63 | "envsubst",
64 | "etcdctl",
65 | "etes",
66 | "extldflags",
67 | "flowcontrol",
68 | "gencert",
69 | "globalconfiguration",
70 | "globalconfigurations",
71 | "haproxy",
72 | "healthz",
73 | "hexdump",
74 | "hostnamectl",
75 | "ingressclass",
76 | "initca",
77 | "installsuffix",
78 | "ipam",
79 | "keyfile",
80 | "keyout",
81 | "kube",
82 | "kubeconfig",
83 | "kubectl",
84 | "kubeproxy",
85 | "kubern",
86 | "kubescheduler",
87 | "libbtrfs",
88 | "libcap",
89 | "libgcc",
90 | "libseccomp",
91 | "linkmode",
92 | "loadbalance",
93 | "maxage",
94 | "maxbackup",
95 | "maxsize",
96 | "mkdir",
97 | "multiuse",
98 | "myapp",
99 | "mydata",
100 | "mykey",
101 | "netdev",
102 | "netfilter",
103 | "netmask",
104 | "newkey",
105 | "nginxinc",
106 | "nslookup",
107 | "overlayfs",
108 | "poststarthook",
109 | "proc",
110 | "progs",
111 | "protobuf",
112 | "proxier",
113 | "qemu",
114 | "rasbperrypi",
115 | "rbac",
116 | "readyz",
117 | "recvtty",
118 | "requestheader",
119 | "resolv",
120 | "rhkf",
121 | "roundrobin",
122 | "rsync",
123 | "runc",
124 | "rwxr",
125 | "sbin",
126 | "serviceaccount",
127 | "setcap",
128 | "snapshotter",
129 | "socat",
130 | "ssid",
131 | "sslforfree",
132 | "subj",
133 | "swapfile",
134 | "swapoff",
135 | "tcplog",
136 | "titilambert",
137 | "tmnnr",
138 | "transportserver",
139 | "transportservers",
140 | "urandom",
141 | "virtualserver",
142 | "virtualserverroutes",
143 | "virtualservers"
144 | ]
145 | }
--------------------------------------------------------------------------------
/00-Intro.md:
--------------------------------------------------------------------------------
1 | # Introduction - Setting Up the Cluster
2 | This is my second attempt at setting up such a cluster, some time after I first tried while learning [Kubernetes](https://kubernetes.io/). One of the reasons I wanted to do this is that I had two [Raspberry Pi Zeros](https://www.raspberrypi.org/products/raspberry-pi-zero/) (a Zero and a [Zero W](https://www.raspberrypi.org/products/raspberry-pi-zero-w/)) lying around and a new (at the time) [Raspberry Pi 3](https://www.raspberrypi.org/products/raspberry-pi-3-model-b/). After experimenting with them individually for a bit, I decided to put them to work together.
3 |
4 | Some research brought my attention to [ClusterHAT](https://clusterhat.com/) which simplified all the messing around with USB gadgets to make the Pi Zeros believe they’re connected to a network using USB. Having tested it for a while, I decided to give it a go and install a Kubernetes cluster.
5 | ## Hardware
6 |
7 | The specific hardware I used for this exercise is:
8 |
9 | * Raspberry Pi 3
10 | * Raspberry Pi Zero
11 | * Raspberry Pi Zero W
12 | * Cluster HAT v2.3
13 | * 3 x micro SD cards (16 GB for master and 8 GB for workers)
14 |
15 | For instructions on how to set up the hardware, see [their website](https://clusterctrl.com/setup-assembly).
16 |
17 | ## Operating System
18 |
19 | Download an image for each Pi: the CNAT controller image and one per worker (p1 .. p4). In this case I only used p1 and p2, as I only have two Pi Zeros, and flashed each image to its card as sketched after the list. As of this writing, these are the files available from [clusterctrl downloads](https://clusterctrl.com/setup-software); I chose the lite version (no desktop or GUI) and CNAT to use the internal NAT network.
20 |
21 | * [Controller] 2020-08-20-8-ClusterCTRL-armhf-lite-CNAT.img
22 | * [Worker 1] 2020-08-20-8-ClusterCTRL-armhf-lite-p1.img
23 | * [Worker 2] 2020-08-20-8-ClusterCTRL-armhf-lite-p2.img
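A minimal sketch of writing an image to a microSD card with `dd` (the device name `/dev/sdX` is an assumption; confirm it with `lsblk` first, because writing to the wrong device destroys its contents):

```shell
# Identify the card (look for the removable device matching its size)
lsblk

# Write the controller image and flush the buffers before removing the card
sudo dd if=2020-08-20-8-ClusterCTRL-armhf-lite-CNAT.img of=/dev/sdX bs=4M status=progress conv=fsync
sync
```

Repeat with the p1 and p2 images for the workers' cards.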
24 |
25 |
26 |
27 | ## Preparing the Controller
28 |
29 | If you're using Wi-Fi, set it up before the first boot so it's easy to connect completely headless.
30 |
31 | ### Setting Up Wi-Fi on the Controller
32 |
33 | Mount the microSD card and, in the /boot partition, modify the file `wpa_supplicant.conf`:
34 |
35 | ```
36 | ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
37 | update_config=1
38 | country=GB
39 |
40 | network={
41 | ssid="XYZ"
42 | psk="abcdef.zyx.1234"
43 | }
44 | ```
45 |
46 | Create an empty file so the SSH service starts with the system.
47 |
48 | ```shell
49 | touch /boot/ssh
50 | ```
51 |
52 |
53 | ### Understand the Networking Model
54 |
55 |
56 | Host name | External IP | Internal IP | Role |
57 | ---------------|---------------|----------------|--------|
58 | rpi-k8s-master | 192.168.1.164 | 172.19.181.254 | master |
59 | p1 | NAT | 172.19.181.1 | worker |
60 | p2 | NAT | 172.19.181.2 | worker |
61 |
62 |
63 |
64 | *(Diagram: the master node sits on the external network at 192.168.1.164; the worker Pi Zeros sit behind the internal CNAT network on 172.19.181.0/24.)*
65 |
66 | ### Upgrade the System
67 |
68 | It's always good to start with a fresh, up-to-date system. A Raspbian particularity: use `full-upgrade` instead of plain `upgrade`, since `upgrade` may not pull in all the dependencies of the new packages.
69 |
70 | ```shell
71 | $ sudo apt update
72 | $ sudo apt full-upgrade
73 | ```
74 |
75 | ### Change the hostname
76 |
77 | In my case, I wanted to tell the master node apart from the rest, so I changed its hostname to `rpi-k8s-master`:
78 |
79 | ```shell
80 | $ sudo hostnamectl set-hostname rpi-k8s-master
81 | ```
82 |
83 | ### Set Up and Verify Connectivity
84 |
85 | Once all the base hardware is up and running, it'll be much easier to connect to the Pi Zeros if the SSH config file is set up for them.
86 |
87 | Create a config file if it's not already there.
88 |
89 | ```shell
90 | vi ~/.ssh/config
91 | ```
92 |
93 | ```
94 | Host *
95 | ServerAliveInterval 180
96 | ServerAliveCountMax 2
97 | IdentitiesOnly=yes
98 | IdentityFile ~/.ssh/local_rsa
99 |
100 | Host p1
101 | Hostname 172.19.181.1
102 | User pi
103 | Host p2
104 | Hostname 172.19.181.2
105 | User pi
106 | ```
107 |
108 | Add the pi zeros IPs to the local hosts file.
109 |
110 | ```shell
111 | $ cat | sudo tee -a /etc/hosts << HERE
112 | 172.19.181.1 p1
113 | 172.19.181.2 p2
114 | HERE
115 | ```
116 |
117 | ### Generate SSH Keys and Copy to the Pi Zeros
118 |
119 | ```shell
120 | ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/local_rsa -C "local key ClusterHAT"
121 |
122 | ssh-copy-id -i .ssh/local_rsa.pub p1
123 |
124 | ssh-copy-id -i .ssh/local_rsa.pub p2
125 | ```
126 | ### Install the Client Tools
127 |
128 | Install the client tools on the Pi 3; optionally, they can be installed on your local environment as well.
129 |
130 | Get CloudFlare's PKI toolkit:
131 |
132 | ```shell
133 | sudo apt install golang-cfssl
134 | ```
135 |
136 | Verify it’s working
137 | ```shell
138 | $ cfssl version
139 | Version: 1.2.0
140 | Revision: dev
141 | Runtime: go1.8.3
142 | ```
143 |
144 | Install tmux
145 |
146 | ```shell
147 | sudo apt install tmux
148 | ```
149 |
150 | ### Working Directories
151 |
152 | The following directories will be used to store the files produced by the commands during this process.
153 |
154 | ```
155 | ~/pki
156 | ~/certs
157 | ~/config
158 | ~/bin
159 | ~/plugins
160 | ```
161 |
162 | Create the directories from within the $HOME directory:
163 |
164 | ```shell
165 | mkdir pki certs config bin plugins
166 | ```
167 |
168 | ### Enable Cgroups Memory
169 | On each worker node append `cgroup_enable=memory cgroup_memory=1` to */boot/cmdline.txt*. This needs to be run as root.
170 | ```
171 | sudo su
172 | echo -n ' cgroup_enable=memory cgroup_memory=1' | tee -a /boot/cmdline.txt
173 | ```
174 |
175 | Then restart the node:
176 | ```
177 | sudo shutdown -r 0
178 | ```
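After the reboot, a quick check (assuming a standard Raspbian kernel) confirms the memory cgroup is now active:

```shell
# The last column ('enabled') should read 1 for the memory subsystem
grep memory /proc/cgroups
```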
179 |
180 | The memory cgroup needs to be turned on, or in step 6 [Test Worker Nodes](https://github.com/abelperezok/kubernetes-raspberry-pi-cluster-hat/blob/master/06-Worker-Nodes.md#test-worker-nodes) your nodes may all come up as `NotReady`.
181 |
182 | After running step 6 on master:
183 |
184 | ```
185 | kubectl get nodes --kubeconfig config/admin.kubeconfig
186 | ```
187 |
188 | the statuses were all `NotReady`. Running
189 |
190 | ```
191 | journalctl -fu kubelet
192 | ```
193 | on *p1* showed the error:
194 |
195 | > Failed to start ContainerManager system validation failed - Following Cgroup subsystem not mounted: [memory]
196 |
197 | It turns out the memory cgroup is disabled by default because it adds some [additional memory overhead](https://github.com/raspberrypi/linux/issues/1950).
198 |
--------------------------------------------------------------------------------
/01-PKI.md:
--------------------------------------------------------------------------------
1 | # Provision PKI infrastructure
2 |
3 | This step is pretty much the same as the original guide except for a few tweaks:
4 |
5 | * Internal and external IPs are different from the proposed computing resources.
6 | * Hostnames are different from the proposed scheme.
7 | * Only one master node in this setup, therefore only one IP to look after for etcd.
8 | * I've updated the TLS fields to my location in Manchester, England.
9 |
10 | All commands will be run in `~/pki` directory.
11 |
12 | ## Certificate Authority
13 |
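All certificates follow the same `cfssl` pattern. A minimal sketch of initialising the CA (the JSON values here are illustrative; the real data lives in `pki_scripts/ca-config.json` and `pki_scripts/cert_data.json`):

```shell
# CA signing policy: a single 'kubernetes' profile reused for every certificate
cat > ca-config.json << EOF
{
  "signing": {
    "default": { "expiry": "8760h" },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF

# Generate the self-signed root CA: produces ca.pem and ca-key.pem
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
```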
The same heredoc pattern (`cat > <file> << EOF`) creates `ca-csr.json` and the CSRs for `admin`, the worker nodes (`p${instance}-csr.json`), `kube-controller-manager`, `kube-proxy`, `kube-scheduler`, the API server (`kubernetes-csr.json`) and `service-account`; each certificate is then generated with `cfssl gencert` and written out with `cfssljson`.
--------------------------------------------------------------------------------
/02-Build-Binaries.md:
--------------------------------------------------------------------------------
> **Note** - kubectl can be reused for worker nodes as well; it worked for me.
43 |
44 | ## Build Kubernetes Binaries for Worker Nodes
45 |
46 | Before building kubelet, I ran into numerous issues with a missing cgroup (cpuset) on the Raspberry Pi Zero. I'm not entirely sure why it's a requirement, so I removed it from the code. I published my findings in the [raspberry pi forum](https://www.raspberrypi.org/forums/viewtopic.php?f=66&t=219644#p1348691).
47 |
48 | To avoid having those issues I removed the validation as follows:
49 |
50 | ```
51 | File: pkg/kubelet/cm/container_manager_linux.go
52 | - expectedCgroups := sets.NewString("cpu", "cpuacct", "cpuset", "memory")
53 | + expectedCgroups := sets.NewString("cpu", "cpuacct", "memory")
54 | ```
55 |
56 | ```shell
57 | docker run --rm -it -v "$PWD":/usr/src/myapp -w /usr/src/myapp arm32v5/golang:1.15.5-buster bash
58 |
59 | root@e3b475b2f53e:/usr/src/myapp# export GOOS="linux"
60 | root@e3b475b2f53e:/usr/src/myapp# export GOARCH="arm"
61 | root@e3b475b2f53e:/usr/src/myapp# export CGO_ENABLED=0
62 | root@e3b475b2f53e:/usr/src/myapp# export GOARM=6
63 | root@e3b475b2f53e:/usr/src/myapp# export GOFLAGS=""
64 | root@e3b475b2f53e:/usr/src/myapp# export LDFLAGS='-d -s -w -linkmode=external -extldflags="-static" -static -static-libgcc'
65 |
66 | root@e3b475b2f53e:/usr/src/myapp# apt update
67 | root@e3b475b2f53e:/usr/src/myapp# apt install rsync
68 | root@e3b475b2f53e:/usr/src/myapp# make WHAT=cmd/kubelet
69 | root@e3b475b2f53e:/usr/src/myapp# make WHAT=cmd/kube-proxy
70 | ```
71 |
72 | Copy the binaries to the master, to later transfer them to the worker nodes:
73 |
74 | ```shell
75 | scp _output/local/bin/linux/arm/kubelet pi@rpi-k8s-master.local:~/bin
76 | scp _output/local/bin/linux/arm/kube-proxy pi@rpi-k8s-master.local:~/bin
77 | ```
78 |
79 | ## Build Container Networking Plugins
80 |
81 | We are not going to use all the networking plugins, but to make our life easier, build them all and distribute them to the nodes anyway.
82 |
83 | ```shell
84 | git clone https://github.com/containernetworking/plugins.git
85 | cd plugins && git checkout v0.8.7
86 | sed -i 's/\$GO\ build -o \"\${PWD}\/bin\/\$plugin\" \"\$\@\"/\$GO\ build -o \"\$\{PWD\}\/bin\/\$plugin\" \"\$\@\"\ \-ldflags\=\"\-d\ \-s\ \-w\"/' build_linux.sh
87 |
88 | docker run --rm -it -v "$PWD":/usr/src/myapp -w /usr/src/myapp arm32v5/golang:1.15.5-buster bash
89 |
90 | root@042cbfe7a200:/usr/src/myapp# ./build_linux.sh
91 |
92 | scp bin/* pi@rpi-k8s-master.local:~/plugins/
93 | ```
94 |
95 | ## Build Runc
96 |
97 | `runc` is the actual low-level container runtime that launches the container processes.
98 |
99 | ```shell
100 | git clone https://github.com/opencontainers/runc.git
101 | cd runc
102 | git checkout v1.0.0-rc92
103 |
104 | docker run --rm -it -v "$PWD":/usr/src/myapp -w /usr/src/myapp arm32v5/golang:1.15.5-buster bash
105 |
106 | root@cda70ed7dfb3:/usr/src/myapp# apt update
107 | root@cda70ed7dfb3:/usr/src/myapp# apt install libseccomp-dev
108 | root@cda70ed7dfb3:/usr/src/myapp# make static
109 | root@cda70ed7dfb3:/usr/src/myapp# ./runc --version
110 | runc version 1.0.0-rc92
111 | commit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff
112 | spec: 1.0.2-dev
113 | root@cda70ed7dfb3:/usr/src/myapp# ./contrib/cmd/recvtty/recvtty --version
114 | recvtty version 1.0.0-rc92
115 | commit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff
116 |
117 | scp ./runc ./contrib/cmd/recvtty/recvtty pi@rpi-k8s-master.local:~/bin/
118 | ```
119 |
120 | ## Build Containerd
121 |
122 | `containerd` is the daemon that controls containers at runtime.
123 |
124 | ```shell
125 | git clone https://github.com/containerd/containerd.git
126 | cd containerd
127 | git checkout release/1.4
128 |
129 | docker run --rm -it -v "$PWD":/go/src/github.com/containerd/containerd -w /go/src/github.com/containerd/containerd arm32v5/golang:1.15.5-buster bash
130 |
131 | root@809ff752ed2d:/go/src/github.com/containerd/containerd# apt update
132 | root@809ff752ed2d:/go/src/github.com/containerd/containerd# apt install btrfs-progs libbtrfs-dev
133 | root@809ff752ed2d:/go/src/github.com/containerd/containerd# apt install protobuf-compiler
134 | root@809ff752ed2d:/go/src/github.com/containerd/containerd# make
135 |
136 | scp bin/* pi@rpi-k8s-master.local:~/bin/
137 | ```
138 |
139 | ## Build CRI-tools
140 |
141 | `cri-tools` provides `crictl`, a CLI for interacting with any CRI-compliant container runtime.
142 |
143 | ```shell
144 | git clone https://github.com/kubernetes-sigs/cri-tools.git
145 | cd cri-tools
146 | git checkout v1.18.0
147 |
148 | docker run --rm -it -v "$PWD":/usr/src/myapp -w /usr/src/myapp arm32v7/golang:1.15.5-buster bash
149 |
150 | root@da0beb1cf478:/usr/src/myapp# make
151 | root@da0beb1cf478:/usr/src/myapp# _output/crictl --version
152 | crictl version 1.18.0
153 |
154 | scp _output/* pi@rpi-k8s-master.local:~/bin/
155 | ```
156 |
157 | ## Build Etcd
158 |
159 | `etcd` is the main database that maintains the cluster state.
160 |
161 | ### Compile the binaries
162 |
163 | ```shell
164 | git clone https://github.com/etcd-io/etcd.git
165 | cd etcd
166 | git checkout release-3.4
167 |
168 | docker run --rm -it -v "$PWD":/usr/src/myapp -w /usr/src/myapp arm32v7/golang:1.15.5-buster bash
169 |
170 | root@3a6d3a16f556:/usr/src/myapp# ./build
171 | root@3a6d3a16f556:/usr/src/myapp# ls -l bin/
172 | total 42624
173 | -rwxr-xr-x 1 root root 24999558 Nov 26 11:22 etcd
174 | -rwxr-xr-x 1 root root 18641042 Nov 26 11:24 etcdctl
175 | ```
176 |
177 | ### Transfer the binaries to the master
178 |
179 | ```shell
180 | scp ./bin/* pi@rpi-k8s-master.local:~/bin
181 | ```
182 |
183 | ### Test the binaries in the master node
184 |
185 | The environment variable `ETCD_UNSUPPORTED_ARCH=arm` needs setting, otherwise we get an explicit error message. Run this inside the master node's $HOME directory.
186 |
187 | ```shell
188 | $ ETCD_UNSUPPORTED_ARCH=arm bin/etcd --version
189 | running etcd on unsupported architecture "arm" since ETCD_UNSUPPORTED_ARCH is set
190 | etcd Version: 3.4.13
191 | Git SHA: eb0fb0e79
192 | Go Version: go1.15.5
193 | Go OS/Arch: linux/arm
194 |
195 | $ bin/etcdctl version
196 | etcdctl version: 3.4.13
197 | API version: 3.4
198 | ```
199 |
200 | ## Build Pause
201 |
202 | This step is more for the sake of completeness, building everything from source, in case you want to create your own version of the `pause` container, which is a fundamental pillar of the pod structure.
203 |
204 | ```shell
205 | docker run --rm -it -v "$PWD":/usr/src/myapp -w /usr/src/myapp arm32v5/golang:1.15.5-buster bash
206 |
207 | root@637ae3b798bc:/usr/src/myapp# cd build/pause/
208 | root@637ae3b798bc:/usr/src/myapp/build/pause# gcc -Os -Wall -Werror -static -s -g -fPIE -fPIC -o pause pause.c
209 | ```
210 |
211 | ```shell
212 | scp build/pause/pause pi@rpi-k8s-master.local:~/bin
213 | ```
--------------------------------------------------------------------------------
/03-Prepare-Config.md:
--------------------------------------------------------------------------------
1 | # Generating Kubernetes Configuration
2 |
3 | This part doesn't drift far from the original guide. One change is that I enforce embedding the certificates in the configuration files, as I'd like to be able to move those files around without worrying about certificate references; this could be a trade-off.
4 |
5 | Still inside the `pki` directory, common to next steps.
6 |
7 | For this step we need kubectl in the $PATH; move it to `/usr/local/bin` if it's not already in a $PATH location. I found that by default `~/bin` is in the $PATH, but for the sake of consistency I always move it to `/usr/local/bin`.
8 |
9 | ```shell
10 | sudo mv ~/bin/kubectl /usr/local/bin/
11 | ```
12 | ```shell
13 | KUBERNETES_PUBLIC_ADDRESS=192.168.1.164
14 | ```
15 |
16 | ## The `kubelet` Kubernetes Configuration File
17 |
18 | ```shell
19 | for instance in p1 p2; do
20 | kubectl config set-cluster kubernetes-the-hard-way \
21 | --certificate-authority=ca.pem \
22 | --embed-certs=true \
23 | --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
24 | --kubeconfig=${instance}.kubeconfig
25 |
26 | kubectl config set-credentials system:node:${instance} \
27 | --client-certificate=${instance}.pem \
28 | --client-key=${instance}-key.pem \
29 | --embed-certs=true \
30 | --kubeconfig=${instance}.kubeconfig
31 |
32 | kubectl config set-context default \
33 | --cluster=kubernetes-the-hard-way \
34 | --user=system:node:${instance} \
35 | --kubeconfig=${instance}.kubeconfig
36 |
37 | kubectl config use-context default --kubeconfig=${instance}.kubeconfig
38 | done
39 | ```
40 |
41 | ## The `kube-proxy` Kubernetes Configuration File
42 |
43 | ```shell
44 | kubectl config set-cluster kubernetes-the-hard-way \
45 | --certificate-authority=ca.pem \
46 | --embed-certs=true \
47 | --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 \
48 | --kubeconfig=kube-proxy.kubeconfig
49 |
50 | kubectl config set-credentials system:kube-proxy \
51 | --client-certificate=kube-proxy.pem \
52 | --client-key=kube-proxy-key.pem \
53 | --embed-certs=true \
54 | --kubeconfig=kube-proxy.kubeconfig
55 |
56 | kubectl config set-context default \
57 | --cluster=kubernetes-the-hard-way \
58 | --user=system:kube-proxy \
59 | --kubeconfig=kube-proxy.kubeconfig
60 |
61 | kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
62 | ```
63 |
64 | ## The `kube-controller-manager` Kubernetes Configuration File
65 |
66 | ```shell
67 | kubectl config set-cluster kubernetes-the-hard-way \
68 | --certificate-authority=ca.pem \
69 | --embed-certs=true \
70 | --server=https://127.0.0.1:6443 \
71 | --kubeconfig=kube-controller-manager.kubeconfig
72 |
73 | kubectl config set-credentials system:kube-controller-manager \
74 | --client-certificate=kube-controller-manager.pem \
75 | --client-key=kube-controller-manager-key.pem \
76 | --embed-certs=true \
77 | --kubeconfig=kube-controller-manager.kubeconfig
78 |
79 | kubectl config set-context default \
80 | --cluster=kubernetes-the-hard-way \
81 | --user=system:kube-controller-manager \
82 | --kubeconfig=kube-controller-manager.kubeconfig
83 |
84 | kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
85 | ```
86 |
87 | ## The `kube-scheduler` Kubernetes Configuration File
88 |
89 | ```shell
90 | kubectl config set-cluster kubernetes-the-hard-way \
91 | --certificate-authority=ca.pem \
92 | --embed-certs=true \
93 | --server=https://127.0.0.1:6443 \
94 | --kubeconfig=kube-scheduler.kubeconfig
95 |
96 | kubectl config set-credentials system:kube-scheduler \
97 | --client-certificate=kube-scheduler.pem \
98 | --client-key=kube-scheduler-key.pem \
99 | --embed-certs=true \
100 | --kubeconfig=kube-scheduler.kubeconfig
101 |
102 | kubectl config set-context default \
103 | --cluster=kubernetes-the-hard-way \
104 | --user=system:kube-scheduler \
105 | --kubeconfig=kube-scheduler.kubeconfig
106 |
107 | kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
108 | ```
109 |
110 | ## The `admin` Kubernetes Configuration File
111 |
112 | ```shell
113 | kubectl config set-cluster kubernetes-the-hard-way \
114 | --certificate-authority=ca.pem \
115 | --embed-certs=true \
116 | --server=https://127.0.0.1:6443 \
117 | --kubeconfig=admin.kubeconfig
118 |
119 | kubectl config set-credentials admin \
120 | --client-certificate=admin.pem \
121 | --client-key=admin-key.pem \
122 | --embed-certs=true \
123 | --kubeconfig=admin.kubeconfig
124 |
125 | kubectl config set-context default \
126 | --cluster=kubernetes-the-hard-way \
127 | --user=admin \
128 | --kubeconfig=admin.kubeconfig
129 |
130 | kubectl config use-context default --kubeconfig=admin.kubeconfig
131 | ```
132 |
133 | ## Generating the Data Encryption Config and Key
134 |
135 | ```shell
136 | ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)
137 |
cat > encryption-config.yaml << EOF
kind: EncryptionConfig
apiVersion: v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: ${ENCRYPTION_KEY}
      - identity: {}
EOF
```
--------------------------------------------------------------------------------
/06-Worker-Nodes.md:
--------------------------------------------------------------------------------
```shell
kubectl get nodes --kubeconfig config/admin.kubeconfig
NAME   STATUS   ROLES    AGE    VERSION
p1     Ready    <none>   34h    v1.18.13-rc.0.15+6d211539692cee-dirty
p2     Ready    <none>   12m    v1.18.13-rc.0.15+6d211539692cee-dirty
271 | ```
272 |
273 | The trailing `-dirty` is because I didn't commit my changes to the kubelet source above, so the build script picked up on it and amended the version id.
274 |
--------------------------------------------------------------------------------
/07-Remote-Access.md:
--------------------------------------------------------------------------------
1 | # Configuring kubectl for Remote Access
2 |
3 | Run on the master node, in the $HOME directory. Again, embed all the certificates in the config file, in case we move the temporary `pki` directory; it also makes the configuration file easier to download.
4 |
5 | ## Prepare Configuration File
6 |
7 | ```shell
8 | KUBERNETES_PUBLIC_ADDRESS=192.168.1.164
9 | ```
10 |
11 | ```shell
12 | kubectl config set-cluster kubernetes-the-hard-way \
13 | --certificate-authority=pki/ca.pem \
14 | --embed-certs=true \
15 | --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443
16 |
17 | kubectl config set-credentials admin \
18 | --client-certificate=pki/admin.pem \
19 | --client-key=pki/admin-key.pem \
20 | --embed-certs=true
21 |
22 | kubectl config set-context kubernetes-the-hard-way \
23 | --cluster=kubernetes-the-hard-way \
24 | --user=admin
25 |
26 | kubectl config use-context kubernetes-the-hard-way
27 | ```
28 |
29 | The result is stored in the `~/.kube/config` file. Download the config file:
30 |
31 | ```shell
32 | scp pi@rpi-k8s-master.local:~/.kube/config /home/abel/.kube/
33 | ```
34 |
35 | ## Verification from Remote Computer
36 |
37 | ```shell
38 | $ kubectl version
39 | Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.4", GitCommit:"d360454c9bcd1634cf4cc52d1867af5491dc9c5f", GitTreeState:"clean", BuildDate:"2020-11-11T13:17:17Z", GoVersion:"go1.15.2", Compiler:"gc", Platform:"linux/amd64"}
40 | Server Version: version.Info{Major:"1", Minor:"18+", GitVersion:"v1.18.13-rc.0.15+6d211539692cee", GitCommit:"6d211539692cee9ca82d8e1a6831f7e51e66558d", GitTreeState:"clean", BuildDate:"2020-11-23T19:28:31Z", GoVersion:"go1.15.5", Compiler:"gc", Platform:"linux/arm"}
41 | ```
42 |
43 | ```shell
44 | $ kubectl get componentstatuses
45 | NAME STATUS MESSAGE ERROR
46 | controller-manager Healthy ok
47 | scheduler Healthy ok
48 | etcd-0 Healthy {"health":"true"}
49 | ```
50 |
51 | ```shell
52 | $ kubectl get nodes
53 | NAME STATUS ROLES AGE VERSION
54 | p1     Ready    <none>   36h    v1.18.13-rc.0.15+6d211539692cee-dirty
55 | p2     Ready    <none>   114m   v1.18.13-rc.0.15+6d211539692cee-dirty
56 | ```
57 |
58 | ```shell
59 | $ kubectl get --raw='/readyz?verbose'
60 | [+]ping ok
61 | [+]log ok
62 | [+]etcd ok
63 | [+]poststarthook/start-kube-apiserver-admission-initializer ok
64 | [+]poststarthook/generic-apiserver-start-informers ok
65 | [+]poststarthook/start-apiextensions-informers ok
66 | [+]poststarthook/start-apiextensions-controllers ok
67 | [+]poststarthook/crd-informer-synced ok
68 | [+]poststarthook/bootstrap-controller ok
69 | [+]poststarthook/rbac/bootstrap-roles ok
70 | [+]poststarthook/scheduling/bootstrap-system-priority-classes ok
71 | [+]poststarthook/apiserver/bootstrap-system-flowcontrol-configuration ok
72 | [+]poststarthook/start-cluster-authentication-info-controller ok
73 | [+]poststarthook/start-kube-aggregator-informers ok
74 | [+]poststarthook/apiservice-registration-controller ok
75 | [+]poststarthook/apiservice-status-available-controller ok
76 | [+]poststarthook/kube-apiserver-autoregistration ok
77 | [+]autoregister-completion ok
78 | [+]poststarthook/apiservice-openapi-controller ok
79 | [+]shutdown ok
80 | healthz check passed
81 | ```
82 |
83 | ## Test Pod Creation
84 |
85 | From the remote computer or master node.
86 |
87 | ```shell
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: nginx-pod
spec:
  containers:
  - name: nginx
    image: arm32v5/nginx
EOF
```

```shell
kubectl get pods -o wide
NAME        READY   STATUS    RESTARTS   AGE   IP            NODE
nginx-pod   1/1     Running   0          23s   10.200.0.54   p1
109 | ```
110 |
111 | Some output omitted for brevity. The important things are that it was scheduled on node `p1` and that the pod IP is `10.200.0.54`. On node `p1` run:
112 |
113 | ```shell
114 | pi@p1:~ $ curl http://10.200.0.54/
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
130 | ```
131 |
132 | Success!
133 |
--------------------------------------------------------------------------------
/08-Network-Routes.md:
--------------------------------------------------------------------------------
1 | # Provisioning Pod Network Routes
2 |
3 | In two terminals, run two pods using `arm32v5/busybox` and execute a shell in each of them. Run the following commands:
4 |
5 | ```shell
6 | kubectl run shell1 --rm -it --image arm32v5/busybox -- sh
7 | ```
8 | ```shell
9 | kubectl run shell2 --rm -it --image arm32v5/busybox -- sh
10 | ```
11 |
12 | Verify they're scheduled on two different nodes. If not, create more pods until two of them land on different nodes.
13 |
14 | ```shell
15 | kubectl get pods -o wide
16 | NAME READY STATUS RESTARTS AGE IP NODE
17 | shell1 1/1 Running 0 14m 10.200.0.59 p1
18 | shell2 1/1 Running 0 14m 10.200.1.30 p2
19 | ```
20 |
21 | ## Test without Routes
22 |
23 | ```shell
24 | kubectl run shell1 --rm -it --image arm32v5/busybox -- sh
25 | If you don't see a command prompt, try pressing enter.
26 | / # hostname -i
27 | 10.200.0.59
28 | / # ping 10.200.1.30
29 | PING 10.200.1.30 (10.200.1.30): 56 data bytes
30 | ^C
31 | --- 10.200.1.30 ping statistics ---
32 | 5 packets transmitted, 0 packets received, 100% packet loss
33 | ```
34 |
35 | ```shell
36 | kubectl run shell2 --rm -it --image arm32v5/busybox -- sh
37 | If you don't see a command prompt, try pressing enter.
38 | / # hostname -i
39 | 10.200.1.30
40 | / # ping 10.200.0.59
41 | PING 10.200.0.59 (10.200.0.59): 56 data bytes
42 | ^C
43 | --- 10.200.0.59 ping statistics ---
44 | 3 packets transmitted, 0 packets received, 100% packet loss
45 | ```
46 |
47 | ## Add the Missing Routes
48 |
49 | On master node, run the following command to add the missing routes.
50 |
51 | ```shell
52 | sudo route add -net 10.200.0.0 netmask 255.255.255.0 gw 172.19.181.1
53 | sudo route add -net 10.200.1.0 netmask 255.255.255.0 gw 172.19.181.2
54 | ```
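These routes live only in the kernel's routing table and won't survive a reboot, so add them to the boot configuration if you want them to be permanent. For reference, the equivalent commands with the newer `iproute2` tooling (same networks and gateways) would be:

```shell
sudo ip route add 10.200.0.0/24 via 172.19.181.1
sudo ip route add 10.200.1.0/24 via 172.19.181.2
```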
55 |
56 | ## Repeat the Test with Routes in Place
57 |
58 | ```shell
59 | kubectl run shell1 --rm -it --image arm32v5/busybox -- sh
60 | If you don't see a command prompt, try pressing enter.
61 | / # hostname -i
62 | 10.200.0.59
63 | / # ping 10.200.1.30
64 | PING 10.200.1.30 (10.200.1.30): 56 data bytes
65 | 64 bytes from 10.200.1.30: seq=0 ttl=62 time=13.536 ms
66 | 64 bytes from 10.200.1.30: seq=1 ttl=62 time=13.536 ms
67 | ^C
68 | --- 10.200.1.30 ping statistics ---
69 | 2 packets transmitted, 2 packets received, 0% packet loss
70 | ```
71 |
72 | ```shell
73 | kubectl run shell2 --rm -it --image arm32v5/busybox -- sh
74 | If you don't see a command prompt, try pressing enter.
75 | / # hostname -i
76 | 10.200.1.30
77 | / # ping 10.200.0.59
78 | PING 10.200.0.59 (10.200.0.59): 56 data bytes
79 | 64 bytes from 10.200.0.59: seq=0 ttl=62 time=13.536 ms
80 | 64 bytes from 10.200.0.59: seq=1 ttl=62 time=13.536 ms
81 | ^C
82 | --- 10.200.0.59 ping statistics ---
83 | 2 packets transmitted, 2 packets received, 0% packet loss
84 |
85 | ```
86 |
87 | Now two pods on different nodes can communicate. Success!
--------------------------------------------------------------------------------
/09-Core-DNS.md:
--------------------------------------------------------------------------------
1 | # Deploying the DNS Cluster Add-on
2 |
3 | Up to this point the cluster is fully functional except for DNS resolution inside the pods, i.e. pods resolving services exposed inside the cluster by name. The same applies to resolving external hosts.
4 |
5 | ## Deploy CoreDNS
6 |
7 | This yaml file contains the deployment of [CoreDNS](https://coredns.io/) along with some other kubernetes objects to connect with the cluster, such as `Role`, `RoleBinding`, `ConfigMap`.
8 |
9 | ```shell
10 | kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns-1.7.0.yaml
11 | serviceaccount/coredns created
12 | clusterrole.rbac.authorization.k8s.io/system:coredns created
13 | clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
14 | configmap/coredns created
15 | deployment.apps/coredns created
16 | service/kube-dns created
17 | ```
18 |
19 | Wait a few seconds and then get the coredns pods:
20 |
21 | ```shell
22 | kubectl get pods -l k8s-app=kube-dns -n kube-system
23 | NAME READY STATUS RESTARTS AGE
24 | coredns-5677dc4cdb-l7qhl 1/1 Running 0 55s
25 | coredns-5677dc4cdb-tmnnr 1/1 Running 0 55s
26 | ```
27 |
28 | Edit the configuration map to add forwarding to our external DNS.
29 |
30 | ```shell
31 | kubectl edit -n kube-system configmaps coredns
32 | ```
33 |
34 | In this case my home router has the IP address `192.168.1.254`. Add the following line after the `kubernetes` block.
35 |
36 | `forward . 192.168.1.254`
37 |
38 | Optionally you can also add `log` to help in troubleshooting.
39 |
40 | It should read
41 |
42 | ```
43 | ...
44 | Corefile: |
45 | .:53 {
46 | errors
47 | health
48 | ready
49 | kubernetes cluster.local in-addr.arpa ip6.arpa {
50 | pods insecure
51 | fallthrough in-addr.arpa ip6.arpa
52 | }
53 | prometheus :9153
54 | cache 30
55 | loop
56 | reload
57 | loadbalance
58 | log
59 | forward . 192.168.1.254
60 | }
61 | ...
62 | ```
63 |
64 | ## Verification
65 |
66 | The original guide suggests using the `busybox` image; however, I found many issues when trying it for the DNS resolution tests. Instead, I used a plain `Debian` image and installed the `dnsutils` package on it to achieve the same results.
67 |
68 | ### Prepare the Test Pod
69 |
70 | ```shell
71 | kubectl run debian --image=arm32v5/debian --command -- sleep 7200
72 | pod/debian created
73 | ```
74 |
75 | ```shell
76 | kubectl get pods -l run=debian -w
77 | NAME READY STATUS RESTARTS AGE
78 | debian 0/1 ContainerCreating 0 116s
79 | debian 1/1 Running 0 3m10s
80 | ```
81 |
82 | If this step works, it proves external host resolution is working.
83 |
84 | ```shell
85 | kubectl exec debian -- apt update
86 | kubectl exec debian -- apt install -y dnsutils
87 | ```
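To double-check external resolution explicitly from inside the pod, look up any public hostname (`raspberrypi.org` here is just an example):

```shell
kubectl exec debian -- nslookup raspberrypi.org
```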
88 |
89 | ### Test Resolving `kubernetes`
90 |
91 | ```shell
92 | kubectl exec debian -- nslookup kubernetes
93 | Server: 10.32.0.10
94 | Address: 10.32.0.10#53
95 |
96 | Name: kubernetes.default.svc.cluster.local
97 | Address: 10.32.0.1
98 | ```
99 |
100 | ### Test Resolving the `nginx` Service
101 |
102 | ```shell
103 | kubectl create deployment nginx --image=arm32v5/nginx
104 | deployment.apps/nginx created
105 | ```
106 |
107 | ```shell
108 | kubectl get pods -l app=nginx -w
109 | NAME READY STATUS RESTARTS AGE
110 | nginx-54cb54645d-88k7c 0/1 ContainerCreating 0 53s
111 | nginx-54cb54645d-88k7c 1/1 Running 0 76s
112 | ```
113 |
114 | Resolve the nginx service using the short name `nginx`:
115 |
116 | ```shell
117 | kubectl exec debian -- nslookup nginx
118 | Server: 10.32.0.10
119 | Address: 10.32.0.10#53
120 |
121 | Name: nginx.default.svc.cluster.local
122 | Address: 10.32.0.110
123 | ```
124 |
125 | Resolve the nginx service using the fully qualified name `nginx.default.svc.cluster.local`:
126 |
127 | ```shell
128 | kubectl exec debian -- nslookup nginx.default.svc.cluster.local
129 | Server: 10.32.0.10
130 | Address: 10.32.0.10#53
131 |
132 | Name: nginx.default.svc.cluster.local
133 | Address: 10.32.0.110
134 | ```
135 |
--------------------------------------------------------------------------------
/10-Smoke-Test.md:
--------------------------------------------------------------------------------
1 | # Smoke Test
2 |
3 | It's time to confirm that all this hard work has paid off, with some quick verification of the typical functionality Kubernetes offers.
4 |
5 | ## Data Encryption
6 |
7 | Create a secret
8 |
9 | ```shell
10 | kubectl create secret generic kubernetes-the-hard-way \
11 | --from-literal="mykey=mydata"
12 | ```
13 |
14 | On the master node, connect directly to etcd to get the raw data from the data store, dumped in hexadecimal so it's readable.
15 |
16 | ```shell
17 | sudo ETCDCTL_API=3 etcdctl get \
18 | --endpoints=https://127.0.0.1:2379 \
19 | --cacert=/etc/etcd/ca.pem \
20 | --cert=/etc/etcd/kubernetes.pem \
21 | --key=/etc/etcd/kubernetes-key.pem \
22 | /registry/secrets/default/kubernetes-the-hard-way | hexdump -C
23 | ```
24 |
25 | Expected output should look like this
26 |
27 | ```
28 | 00000000 2f 72 65 67 69 73 74 72 79 2f 73 65 63 72 65 74 |/registry/secret|
29 | 00000010 73 2f 64 65 66 61 75 6c 74 2f 6b 75 62 65 72 6e |s/default/kubern|
30 | 00000020 65 74 65 73 2d 74 68 65 2d 68 61 72 64 2d 77 61 |etes-the-hard-wa|
31 | 00000030 79 0a 6b 38 73 3a 65 6e 63 3a 61 65 73 63 62 63 |y.k8s:enc:aescbc|
32 | 00000040 3a 76 31 3a 6b 65 79 31 3a b0 2e a0 b5 d3 e4 7c |:v1:key1:......||
33 | 00000050 34 17 0f 1d 56 d0 45 51 d1 f8 f9 82 c7 41 4f 22 |4...V.EQ.....AO"|
34 | 00000060 2e da 01 fe a1 b4 c8 99 0f 9e 3a 5a f6 ff 90 50 |..........:Z...P|
35 | 00000070 d3 5a 99 76 23 93 2b ef c4 8a 5b 15 bd 2e 06 dd |.Z.v#.+...[.....|
36 | 00000080 2f 64 9f 0f fb 96 a4 0b b5 de 28 08 e4 90 3d 05 |/d........(...=.|
37 | 00000090 b9 58 ef 32 76 ec 03 00 e7 31 67 eb 03 3b 89 87 |.X.2v....1g..;..|
38 | 000000a0 ad eb 18 3d 9e 7b e1 b5 27 53 bf c0 e8 37 92 d1 |...=.{..'S...7..|
39 | 000000b0 00 fd cd 28 9c 6b a9 f9 e9 ee 55 50 d3 de 4b 0d |...(.k....UP..K.|
40 | 000000c0 9a 1a 0a 1a 8b d9 6f dd 3d 04 d5 6e fb fe 81 4b |......o.=..n...K|
41 | 000000d0 5b f2 f9 06 eb 1d 58 ba 00 cf 4a 3d 71 19 52 ea |[.....X...J=q.R.|
42 | 000000e0 5d 16 6f 2a 14 75 14 1e 26 a1 cf 02 1e 01 18 3c |].o*.u..&......<|
43 | 000000f0 3a 1e 08 4d 5d 73 a4 95 05 57 6e 34 18 46 6e 0a |:..M]s...Wn4.Fn.|
44 | 00000100 d3 1b f1 b5 88 81 c3 d3 ba a1 64 5c 56 95 af 2a |..........d\V..*|
45 | 00000110 42 34 67 05 16 b8 6c 89 8a 07 9f c5 61 f6 ce 79 |B4g...l.....a..y|
46 | 00000120 a8 7e 5c 29 57 f2 c4 23 b6 ae de f0 67 e5 fc c5 |.~\)W..#....g...|
47 | 00000130 5b 01 c0 d7 57 bf 72 42 36 74 0f 1f a9 42 21 50 |[...W.rB6t...B!P|
48 | 00000140 85 5c a4 35 b9 5a 54 ee 74 6e 4b b4 ec 2d ce 07 |.\.5.ZT.tnK..-..|
49 | 00000150 15 3b e9 4b 57 cd af 92 36 0a |.;.KW...6.|
50 | 0000015a
51 | ```
52 |
53 | The etcd key should be prefixed with `k8s:enc:aescbc:v1:key1`, which indicates the `aescbc` provider was used to encrypt the data with the `key1` encryption key.
54 |
55 | ## Deployments
56 |
57 | Create a deployment
58 |
59 | ```shell
60 | kubectl create deployment nginx --image=arm32v5/nginx
61 | deployment.apps/nginx created
62 | ```
63 |
64 | See the pods created
65 |
66 | ```shell
67 | kubectl get pods -l app=nginx
68 | NAME READY STATUS RESTARTS AGE
69 | nginx-54cb54645d-r9h5g 1/1 Running 0 2m53s
70 | ```
71 |
72 | ## Port Forwarding
73 |
74 | ```shell
75 | kubectl port-forward nginx-54cb54645d-r9h5g 8080:80
76 | Forwarding from 127.0.0.1:8080 -> 80
77 | Forwarding from [::1]:8080 -> 80
78 | ```
79 |
80 | On another terminal
81 |
82 | ```shell
83 | curl -I http://127.0.0.1:8080
84 | HTTP/1.1 200 OK
85 | Server: nginx/1.19.5
86 | Date: Mon, 14 Dec 2020 23:10:50 GMT
87 | Content-Type: text/html
88 | Content-Length: 612
89 | Last-Modified: Tue, 24 Nov 2020 13:02:03 GMT
90 | Connection: keep-alive
91 | ETag: "5fbd044b-264"
92 | Accept-Ranges: bytes
93 | ```
94 |
95 | Back to the previous terminal, `Ctrl+C` to cancel the port forwarding.
96 |
97 | ## Logs
98 |
99 | Let's see the logs generated by the nginx pod previously created.
100 |
101 | ```shell
102 | kubectl logs nginx-54cb54645d-r9h5g
103 | /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will attempt to perform configuration
104 | /docker-entrypoint.sh: Looking for shell scripts in /docker-entrypoint.d/
105 | /docker-entrypoint.sh: Launching /docker-entrypoint.d/10-listen-on-ipv6-by-default.sh
106 | 10-listen-on-ipv6-by-default.sh: Getting the checksum of /etc/nginx/conf.d/default.conf
107 | 10-listen-on-ipv6-by-default.sh: Enabled listen on IPv6 in /etc/nginx/conf.d/default.conf
108 | /docker-entrypoint.sh: Launching /docker-entrypoint.d/20-envsubst-on-templates.sh
109 | /docker-entrypoint.sh: Configuration complete; ready for start up
110 | 127.0.0.1 - - [14/Dec/2020:23:10:50 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.72.0" "-"
111 | ```
112 |
113 | ## Exec
114 |
115 | ```shell
116 | kubectl exec -ti nginx-54cb54645d-r9h5g -- nginx -v
117 | nginx version: nginx/1.19.5
118 | ```
119 |
120 | ## Services
121 |
122 | Create a service type NodePort
123 |
124 | ```shell
125 | kubectl expose deployment nginx --port 80 --type NodePort
126 | service/nginx exposed
127 | ```
128 |
129 | See the IP and port
130 |
131 | ```shell
132 | kubectl get svc
133 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
134 | kubernetes   ClusterIP   10.32.0.1     <none>   443/TCP        15d
135 | nginx        NodePort    10.32.0.110   <none>   80:31127/TCP   24s
136 | ```
137 |
138 | NodePort is `31127`
139 |
140 | On the master node (there is no route from my local computer to the internal IPs of the CNAT network; if such a route exists, there's no need to go to the master node to run this command), both worker nodes should return the same information.
141 |
142 | Test node `p1`
143 |
144 | ```shell
145 | pi@rpi-k8s-master:~ $ curl -I http://172.19.181.1:31127/
146 | HTTP/1.1 200 OK
147 | Server: nginx/1.19.5
148 | Date: Mon, 14 Dec 2020 23:18:35 GMT
149 | Content-Type: text/html
150 | Content-Length: 612
151 | Last-Modified: Tue, 24 Nov 2020 13:02:03 GMT
152 | Connection: keep-alive
153 | ETag: "5fbd044b-264"
154 | Accept-Ranges: bytes
155 | ```
156 |
157 | Test node `p2`
158 |
159 | ```shell
160 | pi@rpi-k8s-master:~ $ curl -I http://172.19.181.2:31127/
161 | HTTP/1.1 200 OK
162 | Server: nginx/1.19.5
163 | Date: Mon, 14 Dec 2020 23:18:40 GMT
164 | Content-Type: text/html
165 | Content-Length: 612
166 | Last-Modified: Tue, 24 Nov 2020 13:02:03 GMT
167 | Connection: keep-alive
168 | ETag: "5fbd044b-264"
169 | Accept-Ranges: bytes
170 | ```
171 |
--------------------------------------------------------------------------------
/11-References.md:
--------------------------------------------------------------------------------
1 | # Useful References
2 |
3 | * https://github.com/kelseyhightower/kubernetes-the-hard-way
4 |
5 | * https://github.com/maxweisspoker/k8s-multiarch-dockerfiles
6 |
7 | * https://github.com/kubernetes/kubeadm/issues/253
8 |
9 | * https://medium.com/@dhuck/the-missing-clusterhat-tutorial-45ad2241d738
10 |
11 | * https://devonhubner.org/Compile_etcd_for_use_on_Raspberry_Pi/
12 |
13 | * https://github.com/kubernetes/kubernetes/issues/26093
14 |
15 | * https://www.raspberrypi.org/forums/viewtopic.php?f=66&t=219644&p=1770842
16 |
17 | * https://downey.io/blog/exploring-cgroups-raspberry-pi/
18 |
19 | * https://github.com/kubernetes-sigs/kubespray/issues/1227
20 |
--------------------------------------------------------------------------------
/12-Ingress-Controller.md:
--------------------------------------------------------------------------------
1 | # Install Nginx Ingress Controller
2 |
3 | A fully functional Kubernetes cluster should be able to allow external traffic into the cluster to interact with the deployed applications.
4 |
5 | At this point an Ingress Controller is needed; the Nginx Ingress Controller is a very popular option for bare-metal installations like this one.
6 |
7 | Documentation can be found at these locations:
8 | * https://docs.nginx.com/nginx-ingress-controller/installation/building-ingress-controller-image/
9 | * https://docs.nginx.com/nginx-ingress-controller/installation/installation-with-manifests/
10 |
11 | Specific to bare metal:
12 | * https://github.com/kubernetes/ingress-nginx/blob/master/docs/deploy/baremetal.md
13 |
14 | ## Build the Ingress Controller from Source Code
15 |
16 | Clone the source code repository at https://github.com/nginxinc/kubernetes-ingress
17 |
18 | ```shell
19 | git clone https://github.com/nginxinc/kubernetes-ingress/
20 | cd kubernetes-ingress/
21 | git checkout v1.9.1
22 | ```
23 |
24 | Edit `build/Dockerfile` to adapt it for an `ARMv6` build.
25 |
26 | Use a different golang image to build: `ARG GOLANG_CONTAINER=arm32v5/golang:1.15.5-buster`
27 |
28 | Use a different nginx image as base: `FROM arm32v5/nginx:1.19.6 AS base`
29 |
30 | Add environment variables for cross-compiling:
31 |
32 | ```
33 | ENV GOOS=linux
34 | ENV GOARCH=arm
35 | ENV GOARM=6
36 | ```
37 |
38 | Full content of the Dockerfile:
39 | ```
40 | # ARG GOLANG_CONTAINER=golang:latest
41 | ARG GOLANG_CONTAINER=arm32v5/golang:1.15.5-buster
42 |
43 | # FROM nginx:1.19.6 AS base
44 | FROM arm32v5/nginx:1.19.6 AS base
45 |
46 | # forward nginx access and error logs to stdout and stderr of the ingress
47 | # controller process
48 | RUN ln -sf /proc/1/fd/1 /var/log/nginx/access.log \
49 | && ln -sf /proc/1/fd/1 /var/log/nginx/stream-access.log \
50 | && ln -sf /proc/1/fd/2 /var/log/nginx/error.log
51 |
52 | RUN mkdir -p /var/lib/nginx \
53 | && mkdir -p /etc/nginx/secrets \
54 | && mkdir -p /etc/nginx/stream-conf.d \
55 | && apt-get update \
56 | && apt-get install -y libcap2-bin \
57 | && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx \
58 | && setcap 'cap_net_bind_service=+ep' /usr/sbin/nginx-debug \
59 | && chown -R nginx:0 /etc/nginx \
60 | && chown -R nginx:0 /var/cache/nginx \
61 | && chown -R nginx:0 /var/lib/nginx \
62 | && apt-get remove --purge -y libcap2-bin \
63 | && rm /etc/nginx/conf.d/* \
64 | && rm -rf /var/lib/apt/lists/*
65 |
66 | COPY internal/configs/version1/nginx.ingress.tmpl \
67 | internal/configs/version1/nginx.tmpl \
68 | internal/configs/version2/nginx.virtualserver.tmpl \
69 | internal/configs/version2/nginx.transportserver.tmpl /
70 |
71 | # Uncomment the line below if you would like to add the default.pem to the image
72 | # and use it as a certificate and key for the default server
73 | # ADD default.pem /etc/nginx/secrets/default
74 |
75 | USER nginx
76 |
77 | ENTRYPOINT ["/nginx-ingress"]
78 |
79 |
80 | FROM base AS local
81 | COPY nginx-ingress /
82 |
83 |
84 | FROM $GOLANG_CONTAINER AS builder
85 | ARG VERSION
86 | ARG GIT_COMMIT
87 | WORKDIR /go/src/github.com/nginxinc/kubernetes-ingress/nginx-ingress/cmd/nginx-ingress
88 | COPY . /go/src/github.com/nginxinc/kubernetes-ingress/nginx-ingress/
89 | ENV GOOS=linux
90 | ENV GOARCH=arm
91 | ENV GOARM=6
92 | RUN CGO_ENABLED=0 GOFLAGS='-mod=vendor' \
93 | go build -installsuffix cgo -ldflags "-w -X main.version=${VERSION} -X main.gitCommit=${GIT_COMMIT}" -o /nginx-ingress
94 |
95 |
96 | FROM base AS container
97 | COPY --from=builder /nginx-ingress /
98 | ```
99 |
100 | Build using the docker hub repository and image name:
101 |
102 | `make PREFIX=abelperezok/nginx-ingress-armv6`
103 |
104 | The image is pushed to my personal account `abelperezok`, and the tag matches the version `1.9.1`:
105 |
106 | `abelperezok/nginx-ingress-armv6:1.9.1`
107 |
108 | ## Install Ingress Controller
109 |
110 | Following the docs (installing with manifests), make a couple of tweaks in `daemon-set/nginx-ingress.yaml` before applying the yaml manifests:
111 |
112 | * Update the image: `image: abelperezok/nginx-ingress-armv6:1.9.1`.
113 |
114 | * Add hostNetwork to Pod’s spec `hostNetwork: true`.
115 |
116 | This is important because in this setup the worker nodes are in a private network and therefore inaccessible from the outside, so using the hostNetwork is not a security risk, and it simplifies the process compared to using a deployment.
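The relevant part of `daemon-set/nginx-ingress.yaml` after the tweaks looks roughly like this (an abridged sketch; everything else in the manifest stays as shipped):

```yaml
spec:
  template:
    spec:
      hostNetwork: true    # added: listen directly on each worker node
      serviceAccountName: nginx-ingress
      containers:
      - image: abelperezok/nginx-ingress-armv6:1.9.1   # custom ARMv6 image
        name: nginx-ingress
        ports:
        - name: http
          containerPort: 80
        - name: https
          containerPort: 443
```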
117 |
118 | ```shell
119 | cd deployments/
120 | ```
121 |
122 | As per the docs, apply all the manifests:
123 | ```shell
124 | kubectl apply -f common/ns-and-sa.yaml
125 | kubectl apply -f rbac/rbac.yaml
126 | kubectl apply -f common/default-server-secret.yaml
127 | kubectl apply -f common/nginx-config.yaml
128 | kubectl apply -f common/ingress-class.yaml
129 | # Not sure if the following resources are required
130 | kubectl apply -f common/vs-definition.yaml
131 | kubectl apply -f common/vsr-definition.yaml
132 | kubectl apply -f common/ts-definition.yaml
133 | kubectl apply -f common/policy-definition.yaml
134 | kubectl apply -f common/gc-definition.yaml
135 | kubectl apply -f common/global-configuration.yaml
136 | ```
137 |
138 | Apply daemonset manifest.
139 | ```shell
140 | $ kubectl apply -f daemon-set/nginx-ingress.yaml
141 | daemonset.apps/nginx-ingress created
142 | ```
143 |
144 | ## Verify nginx-ingress is running
145 |
146 | ```shell
147 | $ kubectl get pods -n nginx-ingress
148 | NAME READY STATUS RESTARTS AGE
149 | nginx-ingress-84xql 1/1 Running 0 17s
150 | nginx-ingress-pj2lc 1/1 Running 0 17s
151 | ```
152 |
153 | Create an Ingress resource to test the ingress controller is working. Add this to a file named `ingress.yaml`.
154 |
155 | ```yaml
156 | apiVersion: networking.k8s.io/v1beta1
157 | kind: Ingress
158 | metadata:
159 | name: virtual-host-ingress
160 | namespace: default
161 | spec:
162 | rules:
163 | - host: blue.example.com
164 | http:
165 | paths:
166 | - backend:
167 | serviceName: nginx
168 | servicePort: 80
169 | path: /
170 | pathType: ImplementationSpecific
171 | ```
172 |
173 | Apply ingress manifest.
174 | ```shell
175 | kubectl apply -f ingress.yaml
176 | ```
177 |
178 | On master node
179 |
180 | ```shell
181 | pi@rpi-k8s-master:~ $ curl -H "Host: blue.example.com" 172.19.181.1
182 | pi@rpi-k8s-master:~ $ curl -H "Host: blue.example.com" 172.19.181.2
183 | ```
184 |
185 | The ingress controller works!
186 |
187 | However, the worker nodes live in a `private network`, so on its own this is of no use for reaching deployed services. An `external IP` is required for access from outside, and in this setup the only `external IP` with access to the private network is the master node's.
188 |
189 | ## Install Haproxy on the Master Node
190 |
191 | One of the solutions to mitigate the issue of accessing the worker nodes in a private network is to put an external proxy such as `haproxy` in front of them.
192 |
193 | Install haproxy package
194 | ```shell
195 | sudo apt install haproxy
196 | ```
197 |
198 | Update configuration
199 | ```shell
200 | sudo vi /etc/haproxy/haproxy.cfg
201 | ```
202 |
203 | Add this to the end of the file
204 |
205 | ```shell
206 | frontend http_front
207 | bind *:80
208 | stats uri /haproxy?stats
209 | default_backend http_back
210 |
211 | backend http_back
212 | balance roundrobin
213 | server p1 172.19.181.1:80 check
214 | server p2 172.19.181.2:80 check
215 |
216 | frontend https_front
217 | bind *:443
218 | option tcplog
219 | mode tcp
220 | default_backend https_back
221 |
222 | backend https_back
223 | mode tcp
224 | balance roundrobin
225 | option ssl-hello-chk
226 | server p1 172.19.181.1:443 check
227 | server p2 172.19.181.2:443 check
228 | ```
229 |
230 | Restart haproxy service
231 | ```shell
232 | sudo systemctl restart haproxy.service
233 | ```
234 |
235 | From remote computer
236 | ```shell
237 | $ curl -H "Host: blue.example.com" 192.168.1.164
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
264 | ```
265 |
266 |
--------------------------------------------------------------------------------
/13-TLS-Termination.md:
--------------------------------------------------------------------------------
1 | # TLS termination
2 |
3 | One of the most common uses of ingress is to terminate TLS, taking that concern away from the (micro)service, which only needs to know the external host it should respond to.
4 |
5 | Create a self-signed certificate
6 |
7 | ```shell
8 | openssl req -x509 -sha256 -nodes -days 365 -newkey rsa:2048 -keyout nginx-tls-example-com.key -out nginx-tls-example-com.crt -subj "/CN=*.example.com/O=example.com"
9 |
10 | Generating a RSA private key
11 | ...............................................+++++
12 | ........................................................+++++
13 | writing new private key to 'nginx-tls-example-com.key'
14 | -----
15 | ```
16 |
17 | Create a secret of type tls with the certificate, named `tls-secret-example-com`:
18 |
19 | ```shell
20 | kubectl create secret tls tls-secret-example-com --key nginx-tls-example-com.key --cert nginx-tls-example-com.crt
21 | secret/tls-secret-example-com created
22 | ```
23 |
24 | Add this block at the end of the ingress manifest, under `spec`:
25 | ```
26 | tls:
27 | - hosts:
28 | - blue.example.com
29 | secretName: tls-secret-example-com
30 | ```
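With that block in place, the whole manifest reads roughly as follows (a sketch combining the Ingress from the previous chapter; note the `tls` block sits under `spec`):

```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: virtual-host-ingress
  namespace: default
spec:
  tls:
  - hosts:
    - blue.example.com
    secretName: tls-secret-example-com
  rules:
  - host: blue.example.com
    http:
      paths:
      - backend:
          serviceName: nginx
          servicePort: 80
        path: /
        pathType: ImplementationSpecific
```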
31 |
32 | ```shell
33 | kubectl apply -f ingress.yaml
34 | ```
35 |
36 | Test the redirection from HTTP to HTTPS.
37 |
38 | Update `/etc/hosts` to add:
39 |
40 | ```
41 | 192.168.1.164 blue.example.com
42 | ```
43 |
44 | ```shell
45 | $ curl http://blue.example.com/ -I
46 | HTTP/1.1 301 Moved Permanently
47 | Server: nginx/1.19.6
48 | Date: Mon, 28 Dec 2020 16:09:35 GMT
49 | Content-Type: text/html
50 | Content-Length: 169
51 | Location: https://blue.example.com:443/
52 | ```
53 |
54 | Test the TLS error when using the self-signed certificate:
55 |
56 | ```shell
57 | $ curl https://blue.example.com/
58 | curl: (60) SSL certificate problem: self signed certificate
59 | More details here: https://curl.haxx.se/docs/sslcerts.html
60 |
61 | curl failed to verify the legitimacy of the server and therefore could not
62 | establish a secure connection to it. To learn more about this situation and
63 | how to fix it, please visit the web page mentioned above.
64 | ```
65 |
66 | Test ignoring the TLS validation
67 |
68 | ```shell
69 | $ curl https://blue.example.com/ -k
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
95 | ```
96 |
97 | ## Using a real TLS certificate
98 |
99 | Grab a real TLS certificate, e.g. from [Let's Encrypt](https://letsencrypt.org/) or [sslforfree.com](https://www.sslforfree.com/). You'll need to own a domain to do this. In my case I'm using abelperez.info, which I own, to create my TLS certificate.
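One way to obtain one is with the `certbot` client and a DNS-01 challenge (a sketch; the DNS challenge is assumed here because the cluster isn't reachable from the internet):

```shell
# Prove domain ownership via a DNS TXT record, then collect the files
sudo certbot certonly --manual --preferred-challenges dns -d www.abelperez.info
# Certificate: /etc/letsencrypt/live/www.abelperez.info/fullchain.pem
# Private key: /etc/letsencrypt/live/www.abelperez.info/privkey.pem
```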
100 |
101 | Create another secret with the real certificate
102 |
103 | ```shell
104 | kubectl create secret tls tls-www-abelperez-info --key private.key --cert certificate.crt
105 | secret/tls-www-abelperez-info created
106 | ```
107 |
108 | Update the ingress manifest yaml to use the real hostname and the new secret.
109 |
110 | ```yaml
111 | - host: www.abelperez.info
112 | ```
113 |
114 | ```yaml
115 | tls:
116 | - hosts:
117 | - www.abelperez.info
118 | secretName: tls-www-abelperez-info
119 | ```
120 |
121 | Update `/etc/hosts` to add
122 | ```
123 | 192.168.1.164 www.abelperez.info
124 | ```
125 |
126 | ## Final Test
127 |
128 | ```shell
129 | $ curl -Lv http://www.abelperez.info/
130 | * Trying 192.168.1.164:80...
131 | * Connected to www.abelperez.info (192.168.1.164) port 80 (#0)
132 | > GET / HTTP/1.1
133 | > Host: www.abelperez.info
134 | > User-Agent: curl/7.72.0
135 | > Accept: */*
136 | >
137 | * Mark bundle as not supporting multiuse
138 | < HTTP/1.1 301 Moved Permanently
139 | < Server: nginx/1.19.6
140 | < Date: Tue, 29 Dec 2020 11:22:03 GMT
141 | < Content-Type: text/html
142 | < Content-Length: 169
143 | < Location: https://www.abelperez.info:443/
144 | <
145 | * Ignoring the response-body
146 | * Connection #0 to host www.abelperez.info left intact
147 | * Issue another request to this URL: 'https://www.abelperez.info:443/'
148 | * Trying 192.168.1.164:443...
149 | * Connected to www.abelperez.info (192.168.1.164) port 443 (#1)
150 | * ALPN, offering h2
151 | * ALPN, offering http/1.1
152 | * successfully set certificate verify locations:
153 | * CAfile: /etc/ssl/certs/ca-certificates.crt
154 | CApath: /etc/ssl/certs
155 | * TLSv1.3 (OUT), TLS handshake, Client hello (1):
156 | * TLSv1.3 (IN), TLS handshake, Server hello (2):
157 | * TLSv1.2 (IN), TLS handshake, Certificate (11):
158 | * TLSv1.2 (IN), TLS handshake, Server key exchange (12):
159 | * TLSv1.2 (IN), TLS handshake, Server finished (14):
160 | * TLSv1.2 (OUT), TLS handshake, Client key exchange (16):
161 | * TLSv1.2 (OUT), TLS change cipher, Change cipher spec (1):
162 | * TLSv1.2 (OUT), TLS handshake, Finished (20):
163 | * TLSv1.2 (IN), TLS handshake, Finished (20):
164 | * SSL connection using TLSv1.2 / ECDHE-RSA-AES256-GCM-SHA384
165 | * ALPN, server accepted to use http/1.1
166 | * Server certificate:
167 | * subject: CN=abelperez.info
168 | * start date: Dec 29 00:00:00 2020 GMT
169 | * expire date: Mar 29 23:59:59 2021 GMT
170 | * subjectAltName: host "www.abelperez.info" matched cert's "www.abelperez.info"
171 | * issuer: C=AT; O=ZeroSSL; CN=ZeroSSL RSA Domain Secure Site CA
172 | * SSL certificate verify ok.
173 | > GET / HTTP/1.1
174 | > Host: www.abelperez.info
175 | > User-Agent: curl/7.72.0
176 | > Accept: */*
177 | >
178 | * Mark bundle as not supporting multiuse
179 | < HTTP/1.1 200 OK
180 | < Server: nginx/1.19.6
181 | < Date: Tue, 29 Dec 2020 11:22:03 GMT
182 | < Content-Type: text/html
183 | < Content-Length: 612
184 | < Connection: keep-alive
185 | < Last-Modified: Tue, 15 Dec 2020 13:59:38 GMT
186 | < ETag: "5fd8c14a-264"
187 | < Accept-Ranges: bytes
188 | <
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
214 | * Connection #1 to host www.abelperez.info left intact
215 | ```
216 |
217 | Or just use the browser to navigate to http://www.abelperez.info/; it will automatically redirect to the HTTPS location and you'll see the "secure" padlock.
218 |
219 | ## References
220 |
221 | * https://kubernetes.github.io/ingress-nginx/examples/tls-termination/
222 |
223 | * https://serversforhackers.com/c/using-ssl-certificates-with-haproxy
224 |
225 | * https://awkwardferny.medium.com/configuring-certificate-based-mutual-authentication-with-kubernetes-ingress-nginx-20e7e38fdfca
226 |
--------------------------------------------------------------------------------
/14-Metrics-Server.md:
--------------------------------------------------------------------------------
1 | # Metrics Server
2 |
3 | In this section we'll install the metrics server so we can monitor the resource utilisation of the cluster. It requires some changes to the previous installation of the kube-apiserver, because the aggregation layer must be configured so that extension API servers like metrics-server can connect and authenticate.
4 |
5 | ## Prepare Kube-API server
6 |
7 | We need to create another CA for extensions; in this case I named it `ca-ext`.
8 |
9 | ### The Extension Certificate Authority
10 |
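The exact commands for this step were garbled in this copy of the document, so what follows is a rough sketch rather than the original. It assumes the extension CA reuses the same cfssl signing profile as the cluster CA in `pki_scripts/ca-config.json`; the `Kubernetes-ext` CN and the file names are illustrative guesses:

```shell
# Signing config for the extension CA (assumed to mirror ca-config.json).
cat > ca-ext-config.json <<EOF
{
  "signing": {
    "default": { "expiry": "8760h" },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF

# CSR for the extension CA itself (the CN is an assumption).
cat > ca-ext-csr.json <<EOF
{
  "CN": "Kubernetes-ext",
  "key": { "algo": "rsa", "size": 2048 }
}
EOF

# Initialise the extension CA, producing ca-ext.pem and ca-ext-key.pem.
cfssl gencert -initca ca-ext-csr.json | cfssljson -bare ca-ext
```

The `ext-csr.json` file referenced below would then request a client certificate signed by this CA, which the kube-apiserver presents when it talks to extension API servers. The aggregation-layer options involved are standard kube-apiserver flags, although the file paths and the `ext` common name here are assumptions:

```shell
--requestheader-client-ca-file=/var/lib/kubernetes/ca-ext.pem \
--requestheader-allowed-names=ext \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-group-headers=X-Remote-Group \
--requestheader-username-headers=X-Remote-User \
--proxy-client-cert-file=/var/lib/kubernetes/ext.pem \
--proxy-client-key-file=/var/lib/kubernetes/ext-key.pem
```
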
11 | *(The rest of this step was garbled in this copy: creating `ext-csr.json`, signing the certificates, updating the kube-apiserver, and building the `metrics-server` binary and its image. Only the tail of the `docker build` output survives.)*
12 | 
13 | ```shell
14 |  ---> 56b2e5ac6aa2
147 | Step 2/4 : COPY metrics-server /
148 | ---> 2f9af7f67783
149 | Step 3/4 : USER 65534
150 | ---> Running in c4ed7bf9ff92
151 | Removing intermediate container c4ed7bf9ff92
152 | ---> c99bdde8d29e
153 | Step 4/4 : ENTRYPOINT ["/metrics-server"]
154 | ---> Running in 479961333d53
155 | Removing intermediate container 479961333d53
156 | ---> e2f4ea68b082
157 | Successfully built e2f4ea68b082
158 | Successfully tagged abelperezok/metrics-server-armv6:0.4.1
159 | ```
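
From the surviving build output above, the Dockerfile can be inferred almost completely: steps 2 to 4 appear verbatim in the log, while the base image in step 1 did not survive. `FROM scratch` is assumed here, being a common choice for a statically linked Go binary:

```Dockerfile
# Base image is an assumption; the original FROM line was lost.
FROM scratch
# Steps 2-4 are taken verbatim from the build log above.
COPY metrics-server /
USER 65534
ENTRYPOINT ["/metrics-server"]
```

The `metrics-server` binary it copies in would have been cross-compiled for the Pi Zero's armv6 CPU (with `GOOS=linux GOARCH=arm GOARM=6` set for the Go build), matching the `-armv6` image tag.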
160 |
161 | ```shell
162 | docker login
163 | docker push abelperezok/metrics-server-armv6:0.4.1
164 | ```
165 |
166 | Download the manifest from https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
167 |
168 | ```shell
169 | curl -o manifests.yaml https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml -L
170 | ```
171 |
172 | Find the `Deployment` object and make the following edits (a combined fragment is shown below):
173 | 
174 | * Replace the original image `image: k8s.gcr.io/metrics-server/metrics-server:v0.4.1` with the recently created `image: abelperezok/metrics-server-armv6:0.4.1`
175 | 
176 | * Add `hostNetwork: true` to the pod spec
177 | 
178 | * Add `hostPort: 4443` to the container's ports
181 |
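Put together, the changed portion of the `Deployment` pod spec would look roughly like this (only the affected fields are shown; everything else in the downloaded manifest stays as-is, and the container name and port come from the upstream v0.4.1 manifest):

```yaml
spec:
  template:
    spec:
      hostNetwork: true
      containers:
      - name: metrics-server
        image: abelperezok/metrics-server-armv6:0.4.1
        ports:
        - containerPort: 4443
          hostPort: 4443
          name: https
          protocol: TCP
```
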
182 | Apply the manifest to install metrics-server
183 | ```shell
184 | kubectl apply -f manifests.yaml
185 | ```
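
Optionally, wait for the deployment to finish rolling out before checking the pods:

```shell
kubectl rollout status -n kube-system deployment/metrics-server
```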
186 |
187 | ## Verify it's working
188 |
189 | ```shell
190 | kubectl get pods -n kube-system -o wide -l k8s-app=metrics-server
191 | NAME                              READY   STATUS    RESTARTS   AGE   IP             NODE   NOMINATED NODE   READINESS GATES
192 | metrics-server-556fbf5c74-rhkf6   1/1     Running   2          84m   172.19.181.1   p1     <none>           <none>
193 | ```
194 |
195 | ```shell
196 | kubectl top nodes
197 | NAME   CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%
198 | p1     392m         39%    244Mi           73%
199 | p2     336m         33%    227Mi           68%
200 | ```
201 |
202 | ```shell
203 | kubectl top pods
204 | NAME                     CPU(cores)   MEMORY(bytes)
205 | nginx-54cb54645d-88k7c   0m           1Mi
206 | pod-pvc-normal           0m           1Mi
207 | pod-pvc-volume           0m           1Mi
208 | ```
209 |
210 | ## Useful links
211 | * https://github.com/kubernetes-retired/kube-aws/issues/1355
212 | * https://discourse.linkerd.io/t/error-no-client-ca-cert-available-for-apiextension-server/947
213 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Abel Perez Martinez
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # How to install Kubernetes from scratch using Raspberry Pi and ClusterHAT
2 |
3 | This is a learning exercise on how to install Kubernetes from scratch on Raspberry Pi 3 (master) and Zeros (workers).
4 |
5 | In this guide I'll walk you through the famous guide [Kubernetes The Hard Way](https://github.com/kelseyhightower/kubernetes-the-hard-way) created by [Kelsey Hightower](https://github.com/kelseyhightower). I like to call it: the very, very hard way.
6 |
7 | ![Raspberry Pi cluster with ClusterHAT](img/rasbperrypi-clusterhat.jpg)
8 | 
9 | The final result should look like this:
10 |
11 | ![Kubernetes cluster up and running](img/kubernetes-running.png)
12 | 
13 | ## Prerequisites
14 |
15 | * Basic understanding of the Linux command line (Debian/Raspbian is recommended)
16 | * Basic knowledge of Linux networking (general networking)
17 | * Basic understanding of containers (Docker experience is not required)
18 | * Local Wi-Fi or a free Ethernet port
19 | * Lots of patience - you'll need it!
20 |
21 | ## Content
22 |
23 | - [Introduction - Setting Up the Cluster](00-Intro.md)
24 | - [Part 1 - Provision PKI Infrastructure](01-PKI.md)
25 | - [Part 2 - Build Binaries From Source Code](02-Build-Binaries.md)
26 | - [Part 3 - Prepare Configuration Files](03-Prepare-Config.md)
27 | - [Part 4 - Bootstrapping the Etcd Cluster](04-Etcd.md)
28 | - [Part 5 - Bootstrapping the Control Plane](05-Control-Plane.md)
29 | - [Part 6 - Bootstrapping the Worker Nodes](06-Worker-Nodes.md)
30 | - [Part 7 - Configuring for Remote Access](07-Remote-Access.md)
31 | - [Part 8 - Provisioning Pod Network Routes](08-Network-Routes.md)
32 | - [Part 9 - Deploying the DNS Cluster Add-on](09-Core-DNS.md)
33 | - [Part 10 - Smoke Test](10-Smoke-Test.md)
34 | - [Part 11 - References](11-References.md)
35 |
36 | ## Extras
37 | - [Part 12 - NGINX Ingress Controller](12-Ingress-Controller.md)
38 | - [Part 13 - TLS Termination](13-TLS-Termination.md)
39 | - [Part 14 - Metrics Server](14-Metrics-Server.md)
40 | - Web UI (Dashboard) - TBD
--------------------------------------------------------------------------------
/img/kubernetes-running.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abelperezok/kubernetes-raspberry-pi-cluster-hat/5c45dcb1a07e2faed9324a174ed83d9d6cbc9e9d/img/kubernetes-running.png
--------------------------------------------------------------------------------
/img/rasbperrypi-clusterhat.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/abelperezok/kubernetes-raspberry-pi-cluster-hat/5c45dcb1a07e2faed9324a174ed83d9d6cbc9e9d/img/rasbperrypi-clusterhat.jpg
--------------------------------------------------------------------------------
/pki_scripts/ca-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "signing": {
3 | "default": {
4 | "expiry": "8760h"
5 | },
6 | "profiles": {
7 | "kubernetes": {
8 | "usages": ["signing", "key encipherment", "server auth", "client auth"],
9 | "expiry": "8760h"
10 | }
11 | }
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/pki_scripts/cert_data.json:
--------------------------------------------------------------------------------
1 | {
2 | "CN": "${CN}",
3 | "key": {
4 | "algo": "$KEY_ALGO",
5 | "size": $KEY_SIZE
6 | },
7 | "names": [{
8 | "C": "${COUNTRY}",
9 | "L": "${LOCALITY}",
10 | "O": "${ORG}",
11 | "OU": "${ORG_UNIT}",
12 | "ST": "${STATE_PROV}"
13 | }]
14 | }
--------------------------------------------------------------------------------
/pki_scripts/pki.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ######################################################
3 | # Tutorial Author: Abel Perez Martinez
4 | # Tutorial: https://github.com/abelperezok/kubernetes-raspberry-pi-cluster-hat
5 | # Script Author: Bryan McWhirt
6 | # Description:
7 | # This is a bash script that automates the
8 | # creation of the PKI. Make sure you understand
9 | # what is being done or you will run into issues.
10 | # I wrote this as it was tedious to do every time
11 | # I started over to experiment.
12 | #
13 | # Usage:
14 | # Fill in your information for:
15 | # COUNTRY, STATE_PROV, LOCALITY, ORG, ORG_UNIT,
16 | # KUBERNETES_PUBLIC_ADDRESS
17 | # Verify your INTERNAL_IP_BASE matches the one here.
18 | # Abel's documentation uses 172.19.181. but mine
19 | # was 172.19.180.
20 | #
21 | # Copy this file, ca-config.json and cert_data.json
22 | # to ~/ on the orchestrator node.
23 | #
24 | # chmod 740 ~/pki.sh
25 | #
26 | # cd ~
27 | #
28 | # ./pki.sh
29 | ######################################################
30 | declare -x COUNTRY=""
31 | declare -x STATE_PROV=""
32 | declare -x LOCALITY=""
33 | declare -x ORG=""
34 | declare -x ORG_UNIT=""
35 | declare -x KUBERNETES_PUBLIC_ADDRESS=
36 | declare -x INTERNAL_IP_BASE=172.19.180.
37 | declare -ax NODES=(1 2 3 4)
38 | declare -x KEY_ALGO="rsa"
39 | declare -x KEY_SIZE=2048
40 | declare -ax CSR_FILE=(ca admin p1 p2 p3 p4
41 |     kube-controller-manager kube-proxy
42 |     kube-scheduler kubernetes service-account)
43 | declare -ax CSR_CN=(Kubernetes admin system:node:p1
44 |     system:node:p2 system:node:p3 system:node:p4
45 |     system:kube-controller-manager system:kube-proxy
46 |     system:kube-scheduler kubernetes service-accounts)
47 |
48 | declare -x KUBERNETES_HOSTNAMES=kubernetes,kubernetes.default,kubernetes.default.svc,kubernetes.default.svc.cluster,kubernetes.svc.cluster.local
49 |
50 | # Make the pki directory and copy in the ca config.
51 | mkdir -p ~/pki
52 | cp ca-config.json ~/pki
53 | cp cert_data.json ~/pki
54 | cd ~/pki
55 |
56 |
57 | # gen_csr file cn
58 | # E.g. gen_csr admin-csr admin
59 | function gen_csr {
60 | CN=${2} envsubst < cert_data.json > ${1}-csr.json
61 | }
62 |
63 | # Create the JSON config files.
64 | COUNT=0
65 | for cn in ${CSR_CN[@]}; do
66 | gen_csr ${CSR_FILE[COUNT]} ${cn}
67 | ((COUNT=COUNT+1))
68 | done
69 |
70 |
71 | # Generate the Certificate Authority.
72 | # The ca-config.json has no real variables so it is included.
73 | cfssl gencert -initca ca-csr.json | cfssljson -bare ca
74 |
75 | for cert in admin kube-controller-manager kube-proxy kube-scheduler service-account; do
76 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes $cert-csr.json | cfssljson -bare $cert
77 | done
78 |
79 | # Generate node certificates.
80 | for node in ${NODES[*]}; do
81 | INTERNAL_IP=${INTERNAL_IP_BASE}${node}
82 | cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -hostname=p${node},${INTERNAL_IP} -profile=kubernetes p${node}-csr.json | cfssljson -bare p${node}
83 | done
84 |
85 | # Generate API certificate.
86 | cfssl gencert \
87 | -ca=ca.pem \
88 | -ca-key=ca-key.pem \
89 | -config=ca-config.json \
90 | -hostname=10.32.0.1,${INTERNAL_IP_BASE}254,rpi-k8s-master,rpi-k8s-master.local,${KUBERNETES_PUBLIC_ADDRESS},127.0.0.1,${KUBERNETES_HOSTNAMES} \
91 | -profile=kubernetes \
92 | kubernetes-csr.json | cfssljson -bare kubernetes
93 |
--------------------------------------------------------------------------------