├── work ├── ansible.cfg ├── templates │ ├── cni-99-loopback.conf.jj │ ├── kube-proxy-config.yaml.jj │ ├── kube-scheduler.yaml.jj │ ├── encryption-config.yaml.jj │ ├── base-csr.json.jj │ ├── kubernetes.default.svc.cluster.local.jj │ ├── kube-proxy.service.jj │ ├── kube-scheduler.service.jj │ ├── cni-10-bridge.conf.jj │ ├── containerd.service.jj │ ├── kubelet-config.yaml.jj │ ├── kubelet.service.jj │ ├── containerd-config.toml.jj │ ├── kube-controller-manager.service.jj │ ├── etcd.service.jj │ ├── kube-apiserver.service.jj │ └── haproxy.cfg.jj ├── fix-resolv.conf.sh ├── inventory.tmpl ├── playbook-12-coredns.yml ├── files │ └── ca-config.json ├── group_vars │ └── _all ├── playbook-11-pod-network.yml ├── playbook-08-2-haproxy.yml ├── playbook-02-install-clienttools.yml ├── playbook-06-encryption-config.yml ├── fix-group-vars.pl ├── playbook-10-remote-access.yml ├── README.md ├── run-all.sh ├── tf │ ├── network │ │ └── main.tf │ └── compute │ │ └── main.tf ├── playbook-07-etcd-cluster.yml ├── fix-ssh-keys.pl ├── playbook-05-config-files.yml ├── playbook-00-pre-setup.yml ├── playbook-04-certificate-authority.yml ├── main.tf ├── playbook-08-1-control-plane.yml └── playbook-09-worker-nodes.yml ├── docs ├── images │ └── tmux-screenshot.png ├── 06-data-encryption-keys.md ├── 14-cleanup.md ├── 12-dns-addon.md ├── 10-configuring-kubectl.md ├── 01-prerequisites.md ├── 11-pod-network-routes.md ├── 02-client-tools.md ├── 07-bootstrapping-etcd.md ├── 05-kubernetes-configuration-files.md ├── 03-compute-resources.md ├── 09-bootstrapping-kubernetes-workers.md ├── 04-certificate-authority.md ├── 13-smoke-test.md └── 08-bootstrapping-kubernetes-controllers.md ├── .gitignore ├── CONTRIBUTING.md ├── README.md ├── deployments └── kube-dns.yaml └── LICENSE /work/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking = False 3 | -------------------------------------------------------------------------------- /work/templates/cni-99-loopback.conf.jj: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.3.1", 3 | "type": "loopback" 4 | } 5 | -------------------------------------------------------------------------------- /docs/images/tmux-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/viklund/kubernetes-the-hard-way/master/docs/images/tmux-screenshot.png -------------------------------------------------------------------------------- /work/fix-resolv.conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ansible -i inventory workers,controllers -a 'sudo sed -i "s/127.0.0.53/8.8.8.8/" /etc/resolv.conf' -f 6 4 | -------------------------------------------------------------------------------- /work/inventory.tmpl: -------------------------------------------------------------------------------- 1 | [master] 2 | ${ip} ansible_user=ubuntu internal_ip=${ internal_ip } 3 | [workers] 4 | ${workers} 5 | [controllers] 6 | ${controllers} 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ## Mine 2 | password 3 | .terraform/ 4 | venv/ 5 | bin/ 6 | terraform.tfstate* 7 | inventory 8 | *.bak 9 | /work/group_vars/all 10 | hosts 11 | *-openrc.sh 12 | -------------------------------------------------------------------------------- 
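For reference, the `inventory` file that `main.tf` renders from `inventory.tmpl` above (and which `.gitignore` deliberately excludes from version control) ends up looking roughly like the sketch below. The addresses are placeholders — the real ones are whatever OpenStack assigns — and `130.239.81.217` is the floating IP hard-coded in `main.tf`:

```
[master]
130.239.81.217 ansible_user=ubuntu internal_ip=<master internal IP>
[workers]
<worker-00 IP> ansible_user=ubuntu ansible_ssh_common_args='-o ProxyJump=ubuntu@130.239.81.217' router=<master internal IP>
[controllers]
<controller-00 IP> ansible_user=ubuntu ansible_ssh_common_args='-o ProxyJump=ubuntu@130.239.81.217' router=<master internal IP>
```

(one line per worker and controller; the workers and controllers are only reachable through the master, hence the `ProxyJump` arguments)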
/work/templates/kube-proxy-config.yaml.jj: -------------------------------------------------------------------------------- 1 | kind: KubeProxyConfiguration 2 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 3 | clientConnection: 4 | kubeconfig: "/var/lib/kube-proxy/kubeconfig" 5 | mode: "iptables" 6 | clusterCIDR: "10.0.0.0/24" 7 | -------------------------------------------------------------------------------- /work/templates/kube-scheduler.yaml.jj: -------------------------------------------------------------------------------- 1 | apiVersion: componentconfig/v1alpha1 2 | kind: KubeSchedulerConfiguration 3 | clientConnection: 4 | kubeconfig: "/var/lib/kubernetes/kube-scheduler.kubeconfig" 5 | leaderElection: 6 | leaderElect: true 7 | -------------------------------------------------------------------------------- /work/playbook-12-coredns.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: master 3 | tasks: 4 | - name: Deploy coredns add-on 5 | args: 6 | executable: /bin/bash 7 | shell: | 8 | kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns.yaml 9 | -------------------------------------------------------------------------------- /work/templates/encryption-config.yaml.jj: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - aescbc: 8 | keys: 9 | - name: key1 10 | secret: {{ encryption_key }} 11 | - identity: {} 12 | -------------------------------------------------------------------------------- /work/files/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "8760h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": ["signing", "key encipherment", "server auth", "client auth"], 9 | "expiry": "8760h" 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /work/group_vars/_all: -------------------------------------------------------------------------------- 1 | etcd_version: "v3.3.13" 2 | kubernetes_version: "v1.12.0" 3 | master_ip: 10.0.0.10 4 | controller_00_ip: 10.0.0.9 5 | controller_01_ip: 10.0.0.5 6 | controller_02_ip: 10.0.0.15 7 | worker_00_ip: 10.0.0.8 8 | worker_01_ip: 10.0.0.13 9 | worker_02_ip: 10.0.0.6 10 | public_ip: "{{ master_ip }}" 11 | -------------------------------------------------------------------------------- /work/templates/base-csr.json.jj: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "{{ CN }}", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "SE", 10 | "L": "Uppsala", 11 | "O": "{{ O }}", 12 | "OU": "{{ OU }}", 13 | "ST": "Uppsala" 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /work/templates/kubernetes.default.svc.cluster.local.jj: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name kubernetes.default.svc.cluster.local; 4 | 5 | location /healthz { 6 | proxy_pass https://127.0.0.1:6443/healthz; 7 | proxy_ssl_trusted_certificate /var/lib/kubernetes/ca.pem; 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /work/templates/kube-proxy.service.jj: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kube Proxy 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-proxy \ 7 | --config=/var/lib/kube-proxy/kube-proxy-config.yaml 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /work/templates/kube-scheduler.service.jj: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-scheduler \ 7 | --config=/etc/kubernetes/config/kube-scheduler.yaml \ 8 | --v=2 9 | Restart=on-failure 10 | RestartSec=5 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /work/templates/cni-10-bridge.conf.jj: -------------------------------------------------------------------------------- 1 | { 2 | "cniVersion": "0.3.1", 3 | "name": "bridge", 4 | "type": "bridge", 5 | "bridge": "cnio0", 6 | "isGateway": true, 7 | "ipMasq": true, 8 | "ipam": { 9 | "type": "host-local", 10 | "ranges": [ 11 | [{"subnet": "{{ pod_cidr }}"}] 12 | ], 13 | "routes": [{"dst": "0.0.0.0/0"}] 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /work/templates/containerd.service.jj: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=containerd container runtime 3 | Documentation=https://containerd.io 4 | After=network.target 5 | 6 | [Service] 7 | ExecStartPre=/sbin/modprobe overlay 8 | ExecStart=/bin/containerd 9 | Restart=always 10 | RestartSec=5 11 | Delegate=yes 12 | KillMode=process 13 | OOMScoreAdjust=-999 14 | LimitNOFILE=1048576 15 | LimitNPROC=infinity 16 | LimitCORE=infinity 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /work/playbook-11-pod-network.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: workers, master, controllers 3 | become: yes 4 | tasks: 5 | - name: Add network routes 6 | args: 7 | executable: /bin/bash 8 | shell: | 9 | ip addr | grep {{ worker_00_ip }} || ip route add 10.0.10.0/24 via {{ worker_00_ip }} 10 | ip addr | grep {{ worker_01_ip }} || ip route add 10.0.11.0/24 via {{ worker_01_ip }} 11 | ip addr | grep {{ worker_02_ip }} || ip route add 10.0.12.0/24 via {{ worker_02_ip }} 12 | -------------------------------------------------------------------------------- /work/playbook-08-2-haproxy.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: master 3 | become: yes 4 | tasks: 5 | - name: install haproxy 6 | apt: 7 | name: haproxy 8 | state: present 9 | force_apt_get: yes 10 | - name: Install haproxy config file 11 | template: 12 | src: haproxy.cfg.jj 13 | dest: /etc/haproxy/haproxy.cfg 14 | mode: 0644 15 | owner: root 16 | group: root 17 | - name: Reload haproxy 18 | systemd: 19 | enabled: yes 20 | state: restarted 21 | name: haproxy 22 | -------------------------------------------------------------------------------- /work/playbook-02-install-clienttools.yml: 
-------------------------------------------------------------------------------- 1 | - hosts: master 2 | become: yes 3 | tasks: 4 | - name: Install cfssl and cfssljson 5 | get_url: 6 | url: "https://pkg.cfssl.org/R1.2/{{ item }}_linux-amd64" 7 | dest: "/usr/local/bin/{{ item }}" 8 | mode: 0777 9 | with_items: 10 | - cfssl 11 | - cfssljson 12 | - name: Install kubectl 13 | get_url: 14 | url: https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl 15 | dest: /usr/local/bin/kubectl 16 | mode: 0777 17 | -------------------------------------------------------------------------------- /work/templates/kubelet-config.yaml.jj: -------------------------------------------------------------------------------- 1 | kind: KubeletConfiguration 2 | apiVersion: kubelet.config.k8s.io/v1beta1 3 | authentication: 4 | anonymous: 5 | enabled: false 6 | webhook: 7 | enabled: true 8 | x509: 9 | clientCAFile: "/var/lib/kubernetes/ca.pem" 10 | authorization: 11 | mode: Webhook 12 | clusterDomain: "cluster.local" 13 | clusterDNS: 14 | - "10.32.0.10" 15 | podCIDR: "{{ pod_cidr }}" 16 | resolvConf: "/run/systemd/resolve/resolv.conf" 17 | runtimeRequestTimeout: "15m" 18 | tlsCertFile: "/var/lib/kubelet/{{ ansible_hostname }}.pem" 19 | tlsPrivateKeyFile: "/var/lib/kubelet/{{ ansible_hostname }}-key.pem" 20 | -------------------------------------------------------------------------------- /work/playbook-06-encryption-config.yml: -------------------------------------------------------------------------------- 1 | - hosts: master 2 | tasks: 3 | - name: Generate random string 4 | shell: | 5 | head -c 32 /dev/urandom | base64 6 | register: random_string 7 | - name: Create certificate file 8 | template: 9 | dest: encryption-config.yaml 10 | src: encryption-config.yaml.jj 11 | vars: 12 | encryption_key: "{{ random_string.stdout }}" 13 | - name: Copy Certificates to the nodes 14 | args: 15 | executable: /bin/bash 16 | shell: | 17 | for instance in controller-{00,01,02}; do 18 | scp encryption-config.yaml ${instance}:~/ 19 | done 20 | -------------------------------------------------------------------------------- /work/templates/kubelet.service.jj: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | After=containerd.service 5 | Requires=containerd.service 6 | 7 | [Service] 8 | ExecStart=/usr/local/bin/kubelet \ 9 | --config=/var/lib/kubelet/kubelet-config.yaml \ 10 | --container-runtime=remote \ 11 | --container-runtime-endpoint=unix:///var/run/containerd/containerd.sock \ 12 | --image-pull-progress-deadline=2m \ 13 | --kubeconfig=/var/lib/kubelet/kubeconfig \ 14 | --network-plugin=cni \ 15 | --register-node=true \ 16 | --v=2 17 | Restart=on-failure 18 | RestartSec=5 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /work/templates/containerd-config.toml.jj: -------------------------------------------------------------------------------- 1 | [plugins] 2 | [plugins.cri.containerd] 3 | snapshotter = "overlayfs" 4 | [plugins.cri.containerd.default_runtime] 5 | runtime_type = "io.containerd.runtime.v1.linux" 6 | runtime_engine = "/usr/local/bin/runc" 7 | runtime_root = "" 8 | [plugins.cri.containerd.untrusted_workload_runtime] 9 | runtime_type = "io.containerd.runtime.v1.linux" 10 | runtime_engine = "/usr/local/bin/runsc" 11 | runtime_root = "/run/containerd/runsc" 12 | 
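# Note: the untrusted-workload runtime above and the gvisor runtime below both
# point at gVisor's runsc binary, while ordinary workloads keep using runc via
# the default runtime defined earlier in this file.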
[plugins.cri.containerd.gvisor] 13 | runtime_type = "io.containerd.runtime.v1.linux" 14 | runtime_engine = "/usr/local/bin/runsc" 15 | runtime_root = "/run/containerd/runsc" 16 | -------------------------------------------------------------------------------- /work/fix-group-vars.pl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env perl 2 | 3 | use strict; 4 | use warnings; 5 | 6 | use feature 'say'; 7 | 8 | open my $HOSTS, '<', 'hosts' or die; 9 | 10 | my %ips = (); 11 | while (<$HOSTS>) { 12 | my ($ip, $name) = split; 13 | $name =~ s/-/_/g; 14 | 15 | $ips{ "${name}_ip" } = $ip; 16 | } 17 | 18 | open my $GV, '<', 'group_vars/_all' or die; 19 | my @contents; 20 | while (<$GV>) { 21 | chomp; 22 | my ($var, $setting) = /^(\S+):\s*(\S.*?)\s*$/; 23 | if ( $ips{$var} ) { 24 | push @contents, "$var: $ips{$var}"; 25 | } 26 | else { 27 | push @contents, $_; 28 | } 29 | } 30 | close $GV; 31 | 32 | open $GV, '>', 'group_vars/all' or die; 33 | say $GV $_ for @contents; 34 | close $GV; 35 | 36 | #controller_01_ip: 10.0.0.5 37 | -------------------------------------------------------------------------------- /work/playbook-10-remote-access.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: master 3 | tasks: 4 | - name: Create kubeconfig files 5 | args: 6 | executable: /bin/bash 7 | shell: | 8 | kubectl config set-cluster kubernetes-the-hard-way \ 9 | --certificate-authority=ca.pem \ 10 | --embed-certs=true \ 11 | --server=https://{{ public_ip }}:6443 12 | 13 | kubectl config set-credentials system:admin \ 14 | --client-certificate=admin.pem \ 15 | --client-key=admin-key.pem 16 | 17 | kubectl config set-context kubernetes-the-hard-way \ 18 | --cluster=kubernetes-the-hard-way \ 19 | --user=system:admin 20 | 21 | kubectl config use-context kubernetes-the-hard-way 22 | -------------------------------------------------------------------------------- /work/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes the hard way on openstack 2 | 3 | This is my deployment of kubernetes the hard way on top of openstack instead of 4 | GCE. 5 | 6 | It's also done with terraform and ansible instead of running the shell commands 7 | directly. 8 | 9 | ## Prerequisites 10 | 11 | Terraform and ansible is required. I've installed ansible in a python virtual 12 | environment in the `venv/` subdirectory and the terraform binary I have in the 13 | `bin/` subdirectory. 14 | 15 | ## Guide 16 | 17 | ### Terraform 18 | 19 | The terraform setup is in `main.tf` and the `tf/` subdirectory for modules. 20 | It's fairly straightforward. 21 | 22 | ### Ansible 23 | 24 | Every step of the guide is done with one (or more) ansible playbooks with a 25 | name on the form `playbook--.yml`. 26 | 27 | The smoke tests in step 13 has to be done manually. 
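
As a rough sketch of the workflow (this is what `run-all.sh` automates, assuming the OpenStack credentials — the `*-openrc.sh` file and the `password` file — have been sourced first):

```
# bring up the OpenStack resources and write out the inventory/hosts files
./bin/terraform apply --auto-approve

# refresh the local SSH known_hosts and group_vars/all from the new instances
./fix-ssh-keys.pl
./fix-group-vars.pl

# run a single step of the guide, e.g. the certificate authority lab
ansible-playbook -i inventory playbook-04-certificate-authority.yml
```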
28 | -------------------------------------------------------------------------------- /work/templates/kube-controller-manager.service.jj: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/kubernetes/kubernetes 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/kube-controller-manager \ 7 | --address=0.0.0.0 \ 8 | --cluster-cidr=10.200.0.0/16 \ 9 | --cluster-name=kubernetes \ 10 | --cluster-signing-cert-file=/var/lib/kubernetes/ca.pem \ 11 | --cluster-signing-key-file=/var/lib/kubernetes/ca-key.pem \ 12 | --kubeconfig=/var/lib/kubernetes/kube-controller-manager.kubeconfig \ 13 | --leader-elect=true \ 14 | --root-ca-file=/var/lib/kubernetes/ca.pem \ 15 | --service-account-private-key-file=/var/lib/kubernetes/service-account-key.pem \ 16 | --service-cluster-ip-range=10.32.0.0/24 \ 17 | --use-service-account-credentials=true \ 18 | --v=2 19 | Restart=on-failure 20 | RestartSec=5 21 | 22 | [Install] 23 | WantedBy=multi-user.target 24 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | This project is made possible by contributors like YOU! While all contributions are welcomed, please be sure and follow the following suggestions to help your PR get merged. 2 | 3 | ## License 4 | 5 | This project uses an [Apache license](LICENSE). Be sure you're comfortable with the implications of that before working up a patch. 6 | 7 | ## Review and merge process 8 | 9 | Review and merge duties are managed by [@kelseyhightower](https://github.com/kelseyhightower). Expect some burden of proof for demonstrating the marginal value of adding new content to the tutorial. 10 | 11 | Here are some examples of the review and justification process: 12 | - [#208](https://github.com/kelseyhightower/kubernetes-the-hard-way/pull/208) 13 | - [#282](https://github.com/kelseyhightower/kubernetes-the-hard-way/pull/282) 14 | 15 | ## Notes on minutiae 16 | 17 | If you find a bug that breaks the guide, please do submit it. If you are considering a minor copy edit for tone, grammar, or simple inconsistent whitespace, consider the tradeoff between maintainer time and community benefit before investing too much of your time. 18 | 19 | -------------------------------------------------------------------------------- /work/run-all.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Openstack connection iformation 4 | source SNIC\ 2018_10-38-HPC2N-openrc.sh 5 | # Openstack password 6 | source password 7 | 8 | set -xe 9 | 10 | # Bring environment up 11 | ./bin/terraform apply --auto-approve 12 | sleep 60 13 | 14 | # Make sure that the local keys are updated and correct 15 | ./fix-ssh-keys.pl 16 | ./fix-group-vars.pl 17 | 18 | # Set up some ip tables routes 19 | ansible-playbook -i inventory playbook-00-*.yml; sleep 3 20 | 21 | ## Run all the playbooks. 
One playbook per step in the guide 22 | ansible-playbook -i inventory playbook-02-*.yml; sleep 3 23 | ansible-playbook -i inventory playbook-04-*.yml; sleep 3 24 | ansible-playbook -i inventory playbook-05-*.yml; sleep 3 25 | ansible-playbook -i inventory playbook-06-*.yml; sleep 3 26 | ansible-playbook -i inventory playbook-07-*.yml; sleep 3 27 | ansible-playbook -i inventory playbook-08-*.yml; sleep 3 28 | ansible-playbook -i inventory playbook-09-*.yml; sleep 3 29 | ansible-playbook -i inventory playbook-10-*.yml; sleep 3 30 | ansible-playbook -i inventory playbook-11-*.yml; sleep 3 31 | ansible-playbook -i inventory playbook-12-*.yml 32 | -------------------------------------------------------------------------------- /work/templates/etcd.service.jj: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | Documentation=https://github.com/coreos 4 | 5 | [Service] 6 | ExecStart=/usr/local/bin/etcd \ 7 | --name {{ etcd_name }} \ 8 | --cert-file=/etc/etcd/kubernetes.pem \ 9 | --key-file=/etc/etcd/kubernetes-key.pem \ 10 | --peer-cert-file=/etc/etcd/kubernetes.pem \ 11 | --peer-key-file=/etc/etcd/kubernetes-key.pem \ 12 | --trusted-ca-file=/etc/etcd/ca.pem \ 13 | --peer-trusted-ca-file=/etc/etcd/ca.pem \ 14 | --peer-client-cert-auth \ 15 | --client-cert-auth \ 16 | --initial-advertise-peer-urls https://{{ internal_ip }}:2380 \ 17 | --listen-peer-urls https://{{ internal_ip }}:2380 \ 18 | --listen-client-urls https://{{ internal_ip }}:2379,https://127.0.0.1:2379 \ 19 | --advertise-client-urls https://{{ internal_ip }}:2379 \ 20 | --initial-cluster-token etcd-cluster-0 \ 21 | --initial-cluster controller-00=https://{{ controller_00_ip }}:2380,controller-01=https://{{ controller_01_ip }}:2380,controller-02=https://{{ controller_02_ip }}:2380 \ 22 | --initial-cluster-state new \ 23 | --data-dir=/var/lib/etcd 24 | Restart=on-failure 25 | RestartSec=5 26 | 27 | [Install] 28 | WantedBy=multi-user.target 29 | -------------------------------------------------------------------------------- /work/tf/network/main.tf: -------------------------------------------------------------------------------- 1 | variable name { 2 | description = "Name of the network" 3 | type = "string" 4 | } 5 | 6 | variable cidr { 7 | description = "CIDR of the network" 8 | type = "string" 9 | } 10 | 11 | variable external_network_id { 12 | description = "External Network ID" 13 | type = "string" 14 | } 15 | 16 | output "network-id" { 17 | value = openstack_networking_network_v2.network.id 18 | } 19 | 20 | output "subnet-id" { 21 | value = openstack_networking_subnet_v2.network-subnet.id 22 | } 23 | 24 | resource "openstack_networking_network_v2" "network" { 25 | name = var.name 26 | admin_state_up = "true" 27 | } 28 | 29 | resource "openstack_networking_subnet_v2" "network-subnet" { 30 | name = format("%s-subnet", var.name) 31 | network_id = openstack_networking_network_v2.network.id 32 | cidr = var.cidr 33 | ip_version = 4 34 | dns_nameservers = ["8.8.8.8"] 35 | } 36 | /* 37 | resource "openstack_networking_router_v2" "network-router" { 38 | name = format("%s-router", var.name) 39 | external_network_id = var.external_network_id 40 | admin_state_up = true 41 | } 42 | 43 | resource "openstack_networking_router_interface_v2" "router-interface" { 44 | router_id = openstack_networking_router_v2.network-router.id 45 | subnet_id = openstack_networking_subnet_v2.network-subnet.id 46 | } 47 | */ 48 | 
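/*
Example usage of this module (this mirrors the "k8shard-net" block in the
root main.tf, which supplies the values shown below):

module "k8shard-net" {
  source              = "./tf/network"
  name                = "k8shard-net"
  cidr                = "10.0.0.0/24"
  external_network_id = "52b76a82-5f02-4a3e-9836-57536ef1cb63"
}
*/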
-------------------------------------------------------------------------------- /work/tf/compute/main.tf: -------------------------------------------------------------------------------- 1 | variable base_name { 2 | type = string 3 | } 4 | 5 | variable compute_count { 6 | type = number 7 | default = 1 8 | } 9 | 10 | variable network { 11 | type = string 12 | } 13 | 14 | variable subnet { 15 | type = string 16 | } 17 | 18 | variable image_id { 19 | type = string 20 | } 21 | 22 | 23 | output "ips" { 24 | value = join("\n", openstack_compute_instance_v2.node.*.access_ip_v4) 25 | } 26 | 27 | output "name-ips" { 28 | value = join("\n", formatlist("%s %s",openstack_compute_instance_v2.node.*.access_ip_v4, openstack_compute_instance_v2.node.*.name)) 29 | } 30 | 31 | resource "openstack_networking_port_v2" "network-port" { 32 | count = var.compute_count 33 | name = format("%s-port-%02d", var.base_name, count.index) 34 | network_id = var.network 35 | admin_state_up = "true" 36 | 37 | fixed_ip { 38 | subnet_id = var.subnet 39 | } 40 | 41 | allowed_address_pairs { 42 | ip_address = "0.0.0.0/0" 43 | } 44 | } 45 | 46 | resource "openstack_compute_instance_v2" "node" { 47 | count = var.compute_count 48 | name = format("%s-%02d", var.base_name, count.index) 49 | image_id = var.image_id 50 | flavor_name = "ssc.medium" 51 | key_pair = "viklund mac pro" 52 | security_groups = ["default"] 53 | 54 | network { 55 | port = openstack_networking_port_v2.network-port[count.index].id 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /docs/06-data-encryption-keys.md: -------------------------------------------------------------------------------- 1 | # Generating the Data Encryption Config and Key 2 | 3 | Kubernetes stores a variety of data including cluster state, application configurations, and secrets. Kubernetes supports the ability to [encrypt](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data) cluster data at rest. 4 | 5 | In this lab you will generate an encryption key and an [encryption config](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#understanding-the-encryption-at-rest-configuration) suitable for encrypting Kubernetes Secrets. 
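
Once the config is in place on the controllers, a quick way to confirm that Secrets really are stored encrypted is to read one straight out of etcd and look for the `aescbc` provider prefix — this is essentially what the smoke test in lab 13 does. A sketch, run on a controller, assuming a Secret named `kubernetes-the-hard-way` has been created:

```
sudo ETCDCTL_API=3 etcdctl get \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/etcd/ca.pem \
  --cert=/etc/etcd/kubernetes.pem \
  --key=/etc/etcd/kubernetes-key.pem \
  /registry/secrets/default/kubernetes-the-hard-way | hexdump -C
```

The stored value should begin with `k8s:enc:aescbc:v1:key1` rather than plain text.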
6 | 7 | ## The Encryption Key 8 | 9 | Generate an encryption key: 10 | 11 | ``` 12 | ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64) 13 | ``` 14 | 15 | ## The Encryption Config File 16 | 17 | Create the `encryption-config.yaml` encryption config file: 18 | 19 | ``` 20 | cat > encryption-config.yaml <) { 14 | if (/^(\d+\.\d+\.\d+\.\d+).*/) { 15 | my $ip = $1; 16 | if ( /ProxyJump=([^@]+@?\d+\.\d+\.\d+\.\d+)[^0-9]/ ) { 17 | push @{$hosts{$1}}, $ip; 18 | } 19 | else { 20 | push @{$hosts{_DIRECT}}, $ip; 21 | } 22 | } 23 | } 24 | close $INV; 25 | 26 | my %hashes = (); 27 | for my $proxy (sort keys %hosts) { 28 | my @command = ('ssh-keyscan', '-t', 'ssh-rsa'); 29 | my @ips = @{$hosts{$proxy}}; 30 | push @command, @ips; 31 | if ( $proxy ne '_DIRECT' ) { 32 | unshift @command, 'ssh', $proxy; 33 | } 34 | push @command, '2>/dev/null'; 35 | open my $CMD, '-|', "@command" or die "Can't open"; 36 | while (<$CMD>) { 37 | next if /^#/; 38 | chomp; 39 | my ($host) = /^(\S+)/; 40 | $hashes{$1} = $_; 41 | } 42 | } 43 | 44 | my $known_hosts_file = "$ENV{HOME}/.ssh/known_hosts"; 45 | my $backup = backup_file($known_hosts_file); 46 | 47 | open my $KNOWN_HOSTS, '<', $backup or die "Could not open backup known hosts file: $!\n"; 48 | open my $NEW_HOSTS, '>', $known_hosts_file or die "Could not open new hosts file: $!\n"; 49 | 50 | while (<$KNOWN_HOSTS>) { 51 | my ($ip) = /^(\S+)/; 52 | if (defined $ip && exists $hashes{$ip}) { 53 | say $NEW_HOSTS $hashes{$ip}; 54 | delete $hashes{$ip}; 55 | } 56 | else { 57 | print $NEW_HOSTS $_; 58 | } 59 | } 60 | 61 | for my $extra (values %hashes) { 62 | say $NEW_HOSTS $extra; 63 | } 64 | 65 | close $NEW_HOSTS; 66 | close $KNOWN_HOSTS; 67 | 68 | say "Known hosts file updated."; 69 | 70 | sub backup_file { 71 | my ($file) = @_; 72 | 73 | my $now = localtime(); 74 | my $backup_file_name = sprintf("%s.bak-%s", $file, $now->strftime("%Y%m%dT%H%M%S")); 75 | copy($file, $backup_file_name); 76 | return $backup_file_name; 77 | } 78 | -------------------------------------------------------------------------------- /docs/12-dns-addon.md: -------------------------------------------------------------------------------- 1 | # Deploying the DNS Cluster Add-on 2 | 3 | In this lab you will deploy the [DNS add-on](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) which provides DNS based service discovery, backed by [CoreDNS](https://coredns.io/), to applications running inside the Kubernetes cluster. 
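
The service installed by this add-on is what the kubelets point at: `clusterDNS` in `templates/kubelet-config.yaml.jj` is `10.32.0.10`, which must match the cluster IP of the `kube-dns` service created here. If pod DNS misbehaves, that service IP is the first thing to check:

```
kubectl get svc -n kube-system kube-dns
```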
4 | 5 | ## The DNS Cluster Add-on 6 | 7 | Deploy the `coredns` cluster add-on: 8 | 9 | ``` 10 | kubectl apply -f https://storage.googleapis.com/kubernetes-the-hard-way/coredns.yaml 11 | ``` 12 | 13 | > output 14 | 15 | ``` 16 | serviceaccount/coredns created 17 | clusterrole.rbac.authorization.k8s.io/system:coredns created 18 | clusterrolebinding.rbac.authorization.k8s.io/system:coredns created 19 | configmap/coredns created 20 | deployment.extensions/coredns created 21 | service/kube-dns created 22 | ``` 23 | 24 | List the pods created by the `kube-dns` deployment: 25 | 26 | ``` 27 | kubectl get pods -l k8s-app=kube-dns -n kube-system 28 | ``` 29 | 30 | > output 31 | 32 | ``` 33 | NAME READY STATUS RESTARTS AGE 34 | coredns-699f8ddd77-94qv9 1/1 Running 0 20s 35 | coredns-699f8ddd77-gtcgb 1/1 Running 0 20s 36 | ``` 37 | 38 | ## Verification 39 | 40 | Create a `busybox` deployment: 41 | 42 | ``` 43 | kubectl run busybox --image=busybox:1.28 --command -- sleep 3600 44 | ``` 45 | 46 | List the pod created by the `busybox` deployment: 47 | 48 | ``` 49 | kubectl get pods -l run=busybox 50 | ``` 51 | 52 | > output 53 | 54 | ``` 55 | NAME READY STATUS RESTARTS AGE 56 | busybox-bd8fb7cbd-vflm9 1/1 Running 0 10s 57 | ``` 58 | 59 | Retrieve the full name of the `busybox` pod: 60 | 61 | ``` 62 | POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}") 63 | ``` 64 | 65 | Execute a DNS lookup for the `kubernetes` service inside the `busybox` pod: 66 | 67 | ``` 68 | kubectl exec -ti $POD_NAME -- nslookup kubernetes 69 | ``` 70 | 71 | > output 72 | 73 | ``` 74 | Server: 10.32.0.10 75 | Address 1: 10.32.0.10 kube-dns.kube-system.svc.cluster.local 76 | 77 | Name: kubernetes 78 | Address 1: 10.32.0.1 kubernetes.default.svc.cluster.local 79 | ``` 80 | 81 | Next: [Smoke Test](13-smoke-test.md) 82 | -------------------------------------------------------------------------------- /docs/10-configuring-kubectl.md: -------------------------------------------------------------------------------- 1 | # Configuring kubectl for Remote Access 2 | 3 | In this lab you will generate a kubeconfig file for the `kubectl` command line utility based on the `admin` user credentials. 4 | 5 | > Run the commands in this lab from the same directory used to generate the admin client certificates. 6 | 7 | ## The Admin Kubernetes Configuration File 8 | 9 | Each kubeconfig requires a Kubernetes API Server to connect to. To support high availability the IP address assigned to the external load balancer fronting the Kubernetes API Servers will be used. 
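
This fork has no GCP load balancer: in `work/`, HAProxy on the master node (playbook-08-2) fronts the three API servers, and `playbook-10-remote-access.yml` generates the same kubeconfig against `{{ public_ip }}`. Outside of GCP you can set the address by hand instead of querying `gcloud`, for example:

```
# illustrative value; use whatever address fronts your kube-apiservers
KUBERNETES_PUBLIC_ADDRESS=130.239.81.217
```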
10 | 11 | Generate a kubeconfig file suitable for authenticating as the `admin` user: 12 | 13 | ``` 14 | { 15 | KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ 16 | --region $(gcloud config get-value compute/region) \ 17 | --format 'value(address)') 18 | 19 | kubectl config set-cluster kubernetes-the-hard-way \ 20 | --certificate-authority=ca.pem \ 21 | --embed-certs=true \ 22 | --server=https://${KUBERNETES_PUBLIC_ADDRESS}:6443 23 | 24 | kubectl config set-credentials admin \ 25 | --client-certificate=admin.pem \ 26 | --client-key=admin-key.pem 27 | 28 | kubectl config set-context kubernetes-the-hard-way \ 29 | --cluster=kubernetes-the-hard-way \ 30 | --user=admin 31 | 32 | kubectl config use-context kubernetes-the-hard-way 33 | } 34 | ``` 35 | 36 | ## Verification 37 | 38 | Check the health of the remote Kubernetes cluster: 39 | 40 | ``` 41 | kubectl get componentstatuses 42 | ``` 43 | 44 | > output 45 | 46 | ``` 47 | NAME STATUS MESSAGE ERROR 48 | controller-manager Healthy ok 49 | scheduler Healthy ok 50 | etcd-1 Healthy {"health":"true"} 51 | etcd-2 Healthy {"health":"true"} 52 | etcd-0 Healthy {"health":"true"} 53 | ``` 54 | 55 | List the nodes in the remote Kubernetes cluster: 56 | 57 | ``` 58 | kubectl get nodes 59 | ``` 60 | 61 | > output 62 | 63 | ``` 64 | NAME STATUS ROLES AGE VERSION 65 | worker-0 Ready 117s v1.12.0 66 | worker-1 Ready 118s v1.12.0 67 | worker-2 Ready 118s v1.12.0 68 | ``` 69 | 70 | Next: [Provisioning Pod Network Routes](11-pod-network-routes.md) 71 | -------------------------------------------------------------------------------- /work/playbook-05-config-files.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: master 3 | tasks: 4 | - name: Create kubeconfig files 5 | args: 6 | executable: /bin/bash 7 | shell: | 8 | kubectl config set-cluster kubernetes-the-hard-way \ 9 | --certificate-authority=ca.pem \ 10 | --embed-certs=true \ 11 | --server=https://{{ public_ip }}:6443 \ 12 | --kubeconfig={{ instance }}.kubeconfig 13 | 14 | kubectl config set-credentials {{ ns }}:{{ instance }} \ 15 | --client-certificate={{ instance }}.pem \ 16 | --client-key={{ instance }}-key.pem \ 17 | --embed-certs=true \ 18 | --kubeconfig={{ instance }}.kubeconfig 19 | 20 | kubectl config set-context default \ 21 | --cluster=kubernetes-the-hard-way \ 22 | --user={{ ns }}:{{ instance }} \ 23 | --kubeconfig={{ instance }}.kubeconfig 24 | 25 | kubectl config use-context default --kubeconfig={{ instance }}.kubeconfig 26 | vars: 27 | instance: "{{ item.instance }}" 28 | ns: "{{ item.ns }}" 29 | public_ip: "{{ item.public_ip }}" 30 | with_items: 31 | - { instance: 'worker-00', ns: 'system:node', public_ip: '{{ master_ip }}' } 32 | - { instance: 'worker-01', ns: 'system:node', public_ip: '{{ master_ip }}' } 33 | - { instance: 'worker-02', ns: 'system:node', public_ip: '{{ master_ip }}' } 34 | - { instance: 'kube-proxy', ns: 'system', public_ip: '{{ master_ip }}' } 35 | - { instance: 'kube-controller-manager', ns: 'system', public_ip: '127.0.0.1' } 36 | - { instance: 'kube-scheduler', ns: 'system', public_ip: '127.0.0.1' } 37 | - { instance: 'admin', ns: 'system', public_ip: '127.0.0.1' } 38 | - name: Distribute kubeconfig files 39 | args: 40 | executable: /bin/bash 41 | shell: | 42 | for instance in worker-{00,01,02}; do 43 | scp ${instance}.kubeconfig kube-proxy.kubeconfig ${instance}:~/ 44 | done 45 | for instance in controller-{00,01,02}; do 46 | scp admin.kubeconfig 
kube-controller-manager.kubeconfig kube-scheduler.kubeconfig ${instance}:~/ 47 | done 48 | -------------------------------------------------------------------------------- /docs/01-prerequisites.md: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | 3 | ## Google Cloud Platform 4 | 5 | This tutorial leverages the [Google Cloud Platform](https://cloud.google.com/) to streamline provisioning of the compute infrastructure required to bootstrap a Kubernetes cluster from the ground up. [Sign up](https://cloud.google.com/free/) for $300 in free credits. 6 | 7 | [Estimated cost](https://cloud.google.com/products/calculator/#id=78df6ced-9c50-48f8-a670-bc5003f2ddaa) to run this tutorial: $0.22 per hour ($5.39 per day). 8 | 9 | > The compute resources required for this tutorial exceed the Google Cloud Platform free tier. 10 | 11 | ## Google Cloud Platform SDK 12 | 13 | ### Install the Google Cloud SDK 14 | 15 | Follow the Google Cloud SDK [documentation](https://cloud.google.com/sdk/) to install and configure the `gcloud` command line utility. 16 | 17 | Verify the Google Cloud SDK version is 218.0.0 or higher: 18 | 19 | ``` 20 | gcloud version 21 | ``` 22 | 23 | ### Set a Default Compute Region and Zone 24 | 25 | This tutorial assumes a default compute region and zone have been configured. 26 | 27 | If you are using the `gcloud` command-line tool for the first time `init` is the easiest way to do this: 28 | 29 | ``` 30 | gcloud init 31 | ``` 32 | 33 | Otherwise set a default compute region: 34 | 35 | ``` 36 | gcloud config set compute/region us-west1 37 | ``` 38 | 39 | Set a default compute zone: 40 | 41 | ``` 42 | gcloud config set compute/zone us-west1-c 43 | ``` 44 | 45 | > Use the `gcloud compute zones list` command to view additional regions and zones. 46 | 47 | ## Running Commands in Parallel with tmux 48 | 49 | [tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. Labs in this tutorial may require running the same commands across multiple compute instances, in those cases consider using tmux and splitting a window into multiple panes with `synchronize-panes` enabled to speed up the provisioning process. 50 | 51 | > The use of tmux is optional and not required to complete this tutorial. 52 | 53 | ![tmux screenshot](images/tmux-screenshot.png) 54 | 55 | > Enable `synchronize-panes`: `ctrl+b` then `shift :`. Then type `set synchronize-panes on` at the prompt. To disable synchronization: `set synchronize-panes off`. 56 | 57 | Next: [Installing the Client Tools](02-client-tools.md) 58 | -------------------------------------------------------------------------------- /docs/11-pod-network-routes.md: -------------------------------------------------------------------------------- 1 | # Provisioning Pod Network Routes 2 | 3 | Pods scheduled to a node receive an IP address from the node's Pod CIDR range. At this point pods can not communicate with other pods running on different nodes due to missing network [routes](https://cloud.google.com/compute/docs/vpc/routes). 4 | 5 | In this lab you will create a route for each worker node that maps the node's Pod CIDR range to the node's internal IP address. 6 | 7 | > There are [other ways](https://kubernetes.io/docs/concepts/cluster-administration/networking/#how-to-achieve-this) to implement the Kubernetes networking model. 
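
On GCP these routes go into the VPC routing table; the `work/` variant has no such table, so `playbook-11-pod-network.yml` adds the equivalent kernel routes on every node (master, controllers and workers) instead, roughly:

```
# one route per worker, pointing that worker's pod CIDR at its internal IP
ip route add 10.0.10.0/24 via <worker-00 internal IP>
ip route add 10.0.11.0/24 via <worker-01 internal IP>
ip route add 10.0.12.0/24 via <worker-02 internal IP>
```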
8 | 9 | ## The Routing Table 10 | 11 | In this section you will gather the information required to create routes in the `kubernetes-the-hard-way` VPC network. 12 | 13 | Print the internal IP address and Pod CIDR range for each worker instance: 14 | 15 | ``` 16 | for instance in worker-0 worker-1 worker-2; do 17 | gcloud compute instances describe ${instance} \ 18 | --format 'value[separator=" "](networkInterfaces[0].networkIP,metadata.items[0].value)' 19 | done 20 | ``` 21 | 22 | > output 23 | 24 | ``` 25 | 10.240.0.20 10.200.0.0/24 26 | 10.240.0.21 10.200.1.0/24 27 | 10.240.0.22 10.200.2.0/24 28 | ``` 29 | 30 | ## Routes 31 | 32 | Create network routes for each worker instance: 33 | 34 | ``` 35 | for i in 0 1 2; do 36 | gcloud compute routes create kubernetes-route-10-200-${i}-0-24 \ 37 | --network kubernetes-the-hard-way \ 38 | --next-hop-address 10.240.0.2${i} \ 39 | --destination-range 10.200.${i}.0/24 40 | done 41 | ``` 42 | 43 | List the routes in the `kubernetes-the-hard-way` VPC network: 44 | 45 | ``` 46 | gcloud compute routes list --filter "network: kubernetes-the-hard-way" 47 | ``` 48 | 49 | > output 50 | 51 | ``` 52 | NAME NETWORK DEST_RANGE NEXT_HOP PRIORITY 53 | default-route-081879136902de56 kubernetes-the-hard-way 10.240.0.0/24 kubernetes-the-hard-way 1000 54 | default-route-55199a5aa126d7aa kubernetes-the-hard-way 0.0.0.0/0 default-internet-gateway 1000 55 | kubernetes-route-10-200-0-0-24 kubernetes-the-hard-way 10.200.0.0/24 10.240.0.20 1000 56 | kubernetes-route-10-200-1-0-24 kubernetes-the-hard-way 10.200.1.0/24 10.240.0.21 1000 57 | kubernetes-route-10-200-2-0-24 kubernetes-the-hard-way 10.200.2.0/24 10.240.0.22 1000 58 | ``` 59 | 60 | Next: [Deploying the DNS Cluster Add-on](12-dns-addon.md) 61 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **NOTE:** My solution to this is in the `work/` subdirectory. 2 | 3 | # Kubernetes The Hard Way 4 | 5 | This tutorial walks you through setting up Kubernetes the hard way. This guide is not for people looking for a fully automated command to bring up a Kubernetes cluster. If that's you then check out [Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine), or the [Getting Started Guides](http://kubernetes.io/docs/getting-started-guides/). 6 | 7 | Kubernetes The Hard Way is optimized for learning, which means taking the long route to ensure you understand each task required to bootstrap a Kubernetes cluster. 8 | 9 | > The results of this tutorial should not be viewed as production ready, and may receive limited support from the community, but don't let that stop you from learning! 10 | 11 | ## Target Audience 12 | 13 | The target audience for this tutorial is someone planning to support a production Kubernetes cluster and wants to understand how everything fits together. 14 | 15 | ## Cluster Details 16 | 17 | Kubernetes The Hard Way guides you through bootstrapping a highly available Kubernetes cluster with end-to-end encryption between components and RBAC authentication. 
18 | 19 | * [Kubernetes](https://github.com/kubernetes/kubernetes) 1.12.0 20 | * [containerd Container Runtime](https://github.com/containerd/containerd) 1.2.0-rc.0 21 | * [gVisor](https://github.com/google/gvisor) 50c283b9f56bb7200938d9e207355f05f79f0d17 22 | * [CNI Container Networking](https://github.com/containernetworking/cni) 0.6.0 23 | * [etcd](https://github.com/coreos/etcd) v3.3.9 24 | * [CoreDNS](https://github.com/coredns/coredns) v1.2.2 25 | 26 | ## Labs 27 | 28 | This tutorial assumes you have access to the [Google Cloud Platform](https://cloud.google.com). While GCP is used for basic infrastructure requirements the lessons learned in this tutorial can be applied to other platforms. 29 | 30 | * [Prerequisites](docs/01-prerequisites.md) 31 | * [Installing the Client Tools](docs/02-client-tools.md) 32 | * [Provisioning Compute Resources](docs/03-compute-resources.md) 33 | * [Provisioning the CA and Generating TLS Certificates](docs/04-certificate-authority.md) 34 | * [Generating Kubernetes Configuration Files for Authentication](docs/05-kubernetes-configuration-files.md) 35 | * [Generating the Data Encryption Config and Key](docs/06-data-encryption-keys.md) 36 | * [Bootstrapping the etcd Cluster](docs/07-bootstrapping-etcd.md) 37 | * [Bootstrapping the Kubernetes Control Plane](docs/08-bootstrapping-kubernetes-controllers.md) 38 | * [Bootstrapping the Kubernetes Worker Nodes](docs/09-bootstrapping-kubernetes-workers.md) 39 | * [Configuring kubectl for Remote Access](docs/10-configuring-kubectl.md) 40 | * [Provisioning Pod Network Routes](docs/11-pod-network-routes.md) 41 | * [Deploying the DNS Cluster Add-on](docs/12-dns-addon.md) 42 | * [Smoke Test](docs/13-smoke-test.md) 43 | * [Cleaning Up](docs/14-cleanup.md) 44 | -------------------------------------------------------------------------------- /docs/02-client-tools.md: -------------------------------------------------------------------------------- 1 | # Installing the Client Tools 2 | 3 | In this lab you will install the command line utilities required to complete this tutorial: [cfssl](https://github.com/cloudflare/cfssl), [cfssljson](https://github.com/cloudflare/cfssl), and [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl). 4 | 5 | 6 | ## Install CFSSL 7 | 8 | The `cfssl` and `cfssljson` command line utilities will be used to provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) and generate TLS certificates. 
9 | 10 | Download and install `cfssl` and `cfssljson` from the [cfssl repository](https://pkg.cfssl.org): 11 | 12 | ### OS X 13 | 14 | ``` 15 | curl -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64 16 | curl -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64 17 | ``` 18 | 19 | ``` 20 | chmod +x cfssl cfssljson 21 | ``` 22 | 23 | ``` 24 | sudo mv cfssl cfssljson /usr/local/bin/ 25 | ``` 26 | 27 | Some OS X users may experience problems using the pre-built binaries in which case [Homebrew](https://brew.sh) might be a better option: 28 | 29 | ``` 30 | brew install cfssl 31 | ``` 32 | 33 | ### Linux 34 | 35 | ``` 36 | wget -q --show-progress --https-only --timestamping \ 37 | https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \ 38 | https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 39 | ``` 40 | 41 | ``` 42 | chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 43 | ``` 44 | 45 | ``` 46 | sudo mv cfssl_linux-amd64 /usr/local/bin/cfssl 47 | ``` 48 | 49 | ``` 50 | sudo mv cfssljson_linux-amd64 /usr/local/bin/cfssljson 51 | ``` 52 | 53 | ### Verification 54 | 55 | Verify `cfssl` version 1.2.0 or higher is installed: 56 | 57 | ``` 58 | cfssl version 59 | ``` 60 | 61 | > output 62 | 63 | ``` 64 | Version: 1.2.0 65 | Revision: dev 66 | Runtime: go1.6 67 | ``` 68 | 69 | > The cfssljson command line utility does not provide a way to print its version. 70 | 71 | ## Install kubectl 72 | 73 | The `kubectl` command line utility is used to interact with the Kubernetes API Server. Download and install `kubectl` from the official release binaries: 74 | 75 | ### OS X 76 | 77 | ``` 78 | curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/darwin/amd64/kubectl 79 | ``` 80 | 81 | ``` 82 | chmod +x kubectl 83 | ``` 84 | 85 | ``` 86 | sudo mv kubectl /usr/local/bin/ 87 | ``` 88 | 89 | ### Linux 90 | 91 | ``` 92 | wget https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl 93 | ``` 94 | 95 | ``` 96 | chmod +x kubectl 97 | ``` 98 | 99 | ``` 100 | sudo mv kubectl /usr/local/bin/ 101 | ``` 102 | 103 | ### Verification 104 | 105 | Verify `kubectl` version 1.12.0 or higher is installed: 106 | 107 | ``` 108 | kubectl version --client 109 | ``` 110 | 111 | > output 112 | 113 | ``` 114 | Client Version: version.Info{Major:"1", Minor:"12", GitVersion:"v1.12.0", GitCommit:"0ed33881dc4355495f623c6f22e7dd0b7632b7c0", GitTreeState:"clean", BuildDate:"2018-09-27T17:05:32Z", GoVersion:"go1.10.4", Compiler:"gc", Platform:"linux/amd64"} 115 | ``` 116 | 117 | Next: [Provisioning Compute Resources](03-compute-resources.md) 118 | -------------------------------------------------------------------------------- /work/playbook-00-pre-setup.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: master 3 | become: yes 4 | tasks: 5 | - name: Check if we have internal ens 6 | command: ip link show ens4 7 | register: ip_link_result 8 | ignore_errors: True 9 | - name: Start the internal interface 10 | command: ip link set ens4 up 11 | when: ip_link_result.stdout.find('state UP') == -1 12 | - name: Check if we have internal ip on ens 13 | command: ip addr show dev ens4 14 | register: ip_addr_result 15 | ignore_errors: True 16 | - name: Add ip to other interface 17 | command: ip addr add {{ internal_ip }}/24 dev ens4 18 | when: ip_addr_result.stdout.find('inet 10.0.0') == -1 19 | - name: Add route for 10.0.0.0/8 net 20 | command: ip route add 10.0.0.0/8 dev ens4 21 | when: 1 == 1 22 | - name: Enable 
MASQUERADING of internal network 23 | iptables: 24 | table: nat 25 | chain: POSTROUTING 26 | jump: MASQUERADE 27 | out_interface: ens3 28 | - name: Enable ip_forward 29 | sysctl: 30 | name: net.ipv4.ip_forward 31 | value: "1" 32 | sysctl_set: yes 33 | state: present 34 | reload: yes 35 | - name: Check for hosts in /etc/hosts 36 | shell: grep worker /etc/hosts 37 | register: grep_result 38 | failed_when: grep_result.rc == 54 39 | - name: Copy local file to node 40 | copy: 41 | src: hosts 42 | dest: /etc/extra-hosts 43 | when: grep_result.rc != 0 44 | - name: Add extra hosts to end of hosts file 45 | shell: cat /etc/extra-hosts >> /etc/hosts 46 | when: grep_result.rc != 0 47 | - name: Add hosts to known_hosts file 48 | args: 49 | executable: /bin/bash 50 | shell: ssh-keyscan {worker,controller}-{00,01,02} 2>/dev/null | grep ssh-rsa >> .ssh/known_hosts 51 | - hosts: workers, controllers 52 | become: yes 53 | tasks: 54 | - name: Check if default route is correct 55 | shell: ip route | grep 'default via {{ router }}' 56 | register: ip_route_result 57 | ignore_errors: True 58 | failed_when: ip_route_result.rc == 2 59 | - name: Delete incorrect default route 60 | command: ip route del default 61 | when: ip_route_result.rc != 0 62 | - name: Add correct default route 63 | command: ip route add default via {{ router }} 64 | when: ip_route_result.rc != 0 65 | - name: Fix resolv.conf 66 | lineinfile: 67 | path: /etc/resolv.conf 68 | regexp: '^nameserver' 69 | line: 'nameserver 8.8.8.8' 70 | - name: Check for hosts in /etc/hosts 71 | shell: grep worker /etc/hosts 72 | register: grep_result 73 | failed_when: grep_result.rc == 54 74 | - name: Copy local file to node 75 | copy: 76 | src: hosts 77 | dest: /etc/extra-hosts 78 | when: grep_result.rc != 0 79 | - name: Add extra hosts to end of hosts file 80 | shell: cat /etc/extra-hosts >> /etc/hosts 81 | when: grep_result.rc != 0 82 | -------------------------------------------------------------------------------- /work/playbook-04-certificate-authority.yml: -------------------------------------------------------------------------------- 1 | - hosts: master 2 | tasks: 3 | - name: Copy CA Config file 4 | copy: 5 | dest: ca-config.json 6 | src: ca-config.json 7 | - name: Create csr files 8 | template: 9 | dest: "{{item.file}}-csr.json" 10 | src: base-csr.json.jj 11 | vars: 12 | CN: "{{ item.CN }}" 13 | O: "{{ item.O }}" 14 | OU: "{{ item.OU }}" 15 | loop: 16 | - { file: 'ca', CN: "Kubernetes", O: "Kubernetes", OU: "Wizzard Training" } 17 | - { file: 'admin', CN: "admin", O: "system:masters", OU: "Kubernetes The Hard Way" } 18 | - { file: 'worker-00', CN: "system:node:worker-00", O: "system:nodes", OU: "Kubernetes The Hard Way" } 19 | - { file: 'worker-01', CN: "system:node:worker-01", O: "system:nodes", OU: "Kubernetes The Hard Way" } 20 | - { file: 'worker-02', CN: "system:node:worker-02", O: "system:nodes", OU: "Kubernetes The Hard Way" } 21 | - { file: 'kube-controller-manager', CN: "system:kube-controller-manager", O: "system:kube-controller-manager", OU: "Kubernetes The Hard Way" } 22 | - { file: 'kube-proxy', CN: 'system:kube-proxy', O: 'system:node-proxier', OU: "Kubernetes The Hard Way" } 23 | - { file: 'kube-scheduler', CN: 'system:kube-scheduler', O: 'system:kube-scheduler', OU: "Kubernetes The Hard Way" } 24 | - { file: 'kubernetes', CN: 'kubernetes', O: 'Kubernetes', OU: "Kubernetes The Hard Way" } 25 | - { file: 'service-account', CN: 'service-accounts', O: 'Kubernetes', OU: "Kubernetes The Hard Way" } 26 | - name: Create CA 27 | shell: cfssl 
gencert -initca ca-csr.json | cfssljson -bare ca 28 | - name: Create Client Certificates 29 | args: 30 | executable: /bin/bash 31 | shell: | 32 | for instance in worker-{00,01,02}; do 33 | echo "INSTANCE IS: $instance" 34 | INTERNAL_IP=$( grep $instance /etc/hosts | awk '{print $1}' ) 35 | cfssl gencert \ 36 | -ca=ca.pem \ 37 | -ca-key=ca-key.pem \ 38 | -config=ca-config.json \ 39 | -hostname=${instance},${INTERNAL_IP} \ 40 | -profile=kubernetes \ 41 | ${instance}-csr.json | cfssljson -bare ${instance} 42 | done 43 | - name: Create Certificates 44 | shell: cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes {{ item }}-csr.json | cfssljson -bare {{ item }} 45 | with_items: 46 | - admin 47 | - kube-controller-manager 48 | - kube-proxy 49 | - kube-scheduler 50 | - service-account 51 | - name: Create Kubernetes API Server Certificate 52 | shell: | 53 | EXTERNAL_IP=130.239.81.217 ### TODO FIX THIS 54 | INTERNAL_IP=$( ip addr | grep 10.0.0 | awk '{print $2}' | cut -d/ -f1 ) 55 | IPS=$( grep controller /etc/hosts | awk '{print $1}' | tr "\n" ',' | sed 's/,$//' ) 56 | 57 | cfssl gencert \ 58 | -ca=ca.pem \ 59 | -ca-key=ca-key.pem \ 60 | -config=ca-config.json \ 61 | -hostname=10.32.0.1,${IPS},${EXTERNAL_IP},${INTERNAL_IP},127.0.0.1,kubernetes.default \ 62 | -profile=kubernetes \ 63 | kubernetes-csr.json | cfssljson -bare kubernetes 64 | - name: Copy Certificates to the nodes 65 | args: 66 | executable: /bin/bash 67 | shell: | 68 | for instance in worker-{00,01,02}; do 69 | scp ca.pem ${instance}{,-key}.pem ${instance}:~/ 70 | done 71 | for instance in controller-{00,01,02}; do 72 | scp ca.pem ca-key.pem kubernetes{,-key}.pem service-account{,-key}.pem ${instance}:~/ 73 | done 74 | -------------------------------------------------------------------------------- /docs/07-bootstrapping-etcd.md: -------------------------------------------------------------------------------- 1 | # Bootstrapping the etcd Cluster 2 | 3 | Kubernetes components are stateless and store cluster state in [etcd](https://github.com/coreos/etcd). In this lab you will bootstrap a three node etcd cluster and configure it for high availability and secure remote access. 4 | 5 | ## Prerequisites 6 | 7 | The commands in this lab must be run on each controller instance: `controller-0`, `controller-1`, and `controller-2`. Login to each controller instance using the `gcloud` command. Example: 8 | 9 | ``` 10 | gcloud compute ssh controller-0 11 | ``` 12 | 13 | ### Running commands in parallel with tmux 14 | 15 | [tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab. 
16 | 17 | ## Bootstrapping an etcd Cluster Member 18 | 19 | ### Download and Install the etcd Binaries 20 | 21 | Download the official etcd release binaries from the [coreos/etcd](https://github.com/coreos/etcd) GitHub project: 22 | 23 | ``` 24 | wget -q --show-progress --https-only --timestamping \ 25 | "https://github.com/coreos/etcd/releases/download/v3.3.9/etcd-v3.3.9-linux-amd64.tar.gz" 26 | ``` 27 | 28 | Extract and install the `etcd` server and the `etcdctl` command line utility: 29 | 30 | ``` 31 | { 32 | tar -xvf etcd-v3.3.9-linux-amd64.tar.gz 33 | sudo mv etcd-v3.3.9-linux-amd64/etcd* /usr/local/bin/ 34 | } 35 | ``` 36 | 37 | ### Configure the etcd Server 38 | 39 | ``` 40 | { 41 | sudo mkdir -p /etc/etcd /var/lib/etcd 42 | sudo cp ca.pem kubernetes-key.pem kubernetes.pem /etc/etcd/ 43 | } 44 | ``` 45 | 46 | The instance internal IP address will be used to serve client requests and communicate with etcd cluster peers. Retrieve the internal IP address for the current compute instance: 47 | 48 | ``` 49 | INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \ 50 | http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip) 51 | ``` 52 | 53 | Each etcd member must have a unique name within an etcd cluster. Set the etcd name to match the hostname of the current compute instance: 54 | 55 | ``` 56 | ETCD_NAME=$(hostname -s) 57 | ``` 58 | 59 | Create the `etcd.service` systemd unit file: 60 | 61 | ``` 62 | cat < Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`. 105 | 106 | ## Verification 107 | 108 | List the etcd cluster members: 109 | 110 | ``` 111 | sudo ETCDCTL_API=3 etcdctl member list \ 112 | --endpoints=https://127.0.0.1:2379 \ 113 | --cacert=/etc/etcd/ca.pem \ 114 | --cert=/etc/etcd/kubernetes.pem \ 115 | --key=/etc/etcd/kubernetes-key.pem 116 | ``` 117 | 118 | > output 119 | 120 | ``` 121 | 3a57933972cb5131, started, controller-2, https://10.240.0.12:2380, https://10.240.0.12:2379 122 | f98dc20bce6225a0, started, controller-0, https://10.240.0.10:2380, https://10.240.0.10:2379 123 | ffed16798470cab5, started, controller-1, https://10.240.0.11:2380, https://10.240.0.11:2379 124 | ``` 125 | 126 | Next: [Bootstrapping the Kubernetes Control Plane](08-bootstrapping-kubernetes-controllers.md) 127 | -------------------------------------------------------------------------------- /work/main.tf: -------------------------------------------------------------------------------- 1 | #resource openstack_networking_floatingip_v2 floatip_1 { 2 | # pool = "" 3 | #} 4 | 5 | 6 | ### VARS 7 | 8 | variable "image-id" { 9 | type = string 10 | #default = "ba115875-5332-4f04-84bc-429d6730b3ab" ## Debian 11 | #default = "f8e54e0d-6337-449f-a8f3-c0a40339d827" ## Ubuntu 12 | #default = "3301175c-f322-4013-96c5-e2c3523083d5" 13 | default = "995da339-7698-49f0-91de-b20e49cb8fe2" 14 | } 15 | 16 | variable "external-network" { 17 | type = string 18 | default = "2c3fb52b-973d-434d-9284-c8f97a48ce0b" 19 | #default = "52b76a82-5f02-4a3e-9836-57536ef1cb63" 20 | } 21 | 22 | variable "floating-ip" { 23 | type = string 24 | default = "130.239.81.217" 25 | } 26 | 27 | 28 | ### SECURITY GROUPS ### 29 | 30 | 31 | ### NETWORK ### 32 | 33 | module "k8shard-net" { 34 | source = "./tf/network" 35 | name = "k8shard-net" 36 | cidr = "10.0.0.0/24" 37 | external_network_id = "52b76a82-5f02-4a3e-9836-57536ef1cb63" 38 | } 39 | 40 | resource "openstack_networking_port_v2" "network-port" { 41 | name = "a-network-port" 42 | network_id = 
module.k8shard-net.network-id 43 | admin_state_up = "true" 44 | 45 | fixed_ip { 46 | subnet_id = module.k8shard-net.subnet-id 47 | } 48 | 49 | allowed_address_pairs { 50 | ip_address = "0.0.0.0/0" 51 | } 52 | } 53 | 54 | 55 | 56 | ### COMPUTES ### 57 | 58 | resource "openstack_compute_instance_v2" "k8shard-master" { 59 | name = "master" 60 | image_id = var.image-id 61 | flavor_name = "ssc.medium" 62 | key_pair = "viklund mac pro" 63 | security_groups = ["default"] 64 | 65 | network { 66 | uuid = var.external-network 67 | } 68 | 69 | network { 70 | port = openstack_networking_port_v2.network-port.id 71 | } 72 | } 73 | 74 | 75 | 76 | resource "openstack_networking_subnet_v2" "controller-subnet" { 77 | name = format("k8shard-controller-subnet") 78 | network_id = module.k8shard-net.network-id 79 | cidr = "10.10.0.0/16" 80 | ip_version = 4 81 | dns_nameservers = ["8.8.8.8"] 82 | } 83 | 84 | module "controllers" { 85 | source = "./tf/compute" 86 | base_name = "controller" 87 | compute_count = 3 88 | network = module.k8shard-net.network-id 89 | subnet = openstack_networking_subnet_v2.controller-subnet.id 90 | image_id = var.image-id 91 | } 92 | 93 | 94 | resource "openstack_networking_subnet_v2" "worker-subnet" { 95 | name = "k8shard-worker-subnet" 96 | network_id = module.k8shard-net.network-id 97 | cidr = "10.20.0.0/16" 98 | ip_version = 4 99 | dns_nameservers = ["8.8.8.8"] 100 | } 101 | 102 | module "workers" { 103 | source = "./tf/compute" 104 | base_name = "worker" 105 | compute_count = 3 106 | network = module.k8shard-net.network-id 107 | subnet = openstack_networking_subnet_v2.worker-subnet.id 108 | image_id = var.image-id 109 | } 110 | 111 | resource "openstack_compute_floatingip_associate_v2" "fip_1" { 112 | floating_ip = var.floating-ip 113 | instance_id = openstack_compute_instance_v2.k8shard-master.id 114 | } 115 | 116 | data "template_file" "ansible_inventory" { 117 | template = file("${path.root}/inventory.tmpl") 118 | 119 | vars = { 120 | ip = var.floating-ip 121 | internal_ip = openstack_compute_instance_v2.k8shard-master.network[1].fixed_ip_v4 122 | workers = join("\n", formatlist("%-9s ansible_user=ubuntu ansible_ssh_common_args='-o ProxyJump=ubuntu@${var.floating-ip}' router=%s", split("\n",module.workers.ips), openstack_compute_instance_v2.k8shard-master.network[1].fixed_ip_v4)) 123 | controllers = join("\n", formatlist("%-9s ansible_user=ubuntu ansible_ssh_common_args='-o ProxyJump=ubuntu@${var.floating-ip}' router=%s", split("\n",module.controllers.ips), openstack_compute_instance_v2.k8shard-master.network[1].fixed_ip_v4)) 124 | } 125 | } 126 | 127 | data "template_file" "hosts_file" { 128 | template = "$${content}" 129 | 130 | vars = { 131 | content = format("%s %s\n%s\n%s", 132 | openstack_compute_instance_v2.k8shard-master.network[1].fixed_ip_v4, openstack_compute_instance_v2.k8shard-master.name, 133 | module.controllers.name-ips, module.workers.name-ips) 134 | } 135 | } 136 | 137 | resource "null_resource" "ansible_inventory_writer" { 138 | triggers = { uuid = uuid() } 139 | 140 | provisioner "local-exec" { 141 | command = "echo \"${data.template_file.ansible_inventory.rendered}\" > \"${path.root}/inventory\"" 142 | } 143 | } 144 | 145 | resource "null_resource" "hosts_file_writer" { 146 | triggers = { uuid = uuid() } 147 | 148 | provisioner "local-exec" { 149 | command = "echo \"${data.template_file.hosts_file.rendered}\" > \"${path.root}/hosts\"" 150 | } 151 | } 152 | -------------------------------------------------------------------------------- 
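For orientation: the `hosts` file written by the `hosts_file_writer` provisioner above is a plain "IP name" list, which `fix-group-vars.pl` turns into `group_vars/all` and `playbook-00-pre-setup.yml` appends to `/etc/hosts` on every node. With placeholder addresses it looks like:

```
<master internal IP> master
<controller-00 IP> controller-00
<controller-01 IP> controller-01
<controller-02 IP> controller-02
<worker-00 IP> worker-00
<worker-01 IP> worker-01
<worker-02 IP> worker-02
```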
/work/playbook-08-1-control-plane.yml: -------------------------------------------------------------------------------- 1 | # vim: ts=2:sw=2 2 | - hosts: controllers 3 | become: yes 4 | vars: 5 | init: 1 6 | tasks: 7 | - name: Create kubernetes directories 8 | file: 9 | path: "{{ item }}" 10 | state: directory 11 | with_items: 12 | - /etc/kubernetes/config 13 | - /var/lib/kubernetes 14 | when: init == 1 15 | - name: Download Kubernetes Controller Binaries 16 | get_url: 17 | url: "https://storage.googleapis.com/kubernetes-release/release/{{ kubernetes_version }}/bin/linux/amd64/{{ item }}" 18 | dest: /usr/local/bin/ 19 | mode: 0755 20 | with_items: 21 | - kube-apiserver 22 | - kube-controller-manager 23 | - kube-scheduler 24 | - kubectl 25 | when: init == 1 26 | - name: Move files into kubernetes directory 27 | args: 28 | executable: /bin/bash 29 | shell: | 30 | mv ca{,-key}.pem \ 31 | kubernetes{,-key}.pem \ 32 | service-account{,-key}.pem \ 33 | encryption-config.yaml \ 34 | kube-controller-manager.kubeconfig \ 35 | kube-scheduler.kubeconfig \ 36 | /var/lib/kubernetes/ 37 | ignore_errors: yes 38 | when: init == 1 39 | - name: Create kube-apiserver systemd service 40 | template: 41 | src: kube-apiserver.service.jj 42 | dest: /etc/systemd/system/kube-apiserver.service 43 | vars: 44 | internal_ip: "{{ ansible_default_ipv4.address }}" 45 | - name: Create kube-controller-manager systemd service 46 | template: 47 | src: kube-controller-manager.service.jj 48 | dest: /etc/systemd/system/kube-controller-manager.service 49 | - name: Create kube-scheudler.yaml config file 50 | template: 51 | src: kube-scheduler.yaml.jj 52 | dest: /etc/kubernetes/config/kube-scheduler.yaml 53 | - name: Create kube-scheudler systemd service 54 | template: 55 | src: kube-scheduler.service.jj 56 | dest: /etc/systemd/system/kube-scheduler.service 57 | - name: Enable systemd services 58 | systemd: 59 | daemon_reload: yes 60 | enabled: yes 61 | state: started 62 | name: "{{ item }}" 63 | with_items: 64 | - kube-apiserver 65 | - kube-controller-manager 66 | - kube-scheduler 67 | - name: install nginx 68 | apt: 69 | force_apt_get: yes 70 | name: nginx 71 | state: present 72 | - name: Create nginx server 73 | template: 74 | src: kubernetes.default.svc.cluster.local.jj 75 | dest: /etc/nginx/sites-available/kubernetes.default.svc.cluster.local 76 | - name: Symlink into enabled sites 77 | file: 78 | src: /etc/nginx/sites-available/kubernetes.default.svc.cluster.local 79 | dest: /etc/nginx/sites-enabled/kubernetes.default.svc.cluster.local 80 | state: link 81 | - name: Restart and enable nginx 82 | systemd: 83 | enabled: yes 84 | state: restarted 85 | name: nginx 86 | - name: Reload nginx 87 | systemd: 88 | state: reloaded 89 | name: nginx 90 | - name: RBAC, Create ClusterRole 91 | args: 92 | executable: /bin/bash 93 | shell: | 94 | cat < Ensure a default compute zone and region have been set as described in the [Prerequisites](01-prerequisites.md#set-a-default-compute-region-and-zone) lab. 6 | 7 | ## Networking 8 | 9 | The Kubernetes [networking model](https://kubernetes.io/docs/concepts/cluster-administration/networking/#kubernetes-model) assumes a flat network in which containers and nodes can communicate with each other. In cases where this is not desired [network policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) can limit how groups of containers are allowed to communicate with each other and external network endpoints. 
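Network policies are expressed with the standard `networking.k8s.io/v1` `NetworkPolicy` resource. Purely for reference — this tutorial never applies one, and the plain `bridge` CNI plugin configured later does not enforce policies — a minimal default-deny ingress policy looks like this:

```
cat <<EOF > default-deny-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  podSelector: {}
  policyTypes:
  - Ingress
EOF
```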
10 | 11 | > Setting up network policies is out of scope for this tutorial. 12 | 13 | ### Virtual Private Cloud Network 14 | 15 | In this section a dedicated [Virtual Private Cloud](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) (VPC) network will be setup to host the Kubernetes cluster. 16 | 17 | Create the `kubernetes-the-hard-way` custom VPC network: 18 | 19 | ``` 20 | gcloud compute networks create kubernetes-the-hard-way --subnet-mode custom 21 | ``` 22 | 23 | A [subnet](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets) must be provisioned with an IP address range large enough to assign a private IP address to each node in the Kubernetes cluster. 24 | 25 | Create the `kubernetes` subnet in the `kubernetes-the-hard-way` VPC network: 26 | 27 | ``` 28 | gcloud compute networks subnets create kubernetes \ 29 | --network kubernetes-the-hard-way \ 30 | --range 10.240.0.0/24 31 | ``` 32 | 33 | > The `10.240.0.0/24` IP address range can host up to 254 compute instances. 34 | 35 | ### Firewall Rules 36 | 37 | Create a firewall rule that allows internal communication across all protocols: 38 | 39 | ``` 40 | gcloud compute firewall-rules create kubernetes-the-hard-way-allow-internal \ 41 | --allow tcp,udp,icmp \ 42 | --network kubernetes-the-hard-way \ 43 | --source-ranges 10.240.0.0/24,10.200.0.0/16 44 | ``` 45 | 46 | Create a firewall rule that allows external SSH, ICMP, and HTTPS: 47 | 48 | ``` 49 | gcloud compute firewall-rules create kubernetes-the-hard-way-allow-external \ 50 | --allow tcp:22,tcp:6443,icmp \ 51 | --network kubernetes-the-hard-way \ 52 | --source-ranges 0.0.0.0/0 53 | ``` 54 | 55 | > An [external load balancer](https://cloud.google.com/compute/docs/load-balancing/network/) will be used to expose the Kubernetes API Servers to remote clients. 56 | 57 | List the firewall rules in the `kubernetes-the-hard-way` VPC network: 58 | 59 | ``` 60 | gcloud compute firewall-rules list --filter="network:kubernetes-the-hard-way" 61 | ``` 62 | 63 | > output 64 | 65 | ``` 66 | NAME NETWORK DIRECTION PRIORITY ALLOW DENY 67 | kubernetes-the-hard-way-allow-external kubernetes-the-hard-way INGRESS 1000 tcp:22,tcp:6443,icmp 68 | kubernetes-the-hard-way-allow-internal kubernetes-the-hard-way INGRESS 1000 tcp,udp,icmp 69 | ``` 70 | 71 | ### Kubernetes Public IP Address 72 | 73 | Allocate a static IP address that will be attached to the external load balancer fronting the Kubernetes API Servers: 74 | 75 | ``` 76 | gcloud compute addresses create kubernetes-the-hard-way \ 77 | --region $(gcloud config get-value compute/region) 78 | ``` 79 | 80 | Verify the `kubernetes-the-hard-way` static IP address was created in your default compute region: 81 | 82 | ``` 83 | gcloud compute addresses list --filter="name=('kubernetes-the-hard-way')" 84 | ``` 85 | 86 | > output 87 | 88 | ``` 89 | NAME REGION ADDRESS STATUS 90 | kubernetes-the-hard-way us-west1 XX.XXX.XXX.XX RESERVED 91 | ``` 92 | 93 | ## Compute Instances 94 | 95 | The compute instances in this lab will be provisioned using [Ubuntu Server](https://www.ubuntu.com/server) 18.04, which has good support for the [containerd container runtime](https://github.com/containerd/containerd). Each compute instance will be provisioned with a fixed private IP address to simplify the Kubernetes bootstrapping process. 
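Optionally — this check is not part of the original lab — you can confirm that the `ubuntu-1804-lts` image family used below resolves to a current image before creating any instances:

```
gcloud compute images describe-from-family ubuntu-1804-lts \
  --project ubuntu-os-cloud \
  --format 'value(name)'
```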
96 | 97 | ### Kubernetes Controllers 98 | 99 | Create three compute instances which will host the Kubernetes control plane: 100 | 101 | ``` 102 | for i in 0 1 2; do 103 | gcloud compute instances create controller-${i} \ 104 | --async \ 105 | --boot-disk-size 200GB \ 106 | --can-ip-forward \ 107 | --image-family ubuntu-1804-lts \ 108 | --image-project ubuntu-os-cloud \ 109 | --machine-type n1-standard-1 \ 110 | --private-network-ip 10.240.0.1${i} \ 111 | --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ 112 | --subnet kubernetes \ 113 | --tags kubernetes-the-hard-way,controller 114 | done 115 | ``` 116 | 117 | ### Kubernetes Workers 118 | 119 | Each worker instance requires a pod subnet allocation from the Kubernetes cluster CIDR range. The pod subnet allocation will be used to configure container networking in a later exercise. The `pod-cidr` instance metadata will be used to expose pod subnet allocations to compute instances at runtime. 120 | 121 | > The Kubernetes cluster CIDR range is defined by the Controller Manager's `--cluster-cidr` flag. In this tutorial the cluster CIDR range will be set to `10.200.0.0/16`, which supports 254 subnets. 122 | 123 | Create three compute instances which will host the Kubernetes worker nodes: 124 | 125 | ``` 126 | for i in 0 1 2; do 127 | gcloud compute instances create worker-${i} \ 128 | --async \ 129 | --boot-disk-size 200GB \ 130 | --can-ip-forward \ 131 | --image-family ubuntu-1804-lts \ 132 | --image-project ubuntu-os-cloud \ 133 | --machine-type n1-standard-1 \ 134 | --metadata pod-cidr=10.200.${i}.0/24 \ 135 | --private-network-ip 10.240.0.2${i} \ 136 | --scopes compute-rw,storage-ro,service-management,service-control,logging-write,monitoring \ 137 | --subnet kubernetes \ 138 | --tags kubernetes-the-hard-way,worker 139 | done 140 | ``` 141 | 142 | ### Verification 143 | 144 | List the compute instances in your default compute zone: 145 | 146 | ``` 147 | gcloud compute instances list 148 | ``` 149 | 150 | > output 151 | 152 | ``` 153 | NAME ZONE MACHINE_TYPE PREEMPTIBLE INTERNAL_IP EXTERNAL_IP STATUS 154 | controller-0 us-west1-c n1-standard-1 10.240.0.10 XX.XXX.XXX.XXX RUNNING 155 | controller-1 us-west1-c n1-standard-1 10.240.0.11 XX.XXX.X.XX RUNNING 156 | controller-2 us-west1-c n1-standard-1 10.240.0.12 XX.XXX.XXX.XX RUNNING 157 | worker-0 us-west1-c n1-standard-1 10.240.0.20 XXX.XXX.XXX.XX RUNNING 158 | worker-1 us-west1-c n1-standard-1 10.240.0.21 XX.XXX.XX.XXX RUNNING 159 | worker-2 us-west1-c n1-standard-1 10.240.0.22 XXX.XXX.XX.XX RUNNING 160 | ``` 161 | 162 | ## Configuring SSH Access 163 | 164 | SSH will be used to configure the controller and worker instances. When connecting to compute instances for the first time SSH keys will be generated for you and stored in the project or instance metadata as describe in the [connecting to instances](https://cloud.google.com/compute/docs/instances/connecting-to-instance) documentation. 165 | 166 | Test SSH access to the `controller-0` compute instances: 167 | 168 | ``` 169 | gcloud compute ssh controller-0 170 | ``` 171 | 172 | If this is your first time connecting to a compute instance SSH keys will be generated for you. Enter a passphrase at the prompt to continue: 173 | 174 | ``` 175 | WARNING: The public SSH key file for gcloud does not exist. 176 | WARNING: The private SSH key file for gcloud does not exist. 177 | WARNING: You do not have an SSH key for gcloud. 178 | WARNING: SSH keygen will be executed to generate a key. 
179 | Generating public/private rsa key pair. 180 | Enter passphrase (empty for no passphrase): 181 | Enter same passphrase again: 182 | ``` 183 | 184 | At this point the generated SSH keys will be uploaded and stored in your project: 185 | 186 | ``` 187 | Your identification has been saved in /home/$USER/.ssh/google_compute_engine. 188 | Your public key has been saved in /home/$USER/.ssh/google_compute_engine.pub. 189 | The key fingerprint is: 190 | SHA256:nz1i8jHmgQuGt+WscqP5SeIaSy5wyIJeL71MuV+QruE $USER@$HOSTNAME 191 | The key's randomart image is: 192 | +---[RSA 2048]----+ 193 | | | 194 | | | 195 | | | 196 | | . | 197 | |o. oS | 198 | |=... .o .o o | 199 | |+.+ =+=.+.X o | 200 | |.+ ==O*B.B = . | 201 | | .+.=EB++ o | 202 | +----[SHA256]-----+ 203 | Updating project ssh metadata...-Updated [https://www.googleapis.com/compute/v1/projects/$PROJECT_ID]. 204 | Updating project ssh metadata...done. 205 | Waiting for SSH key to propagate. 206 | ``` 207 | 208 | After the SSH keys have been updated you'll be logged into the `controller-0` instance: 209 | 210 | ``` 211 | Welcome to Ubuntu 18.04 LTS (GNU/Linux 4.15.0-1006-gcp x86_64) 212 | 213 | ... 214 | 215 | Last login: Sun May 13 14:34:27 2018 from XX.XXX.XXX.XX 216 | ``` 217 | 218 | Type `exit` at the prompt to exit the `controller-0` compute instance: 219 | 220 | ``` 221 | $USER@controller-0:~$ exit 222 | ``` 223 | > output 224 | 225 | ``` 226 | logout 227 | Connection to XX.XXX.XXX.XXX closed 228 | ``` 229 | 230 | Next: [Provisioning a CA and Generating TLS Certificates](04-certificate-authority.md) 231 | -------------------------------------------------------------------------------- /docs/09-bootstrapping-kubernetes-workers.md: -------------------------------------------------------------------------------- 1 | # Bootstrapping the Kubernetes Worker Nodes 2 | 3 | In this lab you will bootstrap three Kubernetes worker nodes. The following components will be installed on each node: [runc](https://github.com/opencontainers/runc), [gVisor](https://github.com/google/gvisor), [container networking plugins](https://github.com/containernetworking/cni), [containerd](https://github.com/containerd/containerd), [kubelet](https://kubernetes.io/docs/admin/kubelet), and [kube-proxy](https://kubernetes.io/docs/concepts/cluster-administration/proxies). 4 | 5 | ## Prerequisites 6 | 7 | The commands in this lab must be run on each worker instance: `worker-0`, `worker-1`, and `worker-2`. Login to each worker instance using the `gcloud` command. Example: 8 | 9 | ``` 10 | gcloud compute ssh worker-0 11 | ``` 12 | 13 | ### Running commands in parallel with tmux 14 | 15 | [tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab. 16 | 17 | ## Provisioning a Kubernetes Worker Node 18 | 19 | Install the OS dependencies: 20 | 21 | ``` 22 | { 23 | sudo apt-get update 24 | sudo apt-get -y install socat conntrack ipset 25 | } 26 | ``` 27 | 28 | > The socat binary enables support for the `kubectl port-forward` command. 
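As an optional sanity check (not part of the original lab), verify the packages installed correctly and that swap is disabled — the kubelet refuses to start when swap is enabled in its default configuration:

```
{
  socat -V | head -n 1
  conntrack --version
  ipset --version
  sudo swapon --show   # no output means swap is off
}
```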
29 | 30 | ### Download and Install Worker Binaries 31 | 32 | ``` 33 | wget -q --show-progress --https-only --timestamping \ 34 | https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.12.0/crictl-v1.12.0-linux-amd64.tar.gz \ 35 | https://storage.googleapis.com/kubernetes-the-hard-way/runsc-50c283b9f56bb7200938d9e207355f05f79f0d17 \ 36 | https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/runc.amd64 \ 37 | https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz \ 38 | https://github.com/containerd/containerd/releases/download/v1.2.0-rc.0/containerd-1.2.0-rc.0.linux-amd64.tar.gz \ 39 | https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl \ 40 | https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-proxy \ 41 | https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubelet 42 | ``` 43 | 44 | Create the installation directories: 45 | 46 | ``` 47 | sudo mkdir -p \ 48 | /etc/cni/net.d \ 49 | /opt/cni/bin \ 50 | /var/lib/kubelet \ 51 | /var/lib/kube-proxy \ 52 | /var/lib/kubernetes \ 53 | /var/run/kubernetes 54 | ``` 55 | 56 | Install the worker binaries: 57 | 58 | ``` 59 | { 60 | sudo mv runsc-50c283b9f56bb7200938d9e207355f05f79f0d17 runsc 61 | sudo mv runc.amd64 runc 62 | chmod +x kubectl kube-proxy kubelet runc runsc 63 | sudo mv kubectl kube-proxy kubelet runc runsc /usr/local/bin/ 64 | sudo tar -xvf crictl-v1.12.0-linux-amd64.tar.gz -C /usr/local/bin/ 65 | sudo tar -xvf cni-plugins-amd64-v0.6.0.tgz -C /opt/cni/bin/ 66 | sudo tar -xvf containerd-1.2.0-rc.0.linux-amd64.tar.gz -C / 67 | } 68 | ``` 69 | 70 | ### Configure CNI Networking 71 | 72 | Retrieve the Pod CIDR range for the current compute instance: 73 | 74 | ``` 75 | POD_CIDR=$(curl -s -H "Metadata-Flavor: Google" \ 76 | http://metadata.google.internal/computeMetadata/v1/instance/attributes/pod-cidr) 77 | ``` 78 | 79 | Create the `bridge` network configuration file: 80 | 81 | ``` 82 | cat < Untrusted workloads will be run using the gVisor (runsc) runtime. 141 | 142 | Create the `containerd.service` systemd unit file: 143 | 144 | ``` 145 | cat < The `resolvConf` configuration is used to avoid loops when using CoreDNS for service discovery on systems running `systemd-resolved`. 205 | 206 | Create the `kubelet.service` systemd unit file: 207 | 208 | ``` 209 | cat < Remember to run the above commands on each worker node: `worker-0`, `worker-1`, and `worker-2`. 283 | 284 | ## Verification 285 | 286 | > The compute instances created in this tutorial will not have permission to complete this section. Run the following commands from the same machine used to create the compute instances. 
287 | 288 | List the registered Kubernetes nodes: 289 | 290 | ``` 291 | gcloud compute ssh controller-0 \ 292 | --command "kubectl get nodes --kubeconfig admin.kubeconfig" 293 | ``` 294 | 295 | > output 296 | 297 | ``` 298 | NAME STATUS ROLES AGE VERSION 299 | worker-0 Ready 35s v1.12.0 300 | worker-1 Ready 36s v1.12.0 301 | worker-2 Ready 36s v1.12.0 302 | ``` 303 | 304 | Next: [Configuring kubectl for Remote Access](10-configuring-kubectl.md) 305 | -------------------------------------------------------------------------------- /docs/04-certificate-authority.md: -------------------------------------------------------------------------------- 1 | # Provisioning a CA and Generating TLS Certificates 2 | 3 | In this lab you will provision a [PKI Infrastructure](https://en.wikipedia.org/wiki/Public_key_infrastructure) using CloudFlare's PKI toolkit, [cfssl](https://github.com/cloudflare/cfssl), then use it to bootstrap a Certificate Authority, and generate TLS certificates for the following components: etcd, kube-apiserver, kube-controller-manager, kube-scheduler, kubelet, and kube-proxy. 4 | 5 | ## Certificate Authority 6 | 7 | In this section you will provision a Certificate Authority that can be used to generate additional TLS certificates. 8 | 9 | Generate the CA configuration file, certificate, and private key: 10 | 11 | ``` 12 | { 13 | 14 | cat > ca-config.json < ca-csr.json < admin-csr.json <`. In this section you will create a certificate for each Kubernetes worker node that meets the Node Authorizer requirements. 111 | 112 | Generate a certificate and private key for each Kubernetes worker node: 113 | 114 | ``` 115 | for instance in worker-0 worker-1 worker-2; do 116 | cat > ${instance}-csr.json < kube-controller-manager-csr.json < kube-proxy-csr.json < kube-scheduler-csr.json < kubernetes-csr.json < service-account-csr.json < The `kube-proxy`, `kube-controller-manager`, `kube-scheduler`, and `kubelet` client certificates will be used to generate client authentication configuration files in the next lab. 409 | 410 | Next: [Generating Kubernetes Configuration Files for Authentication](05-kubernetes-configuration-files.md) 411 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 
26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /docs/13-smoke-test.md: -------------------------------------------------------------------------------- 1 | # Smoke Test 2 | 3 | In this lab you will complete a series of tasks to ensure your Kubernetes cluster is functioning correctly. 4 | 5 | ## Data Encryption 6 | 7 | In this section you will verify the ability to [encrypt secret data at rest](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#verifying-that-data-is-encrypted). 8 | 9 | Create a generic secret: 10 | 11 | ``` 12 | kubectl create secret generic kubernetes-the-hard-way \ 13 | --from-literal="mykey=mydata" 14 | ``` 15 | 16 | Print a hexdump of the `kubernetes-the-hard-way` secret stored in etcd: 17 | 18 | ``` 19 | gcloud compute ssh controller-0 \ 20 | --command "sudo ETCDCTL_API=3 etcdctl get \ 21 | --endpoints=https://127.0.0.1:2379 \ 22 | --cacert=/etc/etcd/ca.pem \ 23 | --cert=/etc/etcd/kubernetes.pem \ 24 | --key=/etc/etcd/kubernetes-key.pem\ 25 | /registry/secrets/default/kubernetes-the-hard-way | hexdump -C" 26 | ``` 27 | 28 | > output 29 | 30 | ``` 31 | 00000000 2f 72 65 67 69 73 74 72 79 2f 73 65 63 72 65 74 |/registry/secret| 32 | 00000010 73 2f 64 65 66 61 75 6c 74 2f 6b 75 62 65 72 6e |s/default/kubern| 33 | 00000020 65 74 65 73 2d 74 68 65 2d 68 61 72 64 2d 77 61 |etes-the-hard-wa| 34 | 00000030 79 0a 6b 38 73 3a 65 6e 63 3a 61 65 73 63 62 63 |y.k8s:enc:aescbc| 35 | 00000040 3a 76 31 3a 6b 65 79 31 3a dd 3f 36 6c ce 65 9d |:v1:key1:.?6l.e.| 36 | 00000050 b3 b1 46 1a ba ae a2 1f e4 fa 13 0c 4b 6e 2c 3c |..F.........Kn,<| 37 | 00000060 15 fa 88 56 84 b7 aa c0 7a ca 66 f3 de db 2b a3 |...V....z.f...+.| 38 | 00000070 88 dc b1 b1 d8 2f 16 3e 6b 4a cb ac 88 5d 23 2d |...../.>kJ...]#-| 39 | 00000080 99 62 be 72 9f a5 01 38 15 c4 43 ac 38 5f ef 88 |.b.r...8..C.8_..| 40 | 00000090 3b 88 c1 e6 b6 06 4f ae a8 6b c8 40 70 ac 0a d3 |;.....O..k.@p...| 41 | 000000a0 3e dc 2b b6 0f 01 b6 8b e2 21 29 4d 32 d6 67 a6 |>.+......!)M2.g.| 42 | 000000b0 4e 6d bb 61 0d 85 22 ea f4 d6 2d 0a af 3c 71 85 |Nm.a.."...-.. output 68 | 69 | ``` 70 | NAME READY STATUS RESTARTS AGE 71 | nginx-dbddb74b8-6lxg2 1/1 Running 0 10s 72 | ``` 73 | 74 | ### Port Forwarding 75 | 76 | In this section you will verify the ability to access applications remotely using [port forwarding](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/). 
77 | 78 | Retrieve the full name of the `nginx` pod: 79 | 80 | ``` 81 | POD_NAME=$(kubectl get pods -l run=nginx -o jsonpath="{.items[0].metadata.name}") 82 | ``` 83 | 84 | Forward port `8080` on your local machine to port `80` of the `nginx` pod: 85 | 86 | ``` 87 | kubectl port-forward $POD_NAME 8080:80 88 | ``` 89 | 90 | > output 91 | 92 | ``` 93 | Forwarding from 127.0.0.1:8080 -> 80 94 | Forwarding from [::1]:8080 -> 80 95 | ``` 96 | 97 | In a new terminal make an HTTP request using the forwarding address: 98 | 99 | ``` 100 | curl --head http://127.0.0.1:8080 101 | ``` 102 | 103 | > output 104 | 105 | ``` 106 | HTTP/1.1 200 OK 107 | Server: nginx/1.15.4 108 | Date: Sun, 30 Sep 2018 19:23:10 GMT 109 | Content-Type: text/html 110 | Content-Length: 612 111 | Last-Modified: Tue, 25 Sep 2018 15:04:03 GMT 112 | Connection: keep-alive 113 | ETag: "5baa4e63-264" 114 | Accept-Ranges: bytes 115 | ``` 116 | 117 | Switch back to the previous terminal and stop the port forwarding to the `nginx` pod: 118 | 119 | ``` 120 | Forwarding from 127.0.0.1:8080 -> 80 121 | Forwarding from [::1]:8080 -> 80 122 | Handling connection for 8080 123 | ^C 124 | ``` 125 | 126 | ### Logs 127 | 128 | In this section you will verify the ability to [retrieve container logs](https://kubernetes.io/docs/concepts/cluster-administration/logging/). 129 | 130 | Print the `nginx` pod logs: 131 | 132 | ``` 133 | kubectl logs $POD_NAME 134 | ``` 135 | 136 | > output 137 | 138 | ``` 139 | 127.0.0.1 - - [30/Sep/2018:19:23:10 +0000] "HEAD / HTTP/1.1" 200 0 "-" "curl/7.58.0" "-" 140 | ``` 141 | 142 | ### Exec 143 | 144 | In this section you will verify the ability to [execute commands in a container](https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/#running-individual-commands-in-a-container). 145 | 146 | Print the nginx version by executing the `nginx -v` command in the `nginx` container: 147 | 148 | ``` 149 | kubectl exec -ti $POD_NAME -- nginx -v 150 | ``` 151 | 152 | > output 153 | 154 | ``` 155 | nginx version: nginx/1.15.4 156 | ``` 157 | 158 | ## Services 159 | 160 | In this section you will verify the ability to expose applications using a [Service](https://kubernetes.io/docs/concepts/services-networking/service/). 161 | 162 | Expose the `nginx` deployment using a [NodePort](https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport) service: 163 | 164 | ``` 165 | kubectl expose deployment nginx --port 80 --type NodePort 166 | ``` 167 | 168 | > The LoadBalancer service type can not be used because your cluster is not configured with [cloud provider integration](https://kubernetes.io/docs/getting-started-guides/scratch/#cloud-provider). Setting up cloud provider integration is out of scope for this tutorial. 
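Optionally, confirm the service was created with the expected type before looking up the port (not part of the original smoke test); the `PORT(S)` column should read `80:<node-port>/TCP`, with a node port value specific to your cluster:

```
kubectl get svc nginx
```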
169 | 170 | Retrieve the node port assigned to the `nginx` service: 171 | 172 | ``` 173 | NODE_PORT=$(kubectl get svc nginx \ 174 | --output=jsonpath='{range .spec.ports[0]}{.nodePort}') 175 | ``` 176 | 177 | Create a firewall rule that allows remote access to the `nginx` node port: 178 | 179 | ``` 180 | gcloud compute firewall-rules create kubernetes-the-hard-way-allow-nginx-service \ 181 | --allow=tcp:${NODE_PORT} \ 182 | --network kubernetes-the-hard-way 183 | ``` 184 | 185 | Retrieve the external IP address of a worker instance: 186 | 187 | ``` 188 | EXTERNAL_IP=$(gcloud compute instances describe worker-0 \ 189 | --format 'value(networkInterfaces[0].accessConfigs[0].natIP)') 190 | ``` 191 | 192 | Make an HTTP request using the external IP address and the `nginx` node port: 193 | 194 | ``` 195 | curl -I http://${EXTERNAL_IP}:${NODE_PORT} 196 | ``` 197 | 198 | > output 199 | 200 | ``` 201 | HTTP/1.1 200 OK 202 | Server: nginx/1.15.4 203 | Date: Sun, 30 Sep 2018 19:25:40 GMT 204 | Content-Type: text/html 205 | Content-Length: 612 206 | Last-Modified: Tue, 25 Sep 2018 15:04:03 GMT 207 | Connection: keep-alive 208 | ETag: "5baa4e63-264" 209 | Accept-Ranges: bytes 210 | ``` 211 | 212 | ## Untrusted Workloads 213 | 214 | This section will verify the ability to run untrusted workloads using [gVisor](https://github.com/google/gvisor). 215 | 216 | Create the `untrusted` pod: 217 | 218 | ``` 219 | cat < output 307 | 308 | ``` 309 | I0930 19:31:31.419765 21217 x:0] *************************** 310 | I0930 19:31:31.419907 21217 x:0] Args: [runsc --root /run/containerd/runsc/k8s.io ps af7470029008a4520b5db9fb5b358c65d64c9f748fae050afb6eaf014a59fea5] 311 | I0930 19:31:31.419959 21217 x:0] Git Revision: 50c283b9f56bb7200938d9e207355f05f79f0d17 312 | I0930 19:31:31.420000 21217 x:0] PID: 21217 313 | I0930 19:31:31.420041 21217 x:0] UID: 0, GID: 0 314 | I0930 19:31:31.420081 21217 x:0] Configuration: 315 | I0930 19:31:31.420115 21217 x:0] RootDir: /run/containerd/runsc/k8s.io 316 | I0930 19:31:31.420188 21217 x:0] Platform: ptrace 317 | I0930 19:31:31.420266 21217 x:0] FileAccess: exclusive, overlay: false 318 | I0930 19:31:31.420424 21217 x:0] Network: sandbox, logging: false 319 | I0930 19:31:31.420515 21217 x:0] Strace: false, max size: 1024, syscalls: [] 320 | I0930 19:31:31.420676 21217 x:0] *************************** 321 | UID PID PPID C STIME TIME CMD 322 | 0 1 0 0 19:26 10ms app 323 | I0930 19:31:31.422022 21217 x:0] Exiting with status: 0 324 | ``` 325 | 326 | Next: [Cleaning Up](14-cleanup.md) 327 | -------------------------------------------------------------------------------- /docs/08-bootstrapping-kubernetes-controllers.md: -------------------------------------------------------------------------------- 1 | # Bootstrapping the Kubernetes Control Plane 2 | 3 | In this lab you will bootstrap the Kubernetes control plane across three compute instances and configure it for high availability. You will also create an external load balancer that exposes the Kubernetes API Servers to remote clients. The following components will be installed on each node: Kubernetes API Server, Scheduler, and Controller Manager. 4 | 5 | ## Prerequisites 6 | 7 | The commands in this lab must be run on each controller instance: `controller-0`, `controller-1`, and `controller-2`. Login to each controller instance using the `gcloud` command. 
Example: 8 | 9 | ``` 10 | gcloud compute ssh controller-0 11 | ``` 12 | 13 | ### Running commands in parallel with tmux 14 | 15 | [tmux](https://github.com/tmux/tmux/wiki) can be used to run commands on multiple compute instances at the same time. See the [Running commands in parallel with tmux](01-prerequisites.md#running-commands-in-parallel-with-tmux) section in the Prerequisites lab. 16 | 17 | ## Provision the Kubernetes Control Plane 18 | 19 | Create the Kubernetes configuration directory: 20 | 21 | ``` 22 | sudo mkdir -p /etc/kubernetes/config 23 | ``` 24 | 25 | ### Download and Install the Kubernetes Controller Binaries 26 | 27 | Download the official Kubernetes release binaries: 28 | 29 | ``` 30 | wget -q --show-progress --https-only --timestamping \ 31 | "https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-apiserver" \ 32 | "https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-controller-manager" \ 33 | "https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kube-scheduler" \ 34 | "https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubectl" 35 | ``` 36 | 37 | Install the Kubernetes binaries: 38 | 39 | ``` 40 | { 41 | chmod +x kube-apiserver kube-controller-manager kube-scheduler kubectl 42 | sudo mv kube-apiserver kube-controller-manager kube-scheduler kubectl /usr/local/bin/ 43 | } 44 | ``` 45 | 46 | ### Configure the Kubernetes API Server 47 | 48 | ``` 49 | { 50 | sudo mkdir -p /var/lib/kubernetes/ 51 | 52 | sudo mv ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \ 53 | service-account-key.pem service-account.pem \ 54 | encryption-config.yaml /var/lib/kubernetes/ 55 | } 56 | ``` 57 | 58 | The instance internal IP address will be used to advertise the API Server to members of the cluster. Retrieve the internal IP address for the current compute instance: 59 | 60 | ``` 61 | INTERNAL_IP=$(curl -s -H "Metadata-Flavor: Google" \ 62 | http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip) 63 | ``` 64 | 65 | Create the `kube-apiserver.service` systemd unit file: 66 | 67 | ``` 68 | cat < Allow up to 10 seconds for the Kubernetes API Server to fully initialize. 202 | 203 | ### Enable HTTP Health Checks 204 | 205 | A [Google Network Load Balancer](https://cloud.google.com/compute/docs/load-balancing/network) will be used to distribute traffic across the three API servers and allow each API server to terminate TLS connections and validate client certificates. The network load balancer only supports HTTP health checks which means the HTTPS endpoint exposed by the API server cannot be used. As a workaround the nginx webserver can be used to proxy HTTP health checks. In this section nginx will be installed and configured to accept HTTP health checks on port `80` and proxy the connections to the API server on `https://127.0.0.1:6443/healthz`. 206 | 207 | > The `/healthz` API server endpoint does not require authentication by default. 208 | 209 | Install a basic web server to handle HTTP health checks: 210 | 211 | ``` 212 | sudo apt-get install -y nginx 213 | ``` 214 | 215 | ``` 216 | cat > kubernetes.default.svc.cluster.local < Remember to run the above commands on each controller node: `controller-0`, `controller-1`, and `controller-2`. 279 | 280 | ## RBAC for Kubelet Authorization 281 | 282 | In this section you will configure RBAC permissions to allow the Kubernetes API Server to access the Kubelet API on each worker node. 
Access to the Kubelet API is required for retrieving metrics, logs, and executing commands in pods. 283 | 284 | > This tutorial sets the Kubelet `--authorization-mode` flag to `Webhook`. Webhook mode uses the [SubjectAccessReview](https://kubernetes.io/docs/admin/authorization/#checking-api-access) API to determine authorization. 285 | 286 | ``` 287 | gcloud compute ssh controller-0 288 | ``` 289 | 290 | Create the `system:kube-apiserver-to-kubelet` [ClusterRole](https://kubernetes.io/docs/admin/authorization/rbac/#role-and-clusterrole) with permissions to access the Kubelet API and perform most common tasks associated with managing pods: 291 | 292 | ``` 293 | cat < The compute instances created in this tutorial will not have permission to complete this section. Run the following commands from the same machine used to create the compute instances. 343 | 344 | 345 | ### Provision a Network Load Balancer 346 | 347 | Create the external load balancer network resources: 348 | 349 | ``` 350 | { 351 | KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ 352 | --region $(gcloud config get-value compute/region) \ 353 | --format 'value(address)') 354 | 355 | gcloud compute http-health-checks create kubernetes \ 356 | --description "Kubernetes Health Check" \ 357 | --host "kubernetes.default.svc.cluster.local" \ 358 | --request-path "/healthz" 359 | 360 | gcloud compute firewall-rules create kubernetes-the-hard-way-allow-health-check \ 361 | --network kubernetes-the-hard-way \ 362 | --source-ranges 209.85.152.0/22,209.85.204.0/22,35.191.0.0/16 \ 363 | --allow tcp 364 | 365 | gcloud compute target-pools create kubernetes-target-pool \ 366 | --http-health-check kubernetes 367 | 368 | gcloud compute target-pools add-instances kubernetes-target-pool \ 369 | --instances controller-0,controller-1,controller-2 370 | 371 | gcloud compute forwarding-rules create kubernetes-forwarding-rule \ 372 | --address ${KUBERNETES_PUBLIC_ADDRESS} \ 373 | --ports 6443 \ 374 | --region $(gcloud config get-value compute/region) \ 375 | --target-pool kubernetes-target-pool 376 | } 377 | ``` 378 | 379 | ### Verification 380 | 381 | Retrieve the `kubernetes-the-hard-way` static IP address: 382 | 383 | ``` 384 | KUBERNETES_PUBLIC_ADDRESS=$(gcloud compute addresses describe kubernetes-the-hard-way \ 385 | --region $(gcloud config get-value compute/region) \ 386 | --format 'value(address)') 387 | ``` 388 | 389 | Make a HTTP request for the Kubernetes version info: 390 | 391 | ``` 392 | curl --cacert ca.pem https://${KUBERNETES_PUBLIC_ADDRESS}:6443/version 393 | ``` 394 | 395 | > output 396 | 397 | ``` 398 | { 399 | "major": "1", 400 | "minor": "12", 401 | "gitVersion": "v1.12.0", 402 | "gitCommit": "0ed33881dc4355495f623c6f22e7dd0b7632b7c0", 403 | "gitTreeState": "clean", 404 | "buildDate": "2018-09-27T16:55:41Z", 405 | "goVersion": "go1.10.4", 406 | "compiler": "gc", 407 | "platform": "linux/amd64" 408 | } 409 | ``` 410 | 411 | Next: [Bootstrapping the Kubernetes Worker Nodes](09-bootstrapping-kubernetes-workers.md) 412 | --------------------------------------------------------------------------------