├── .bashrc ├── .devcontainer ├── Dockerfile └── devcontainer.json ├── Dockerfile ├── LICENSE ├── README.md ├── WIP └── storage-replication-with-rook │ └── code.md ├── advance-application-routing-with-istio ├── code.md └── script.sh ├── advance-deploy-kubernetes-on-azure ├── code.md └── install.sh ├── deploying-a-public-chart └── code.md ├── deploying-kubernetes-on-azure └── code.md ├── deploying-supergloo └── code.md ├── hack └── hey │ └── Dockerfile ├── ingress-controller ├── code.md └── deploy.sh ├── ingress-with-traefik └── code.md ├── installing-helm-on-kubernetes └── code.md ├── istio-with-supergloo └── code.md ├── keda └── code.md ├── mTLS-with-istio └── code.md ├── network-policies └── code.md ├── pod-security-policy └── code.md ├── pods-services-deployments └── code.md ├── rbac-roles-service-accounts ├── cleanup.sh ├── code.md └── script.sh ├── service-mesh-with-linkerd └── code.md ├── slides ├── ingress-controller │ ├── Slide1.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ └── Slide5.jpg ├── intro │ ├── Slide1.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ ├── Slide5.jpg │ ├── Slide6.jpg │ └── code.md ├── introduction-to-kubernetes │ ├── Slide1.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ └── code.md ├── kubernetes-components │ ├── Slide1.jpg │ ├── Slide10.jpg │ ├── Slide11.jpg │ ├── Slide12.jpg │ ├── Slide13.jpg │ ├── Slide14.jpg │ ├── Slide15.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ ├── Slide5.jpg │ ├── Slide6.jpg │ ├── Slide7.jpg │ ├── Slide8.jpg │ ├── Slide9.jpg │ └── code.md ├── pods-services-deployments │ ├── Slide1.jpg │ ├── Slide10.jpg │ ├── Slide11.jpg │ ├── Slide12.jpg │ ├── Slide13.jpg │ ├── Slide14.jpg │ ├── Slide15.jpg │ ├── Slide16.jpg │ ├── Slide17.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ ├── Slide5.jpg │ ├── Slide6.jpg │ ├── Slide7.jpg │ ├── Slide8.jpg │ └── Slide9.jpg ├── rbac-roles-service-accounts │ ├── Slide1.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ ├── Slide5.jpg │ ├── Slide6.jpg │ └── Slide7.jpg └── statefull-sets │ ├── Slide1.jpg │ ├── Slide2.jpg │ ├── Slide3.jpg │ ├── Slide4.jpg │ └── Slide5.jpg ├── statefull-sets └── code.md ├── virtual-kubelet └── code.md ├── virtual-node-with-virtual-kubelet └── code.md └── writing-our-own-chart └── code.md /.bashrc: -------------------------------------------------------------------------------- 1 | # Use bash-completion 2 | [[ -f /usr/share/bash-completion/bash_completion ]] && \ 3 | . /usr/share/bash-completion/bash_completion 4 | # Source kubectl 5 | [[ -f /usr/local/bin/kubectl ]] && \ 6 | source <(kubectl completion bash) 7 | # Source azure-cli 8 | [[ -f /usr/bin/az.completion.sh ]] && \ 9 | source /usr/bin/az.completion.sh 10 | # Change Prompt 11 | export PS1="\w \$ " 12 | 13 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | #------------------------------------------------------------------------------------------------------------- 3 | # Copyright (c) Microsoft Corporation. All rights reserved. 4 | # Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 5 | #------------------------------------------------------------------------------------------------------------- 6 | 7 | # Note: You can use any Debian/Ubuntu based image you want. 
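# debian:9 (stretch) has since reached end of life; a newer tag such as debian:11
# is assumed to work just as well here, since nothing below is stretch-specific.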
8 | FROM debian:9 9 | 10 | # Avoid warnings by switching to noninteractive 11 | ENV DEBIAN_FRONTEND=noninteractive 12 | 13 | # Docker Compose version 14 | ARG COMPOSE_VERSION=1.24.0 15 | 16 | # Helm version 17 | ENV HELM_VER 2.12.3 18 | 19 | # This Dockerfile adds a non-root user with sudo access. Use the "remoteUser" 20 | # property in devcontainer.json to use it. On Linux, the container user's GID/UIDs 21 | # will be updated to match your local UID/GID (when using the dockerFile property). 22 | # See https://aka.ms/vscode-remote/containers/non-root-user for details. 23 | ARG USERNAME=vscode 24 | ARG USER_UID=1000 25 | ARG USER_GID=$USER_UID 26 | 27 | # Configure apt and install packages 28 | RUN apt-get update \ 29 | && apt-get -y install --no-install-recommends apt-utils dialog 2>&1 \ 30 | # 31 | # Verify git, process tools installed 32 | && apt-get -y install curl git jq iproute2 procps \ 33 | # 34 | # Install Docker CE CLI 35 | && apt-get install -y apt-transport-https ca-certificates curl gnupg-agent software-properties-common lsb-release \ 36 | && curl -fsSL https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]')/gpg | (OUT=$(apt-key add - 2>&1) || echo $OUT) \ 37 | && add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/$(lsb_release -is | tr '[:upper:]' '[:lower:]') $(lsb_release -cs) stable" \ 38 | && apt-get update \ 39 | && apt-get install -y docker-ce-cli \ 40 | # 41 | # Install Docker Compose 42 | && curl -sSL "https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose \ 43 | && chmod +x /usr/local/bin/docker-compose \ 44 | # 45 | # Install Helm 46 | && curl -o helm.tgz https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VER}-linux-amd64.tar.gz \ 47 | && tar -xzf helm.tgz \ 48 | && mv linux-amd64/helm /usr/local/bin \ 49 | && rm helm.tgz \ 50 | && helm init --client-only \ 51 | # 52 | # Install kubectl 53 | && curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl \ 54 | && chmod +x ./kubectl \ 55 | && mv ./kubectl /usr/local/bin/kubectl \ 56 | # Install az cli 57 | && curl -sL https://aka.ms/InstallAzureCLIDeb | bash \ 58 | # install kubectx 59 | && git clone https://github.com/ahmetb/kubectx.git ~/.kubectx \ 60 | && ln -sf ~/.kubectx/kubectx /usr/local/bin/kubectx \ 61 | # Create a non-root user to use if preferred - see https://aka.ms/vscode-remote/containers/non-root-user. 
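# The default UID/GID of 1000 set via the ARGs above matches the first local user
# on most Linux distributions, which keeps bind-mounted file ownership consistent.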
62 | && groupadd --gid $USER_GID $USERNAME \ 63 | && useradd -s /bin/bash --uid $USER_UID --gid $USER_GID -m $USERNAME \ 64 | # [Optional] Add sudo support for the non-root user 65 | && apt-get install -y sudo \ 66 | && echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME\ 67 | && chmod 0440 /etc/sudoers.d/$USERNAME \ 68 | # 69 | # Clean up 70 | && apt-get autoremove -y \ 71 | && apt-get clean -y \ 72 | && rm -rf /var/lib/apt/lists/* 73 | 74 | # Switch back to dialog for any ad-hoc use of apt-get 75 | ENV DEBIAN_FRONTEND=dialog 76 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/vscode-remote/devcontainer.json or the definition README at 2 | // https://github.com/microsoft/vscode-dev-containers/tree/master/containers/docker-in-docker 3 | { 4 | "name": "Docker in Docker", 5 | "dockerFile": "Dockerfile", 6 | "mounts": [ "source=/var/run/docker.sock,target=/var/run/docker.sock,type=bind" ], 7 | 8 | // Use 'settings' to set *default* container specific settings.json values on container create. 9 | // You can edit these settings after create using File > Preferences > Settings > Remote. 10 | "settings": { 11 | "terminal.integrated.shell.linux": "/bin/bash" 12 | }, 13 | 14 | // Use 'appPort' to create a container with published ports. If the port isn't working, be sure 15 | // your server accepts connections from all interfaces (0.0.0.0 or '*'), not just localhost. 16 | // "appPort": [], 17 | 18 | // Uncomment the next line to run commands after the container is created. 19 | // "postCreateCommand": "docker --version", 20 | 21 | // Uncomment the next line if you will use a ptrace-based debugger like C++, Go, and Rust 22 | // "runArgs": [ "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined" ], 23 | 24 | // Uncomment the next line to have VS Code connect as an existing non-root user in the container. 25 | // On Linux, by default, the container user's UID/GID will be updated to match your local user. See 26 | // https://aka.ms/vscode-remote/containers/non-root for details on adding a non-root user if none exist. 27 | // "remoteUser": "vscode", 28 | 29 | // Add the IDs of extensions you want installed when the container is created in the array below. 
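// If you also want YAML and kubectl tooling for the manifests in this repo,
// "ms-kubernetes-tools.vscode-kubernetes-tools" is a useful optional addition.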
30 | "extensions": [ 31 | "ms-azuretools.vscode-docker" 32 | ] 33 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker:dind 2 | 3 | RUN apk add bash \ 4 | git \ 5 | tmux \ 6 | curl \ 7 | bash-completion \ 8 | jq \ 9 | python \ 10 | py-pip \ 11 | gcc \ 12 | libffi-dev \ 13 | musl-dev \ 14 | openssl \ 15 | openssl-dev \ 16 | python-dev \ 17 | make \ 18 | coreutils \ 19 | ca-certificates && \ 20 | curl -o kubectl https://storage.googleapis.com/kubernetes-release/release/v1.15.7/bin/linux/amd64/kubectl && \ 21 | mv kubectl /usr/local/bin && \ 22 | chmod a+x /usr/local/bin/kubectl && \ 23 | curl -o helm.tgz https://get.helm.sh/helm-v3.0.2-linux-amd64.tar.gz && \ 24 | tar -xzf helm.tgz && \ 25 | mv linux-amd64/helm /usr/local/bin && \ 26 | rm helm.tgz && \ 27 | #helm init --client-only && \ 28 | curl https://deislabs.blob.core.windows.net/porter/latest/install-linux.sh | bash && \ 29 | pip --no-cache-dir install -U pip && \ 30 | pip --no-cache-dir install azure-cli && \ 31 | curl -L -o /usr/local/bin/kubectx https://raw.githubusercontent.com/ahmetb/kubectx/v0.6.3/kubectx && \ 32 | chmod +x /usr/local/bin/kubectx && \ 33 | curl -sL https://run.linkerd.io/install | sh && \ 34 | curl -sL https://run.solo.io/supergloo/install | sh && \ 35 | mkdir -p /workshop 36 | 37 | ENV PATH="$PATH:/root/.porter" 38 | ENV PATH="$PATH:/root/.linkerd2/bin" 39 | ENV PATH="$PATH:/root/.supergloo/bin" 40 | 41 | ADD .bashrc /root/.bashrc 42 | 43 | WORKDIR /workshop 44 | 45 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Scott Coulton 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes on Azure 2 | 3 | This repo contains multiple workshops for Kubernetes on Azure and the supporting code examples. 4 | Note: this repo is under a refresh, so some of the new modules don't have slides yet.
5 | 6 | ## Prerequisites 7 | 8 | ### Prior knowledge 9 | To get the most out of these workshops you will need the following prior knowledge: 10 | 11 | * A basic understanding of Linux 12 | * The ability to read bash scripts 13 | * A basic knowledge of what a container is 14 | 15 | ### Equipment 16 | To be able to run the labs in the workshops you will need the following: 17 | 18 | * An Azure account that has access to create service principals 19 | 20 | If you don't have an Azure account and want to run the workshops, you can sign up for an [Azure trial](https://azure.microsoft.com/offers/ms-azr-0044p/?WT.mc_id=opensource-0000-sccoulto) that will give you free credit to complete the workshop. 21 | 22 | ### Installed software 23 | There are a few packages we will need to run the labs, so install the following: 24 | 25 | * [Docker](https://www.docker.com/) 26 | 27 | There is a pre-built Docker image with all the software that you need. 28 | 29 | ## How to use the workshops 30 | 31 | Pull and run the Docker image 32 | ``` 33 | docker run -d --privileged --name workshop scottyc/workshop && docker exec -it workshop sh 34 | ``` 35 | If you want to keep the data from the workshop persistent, you can use the following 36 | ``` 37 | docker run -d --privileged -v {SOME_DIR}:/workshop --name workshop scottyc/workshop && docker exec -it workshop sh 38 | ``` 39 | git clone the workshop 40 | ```git clone https://github.com/scotty-c/kubernetes-on-azure-workshop.git``` 41 | Now, from inside the container's shell, log in to the az cli with `az login` and follow the prompts. 42 | 43 | Each folder is named after a corresponding module in the workshop. Inside that folder are all the code examples for that module. 44 | 45 | Alternatively, if you are running VS Code and have the [remote container extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers%2F%3FWT.mc_id%3Daksworkshop-github-sccoulto&WT.mc_id=opensource-0000-sccoulto) you can just open up a folder in the remote container. 46 | 47 | ## Workshops 48 | 49 | At present there are four workshops in this repo. Each is designed for a different length of time, depending on how long your slot is for giving the workshop. 50 | 51 | ### Kubernetes on Azure full workshop 52 | This is the full workshop that covers Kubernetes 101, Helm, virtual kubelet and some basic istio topics.
Below is the full list of topics: 53 | 54 | Kubernetes 101 55 | 56 | * [Agenda](slides/intro/code.md) 57 | * [Introduction to Kubernetes](slides/introduction-to-kubernetes/code.md) 58 | * [Kubernetes components](slides/kubernetes-components/code.md) 59 | * [Deploying Kubernetes on Azure](deploying-kubernetes-on-azure/code.md) 60 | * [Pods, services and deployments](pods-services-deployments/code.md) 61 | * [RBAC, roles and service accounts](rbac-roles-service-accounts/code.md) 62 | * [Stateful sets](statefull-sets/code.md) 63 | * Kubernetes networking and service discovery 64 | * [Load balancing and ingress control](ingress-controller/code.md) 65 | 66 | Helm 67 | 68 | * Introduction to Helm 69 | * Understanding charts 70 | * [Deploying Helm on Kubernetes](installing-helm-on-kubernetes/code.md) 71 | * Helm cli 72 | * [Deploying a public chart](deploying-a-public-chart/code.md) 73 | * [Writing our own chart](writing-our-own-chart/code.md) 74 | * Helm and CNAB 75 | 76 | Kubernetes advanced topics 77 | 78 | * [Virtual kubelet](virtual-node-with-virtual-kubelet/code.md) 79 | * [Pod security context](pod-security-policy/code.md) 80 | * Introduction to istio 81 | * [Advanced application routing with istio](advance-application-routing-with-istio/code.md) 82 | * [Setting mTLS between application services with istio](mTLS-with-istio/code.md) 83 | 84 | This workshop takes about 6hrs to give as an instructor-led workshop. 85 | 86 | 87 | ### Kubernetes 101 88 | This workshop is the entry level into Azure Kubernetes Service. In the workshop we will cover the topics listed above in the Kubernetes 101 section. This will take approximately 2hrs for an instructor to give. 89 | 90 | ### Kubernetes and Helm 91 | This workshop covers the Kubernetes 101 workshop and adds all the Helm modules listed above. 92 | The slides can be found [here](slides/kubernetes-helm/Kubernetes-helm.pdf). 93 | This workshop will take about 4hrs to complete. 94 | 95 | ### Advanced Kubernetes 96 | This workshop combines the Kubernetes 101 modules with the advanced Kubernetes topics. 97 | The slides can be found [here](slides/kubernetes-advanced/Kubernetes-advanced.pdf). 98 | This workshop will take about 4hrs. 99 | 100 | ## Further reading 101 | I have done a few blog posts on topics covered by this workshop for further reading. This list will continue to be updated. 102 | * [Choosing the right container base image](https://dev.to/scottyc/i-cho-cho-chose-you-container-image-part-1-227p) 103 | * [Pod security 101](https://medium.com/devopslinks/kubernetes-pod-security-101-15fe8cda829e) 104 | * [Understanding application routing with istio](https://itnext.io/understanding-application-routing-in-istio-aade30d594f4) 105 | 106 | Is there something not covered in the workshops that you would like to see?
Please raise an issue on this repo and I will do my best to add it in where possible 107 | 108 | -------------------------------------------------------------------------------- /WIP/storage-replication-with-rook/code.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | ## Stop istio injection 4 | `kubectl label namespace default istio-injection-` 5 | 6 | ## Deploy the operator 7 | `kubectl create -f https://raw.githubusercontent.com/rook/rook/release-0.9/cluster/examples/kubernetes/ceph/operator.yaml` 8 | 9 | `kubectl -n rook-ceph-system get pod` 10 | 11 | ## Create a rook cluster 12 | ``` 13 | cat < /dev/null 2>&1 | grep Running 28 | } 29 | 30 | pre_reqs () { 31 | curl -sL "https://github.com/istio/istio/releases/download/$ISTIO_VERSION/istio-$ISTIO_VERSION-$OS.tar.gz" | tar xz 32 | if [ ! -f /usr/local/bin/istioctl ]; then 33 | echo "Installing istioctl binary" 34 | chmod +x ./istio-$ISTIO_VERSION/bin/istioctl 35 | sudo mv ./istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/istioctl 36 | fi 37 | 38 | if [ ! -f /usr/local/bin/helm ]; then 39 | echo "Installing helm binary" 40 | curl -sL "https://storage.googleapis.com/kubernetes-helm/helm-v$HELM_VERSION-$ARCH.tar.gz" | tar xz 41 | chmod +x $ARCH/helm 42 | sudo mv linux-amd64/helm /usr/local/bin/ 43 | fi 44 | } 45 | 46 | install_tiller () { 47 | echo "Checking if tiller is running" 48 | check_tiller 49 | if [ $? -eq 0 ]; then 50 | echo "Tiller is installed and running" 51 | else 52 | echo "Deploying tiller to the cluster" 53 | cat < /dev/null 2>&1 | grep Running 17 | } 18 | 19 | pre_reqs () { 20 | curl -sL "https://github.com/istio/istio/releases/download/$ISTIO_VERSION/istio-$ISTIO_VERSION-$OS.tar.gz" | tar xz 21 | if [ ! -f /usr/local/bin/istioctl ]; then 22 | echo "Installing istioctl binary" 23 | chmod +x ./istio-$ISTIO_VERSION/bin/istioctl 24 | sudo mv ./istio-$ISTIO_VERSION/bin/istioctl /usr/local/bin/istioctl 25 | fi 26 | 27 | if [ ! -f /usr/local/bin/helm ]; then 28 | echo "Installing helm binary" 29 | curl -sL "https://storage.googleapis.com/kubernetes-helm/helm-v$HELM_VERSION-$ARCH.tar.gz" | tar xz 30 | chmod +x $ARCH/helm 31 | sudo mv linux-amd64/helm /usr/local/bin/ 32 | fi 33 | } 34 | 35 | install_tiller () { 36 | echo "Checking if tiller is running" 37 | check_tiller 38 | if [ $? 
-eq 0 ]; then 39 | echo "Tiller is installed and running" 40 | else 41 | echo "Deploying tiller to the cluster" 42 | cat < GET /details/0 HTTP/1.1 116 | > Host: details:9080 117 | > User-Agent: curl/7.47.0 118 | > Accept: */* 119 | > 120 | * Recv failure: Connection reset by peer 121 | * Closing connection 0 122 | curl: (56) Recv failure: Connection reset by peer 123 | ``` 124 | 125 | Now lets use tcp dump 126 | ``` 127 | IP=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1) 128 | echo $IP 129 | sudo tcpdump -vvv -A -i eth0 '((dst port 9080) and (net ))' 130 | ``` 131 | Make sure to replace `` with the output of `echo $IP` 132 | 133 | Now open a new terminal not inside the proxy and hit the public end point 134 | ``` 135 | curl -o /dev/null -s -w "%{http_code}\n" http://$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')/productpage 136 | ``` 137 | 138 | And tcpdump will show you the traffic is encrypted 139 | ``` 140 | tcpdump: listening on eth0, link-type EN10MB (Ethernet), capture size 262144 bytes 141 | 01:44:26.321505 IP (tos 0x0, ttl 62, id 29808, offset 0, flags [DF], proto TCP (6), length 985) 142 | 10-244-2-10.productpage.istio-app.svc.cluster.local.56608 > details-v1-68b96b8855-8gz6d.9080: Flags [P.], cksum 0xcb35 (correct), seq 2023992897:2023993830, ack 3234861546, win 309, options [nop,nop,TS val 921785932 ecr 2584296252], length 933 143 | E...tp@.>... 144 | .. 145 | 146 | .. . #xx..A.......5.5..... 147 | 6.VL. ;<..............Z...n.L..;7c.......D+M+......z![..b......o.O.ST.8...+>.h....8.V..;....n.jB..FB t. 148 | .|.../.2...D.y<....s......y.D:.<..,.Q.6....^S....u.(.x_N..j..R...;M........hf.!..f..Z@...}.;....`.....(.....[....+.b...U.... .....IpB.ZU. O:..A...`..D.:mB..c.I....U.~.. 152 | .....\.B..R.z.BT.......G........G.*w|.B Me...<.7..;......... 153 | 01:44:26.370014 IP (tos 0x0, ttl 62, id 29809, offset 0, flags [DF], proto TCP (6), length 52) 154 | 10-244-2-10.productpage.istio-app.svc.cluster.local.56608 > details-v1-68b96b8855-8gz6d.9080: Flags [.], cksum 0x74ec (correct), seq 933, ack 366, win 329, options [nop,nop,TS val 921785981 ecr 2584854722], length 0 155 | E..4tq@.>..X 156 | .. 157 | 158 | .. . #xx......W...It...... 159 | 6.V}.... 160 | 01:44:51.719390 IP (tos 0x0, ttl 64, id 30975, offset 0, flags [DF], proto TCP (6), length 60) 161 | 10.244.1.3.52284 > details-v1-68b96b8855-8gz6d.9080: Flags [S], cksum 0x1822 (incorrect -> 0x0b29), seq 590050211, win 29200, options [mss 1460,sackOK,TS val 1051389349 ecr 0,nop,wscale 7], length 0 162 | E............. 166 | 01:44:51.719440 IP (tos 0x0, ttl 64, id 30976, offset 0, flags [DF], proto TCP (6), length 52) 167 | 10.244.1.3.52284 > details-v1-68b96b8855-8gz6d.9080: Flags [.], cksum 0x181a (incorrect -> 0xaf81), seq 590050212, ack 3111167789, win 229, options [nop,nop,TS val 1051389349 ecr 2269647784], length 0 168 | E..4y.@.@... 169 | ... 170 | .. .<#x#+s..p.-........... 171 | >....H.. 172 | 01:44:51.720032 IP (tos 0x0, ttl 64, id 30977, offset 0, flags [DF], proto TCP (6), length 76) 173 | 10.244.1.3.52284 > details-v1-68b96b8855-8gz6d.9080: Flags [P.], cksum 0x1832 (incorrect -> 0x6a52), seq 0:24, ack 1, win 229, options [nop,nop,TS val 1051389349 ecr 2269647784], length 24 174 | E..Ly.@.@... 175 | ... 176 | .. .<#x#+s..p.-.....2..... 
177 | >....H..PRI * HTTP/2.0 178 | 179 | SM 180 | 181 | 182 | 01:44:51.720164 IP (tos 0x0, ttl 64, id 30978, offset 0, flags [DF], proto TCP (6), length 61) 183 | 10.244.1.3.52284 > details-v1-68b96b8855-8gz6d.9080: Flags [P.], cksum 0x1823 (incorrect -> 0xaf54), seq 24:33, ack 1, win 229, options [nop,nop,TS val 1051389349 ecr 2269647784], length 9 184 | E..=y.@.@... 185 | ... 186 | .. .<#x#+s..p.-.....#..... 187 | >....H........... 188 | 01:44:51.720191 IP (tos 0x0, ttl 64, id 30979, offset 0, flags [DF], proto TCP (6), length 52) 189 | 10.244.1.3.52284 > details-v1-68b96b8855-8gz6d.9080: Flags [.], cksum 0x181a (incorrect -> 0xaf59), seq 33, ack 8, win 229, options [nop,nop,TS val 1051389349 ecr 2269647784], length 0 190 | E..4y.@.@... 191 | ... 192 | .. .<#x#+s..p.4........... 193 | >....H.. 194 | 01:46:14.077221 IP (tos 0x0, ttl 64, id 26455, offset 0, flags [DF], proto TCP (6), length 60) 195 | 10.244.1.3.52922 > details-v1-68b96b8855-8gz6d.9080: Flags [S], cksum 0x1822 (incorrect -> 0xc5ac), seq 1534606494, win 29200, options [mss 1460,sackOK,TS val 1051471706 ecr 0,nop,wscale 7], length 0 196 | E.../Z........ 200 | 01:46:14.077300 IP (tos 0x0, ttl 64, id 26456, offset 0, flags [DF], proto TCP (6), length 52) 201 | 10.244.1.3.52922 > details-v1-68b96b8855-8gz6d.9080: Flags [.], cksum 0x181a (incorrect -> 0xfc83), seq 1534606495, ack 1446327858, win 229, options [nop,nop,TS val 1051471707 ecr 2269730142], length 0 202 | E..4gX@.@..x 203 | ... 204 | .. ..#x[x<.V562........... 205 | >./[.IU^ 206 | 01:46:14.078164 IP (tos 0x0, ttl 64, id 26457, offset 0, flags [DF], proto TCP (6), length 76) 207 | 10.244.1.3.52922 > details-v1-68b96b8855-8gz6d.9080: Flags [P.], cksum 0x1832 (incorrect -> 0xb754), seq 0:24, ack 1, win 229, options [nop,nop,TS val 1051471707 ecr 2269730142], length 24 208 | E..LgY@.@.._ 209 | ... 210 | .. ..#x[x<.V562.....2..... 211 | >./[.IU^PRI * HTTP/2.0 212 | 213 | SM 214 | 215 | 216 | 01:46:14.078204 IP (tos 0x0, ttl 64, id 26458, offset 0, flags [DF], proto TCP (6), length 61) 217 | 10.244.1.3.52922 > details-v1-68b96b8855-8gz6d.9080: Flags [P.], cksum 0x1823 (incorrect -> 0xfc56), seq 24:33, ack 1, win 229, options [nop,nop,TS val 1051471707 ecr 2269730142], length 9 218 | E..=gZ@.@..m 219 | ... 220 | .. ..#x[x<.V562.....#..... 221 | >./[.IU^......... 222 | 01:46:14.078288 IP (tos 0x0, ttl 64, id 26459, offset 0, flags [DF], proto TCP (6), length 52) 223 | 10.244.1.3.52922 > details-v1-68b96b8855-8gz6d.9080: Flags [.], cksum 0x181a (incorrect -> 0xfc59), seq 33, ack 8, win 229, options [nop,nop,TS val 1051471708 ecr 2269730143], length 0 224 | E..4g[@.@..u 225 | ... 226 | .. ..#x[x<.V569........... 227 | >./\.IU_ 228 | 229 | ^C 230 | 12 packets captured 231 | 12 packets received by filter 232 | 0 packets dropped by kernel 233 | ``` 234 | 235 | ## Clean up 236 | Now let's cleanup our environment. 
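If the tcpdump session from the previous step is still open, stop it with `Ctrl+C` and exit that shell before running the commands below.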
237 | 238 | Delete our test app 239 | ``` 240 | kubectl --namespace istio-app delete --filename \ 241 | https://raw.githubusercontent.com/solo-io/supergloo/master/test/e2e/files/bookinfo.yaml 242 | ``` 243 | 244 | and delete istio 245 | ``` 246 | supergloo uninstall --name istio 247 | ``` -------------------------------------------------------------------------------- /keda/code.md: -------------------------------------------------------------------------------- 1 | # Keda 2 | 3 | ## Installing Keda 4 | 5 | #### Add Helm repo 6 | ``` 7 | helm repo add kedacore https://kedacore.azureedge.net/helm 8 | ``` 9 | 10 | #### Update Helm repo 11 | ``` 12 | helm repo update 13 | ``` 14 | 15 | #### Install keda-edge chart 16 | ``` 17 | helm install kedacore/keda-edge --devel --set logLevel=debug --namespace keda --name keda 18 | ``` 19 | 20 | #### Install rabbitmq 21 | ``` 22 | helm install --name rabbitmq --set rabbitmq.username=user,rabbitmq.password=PASSWORD stable/rabbitmq 23 | ``` 24 | 25 | #### Install consumer 26 | ``` 27 | cat <` (Arrow keys) 92 | 93 | In one window enter `kubectl get hpa -w` to watch the horizontal pod autoscaler 94 | In the other enter `kubectl get pods -o wide` to make sure our pods are being scheduled on virtual node. 95 | 96 | now switch back to our first window with `ctrl+b n` 97 | 98 | #### Install the publisher 99 | ``` 100 | cat < sh` 5 | `cd static` 6 | `vim index.html` 7 | `https://media.giphy.com/media/DBfYJqH5AokgM/giphy.gif` 8 | 9 | ## run as user 1000 10 | 11 | ``` 12 | cat < ca.crt` 114 | 115 | Then get the user token from our secret 116 | `USER_TOKEN=$(kubectl get secret --namespace webapp-namespace "${SECRET_NAME}" -o json | jq -r '.data["token"]' | base64 --decode)` 117 | 118 | Now will will setup our kubeconfig file 119 | ``` 120 | SERVICE_ACCOUNT_NAME="webapp-service-account" 121 | NAMESPACE="webapp-namespace" 122 | KUBECFG_FILE_NAME="admin.conf" 123 | 124 | context=$(kubectl config current-context) 125 | CLUSTER_NAME=$(kubectl config get-contexts "$context" | awk '{print $3}' | tail -n 1) 126 | ENDPOINT=$(kubectl config view -o jsonpath="{.clusters[?(@.name == \"${CLUSTER_NAME}\")].cluster.server}") 127 | kubectl config set-cluster "${CLUSTER_NAME}" --kubeconfig=admin.conf --server="${ENDPOINT}" --certificate-authority=ca.crt --embed-certs=true 128 | kubectl config set-credentials "webapp-service-account-webapp-namespace-${CLUSTER_NAME}" --kubeconfig=admin.conf --token="${USER_TOKEN}" 129 | kubectl config set-context "webapp-service-account-webapp-namespace-${CLUSTER_NAME}" --kubeconfig=admin.conf --cluster="${CLUSTER_NAME}" --user="webapp-service-account-webapp-namespace-${CLUSTER_NAME}" --namespace webapp-namespace 130 | kubectl config use-context "webapp-service-account-webapp-namespace-${CLUSTER_NAME}" --kubeconfig="${KUBECFG_FILE_NAME}" 131 | ``` 132 | Please use the script [here](script.sh) 133 | 134 | We will then load the file in our terminal 135 | `export KUBECONFIG=admin.conf` 136 | 137 | Now let's check our permissions by seeing if we can list pods in the default namespace 138 | `kubectl get pods` 139 | 140 | Now let's check our namespace 141 | `kubectl get pods --namespace=webapp-namespace` 142 | 143 | (Check [here](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#referring-to-subjects) for more info about rbac subjects) 144 | 145 | Now we have limited the blast radius of our application to only the namespace that it resides in. 
146 | So there will be no way that we can leak configmaps or secrets from other applications that are not in this namespace. 147 | 148 | Lastly, and this is super IMPORTANT!!!!, run the cleanup script before you move to the next module 149 | `./cleanup.sh` 150 | 151 | Now we move on to the next module [here](../statefull-sets/code.md) -------------------------------------------------------------------------------- /rbac-roles-service-accounts/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | SERVICE_ACCOUNT_NAME="webapp-service-account" 6 | NAMESPACE="webapp-namespace" 7 | KUBECFG_FILE_NAME="admin.conf" 8 | 9 | SECRET_NAME=$(kubectl get sa "${SERVICE_ACCOUNT_NAME}" --namespace="${NAMESPACE}" -o json | jq -r .secrets[].name) 10 | kubectl get secret --namespace="${NAMESPACE}" "${SECRET_NAME}" -o json | jq -r '.data["ca.crt"]' | base64 -d > ca.crt 11 | USER_TOKEN=$(kubectl get secret --namespace webapp-namespace "${SECRET_NAME}" -o json | jq -r '.data["token"]' | base64 -d) 12 | context=$(kubectl config current-context) 13 | CLUSTER_NAME=$(kubectl config get-contexts "$context" | awk '{print $3}' | tail -n 1) 14 | ENDPOINT=$(kubectl config view -o jsonpath="{.clusters[?(@.name == \"${CLUSTER_NAME}\")].cluster.server}") 15 | kubectl config set-cluster "${CLUSTER_NAME}" --kubeconfig="${KUBECFG_FILE_NAME}" --server="${ENDPOINT}" --certificate-authority=ca.crt --embed-certs=true 16 | kubectl config set-credentials "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" --kubeconfig="${KUBECFG_FILE_NAME}" --token="${USER_TOKEN}" 17 | kubectl config set-context "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" --kubeconfig="${KUBECFG_FILE_NAME}" --cluster="${CLUSTER_NAME}" --user="${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" --namespace="${NAMESPACE}" 18 | kubectl config use-context "${SERVICE_ACCOUNT_NAME}-${NAMESPACE}-${CLUSTER_NAME}" --kubeconfig="${KUBECFG_FILE_NAME}" -------------------------------------------------------------------------------- /service-mesh-with-linkerd/code.md: -------------------------------------------------------------------------------- 1 | # Service mesh with Linkerd 2 | 3 | In this module we will install [Linkerd](https://linkerd.io/) and look at traffic going between meshed applications.
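Before installing anything, it is worth confirming the cluster is ready for the Linkerd control plane. A quick pre-flight check (assuming the `linkerd` CLI baked into the workshop image is on your `PATH`):
```
linkerd check --pre
```
All checks should report green before you continue.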
4 | 5 | ## Installing Linkerd 6 | 7 | To install Linkerd on your cluster, issue the following command 8 | 9 | ``` 10 | linkerd install | kubectl apply -f - 11 | ``` 12 | Linkerd will install the following on your cluster 13 | 14 | ``` 15 | clusterrole.rbac.authorization.k8s.io/linkerd-linkerd-identity created 16 | clusterrolebinding.rbac.authorization.k8s.io/linkerd-linkerd-identity created 17 | serviceaccount/linkerd-identity created 18 | clusterrole.rbac.authorization.k8s.io/linkerd-linkerd-controller created 19 | clusterrolebinding.rbac.authorization.k8s.io/linkerd-linkerd-controller created 20 | serviceaccount/linkerd-controller created 21 | serviceaccount/linkerd-web created 22 | customresourcedefinition.apiextensions.k8s.io/serviceprofiles.linkerd.io created 23 | customresourcedefinition.apiextensions.k8s.io/trafficsplits.split.smi-spec.io created 24 | clusterrole.rbac.authorization.k8s.io/linkerd-linkerd-prometheus created 25 | clusterrolebinding.rbac.authorization.k8s.io/linkerd-linkerd-prometheus created 26 | serviceaccount/linkerd-prometheus created 27 | serviceaccount/linkerd-grafana created 28 | clusterrole.rbac.authorization.k8s.io/linkerd-linkerd-proxy-injector created 29 | clusterrolebinding.rbac.authorization.k8s.io/linkerd-linkerd-proxy-injector created 30 | serviceaccount/linkerd-proxy-injector created 31 | secret/linkerd-proxy-injector-tls created 32 | mutatingwebhookconfiguration.admissionregistration.k8s.io/linkerd-proxy-injector-webhook-config created 33 | clusterrole.rbac.authorization.k8s.io/linkerd-linkerd-sp-validator created 34 | clusterrolebinding.rbac.authorization.k8s.io/linkerd-linkerd-sp-validator created 35 | serviceaccount/linkerd-sp-validator created 36 | secret/linkerd-sp-validator-tls created 37 | validatingwebhookconfiguration.admissionregistration.k8s.io/linkerd-sp-validator-webhook-config created 38 | clusterrole.rbac.authorization.k8s.io/linkerd-linkerd-tap created 39 | clusterrolebinding.rbac.authorization.k8s.io/linkerd-linkerd-tap created 40 | serviceaccount/linkerd-tap created 41 | podsecuritypolicy.policy/linkerd-linkerd-control-plane created 42 | role.rbac.authorization.k8s.io/linkerd-psp created 43 | rolebinding.rbac.authorization.k8s.io/linkerd-psp created 44 | configmap/linkerd-config created 45 | secret/linkerd-identity-issuer created 46 | service/linkerd-identity created 47 | deployment.extensions/linkerd-identity created 48 | service/linkerd-controller-api created 49 | service/linkerd-destination created 50 | deployment.extensions/linkerd-controller created 51 | service/linkerd-web created 52 | deployment.extensions/linkerd-web created 53 | configmap/linkerd-prometheus-config created 54 | service/linkerd-prometheus created 55 | deployment.extensions/linkerd-prometheus created 56 | configmap/linkerd-grafana-config created 57 | service/linkerd-grafana created 58 | deployment.extensions/linkerd-grafana created 59 | deployment.apps/linkerd-proxy-injector created 60 | service/linkerd-proxy-injector created 61 | service/linkerd-sp-validator created 62 | deployment.extensions/linkerd-sp-validator created 63 | service/linkerd-tap created 64 | deployment.extensions/linkerd-tap created 65 | ``` 66 | We are going to take advantage of Linkerd's auto injection functionality to inject the Linkerd sidecar proxy into our application. Auto injection is done at the namespace level, so we are going to create a new namespace.
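Under the hood, auto injection is handled by the `linkerd-proxy-injector` and the mutating webhook configuration created during the install above: whenever a pod is created in a namespace carrying the inject annotation, the webhook rewrites the pod spec to add the sidecar proxy.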
67 | 68 | ``` 69 | kubectl create namespace mesh-app 70 | ``` 71 | We are then going to add the annotation that tells Linkerd to auto-inject on this namespace 72 | ``` 73 | kubectl annotate namespace mesh-app linkerd.io/inject=enabled 74 | ``` 75 | 76 | Next we are going to deploy a web application and use it as a mock service API 77 | 78 | ``` 79 | cat < 141 | 142 | 143 | 144 | 145 | 146 | 151 | 152 | 153 |

Awesome Web App !!!! 154 | ....and the demo worked :) 155 | gif
156 | 157 | 158 | ``` 159 | 160 | ``` 161 | cat <