├── k8s
    ├── lab
    │   └── .gitkeep
    ├── app
    │   ├── configmap.yaml
    │   ├── service.yaml
    │   ├── ingress.yaml
    │   └── deployment.yaml
    ├── app-health
    │   ├── service.yaml
    │   ├── ingress.yaml
    │   └── deployment.yaml
    ├── app-ingress
    │   ├── service.yaml
    │   ├── ingress.yaml
    │   └── deployment.yaml
    ├── app-envvars
    │   ├── deployment-base.yaml
    │   └── deployment-with-envvars.yaml
    └── redis
        └── redis.yaml
├── labs
    ├── images
    │   ├── README.md
    │   ├── Images.png
    │   └── nginxWelcome.png
    ├── kube_infra_as_code.md
    ├── kube_readiness.md
    ├── docker_tools_container.md
    ├── kube_readiness_2.md
    ├── kube_override_cmd.md
    ├── kube_setup_ingress.md
    ├── kube_env_vars.md
    ├── docker_intro.md
    ├── kube_deploy_cloud_app.md
    └── docker_cloud_app.md
├── .gitignore
├── slides
    └── docker-and-kubernetes-dojo.pdf
├── .editorconfig
├── requirements.txt
├── .dockerignore
├── Dockerfile
├── Dockerfile-tools
├── arguments
    └── __init__.py
├── app.py
└── README.md

/k8s/lab/.gitkeep:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/labs/images/README.md:
--------------------------------------------------------------------------------
1 | Place for images
2 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .vscode
3 | venv
4 | __pycache__
--------------------------------------------------------------------------------
/labs/images/Images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/javaplus/DockerKubesDojo/HEAD/labs/images/Images.png
--------------------------------------------------------------------------------
/labs/images/nginxWelcome.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/javaplus/DockerKubesDojo/HEAD/labs/images/nginxWelcome.png
--------------------------------------------------------------------------------
/slides/docker-and-kubernetes-dojo.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/javaplus/DockerKubesDojo/HEAD/slides/docker-and-kubernetes-dojo.pdf
--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
1 | root = true
2 | 
3 | [*.py]
4 | indent_style = space
5 | indent_size = 4
6 | end_of_line = cr
7 | insert_final_newline = true
8 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | Click==7.0
2 | Flask==1.1.1
3 | itsdangerous==1.1.0
4 | Jinja2==2.10.3
5 | MarkupSafe==1.1.1
6 | redis==3.3.11
7 | Werkzeug==0.16.0
8 | 
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | .dockerignore
2 | .editorconfig
3 | .git
4 | .gitignore
5 | Dockerfile
6 | Dockerfile-tools
7 | venv
8 | k8s
9 | __pycache__
10 | skaffold.yaml
--------------------------------------------------------------------------------
/k8s/app/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 |   labels:
5 |     app: cloud-native-demo
6 |   name: cloud-native-demo
7 | data:
8 |   user-defined-1: value 1
9 |   user-defined-2: value 2
10 |   user-defined-3: value 3
11 | 
--------------------------------------------------------------------------------
/k8s/app-health/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     run: cn-demo
6 |   name: cn-demo
7 | spec:
8 |   type: ClusterIP
9 |   ports:
10 |   - port: 5000
11 |     protocol: TCP
12 |     targetPort: 5000
13 |   selector:
14 |     run: cn-demo
15 | 
--------------------------------------------------------------------------------
/k8s/app-ingress/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     run: cn-demo
6 |   name: cn-demo
7 | spec:
8 |   type: ClusterIP
9 |   ports:
10 |   - port: 5000
11 |     protocol: TCP
12 |     targetPort: 5000
13 |   selector:
14 |     run: cn-demo
15 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3
2 | 
3 | WORKDIR /usr/src/app
4 | 
5 | COPY requirements.txt ./
6 | RUN pip install --trusted-host pypi.org --no-cache-dir -r requirements.txt
7 | 
8 | LABEL org.opencontainers.image.title="cloud-native-demo"
9 | 
10 | COPY . .
11 | 
12 | CMD [ "python", "./app.py" ]
13 | 
--------------------------------------------------------------------------------
/k8s/app/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     run: cloud-native-demo
6 |   name: cloud-native-demo
7 | spec:
8 |   type: ClusterIP
9 |   ports:
10 |   - port: 5000
11 |     protocol: TCP
12 |     targetPort: 5000
13 |   selector:
14 |     run: cloud-native-demo
15 | 
--------------------------------------------------------------------------------
/k8s/app-health/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1beta1
2 | kind: Ingress
3 | metadata:
4 |   name: cn-demo
5 |   annotations:
6 |     nginx.ingress.kubernetes.io/rewrite-target: /
7 | spec:
8 |   rules:
9 |   - http:
10 |       paths:
11 |       - path: /
12 |         backend:
13 |           serviceName: cn-demo
14 |           servicePort: 5000
15 | 
--------------------------------------------------------------------------------
/k8s/app-ingress/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1beta1
2 | kind: Ingress
3 | metadata:
4 |   name: cn-demo
5 |   annotations:
6 |     nginx.ingress.kubernetes.io/rewrite-target: /
7 | spec:
8 |   rules:
9 |   - http:
10 |       paths:
11 |       - path: /
12 |         backend:
13 |           serviceName: cn-demo
14 |           servicePort: 5000
15 | 
--------------------------------------------------------------------------------
/Dockerfile-tools:
--------------------------------------------------------------------------------
1 | FROM ubuntu:latest
2 | 
3 | RUN apt update && \
4 |     apt install -y curl tmux vim zsh git jq && \
5 |     curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl && \
6 |     chmod +x kubectl && \
7 |     mv ./kubectl /usr/local/bin/kubectl
8 | 
--------------------------------------------------------------------------------
/k8s/app/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1beta1
2 | kind: Ingress
3 | metadata:
4 |   name: cloud-native-demo
5 |   annotations:
6 |     nginx.ingress.kubernetes.io/rewrite-target: /
7 | spec:
8 |   rules:
9 |   - http:
10 |       paths:
11 |       - path: /
12 |         backend:
13 |           serviceName: cloud-native-demo
14 |           servicePort: 5000
15 | 
--------------------------------------------------------------------------------
/arguments/__init__.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | 
3 | parser = argparse.ArgumentParser(description='Starter Python + Redis app')
4 | parser.add_argument('--port', '-p', type=int, required=False, help="port number to listen for HTTP requests", default=5000)
5 | parser.add_argument('--host', type=str, required=False, help="host to bind to", default='0.0.0.0')
6 | parser.add_argument('--redis-host', type=str, required=False, help="hostname of the backing Redis service", default='localhost')
7 | parser.add_argument('--redis-port', type=str, required=False, help="port number of the backing Redis service", default='6379')
8 | 
9 | args = parser.parse_args()
10 | 
--------------------------------------------------------------------------------
/k8s/app-envvars/deployment-base.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 |   labels:
5 |     run: cn-demo
6 |   name: cn-demo
7 | spec:
8 |   replicas: 1
9 |   selector:
10 |     matchLabels:
11 |       run: cn-demo
12 |   template:
13 |     metadata:
14 |       labels:
15 |         run: cn-demo
16 |     spec:
17 |       containers:
18 |       - image: cloud-native-demo:1
19 |         imagePullPolicy: IfNotPresent
20 |         name: cn-demo
21 |         resources:
22 |           limits:
23 |             cpu: 1
24 |             memory: 128Mi
25 |           requests:
26 |             cpu: 100m
27 |             memory: 128Mi
28 |       restartPolicy: Always
29 |       terminationGracePeriodSeconds: 2
30 | 
--------------------------------------------------------------------------------
/k8s/redis/redis.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 |   labels:
5 |     run: redis-test
6 |   name: redis-test
7 | spec:
8 |   replicas: 1
9 |   selector:
10 |     matchLabels:
11 |       run: redis-test
12 |   template:
13 |     metadata:
14 |       labels:
15 |         run: redis-test
16 |     spec:
17 |       containers:
18 |       - image: redis
19 |         imagePullPolicy: Always
20 |         name: redis-test
21 |         resources:
22 |           limits:
23 |             cpu: 1
24 |             memory: 32Mi
25 |           requests:
26 |             cpu: 100m
27 |             memory: 32Mi
28 | ---
29 | apiVersion: v1
30 | kind: Service
31 | metadata:
32 |   labels:
33 |     run: redis-test
34 |   name: redis-test
35 | spec:
36 |   type: ClusterIP
37 |   selector:
38 |     run: redis-test
39 |   ports:
40 |   - port: 6379
41 |     targetPort: 6379
42 |     protocol: TCP
43 | 
--------------------------------------------------------------------------------
/k8s/app-ingress/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: Deployment
3 | metadata:
4 |   labels:
5 |     run: cn-demo
6 |   name: cn-demo
7 | spec:
8 |   replicas: 1
9 |   selector:
10 |     matchLabels:
11 |       run: cn-demo
12 |   template:
13 |     metadata:
14 |       labels:
15 |         run: cn-demo
16 |     spec:
17 |       containers:
18 |       - image: cloud-native-demo:1
19 |         imagePullPolicy: IfNotPresent
20 |         name: cloud-native-demo
21 |         env:
22 |         - name: REDIS_HOST
23 |           value: redis-test
24 |         - name: REDIS_PORT
25 |           value: "6379"
"6379" 26 | - name: USER_DEFINED_1 27 | value: my-user-define-value-1 28 | - name: USER_DEFINED_2 29 | value: my-user-define-value-2 30 | - name: USER_DEFINED_3 31 | value: my-user-define-value-2 32 | command: 33 | - sh 34 | - -c 35 | - | 36 | python app.py --redis-host $(REDIS_HOST) 37 | resources: 38 | limits: 39 | cpu: 1 40 | memory: 128Mi 41 | requests: 42 | cpu: 100m 43 | memory: 128Mi 44 | restartPolicy: Always 45 | terminationGracePeriodSeconds: 2 46 | -------------------------------------------------------------------------------- /k8s/app-envvars/deployment-with-envvars.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | run: cn-demo 6 | name: cn-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | run: cn-demo 12 | template: 13 | metadata: 14 | labels: 15 | run: cn-demo 16 | spec: 17 | containers: 18 | - image: cloud-native-demo:1 19 | imagePullPolicy: IfNotPresent 20 | name: cn-demo 21 | env: 22 | - name: REDIS_HOST 23 | value: redis-test 24 | - name: REDIS_PORT 25 | value: "6379" 26 | - name: USER_DEFINED_1 27 | value: my-user-define-value-1 28 | - name: USER_DEFINED_2 29 | value: my-user-define-value-2 30 | - name: USER_DEFINED_3 31 | value: my-user-define-value-2 32 | command: 33 | - sh 34 | - -c 35 | - | 36 | python app.py --redis-host $(REDIS_HOST) 37 | resources: 38 | limits: 39 | cpu: 1 40 | memory: 128Mi 41 | requests: 42 | cpu: 100m 43 | memory: 128Mi 44 | restartPolicy: Always 45 | terminationGracePeriodSeconds: 2 46 | -------------------------------------------------------------------------------- /k8s/app-health/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | run: cn-demo 6 | name: cn-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | run: cn-demo 12 | template: 13 | metadata: 14 | labels: 15 | run: cn-demo 16 | spec: 17 | containers: 18 | - image: cloud-native-demo:1 19 | imagePullPolicy: IfNotPresent 20 | name: cloud-native-demo 21 | env: 22 | - name: REDIS_HOST 23 | value: redis-test 24 | - name: REDIS_PORT 25 | value: "6379" 26 | - name: USER_DEFINED_1 27 | value: my-user-define-value-1 28 | - name: USER_DEFINED_2 29 | value: my-user-define-value-2 30 | - name: USER_DEFINED_3 31 | value: my-user-define-value-2 32 | command: 33 | - sh 34 | - -c 35 | - | 36 | python app.py --redis-host $(REDIS_HOST) 37 | resources: 38 | limits: 39 | cpu: 1 40 | memory: 128Mi 41 | requests: 42 | cpu: 100m 43 | memory: 128Mi 44 | readinessProbe: 45 | timeoutSeconds: 1 46 | periodSeconds: 1 47 | initialDelaySeconds: 1 48 | httpGet: 49 | path: /ready 50 | port: 5000 51 | restartPolicy: Always 52 | terminationGracePeriodSeconds: 2 53 | -------------------------------------------------------------------------------- /k8s/app/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | run: cloud-native-demo 6 | name: cloud-native-demo 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | run: cloud-native-demo 12 | strategy: 13 | rollingUpdate: 14 | maxSurge: 25% 15 | maxUnavailable: 25% 16 | type: RollingUpdate 17 | template: 18 | metadata: 19 | labels: 20 | run: cloud-native-demo 21 | spec: 22 | containers: 23 | - image: cloud-native-demo:1 24 | imagePullPolicy: 
25 |         name: cloud-native-demo
26 |         env:
27 |         - name: REDIS_HOST
28 |           value: redis-test
29 |         - name: REDIS_PORT
30 |           value: "6379"
31 |         - name: USER_DEFINED_1
32 |           valueFrom:
33 |             configMapKeyRef:
34 |               name: cloud-native-demo
35 |               key: user-defined-1
36 |         - name: USER_DEFINED_2
37 |           valueFrom:
38 |             configMapKeyRef:
39 |               name: cloud-native-demo
40 |               key: user-defined-2
41 |         - name: USER_DEFINED_3
42 |           valueFrom:
43 |             configMapKeyRef:
44 |               name: cloud-native-demo
45 |               key: user-defined-3
46 |         command:
47 |         - sh
48 |         - -c
49 |         - |
50 |           python app.py --redis-host $(REDIS_HOST)
51 |         resources:
52 |           limits:
53 |             cpu: 1
54 |             memory: 128Mi
55 |           requests:
56 |             cpu: 100m
57 |             memory: 128Mi
58 |         livenessProbe:
59 |           timeoutSeconds: 1
60 |           periodSeconds: 1
61 |           initialDelaySeconds: 1
62 |           httpGet:
63 |             path: /live
64 |             port: 5000
65 |         readinessProbe:
66 |           timeoutSeconds: 1
67 |           periodSeconds: 1
68 |           initialDelaySeconds: 1
69 |           httpGet:
70 |             path: /ready
71 |             port: 5000
72 |       restartPolicy: Always
73 |       terminationGracePeriodSeconds: 2
74 | 
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
1 | from arguments import args
2 | from flask import Flask, Response, request, json
3 | import logging
4 | import os
5 | import redis
6 | import time
7 | 
8 | app = Flask(__name__)
9 | 
10 | LISTEN_HOST = args.host
11 | LISTEN_PORT = args.port
12 | REDIS_HOST = args.redis_host
13 | REDIS_PORT = args.redis_port
14 | 
15 | redis_client = redis.Redis(host=REDIS_HOST, port=REDIS_PORT, db=0, decode_responses=True)
16 | livenessDelay = 0
17 | 
18 | @app.route('/')
19 | def root():
20 |     data = json.dumps({
21 |         "appName": "cloud-native-demo",
22 |         "version": "1.0.0",
23 |         "redis-host": REDIS_HOST,
24 |         "env": {
25 |             "host": os.getenv('HOSTNAME'),
26 |             "user_defined_1": os.getenv('USER_DEFINED_1'),
27 |             "user_defined_2": os.getenv('USER_DEFINED_2'),
28 |             "user_defined_3": os.getenv('USER_DEFINED_3')
29 |         }
30 |     })
31 |     return Response(data, mimetype="application/json")
32 | 
33 | @app.route('/counter')
34 | def counter():
35 |     hostname = os.getenv("HOSTNAME")
36 | 
37 |     try:
38 |         if redis_client.hexists("hosts", hostname):
39 |             redis_client.hincrby("hosts", hostname, amount=1)
40 |         else:
41 |             redis_client.hset("hosts", hostname, 1)
42 | 
43 |         return Response(json.dumps(redis_client.hgetall("hosts")), status=200, mimetype="application/json")
44 |     except:
45 |         return Response(json.dumps({"error": "service unavailable"}), status=503, mimetype="application/json")
46 | 
47 | @app.route('/counter/reset')
48 | def clear_counter():
49 |     hosts = redis_client.hgetall("hosts")
50 |     [redis_client.hdel("hosts", key) for key in hosts.keys()]
51 |     return Response(json.dumps(redis_client.hgetall("hosts")), status=200, mimetype="application/json")
52 | 
53 | @app.route('/live')
54 | def live_get():
55 |     time.sleep(livenessDelay)
56 |     return Response(json.dumps({"delay": livenessDelay}), status=200, mimetype="application/json")
57 | 
58 | @app.route('/live/<int:delay>')
59 | def live_post(delay):
60 |     global livenessDelay
61 |     livenessDelay = delay
62 |     return Response(json.dumps({"delay": livenessDelay}), status=200, mimetype="application/json")
63 | 
64 | @app.route('/ready')
65 | def ready():
66 |     redis_ready = False
67 | 
68 |     try:
69 |         redis_ready = redis_client.ping()
70 |     except:
71 |         logging.warning("redis connection down")
72 | 
73 |     response = Response(mimetype="application/json")
74 | 
75 |     if redis_ready:
76 |         response.status = "200"
"200" 77 | response.response = json.dumps({ 78 | "redis_connection": "up" 79 | }) 80 | else: 81 | response.status = "503" 82 | response.response = json.dumps({ 83 | "redis_connection": "down" 84 | }) 85 | 86 | return response 87 | 88 | if __name__ == '__main__': 89 | app.run(host=LISTEN_HOST, port=LISTEN_PORT) 90 | -------------------------------------------------------------------------------- /labs/kube_infra_as_code.md: -------------------------------------------------------------------------------- 1 | # ~~ Infrastructure as Code ~~ 2 | 3 | While the above `kubectl ...` CLI commands are useful for your Developer Inner Loop, when it comes time to deploy to Test and Production environments you want something a little more repeatable. We want the declarative configuration of the `Deployment` we were just exploring in a text based format that we can store alongside our application code in Git. Using `kubectl` again, we can query that configuration back out of the Kubernetes cluster. 4 | 5 | It's a good time to point out that `kubectl` is just a friendly CLI that gives us convenient access to the Kubernets API Server. When we add or change configuration to Kubernetes with commands like `kubectl run ...`, there are PUT/POST calls being issued to the Kubernetes API Server. Likewise, once configured, we can retrieve the configuration with an HTTP GET, or `kubectl get ...`. 6 | 7 | So there's a lot of good configuration already in our cluster for the `Deployment` we have. It'd be a shame to lose it, so let's get it out and store it in a file. 8 | 9 | > Note: Up to now, it didn't matter which directory you were running kubectl commands from, since we were just getting details on our Kubernetes environment, or else communicating directly between docker and the cluster. From here on out we will be using YAML files (included in this repository) to describe the deployment, and so you will need to execute these commands from your root project directory. 10 | 11 | ## Get the Configuration for your Deployment 12 | 13 | ```bash 14 | kubectl get deployment cn-demo -o yaml > k8s/lab/deployment.yaml 15 | ``` 16 | 17 | That command semantically says "get me the configuration of the Deployment named cn-demo, format it as YAML and write it to the file k8s/lab/deployment.yaml". Go ahead and open that file in VS Code and look around. Do you see the familiar values you defined with the `kubectl` CLI earlier for the `cloud-native-demo:1` image and the `replicas` count? There's a lot of other data in there too, some relevant, some not. 18 | 19 | Using `kubectl get ...` is a common technique to bootstrap your configuration without needing to remember all the structure of the YAML document, which in turn is really just the API specification for Kubernetes Deployments. We're going to use a cleaned up version of `deployment.yaml` for our next steps, but just remember how to use `kubectl get ... -oyaml` in the future. You'll use it often to troubleshoot your Kubernetes configuration. 20 | 21 | > Note: In this example we **piped** the results of `kubectl get ...` to a file using the `>` character. If you leave that part off, the results will be written to your console. You'll do this often when you just need to see the configuration of a particular Kubernetes object. 22 | 23 | Before we continue, let's clean up our current deployment. Going forward, we'll work with and deploy using variations of `deployment.yaml` contained in this repository. 
24 | 
25 | ```bash
26 | kubectl delete deployment cn-demo
27 | ```
28 | 
--------------------------------------------------------------------------------
/labs/kube_readiness.md:
--------------------------------------------------------------------------------
1 | 
2 | # ~~ Detecting an unhealthy app ~~
3 | 
4 | Let's try the `/counter` URL endpoint of our application by going to [http://localhost/counter](http://localhost/counter) in our browser. You should see the following.
5 | 
6 | ```json
7 | {
8 |   "error": "service unavailable"
9 | }
10 | ```
11 | 
12 | That's accurate, but not what we want. We have an unhealthy application that can't find its backing service (Redis in this case). What if I had a deployment with 3 replicas and one of them could not establish the connection to Redis? I would want Kubernetes to stop routing traffic to that unhealthy application until it became healthy again. The definition of "health" will vary from app to app, and this is why Kubernetes allows you to write this logic yourself. There are various techniques, but our implementation exposes a single `/ready` URL endpoint that returns a `200 OK` response if healthy, and a `503` if not.
13 | 
14 | We need to configure Kubernetes to periodically call this endpoint, and remove the Pod as a valid endpoint in the `Service` if it is unhealthy. This is exactly what the `readinessProbe` feature is meant to do. In this case, it's configured for you in `k8s/app-health/deployment.yaml`. Look for the following section which configures the `readinessProbe`. Details on what all these values mean can be [found here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes).
15 | 
16 | ```yaml
17 | readinessProbe:
18 |   timeoutSeconds: 1
19 |   periodSeconds: 1
20 |   initialDelaySeconds: 1
21 |   httpGet:
22 |     path: /ready
23 |     port: 5000
24 | ```
25 | 
26 | ## Spotting unhealthy Pods
27 | 
28 | We're going to apply this new `Deployment` with the `readinessProbe` configured, but let's look at our current Pod before we do. In our console-based dashboard, you should see a single Pod running with output like the following:
29 | 
30 | ```bash
31 | NAME                       READY   STATUS    RESTARTS   AGE
32 | cn-demo-7bbddfb4bf-g4rlk   1/1     Running   0          3m
33 | ```
34 | 
35 | The callout here is the `1/1 READY` section. This is Kubernetes saying "I should have 1 Pod ready, and 1 is reporting as ready". This is not the case though as the Redis dependency is missing. Let's apply the new `Deployment` and observe the change.
36 | 
37 | ```bash
38 | kubectl apply -f k8s/app-health
39 | ```
40 | 
41 | The configuration changed so Kubernetes will Terminate the old Pod and create a new one in its place. Check the Pod status this time though.
42 | 
43 | ```
44 | NAME                      READY   STATUS    RESTARTS   AGE
45 | cn-demo-f8bfdc656-8jz57   0/1     Running   0          13s
46 | ```
47 | 
48 | It's stuck in `0/1 READY` because the newly added `readinessProbe` is failing. Let's see the effect it had on the application in the browser. Navigate one more time to [http://localhost/](http://localhost/). Not even a `404 Not Found` this time; instead we're getting a `503` error from the Nginx Ingress Controller telling us there are no ready Pods to service the request.
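
If you want to see those probe failures from the cluster's point of view rather than the browser's, `kubectl` can show you directly. A minimal sketch; the Pod name below is the sample from the output above, so substitute your own Pod's name:

```bash
# The Events section at the bottom of the output will show entries like
# "Readiness probe failed: ..." for every probe attempt that came back 503
kubectl describe pod cn-demo-f8bfdc656-8jz57

# While no Pod is ready, the Service has no endpoints to route to,
# which is exactly why the Ingress Controller returns the 503
kubectl get endpoints cn-demo
```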
49 | 
--------------------------------------------------------------------------------
/labs/docker_tools_container.md:
--------------------------------------------------------------------------------
1 | Your team loves you so much they also created a small **tools** Docker image with handy CLI tooling to help visualize and troubleshoot what's going on in your local Kubernetes namespace. It's a great way to share tools amongst the team, so you decide to start it up. The Docker image they gave you has the `watch` and `kubectl` commands, which you're about to see are a handy way to keep tabs on what Pods or other services are configured and running in your namespace.
2 | 
3 | The tools Docker image does not contain authentication tokens to connect to your local Kubernetes cluster (everyone has their own), so we'll mount those from your local machine into the running tools container so they're available to `kubectl`. We're mounting into the default location as well, so we don't need to configure anything additional.
4 | 
5 | > Note the `-it` and `bash` in this CLI statement. These say "run the bash CLI interpreter in interactive mode", which will give us a bash prompt once the container is running.
6 | 
7 | > Note: In the following command, replace `<username>` with your own local user directory name. If executing in Git Bash (mintty) you will need to include `winpty` at the beginning of the command.
8 | 
9 | **Windows**
10 | ```bash
11 | docker run --rm -it -v C:\Users\<username>\.kube\:/root/.kube javaplus/kube-demo-tools bash
12 | ```
13 | 
14 | **MacOS/Linux**
15 | ```bash
16 | docker run --rm -it -v $HOME/.kube/:/root/.kube javaplus/kube-demo-tools bash
17 | ```
18 | 
19 | You should eventually see a `bash` prompt, which means you're running `bash`, in a container, on a Linux operating system (a virtualized Linux OS if you're on Windows or MacOS). Next, run the `kubectl` command to validate our authentication tokens were mounted into the container correctly.
20 | 
21 | ```bash
22 | kubectl get all,endpoints,ingress
23 | ```
24 | 
25 | You should see output similar to the following:
26 | 
27 | ```
28 | NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
29 | service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   45d
30 | 
31 | NAME                   ENDPOINTS           AGE
32 | endpoints/kubernetes   192.168.65.3:6443   45d
33 | ```
34 | 
35 | Next let's play with `watch`. `watch` is simply a command that repeatedly runs another command on some interval. Let's test it by running `watch date`. `date` is the command we want to run over and over, and should just report the current date and time.
36 | 
37 | ```bash
38 | watch date
39 | ```
40 | 
41 | You should see the current date and time updated every 2 seconds.
42 | 
43 | ```bash
44 | Every 2.0s: date                          2019-12-27 16:32:25
45 | 
46 | Fri Dec 27 16:32:25 UTC 2019
47 | ```
48 | 
49 | When you combine `watch` and `kubectl`, you can get a handy console-based dashboard of the current state of your Kubernetes namespace. Run the following command, where we also include `-n 1`, which will cause `watch` to rerun the `kubectl` command every 1 second instead of the default 2 seconds.
50 | 
51 | ```bash
52 | watch -n 1 kubectl get all,endpoints,ingress
53 | ```
54 | 
55 | You now have a text-based dashboard updating every 1 second of your namespace showing Pods, Deployments, Services, Endpoints and Ingress. You can type `CTRL+C` to exit this dashboard and then `CTRL+D` to exit out of the container (which also terminates this particular container), but let's keep it running for now.
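
One aside before moving on: if you're ever on a machine without `watch` installed, `kubectl` has a rough equivalent built in. A small sketch, not a replacement for the dashboard above:

```bash
# --watch streams changes as they happen instead of polling on an interval;
# unlike watch(1), it appends new lines rather than redrawing the screen
kubectl get pods --watch
```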
56 | 
--------------------------------------------------------------------------------
/labs/kube_readiness_2.md:
--------------------------------------------------------------------------------
1 | 
2 | ## Fixing the Redis dependency
3 | 
4 | Even though the application is "not ready", note that it has not actually stopped running. It's still serving responses to the `/ready` endpoint every time Kubernetes asks. It will just keep trying forever until it's finally ready. Let's fix that by deploying a Redis service that our `cloud-native-demo` deployment can resolve.
5 | 
6 | In the `k8s/redis` directory is a single YAML manifest file with a VERY simple Redis `Deployment` and `Service`. This is great for local development, but is not a production-ready Redis deployment by any means. Regardless, it shows off a nice feature of Kubernetes Services that we haven't really shown an example of yet. That feature is the ability to [resolve the ClusterIP of a Service](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#services) by its logical name. In other words, you'll be able to resolve the IP address of the Redis instance just by using the name we give to its Kubernetes Service.
7 | 
8 | Remember that we've configured our application to connect to Redis at the hostname of `redis-test`. This is visible in `k8s/app-health/deployment.yaml`.
9 | 
10 | ```yaml
11 | env:
12 | - name: REDIS_HOST
13 |   value: redis-test
14 | ```
15 | 
16 | This means we should name the Redis Service `redis-test` as well. Look at how we've named it in `k8s/redis/redis.yaml`.
17 | 
18 | ```yaml
19 | apiVersion: v1
20 | kind: Service
21 | metadata:
22 |   labels:
23 |     run: redis-test
24 |   name: redis-test
25 | ```
26 | 
27 | It looks like everything is lining up, so let's apply that Redis `Deployment` and `Service` to Kubernetes with the following command:
28 | 
29 | ```bash
30 | kubectl apply -f k8s/redis
31 | ```
32 | 
33 | Watch your dashboard of running Pods. You should see a new `redis-test-***` Pod, and you should also see the `cn-demo-***` Pod reporting a `1/1 READY` status.
34 | 
35 | ```bash
36 | NAME                          READY   STATUS    RESTARTS   AGE
37 | pod/cn-demo-f8bfdc656-cxpjz   1/1     Running   0          19m
38 | pod/redis-test-57f56cb4f8-w9f2v   1/1   Running   0        10s
39 | ```
40 | 
41 | Now that the `cn-demo-***` Pod is ready for traffic again, you should be able to navigate to it in your browser again at [http://localhost/](http://localhost/).
42 | 
43 | ## Scale and play with Readiness
44 | 
45 | Let's manually scale our `Deployment` one more time to see the effect of Readiness Probes when there is more than one replica in a `Deployment`.
46 | 
47 | ```bash
48 | kubectl scale deployment cn-demo --replicas 3
49 | ```
50 | 
51 | Then let's delete the Redis deployment (`kubectl delete -f k8s/redis`). What is your dashboard reporting for the Readiness of the 3 `Pods` that are running in your `Deployment`? They should all say `0/1 READY` now. Go ahead and redeploy Redis:
52 | 
53 | ```bash
54 | kubectl apply -f k8s/redis
55 | ```
56 | 
57 | Did they all turn back to ready again? This is a powerful feature as Kubernetes is watching every Pod in your deployment. It's also worth reading about [Liveness Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/). They have a similar configuration, but will actually restart the container(s) in your `Pod` if they are determined to be unhealthy. Combining these techniques of Liveness and Readiness will increase the reliability and availability of your system.
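
If you want to see a Liveness Probe in action with this demo app, `app.py` gives you a lever to pull: its `/live/<int:delay>` route sets an artificial delay on every later liveness response. A hedged sketch of the experiment, assuming you've applied a `Deployment` with a `livenessProbe` configured (such as `k8s/app/deployment.yaml`, which probes `/live` with a 1-second timeout) and the Ingress from the earlier lab:

```bash
# Tell the app to take 5 seconds answering /live; a probe with
# timeoutSeconds: 1 will now fail, and Kubernetes will restart the container
curl http://localhost/live/5

# Watch the RESTARTS column climb as the liveness probe keeps failing
kubectl get pods
```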
--------------------------------------------------------------------------------
/labs/kube_override_cmd.md:
--------------------------------------------------------------------------------
1 | ## Pass the REDIS_HOST env var as a CLI argument to the application
2 | 
3 | While our application has the ability to read directly from the configured Environment Variables, some applications are configured via CLI arguments on startup. So far, we haven't actually told Docker or Kubernetes which Python script to run at startup. It's been using the default `CMD` instruction defined in the original `Dockerfile` it was built from. You can see in the `Dockerfile` in this repository, it's running the command `python app.py` by default.
4 | 
5 | This program actually accepts a few configuration values on the CLI. Our always helpful development team left a `--help` argument in there to help discover what some of them are. Let's run that now in a new container using Docker. It only needs to run long enough to give us the output. In a new console window, run the following:
6 | 
7 | ```bash
8 | docker run --rm cloud-native-demo:1 python app.py --help
9 | ```
10 | 
11 | You should see the following output:
12 | 
13 | ```bash
14 | docker run --rm cloud-native-demo:1 python app.py --help
15 | 
16 | usage: app.py [-h] [--port PORT] [--host HOST] [--redis-host REDIS_HOST]
17 |               [--redis-port REDIS_PORT]
18 | 
19 | Starter Python + Redis app
20 | 
21 | optional arguments:
22 |   -h, --help            show this help message and exit
23 |   --port PORT, -p PORT  port number to listen for HTTP requests
24 |   --host HOST           host to bind to
25 |   --redis-host REDIS_HOST
26 |                         hostname of the backing Redis service
27 |   --redis-port REDIS_PORT
28 |                         port number of the backing Redis service
29 | ```
30 | 
31 | Soon, we're going to deploy an instance of Redis into the cluster to connect to. Let's pass the `REDIS_HOST` environment variable we configured in the last step as a CLI argument to the Python application so we can control the Redis host the application connects to.
32 | 
33 | You can override the start command (right now it's defaulting to what was defined in the Dockerfile) using the `command` property in your `Deployment` YAML. The `command` property goes at the same depth the `env` property did in the last step. Add the following to your `Deployment` and try to redeploy it with the `kubectl apply -f ...` command used before. Take note of how `REDIS_HOST` is passed, surrounded by `$()`. This is important for the variable to resolve at runtime; otherwise it would just take the literal string `$REDIS_HOST`.
34 | 
35 | Go ahead and update the **k8s/app-envvars/deployment-base.yaml** now to override the command the container should run. See the syntax below.
36 | 
37 | > Use the completed `deployment-with-envvars.yaml` file as a guide if you're having trouble placing this configuration in the file.
38 | 
39 | 
40 | ```yaml
41 | command:
42 | - sh
43 | - -c
44 | - |
45 |   python app.py --redis-host $(REDIS_HOST)
46 | ```
47 | 
48 | After updating the yaml file, go ahead and test it again.
49 | If you have pods already running, you may want to delete your deployment first by using the **kubectl delete deploy** command:
50 | ```bash
51 | kubectl delete deploy <deployment-name>
52 | ```
53 | Now create the deployment with your new overridden command by using the **kubectl apply** command:
54 | 
55 | ```bash
56 | kubectl apply -f k8s/app-envvars/deployment-base.yaml
57 | ```
58 | Watch your console to see your new pod start up. Once you have a new running pod, we will use the port-forward command to connect to the pod.
59 | 
60 | Last time we created a service to expose your deployment, but if need be you can port-forward directly into a single pod like this:
61 | ```bash
62 | 
63 | kubectl port-forward pod/<pod-name> 5000:5000
64 | ```
65 | Port forward to the pod and then test the application by hitting the [http://localhost:5000](http://localhost:5000) endpoint again.
66 | 
67 | You should see a similar JSON response as last time, except this time you should see the Redis host has the value you specified as an environment variable in your deployment definition.
68 | 
69 | 
--------------------------------------------------------------------------------
/labs/kube_setup_ingress.md:
--------------------------------------------------------------------------------
1 | 
2 | # ~~ Ingress ~~
3 | 
4 | As mentioned before, the `Service` we exposed to our Pods is a ClusterIP, and only visible within the cluster. To get to it, we had to do a port-forwarding technique. In a Test or Production environment, we would probably like to have a stable DNS name to be able to access our service. This is what Kubernetes `Ingress` provides. Your cluster Operators will likely host one or a few Ingress endpoints from which you can define your own `Ingress` definitions. With your local Kubernetes environment in Docker Desktop, you can also run an Ingress Controller. You'll get the behavior of an Ingress Controller, minus an actual Load Balancer in front of it like you would find in a production-scale cluster.
5 | 
6 | ## Deploy the Nginx Ingress Controller
7 | 
8 | To start, deploy the Nginx Ingress Controller to your local Kubernetes instance with the following commands:
9 | 
10 | ```bash
11 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
12 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/cloud-generic.yaml
13 | ```
14 | 
15 | It might take a few minutes, but when finished you should have a new namespace called `ingress-nginx` with a single `Pod` running in it, which is the Ingress Controller. Let's run some commands locally to see the effect when it's all complete. Note the new namespace and how the `-n` switch can be used to query for `Pods` and `Services` in that namespace instead of the default we've used so far. Also note the `LoadBalancer` type given to the `ingress-nginx` Service. So far we've only used ClusterIP. The `LoadBalancer` type in a cloud environment creates a real Load Balancer "of the cloud", an AWS ALB load balancer for example. Docker Desktop has a convenient feature where it listens on `localhost` when you create Services of type `LoadBalancer`.
16 | 
17 | > NOTE: Even in production, you'll likely use ClusterIP Service types a majority of the time and use the Operator-provided Ingress to get HTTP traffic to your services.
18 | 
19 | ```
20 | > kubectl get ns
21 | NAME              STATUS   AGE
22 | default           Active   37h
23 | docker            Active   36h
24 | ingress-nginx     Active   35h
25 | kube-node-lease   Active   37h
26 | kube-public       Active   37h
27 | kube-system       Active   37h
28 | 
29 | > kubectl -n ingress-nginx get pod,service
30 | NAME                                            READY   STATUS    RESTARTS   AGE
31 | pod/nginx-ingress-controller-7dcc95dfbf-4mwqf   1/1     Running   2          35h
32 | 
33 | NAME                    TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
34 | service/ingress-nginx   LoadBalancer   10.100.245.43   localhost     80:31696/TCP,443:31067/TCP   35h
35 | ```
36 | 
37 | If all has gone well up until now, you should be able to navigate to [http://localhost:80](http://localhost:80) in your browser and see a `404 Not Found` response come back. This is the default `404` page being served up by the Nginx Ingress Controller, because we haven't defined any `Ingress` configurations yet.
38 | 
39 | ## Clean-up
40 | 
41 | Before we proceed, let's clean up the `Deployment` from the last section. We will `kubectl delete` the same config file we used for `kubectl apply`.
42 | 
43 | ```bash
44 | kubectl delete -f k8s/app-envvars/deployment-base.yaml
45 | ```
46 | 
47 | ## Apply the Deployment, Service and Ingress
48 | 
49 | Are you liking declarative Infrastructure as Code yet? Full environments can come and go with ease, and we're about to stand up a few things all in one shot. A working `Deployment`, `Service` and `Ingress` are ready for you in the `k8s/app-ingress` directory. Rather than `kubectl apply` these YAML files one by one, we're going to do them all at once just by applying the entire directory.
50 | 
51 | ```bash
52 | kubectl apply -f k8s/app-ingress
53 | ```
54 | 
55 | We skipped over the creation of `Ingress` and `Service`, but it's more important to look at their configuration to see how they relate rather than try to memorize YAML configurations. Open their configurations in the `k8s/app-ingress` directory and try to follow the connections from Ingress, to Service, to Pod.
56 | 
57 | ```
58 | http request --> Ingress --> Service --> Pod(s)
59 | ```
60 | 
61 | Ingress forwards to a named Service on a Port, the Service then forwards to one or more Pods based on label selectors. If you're wondering how our Pods ended up with a label selector of `run: cn-demo`, check the `deployment.yaml`.
62 | 
63 | With all of this applied, we should be able to access our application by going to [http://localhost:80](http://localhost:80). This time, instead of a `404` page, we should see our familiar JSON response with all our application configuration showing. Note too that we don't have to do the `kubectl port-forward` command anymore. We're coming in the front door of our local Kubernetes instance via `localhost` because we've set up an Ingress Controller there.
--------------------------------------------------------------------------------
/labs/kube_env_vars.md:
--------------------------------------------------------------------------------
1 | # ~~ Environment Variables ~~
2 | 
3 | ## Redeploy the baseline
4 | 
5 | One of your original asks was that this application be configurable via Environment Variables. We're now going to put that to the test. Our first step will be to redeploy the application, now using the declarative YAML in the `k8s/app-envvars` directory of this project. To do that, we're going to use the `kubectl apply ...` command.
6 | 
7 | ```bash
8 | kubectl apply -f k8s/app-envvars/deployment-base.yaml
9 | ```
10 | 
11 | If you check your dashboard, you should see a single Pod running again. Go ahead and take a look at the `k8s/app-envvars/deployment-base.yaml` file to get familiar with it. It's a slimmed-down version of the `Deployment` YAML you got in the previous step with `kubectl get ... -oyaml`.
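
If you'd rather not eyeball the dashboard, `kubectl` can confirm the rollout for you. A minimal sketch:

```bash
# Blocks until the Deployment's Pods are fully rolled out (or it times out)
kubectl rollout status deployment cn-demo
```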
12 | 
13 | ## Connect to the Container
14 | 
15 | We never actually tested the application in the last step, but let's do that now by exposing the `Pod` as a `Service` and using the `kubectl port-forward` command to connect to it. A Kubernetes `Service` is a lengthy topic, and more can be [read here](https://kubernetes.io/docs/concepts/services-networking/service/). We'll be focusing on creating what's known as a ClusterIP Service, which means there will be a single addressable endpoint available in the cluster to access the Pod(s) hosting our application. First things first, let's expose the `Service` using `kubectl expose ...`.
16 | 
17 | ```bash
18 | kubectl expose deployment cn-demo --port 5000 --target-port 5000
19 | ```
20 | 
21 | This command says "Create a new ClusterIP Service that listens on port 5000 and targets port 5000 of all Pods that are a part of the cn-demo Deployment". Next let's connect to this newly created `Service`. The `kubectl port-forward` command will expose a port on our local machine that connects to ports exposed on Pods. We need to do the `port-forward` command here because the Pod is only accessible inside the cluster right now.
22 | 
23 | ```bash
24 | kubectl port-forward svc/cn-demo 5000
25 | ```
26 | 
27 | > Earlier we said that port-forward connects to a Pod, but here we're port forwarding to a Service. What??? In this case, think of the Service as an alias to one or more Pods that it load balances across. It's a kubectl trick, but just a single Pod will be selected for forwarding, even if there are many replicas of it.
28 | 
29 | At this point we should be able to access [http://localhost:5000](http://localhost:5000) from our browser. Give it a try. You should see the familiar JSON output we saw earlier when running the container with Docker. Your value for `host` will vary.
30 | 
31 | ```json
32 | {
33 |   "appName": "cloud-native-demo",
34 |   "env": {
35 |     "host": "cn-demo-7df548dcf8-wpr8v",
36 |     "user_defined_1": null,
37 |     "user_defined_2": null,
38 |     "user_defined_3": null
39 |   },
40 |   "redis-host": "localhost",
41 |   "version": "1.0.0"
42 | }
43 | ```
44 | 
45 | Once you're finished, stop the port forwarding command by pressing `CTRL+C`.
46 | 
47 | ## Configure the User Defined variables
48 | 
49 | > TIP: The completed deployment configuration is in the `k8s/app-envvars/deployment-with-envvars.yaml` file if you need help through this section.
50 | 
51 | Right now we have a lot of `null` values for the `user_defined_#` variables. Let's fix that. In the **`k8s/app-envvars/deployment-base.yaml`** file, find the section that looks like the following:
52 | 
53 | ```yaml
54 | containers:
55 | - image: cloud-native-demo:1
56 |   imagePullPolicy: IfNotPresent
57 |   name: cn-demo
58 | ```
59 | 
60 | Add a new section directly under `name: cn-demo` with the following. Indentation matters here.
61 | 
62 | ```yaml
63 | env:
64 | - name: USER_DEFINED_1
65 |   value: my-user-define-value-1
66 | ```
67 | 
68 | With just this configuration, re-apply this `Deployment`. Kubernetes will see the difference in configuration and redeploy a new `Pod` with the updated configuration. The old `Pod` will be terminated as it no longer matches the desired state of your deployment (it doesn't have the right configuration anymore).
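
Before you re-apply, it can be instructive to see exactly what Kubernetes thinks is changing. If your `kubectl` version includes it, the `diff` subcommand compares the live object against your edited file; a sketch:

```bash
# Prints a unified diff of the live Deployment vs. the local YAML;
# exits non-zero when they differ, zero when they are already in sync
kubectl diff -f k8s/app-envvars/deployment-base.yaml
```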
Assuming you didn't change the filename, run the following and watch your dashboard for how the old and new `Pods` cycle.
69 | 
70 | ```bash
71 | kubectl apply -f k8s/app-envvars/deployment-base.yaml
72 | ```
73 | 
74 | Finally, run the same `port-forward` command you ran earlier and try to access the application in your browser at [http://localhost:5000](http://localhost:5000). Do you see the updated value for `user_defined_1`? If you look at the application code in the `app.py` file at the root of the repo you cloned, you can see how the Python code is reading from an Environment Variable to populate that section of the JSON response.
75 | 
76 | > NOTE: Be very careful in a production system with your environment configuration. It's useful for this demo to return environment variables off one of the API endpoints. A production system though will sometimes hold sensitive information in environment variables. Would you want someone seeing your database connection string after calling a URL like this? Nope!
77 | 
78 | ## Fill in the remaining environment variables
79 | 
80 | Using the same pattern as above, fill in the following environment key-value pairs as environment variables and observe the changes in the running application after redeploying.
81 | 
82 | ```bash
83 | USER_DEFINED_2=my-user-define-value-2
84 | USER_DEFINED_3=my-user-define-value-3
85 | REDIS_HOST=redis-test
86 | REDIS_PORT="6379"
87 | ```
88 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DEPRECATED!!!!!!!!!!!!!!!!!
2 | 
3 | ## THIS TUTORIAL HAS BEEN SPLIT INTO TWO SEPARATE TUTORIALS (links below). THIS ONE IS NO LONGER MAINTAINED.
4 | 
5 | [Docker Tutorial](https://github.com/javaplus/DockerDojo)
6 | [Kubernetes Tutorial](https://github.com/javaplus/KubernetesDojo)
7 | 
8 | Tutorial on Using Docker and Kubernetes
9 | 
10 | **Big thanks to [Michael Frayer](https://github.com/frayer), as most of the content of this tutorial is from him!**
11 | 
12 | ## Presentation
13 | 
14 | The presentation that accompanies this workshop is available here: [docker-and-kubernetes-dojo.pdf](https://github.com/javaplus/DockerKubesDojo/blob/master/slides/docker-and-kubernetes-dojo.pdf)
15 | 
16 | This is provided to those that attended an in-person workshop who wish to reference back to topics discussed. Much of the context is missing for those that didn't attend; however, it is not required material in order to proceed with the rest of the workshop below.
17 | 
18 | ## Pre-requisites:
19 | 
20 | Generally speaking, you need to have the Git client and Docker along with Kubernetes installed locally.
21 | 
22 | #### A console or shell environment
23 | 
24 | Some basic skills working with command line tooling are required to complete this tutorial, as you will interact with the CLI often throughout. Windows Command Prompt or PowerShell is recommended for Windows users. MacOS and Linux users can use their shell of choice. It will be called out where there is a difference in CLI statements for Windows vs MacOS/Linux users.
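
If you want a quick read on what's already installed before working through the sections below, these version checks are harmless to run; any that fail simply point you at the matching install step:

```bash
git --version
docker version
kubectl version --client
```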
25 | 
26 | 
27 | #### Git
28 | If you don't already have a Git Client, you can download the Git tools from here:
29 | - https://git-scm.com/downloads
30 | 
31 | 
32 | #### Docker & Kubernetes:
33 | 
34 | Here are links and instructions per operating system:
35 | 
36 | 
37 | ##### Windows
38 | - Windows 10 64-bit: Pro, Enterprise, or Education (Build 15063 or later)
39 | - Docker Desktop Download which Includes Kubernetes: https://www.docker.com/products/docker-desktop
40 | - Docker Desktop Install Guide - https://docs.docker.com/docker-for-windows/install/
41 | - Enable Kubernetes
42 | 
43 | - Older Windows Versions:
44 |   - Docker Toolbox: https://docs.docker.com/toolbox/toolbox_install_windows/
45 |   - Kubernetes Support via Minikube (Click on the *Windows* tab under each section): https://kubernetes.io/docs/tasks/tools/install-minikube/
46 |   - Blog on working with Minikube on Windows: https://rominirani.com/tutorial-getting-started-with-kubernetes-on-your-windows-laptop-with-minikube-3269b54a226
47 | 
48 | ##### Mac
49 | - Docker Desktop for Mac: https://hub.docker.com/editions/community/docker-ce-desktop-mac
50 | 
51 | ##### Linux
52 | - [MicroK8s](https://microk8s.io/)
53 | 
54 | 
55 | ### Testing your Installation
56 | 
57 | Run the **docker version** command and you should see something like this:
58 | ```
59 | C:\Users\tarltob1>docker version
60 | Client: Docker Engine - Community
61 |  Version:           19.03.5
62 |  API version:       1.40
63 |  Go version:        go1.12.12
64 |  Git commit:        633a0ea
65 |  Built:             Wed Nov 13 07:22:37 2019
66 |  OS/Arch:           windows/amd64
67 |  Experimental:      false
68 | 
69 | Server: Docker Engine - Community
70 |  Engine:
71 |   Version:          19.03.5
72 |   API version:      1.40 (minimum version 1.12)
73 |   Go version:       go1.12.12
74 |   Git commit:       633a0ea
75 |   Built:            Wed Nov 13 07:29:19 2019
76 |   OS/Arch:          linux/amd64
77 |   Experimental:     false
78 |  containerd:
79 |   Version:          v1.2.10
80 |   GitCommit:        b34a5c8af56e510852c35414db4c1f4fa6172339
81 |  runc:
82 |   Version:          1.0.0-rc8+dev
83 |   GitCommit:        3e425f80a8c931f88e6d94a8c831b9d5aa481657
84 |  docker-init:
85 |   Version:          0.18.0
86 |   GitCommit:        fec3683
87 |  Kubernetes:
88 |   Version:          v1.14.8
89 |   StackAPI:         v1beta2
90 | ```
91 | 
92 | Test Kubernetes by running the **kubectl get nodes** command.
93 | This should show you one worker node running on your machine:
94 | ```
95 | C:\Users\tarltob1>kubectl get nodes
96 | NAME             STATUS   ROLES    AGE   VERSION
97 | docker-desktop   Ready    master   45d   v1.14.8
98 | 
99 | ```
100 | If these both work, you should be ready to go.
101 | 
102 | 
103 | #### Optional Pre-reqs (all OS's)
104 | ##### Install Visual Studio Code
105 | 
106 | You will be editing YAML files and viewing Python code during the course of this exercise. You can use any text editor, but Visual Studio Code is recommended.
107 | 
108 | [Download and install VS Code](https://code.visualstudio.com/)
109 | 
110 | 
111 | ##### Install the JSON Formatter Chrome Extension
112 | 
113 | This is a useful, but not required, Chrome extension for viewing JSON output in your browser.
114 | 
115 | [Install using a Chrome browser](https://chrome.google.com/webstore/detail/json-formatter/bcjindcccaagfpapjjmafapmmgkkhgoa)
116 | 
117 | ---
118 | 
119 | # ~~ Labs ~~
120 | 
121 | 1. [Intro to Docker and Containers](labs/docker_intro.md)
122 | 
123 | 1. [Docker with Cloud Native App](labs/docker_cloud_app.md)
124 | 
125 | 1. [Kubernetes Intro](labs/kube_deploy_cloud_app.md)
126 | 
127 | 1. [Kubernetes Infrastructure as Code](labs/kube_infra_as_code.md)
128 | 
129 | 1. [Docker for Tooling](labs/docker_tools_container.md)
130 | 
131 | 1. [Kubernetes Environment Variables](labs/kube_env_vars.md)
132 | 
133 | 1. [Kubernetes Override Starting Command](labs/kube_override_cmd.md)
134 | 
135 | 1. [Kubernetes Ingress](labs/kube_setup_ingress.md)
136 | 
137 | 1. [Kubernetes Readiness](labs/kube_readiness.md)
138 | 
139 | 1. [Kubernetes Readiness Part 2](labs/kube_readiness_2.md)
140 | 
141 | 
142 | ---
143 | 
144 | 
145 | 
146 | 
147 | # ~~ Conclusion ~~
148 | 
149 | This exercise has introduced you to some of the most commonly used features of Kubernetes for configuring and hosting applications using declarative, Infrastructure as Code techniques. Even what we've shown here only begins to scratch the surface. Here are other topics you'll want to dig deeper on as you continue your Kubernetes journey.
150 | 
151 | * [Managing Compute Resources for Containers](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
152 | * [Declaring and using ConfigMaps to configure a Deployment](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/)
153 | * [Declaring and using Secrets to configure a Deployment](https://kubernetes.io/docs/concepts/configuration/secret/)
154 | 
--------------------------------------------------------------------------------
/labs/docker_intro.md:
--------------------------------------------------------------------------------
1 | # ~~ The World of Containers ~~
2 | 
3 | ### Simple Docker Run
4 | 
5 | We are going to jump right in and make sure you can run a Docker container locally by using the [Docker run command](https://docs.docker.com/engine/reference/commandline/run/).
6 | 
7 | The typical usage of the **docker run** command is as follows:
8 | ```
9 | docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
10 | ```
11 | Notice the "**IMAGE**" refers to a pre-defined docker image that will be pulled down to run.
12 | 
13 | When we say pulled down, it means the docker image must be on your machine to run; if it doesn't exist locally, then it will be downloaded/pulled before it can be run as a container.
14 | 
15 | Enough talking, let's run a container. Issue this command:
16 | 
17 | ```
18 | docker run hello-world:latest
19 | 
20 | ```
21 | This will cause Docker to pull down the latest version of the hello-world image and then run it.
22 | You should see something like this:
23 | 
24 | ```
25 | C:\Users\tarltob1>docker run hello-world:latest
26 | Unable to find image 'hello-world:latest' locally
27 | latest: Pulling from library/hello-world
28 | 1b930d010525: Pull complete
29 | Digest: sha256:4fe721ccc2e8dc7362278a29dc660d833570ec2682f4e4194f4ee23e415e1064
30 | Status: Downloaded newer image for hello-world:latest
31 | 
32 | Hello from Docker!
33 | This message shows that your installation appears to be working correctly.
34 | 
35 | To generate this message, Docker took the following steps:
36 |  1. The Docker client contacted the Docker daemon.
37 |  2. The Docker daemon pulled the "hello-world" image from the Docker Hub.
38 |     (amd64)
39 |  3. The Docker daemon created a new container from that image which runs the
40 |     executable that produces the output you are currently reading.
41 |  4. The Docker daemon streamed that output to the Docker client, which sent it
42 |     to your terminal.
43 | 
44 | To try something more ambitious, you can run an Ubuntu container with:
45 |  $ docker run -it ubuntu bash
46 | 
47 | Share images, automate workflows, and more with a free Docker ID:
48 |  https://hub.docker.com/
49 | 
50 | For more examples and ideas, visit:
51 |  https://docs.docker.com/get-started/
52 | ```
53 | 
54 | The output from this command actually tells you what it did. When it says that it pulled the "hello-world" image from Docker Hub, it's referring to [hub.docker.com](https://hub.docker.com/) which is the main online repository for docker images.
55 | 
56 | Speaking of images, let's see what images you have installed locally.
57 | 
58 | Run the **Docker images** command to see the list of images on your machine.
59 | 
60 | After running this command, you should see something like this:
61 | ![Images](/labs/images/Images.png)
62 | 
63 | Notice the **hello-world** image that was downloaded when you issued the run command.
64 | 
65 | The K8s and docker images in the picture above are from the local Kubernetes installation. These K8s and Docker images were downloaded when you enabled Kubernetes on Docker Desktop.
66 | 
67 | ### Run NGINX
68 | 
69 | Now we are going to run a simple [NGINX](http://nginx.org/en/) container that can be used to host web content.
70 | 
71 | We are going to use the **docker run** command again but use a different image (the nginx image) and also specify a port to expose the running container on.
72 | 
73 | Run this command:
74 | 
75 | ```
76 | docker run -p 8080:80 nginx
77 | 
78 | ```
79 | This will start a container running NGINX. NGINX listens on port 80 by default, so we are telling Docker to expose the internal port 80 to our local port 8080. Notice the -p for publishing ports follows the syntax of `<host port>:<container port>`.
80 | 
81 | After running this command you should be able to open up a browser and go to http://localhost:8080 and see the nginx welcome screen.
82 | 
83 | ![NGINX Welcome](/labs/images/nginxWelcome.png)
84 | 
85 | After hitting this in the browser, you should see a log in the command prompt where you ran the container that shows a request was received.
86 | By default when you run a container with the docker run command, the standard out goes to your console. To break out and get back to your command prompt you need to hit "CTRL + C" or similar break command.
87 | 
88 | Do this now and break back to the command prompt.
89 | NOTE: On Windows, this usually does not kill the running container. On Unix or macOS, this will kill the running container.
90 | 
91 | Run a [docker ps](https://docs.docker.com/engine/reference/commandline/ps/) to see the currently running containers.
92 | 
93 | You should see something like this:
94 | ```
95 | D:\workspaces\DockerKubesDojo>docker ps
96 | CONTAINER ID   IMAGE   COMMAND                  CREATED         STATUS         PORTS                  NAMES
97 | 7e65845aa3ee   nginx   "nginx -g 'daemon of…"   4 minutes ago   Up 4 minutes   0.0.0.0:8080->80/tcp   recursing_khayyam
98 | 
99 | ```
100 | Note the **CONTAINER ID** is the unique ID for your running container. It's crucial to know if you want to do anything later with your running container... like stopping it.
101 | 
102 | If you actually want to stop or kill the running container, you need to use the [docker stop](https://docs.docker.com/engine/reference/commandline/stop/) or the [docker kill](https://docs.docker.com/engine/reference/commandline/kill/) command. I usually just kill them all and let the Docker gods sort em out.
103 | 
104 | However, we will be more humane and issue a docker stop command.
NOTE that you must end the command with the container ID.
105 | 
106 | ```
107 | docker stop <CONTAINER ID>
108 | ```
109 | After this, re-run your **docker ps** command and notice that the container should no longer be running.
110 | 
111 | 
112 | ### STRETCH GOAL
113 | 
114 | Your mission, should you choose to accept it, is to run an nginx container that hosts an HTML file you created yourself.
115 | You will accomplish this by using the docker run command as you did above to create an nginx container, but this time you will use a [volume mount](https://docs.docker.com/storage/volumes/) to mount a local **folder** that contains an HTML file into the running container.
116 | 
117 | To know where to mount the file so that nginx will serve it up (and see an example), read the documentation for the nginx image. You can find it by going to http://hub.docker.com and then searching for the nginx image. Click on the official nginx image and scroll to the bottom to find the "How to use this image" section. The first example shows how to host simple static content.
118 | 
119 | So, first create your own HTML file and save it to a preferably simple/short path on your hard drive (e.g. C:\dev\dojo\hello.html).
120 | 
121 | Now you will need to run the nginx container with the '-v' option and replace the "/some/content" with the path to the **folder** containing your newly created HTML. NOTE: the example from hub.docker.com doesn't add the -p (publish port) option, so if you use it as is, you won't be able to access your nginx container... so add the "-p 8080:80" option to it along with the new volume option.
122 | 
123 | NOTE: When you run the command the first time with a volume option, Docker will prompt you to ask if you want to share the folder with Docker. So, watch for a pop-up asking you to share the folder. Also, if you are on Windows, it's best to run this command from the basic command prompt or PowerShell. Using something like Git Bash can confuse things, since it typically expects Linux-style paths.
124 | 
125 | Also, be aware of the docker run syntax...
126 | ```
127 | docker run [OPTIONS] IMAGE [COMMAND] [ARG...]
128 | 
129 | ```
130 | 
131 | 
132 | 
133 | 
134 | 
-------------------------------------------------------------------------------- /labs/kube_deploy_cloud_app.md: --------------------------------------------------------------------------------
1 | # ~~ Running in your local Kubernetes cluster ~~
2 | 
3 | Now that you know how to run a single instance, it's time to try the same in Kubernetes. Kubernetes can orchestrate and schedule your container workloads in very flexible ways, but at the end of the day it's going to invoke Docker or another [CRI compatible runtime](https://www.opencontainers.org/) to run your application in much the same way we just did. Accomplishing this task will get us well on our way towards completing our goals.
4 | 
5 | ## Run the cloud-native-demo image in Kubernetes
6 | 
7 | Next we want to run our application in Kubernetes.
8 | 
9 | In a console window, run the following to run the `cloud-native-demo` image as a container in Kubernetes:
10 | 
11 | ```bash
12 | kubectl run cn-demo --image=cloud-native-demo:1
13 | ```
14 | 
15 | Now you can use a **kubectl get** command to see what was created. In this case, we want to see the running container, which in Kubernetes is always wrapped in a Pod. So, issue the following command to see all the current pods.
16 | 
17 | ```
18 | kubectl get pods
19 | ```
20 | 
21 | You should see a Pod that starts with the name `cn-demo-` show up.
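TIP: In the steps that follow you'll be running **kubectl get pods** over and over to see what changed. If you'd rather watch the changes stream in live, kubectl's `--watch` flag does exactly that (press CTRL+C to stop watching):

```bash
kubectl get pods --watch
```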
Behind the scenes, Kubernetes is retrieving the image and executing it using Docker (in the case of the Kubernetes instance provided by Docker Desktop). If all goes well, you should eventually see that the Pod is up and running, with output similar to the following:
22 | 
23 | ```bash
24 | NAME                           READY   STATUS    RESTARTS   AGE
25 | pod/cn-demo-759dc65498-j2mm6   1/1     Running   0          22s
26 | ```
27 | 
28 | ## Play with the new Deployment
29 | 
30 | It's time to explore some behavior and terminology of Kubernetes. First up is the [Pod](https://kubernetes.io/docs/concepts/workloads/pods/pod/). We won't go too deep on all the specifics, but a Pod is where the configuration for your running container resides. It turns out you can run multiple containers in a single Pod, but that is outside the scope of this exercise. To learn more about how and why you would want to do this, search around for the Sidecar and Ambassador patterns.
31 | 
32 | Let's see what happens when we delete a Pod (which is one way to brute-force simulate a failed container). Run the following command, filling in the `cn-demo-***` with the unique name Kubernetes assigned your Pod, and then keep running the **kubectl get pods** command to see what happens to your pod. Things will happen fast!
33 | 
34 | ```bash
35 | kubectl delete pod cn-demo-***
36 | ```
37 | 
38 | 
39 | You deleted **that** specific Pod, but then another one with a new name showed up in its place. That Pod is running the exact same image, and something extra in the Kubernetes cluster is making sure at least 1 of your Pods is running. That **extra something** is called a [Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/), and it is always working to ensure the number of Pods running for a given `Deployment` matches its configured `replica` count. The `kubectl run ...` command you just ran created a `Deployment` for you even though you didn't ask for one.
40 | 
41 | In fact, you can see it by running **kubectl get deploy**.
42 | 
43 | This should show the `cn-demo` deployment happily reporting that `1/1` or "1 of 1" Pods are currently available.
44 | ```
45 | D:\workspaces\DockerKubesDojo>kubectl get deploy
46 | NAME      READY   UP-TO-DATE   AVAILABLE   AGE
47 | cn-demo   1/1     1            1           9m10s
48 | 
49 | ```
50 | 
51 | If you're thinking "a Deployment must be involved in how Horizontal Scaling works", you'd be right. Let's attempt that next.
52 | 
53 | ```bash
54 | kubectl scale deployment cn-demo --replicas 3
55 | ```
56 | 
57 | Run your **kubectl get pods** again. Do you see 3 Pods for `cn-demo-***` running? Your `Deployment` should report `3/3` after some time as well if you run **kubectl get deploy** again.
58 | 
59 | Try some other scenarios to see how Kubernetes behaves:
60 | * Delete one of the newly created `Pods` with `kubectl delete ...`
61 | * Scale the `Deployment` in and out, using different numbers for `--replicas`
62 | * Do you notice anything interesting about which Pods Kubernetes decides to keep when you scale in? Look at how long each Pod has been running.
63 | * If you need to delete the **deployment** altogether (which will remove the pods as well), you can use this command:
64 | ```
65 | kubectl delete deploy cn-demo
66 | 
67 | ```
68 | NOTE: If you delete it, you will have to restart it for the next labs.
69 | Remember, the command to start it is:
70 | ```bash
71 | kubectl run cn-demo --image=cloud-native-demo:1
72 | ```
73 | 
74 | ### Stretch Goal
75 | 
76 | Get up and stretch!!! Just kidding... ok, maybe that's not a bad idea...
But to play more with Kubernetes, let's see if we can learn how to connect to one of the running containers and get a shell so we can poke around and see the files that are in our running container. What we will do is use the **kubectl exec** command to get a bash shell into one of our pods.
77 | 
78 | So, make sure you have at least one pod running, and then use the kubectl exec command to get a shell into the container.
79 | The format of the kubectl exec command is like this:
80 | ```bash
81 | kubectl exec -it <pod-name> -- /bin/bash
82 | ```
83 | 
84 | NOTE: Look at the [official documentation here](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#exec) to see what the **'-it'** is doing.
85 | 
86 | Once you get a bash prompt, issue a **pwd** command to see what the current working directory is.
87 | Do you know why this is the working directory?
88 | 
89 | Look again at the Dockerfile you used to create this image.
90 | 
91 | Also, do an **ls** to see all the files that were copied into this working directory, and then figure out how they got there.
92 | 
93 | Now, try to actually create a new file in that directory. You can just do a simple echo command like this:
94 | ```bash
95 | echo "hello" > hello.txt
96 | ```
97 | After creating the file, exit out of the shell session by simply typing **exit**.
98 | 
99 | Now, reconnect to the pod again and make sure your file is still there. (It should be.)
100 | 
101 | Now, let's delete the pod and let the deployment spin up a new pod.
102 | 
103 | When your new pod finishes starting, exec into it and see if your file is still there.
104 | Can you figure out why or why not?
105 | 
106 | 
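Here's the whole experiment as one condensed sketch (replace `<pod-name>` with the current name shown by **kubectl get pods**; it changes every time the Deployment replaces the Pod):

```bash
kubectl exec -it <pod-name> -- /bin/bash    # shell into the running container
pwd                                         # why is this /usr/src/app?
echo "hello" > hello.txt                    # write a file inside the container
exit                                        # leave the shell
kubectl delete pod <pod-name>               # the Deployment creates a replacement
kubectl exec -it <pod-name> -- /bin/bash    # use the NEW pod's name here
ls                                          # is hello.txt still there?
```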
107 | <details><summary>Click to expand for Answers</summary>
108 | 
109 | #### Explanation
110 | 
111 | - Why is the working directory "/usr/src/app"?
112 |   - Because the [Dockerfile on line 3](https://github.com/javaplus/DockerKubesDojo/blob/42f4756afe04e07389f476a160199d7a2c12cc73/Dockerfile#L3) sets the "WORKDIR" to "/usr/src/app"
113 | - What does the **'-it'** do with the exec command?
114 |   - The 'i' says to pass the STDIN of your command prompt to the container
115 |   - The 't' says the STDIN is a TTY
116 |   - Most think of **'-it'** simply as "interactive terminal", because that's what it produces.
117 | 
118 | - Why did the hello.txt file disappear after deleting the pod and letting the deployment create a new one?
119 |   - Because the container in the Pod is an instance of the image you specify. When we added the file, we added it to that specific running instance... think of it like modifying temporary memory or modifying an instance of a class. Deleting the Pod deletes that instance of the container. The deployment then starts up a new Pod, which creates a new instance of the container from the image we specified. The only thing that will be in the running container is what we specified in the image definition (assuming we don't give special commands at start up).
120 | 
121 | 
122 | </details>
123 | 
124 | 
125 | 
-------------------------------------------------------------------------------- /labs/docker_cloud_app.md: --------------------------------------------------------------------------------
1 | # ~~ The Mission ~~
2 | 
3 | You and your team are on a mission to begin rearchitecting your monolithic application towards a microservice-based architecture. While your team has some experience working with containers, they've asked for your help in realizing the full potential of Cloud Native patterns when it comes to the design and operational concerns of your new architecture. You did your homework on [12-factor apps](https://12factor.net/) and the feature set of [Kubernetes](https://kubernetes.io/docs/concepts/) and have given your team a set of requirements to produce a first cut of a microservice which will serve as the foundational codebase for all your microservices.
4 | 
5 | The features you've asked for in this foundational application include:
6 | 
7 | * It follows the 12-factor Application [Configuration](https://12factor.net/config) principle. Environmental configuration in this application will be read from Environment Variables.
8 | * It follows the 12-factor Application [Stateless Process](https://12factor.net/processes) principle. It is completely stateless and scales horizontally.
9 | * All dependencies are declared and ship with the application as an OCI compatible image.
10 | * The application supports Infrastructure as Code principles to enable a Continuous Delivery pipeline.
11 | * The application must expose information about its health in order for Kubernetes to know it's running and can accept traffic.
12 | 
13 | And the team has delivered. They have created an [OCI compatible image](https://www.opencontainers.org/) and have already loaded it into the [Docker Trusted Registry](Change ME!). They've given you the following information on how it behaves and the default URL endpoints it exposes.
14 | 
15 | * It's a Python application with a dependency on a Redis instance. The application is configurable through command line switches, and the documentation for them can be found by running `python app.py --help`.
16 | * When running, it listens for HTTP connections on port `5000` by default. This is configurable.
17 | * The following URL endpoints are available:
18 |   * `/`
19 |     * Returns a JSON message with information about the application and its configuration.
20 |   * `/counter`
21 |     * Returns a count of how many times the `/counter` URL has been accessed for this particular application. The key for the count is the `HOSTNAME` environment variable for the running application.
22 |   * `/counter/reset`
23 |     * Resets the counter being returned by `/counter` to `0`.
24 |   * `/live`
25 |     * Returns a `200` HTTP status code if the application is running. Useful for a [Kubernetes liveness probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/).
26 |   * `/live/{delay}`
27 |     * Introduces a `delay`, in seconds, for the HTTP response of the `/live` endpoint. The team left this in for you to validate your liveness probes are configured correctly for when this endpoint doesn't respond in a timely manner (which could indicate the service is not live anymore).
28 |   * `/ready`
29 |     * Similar to the `/live` endpoint, but this one goes the extra mile and makes sure the service has a healthy connection to the backing Redis service.
This is useful for [Kubernetes readiness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/), which will not send network traffic to the service until it indicates it's ready.
30 | 
31 | You're ready to kick the tires on this service to get a feel for it, with an end goal of creating the Kubernetes manifests that can host the application. The manifests, in YAML format, will live alongside this codebase and augment it with the foundational Configuration as Code needed to run it in any Kubernetes cluster.
32 | 
33 | ---
34 | 
35 | ### Build and run the cloud native app
36 | 
37 | Before you even think about running this in Kubernetes, you want to run a single instance of it using your local Docker Desktop instance. While a Kubernetes cluster will ultimately run your application container reliably at scale, it all starts with knowing how to build and run a single container using the Docker runtime.
38 | 
39 | ## Clone this repository
40 | 
41 | You'll be working with this repository and the configuration contained within. Clone it to a location of your choosing and open the directory in VS Code.
42 | 
43 | > TIP: Run the following command from a directory with a short path and no spaces in it. On Windows, a root directory of `C:\devl\ws` is a good convention. On MacOS or Linux, this author uses a convention of `$HOME/code`.
44 | 
45 | ```bash
46 | git clone https://github.com/javaplus/DockerKubesDojo.git
47 | ```
48 | 
49 | After cloning the repo locally, open a command prompt to the root of the project where the Dockerfile resides.
50 | 
51 | Look at the Dockerfile:
52 | 
53 | ```
54 | FROM python:3
55 | 
56 | WORKDIR /usr/src/app
57 | 
58 | COPY requirements.txt ./
59 | RUN pip install --no-cache-dir -r requirements.txt
60 | 
61 | LABEL org.opencontainers.image.title="cloud-native-demo"
62 | 
63 | COPY . .
64 | 
65 | CMD [ "python", "./app.py" ]
66 | ```
67 | 
68 | This Dockerfile starts from a pre-existing image that has Python installed, copies the local source code and requirements into the image, and declares that the command to run when the container starts is "python ./app.py". In other words, it starts the Python app.
69 | 
70 | These are the instructions that tell Docker how to build this image. We will now use this file to create a Docker image with our app.
71 | 
72 | Run the docker build command like this:
73 | 
74 | ```
75 | 
76 | docker build -t cloud-native-demo:1 .
77 | 
78 | ```
79 | NOTE: There is a period at the end of that command and it is important!
80 | 
81 | The -t tells Docker to tag this image with the name "cloud-native-demo" and create version "1". The '.' at the very end tells Docker where to find the Dockerfile. In our case, it's in the current directory, thus the '.'.
82 | 
83 | You should see something like this (NOTE: the output below is abbreviated):
84 | 
85 | ```
86 | D:\workspaces\DockerKubesDojo>docker build -t cloud-native-demo:1 .
87 | Sending build context to Docker daemon  118.3kB
88 | Step 1/7 : FROM python:3
89 | 3: Pulling from library/python
90 | 8f0fdd3eaac0: Pull complete
91 | ...
92 | 644b4ceca849: Pull complete
93 | 50f0ac11639a: Pull complete
94 | Digest: sha256:58666f6a49048d737eb24478e8dabce32774730e2f2d0803911a2c1f61c1b805
95 | Status: Downloaded newer image for python:3
96 |  ---> 038a832804a0
97 | Step 2/7 : WORKDIR /usr/src/app
98 |  ---> Running in baed9580915a
99 | Removing intermediate container baed9580915a
100 |  ---> e1dda44d0516
101 | Step 3/7 : COPY requirements.txt ./
102 |  ---> 378440b0c12f
103 | Step 4/7 : RUN pip install --no-cache-dir -r requirements.txt
104 |  ---> Running in 665a20dc436e
105 | Step 6/7 : COPY . .
106 |  ---> bff7b9c68fd1
107 | Step 7/7 : CMD [ "python", "./app.py" ]
108 |  ---> Running in a4cdc2564677
109 | Removing intermediate container a4cdc2564677
110 |  ---> ef5bc3de4d4f
111 | Successfully built ef5bc3de4d4f
112 | Successfully tagged cloud-native-demo:1
113 | SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. It is recommended to
114 | double check and reset permissions for sensitive files and directories.
115 | ```
116 | Now if you run a **docker images** command, you should see your newly created image:
117 | ```
118 | D:\workspaces\DockerKubesDojo>docker images
119 | REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
120 | cloud-native-demo   1                   ef5bc3de4d4f        17 minutes ago      943MB
121 | nginx               latest              f7bb5701a33c        5 days ago          126MB
122 | python              3                   038a832804a0        5 days ago          932MB
123 | 
124 | ```
125 | 
126 | 
127 | ## Run the cloud-native-demo image and expose its port
128 | 
129 | Now we are ready to run our newly created image. To do that, we go back to the docker run command you should be familiar with.
130 | 
131 | > HINT: The string `cloud-native-demo:1` captures the coordinates of the Docker/OCI image. It follows the pattern `{hostname to retrieve image}/{repository name}/{image name}:{tag}`. Since we have it locally, we don't specify a hostname to retrieve the image.
132 | 
133 | > HINT: The `--rm` just cleans up any remaining bits after this application runs. It's completely stateless, so we don't need to remember anything about its execution in this scenario.
134 | 
135 | > HINT: The `-p` switch exposes the port the container listens on to your localhost interface.
136 | 
137 | ```bash
138 | docker run --rm -it -p 5000:5000 cloud-native-demo:1
139 | ```
140 | 
141 | ## Validate the application container is running
142 | 
143 | In your browser, navigate to the URL [http://localhost:5000](http://localhost:5000). You should see a JSON response with some information about the running application. If not, double check the `docker run ...` command you issued and look for any errors in the console output.
144 | 
145 | If it's working, celebrate! You successfully built and ran your own image!
146 | 
147 | ## Stop the container
148 | 
149 | In the window where you issued the `docker run ...` command, type `CTRL+C` to stop the container.
150 | Previously, hitting `CTRL+C` in the command prompt wouldn't stop the container. However, if you run the **docker ps** command now, you shouldn't see any containers running. This is because this time we ran the container with `-it` (so your `CTRL+C` actually reaches the process in the container) and with `--rm` (which removes the container as soon as it exits).
151 | 
152 | ## Stretch Goal!!!!!!
153 | 
154 | Want more practice creating your own Docker images??? In the previous stretch goal, we had you run an nginx container that mounted a local folder that allowed you to serve up your own custom HTML file.
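As a refresher, that volume-mount approach looked roughly like this (the local folder `C:\dev\dojo` is just an example path; use whatever folder holds your HTML file):

```
docker run -p 8080:80 -v C:\dev\dojo:/usr/share/nginx/html nginx
```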
We want to end up with the same result, but this time without a file mount. Instead of using a file mount, create a Docker image that has your custom HTML file packaged in it.
155 | 
156 | **HINTS**
157 | - Create a Dockerfile that starts with the base of nginx (that is, **FROM nginx**)
158 | - Use the COPY command to copy your custom HTML file into the image. (The destination inside the image can be determined from the nginx "How to use this image" section on [hub.docker.com](https://hub.docker.com/_/nginx) or by looking at your volume mount location from the previous Stretch Goal.)
159 | - If you don't specify a CMD, your image will use the one from the base image... which is what you want in this case.
160 | - Ultimately, when finished building your image, you should be able to do a simple docker run with only the "-p 8080:80" option to publish your port and your custom image name.
161 | 
162 | 
163 | <details><summary>Click to expand More Hints/Spoilers</summary>
164 | 
165 | #### Dockerfile
166 | - Should start with **FROM nginx**
167 | - Should have a COPY command to COPY your HTML file to /usr/share/nginx/html
168 | - Can build with **docker build -t mynginx:1 .**
169 | #### Running the container
170 | - Run command should be **docker run -p 8080:80 mynginx:1** (assuming you tag it with mynginx:1)
171 | 
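Putting those hints together, a minimal Dockerfile sketch (this assumes your page is saved as `hello.html` next to the Dockerfile; copying it in as `index.html` lets nginx serve it at http://localhost:8080/):

```
FROM nginx

# Overwrite nginx's default welcome page with your custom page
COPY hello.html /usr/share/nginx/html/index.html
```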
172 | </details>
--------------------------------------------------------------------------------