├── LICENSE
├── README.md
├── ch10
│   └── code
│       ├── metal-lb
│       │   ├── README.md
│       │   └── metal-lb-config.yaml
│       ├── nginx
│       │   └── README.md
│       ├── pod-with-dns.yaml
│       └── social_graph
│           ├── configmap.yaml
│           ├── db.yaml
│           ├── ingress.yaml
│           └── social-graph-manager.yaml
├── ch12
│   └── code
│       ├── fission
│       │   └── yeah.py
│       ├── knative
│       │   ├── service-v2.yaml
│       │   └── service.yaml
│       └── openfass
│           ├── .gitignore
│           ├── openfaas-go.yml
│           └── openfaas-go
│               ├── go.mod
│               └── handler.go
├── ch13
│   └── code
│       ├── jaeger.yaml
│       └── now-pod.yaml
├── ch14
│   └── code
│       ├── httpbin-service.yaml
│       ├── httpbin-v1.yaml
│       ├── httpbin-v2.yaml
│       ├── logging-stack.yaml
│       └── tracing.yaml
├── ch15
│   └── code
│       ├── .python-version
│       ├── __pycache__
│       │   └── k.cpython-39.pyc
│       ├── candy-crd.yaml
│       ├── candy-with-flavor-crd.yaml
│       ├── candy-with-unknown-fields-crd.yaml
│       ├── chocolate-with-finalizers.yaml
│       ├── chocolate.yaml
│       ├── create_nginx_deployment.py
│       ├── custom_scheduler.py
│       ├── gummy-bear.yaml
│       ├── k.py
│       ├── kubectl-show-stale_replica_sets
│       ├── nginx-deployment.yaml
│       ├── nginx-pod.json
│       ├── some-pod-manual-scheduling.yaml
│       ├── some-pod-with-custom-scheduler.yaml
│       ├── some-pod.yaml
│       └── watch_demo.py
├── ch16
│   └── code
│       ├── disallow-some-roles-policy.yaml
│       ├── disallow-some-services-policy.yaml
│       ├── exclude-services-namespace-policy.yaml
│       ├── mutate-image-pull-policy.yaml
│       ├── set-request-limit-ratio.yaml
│       ├── set-request-limit-ratio2.yaml
│       ├── some-cluster-role.yaml
│       ├── some-pod-with-only-resource-requests.yaml
│       ├── some-pod.yaml
│       └── tests
│           ├── kyverno-test.yaml
│           ├── test-service-bad-name.yaml
│           ├── test-service-bad-namespace.yaml
│           └── test-service-ok.yaml
├── ch17
│   └── code
│       ├── cluster-config.yaml
│       ├── deployment-bad-readiness-probe.yaml
│       ├── deployment-bad-startup-probe.yaml
│       ├── deployment-err-image-pull.yaml
│       ├── deployment-hung-init-container.yaml
│       ├── deployment-pending-namespace-quota.yaml
│       ├── deployment-pending-pod-requests.yaml
│       ├── deployment-scheduling-gated.yaml
│       ├── hpa.yaml
│       ├── hpa_converted.yaml
│       ├── pod-run-container-error.yaml
│       └── trouble-ns-quota.yaml
├── ch2
│   └── code
│       ├── kind-ha-multi-node-config.yaml
│       └── kind-multi-node-config.yaml
├── ch3
│   └── code
│       ├── eks-cluster-autoscaler.yaml
│       ├── etcd-cluster.yaml
│       ├── helm-rbac.yaml
│       └── nginx-deployment.yaml
├── ch4
│   └── code
│       ├── cool-pod.yaml
│       ├── custom-namespace.yaml
│       ├── custom-service-account.yaml
│       ├── deny-all-network-policy.yaml
│       ├── deny-egress-network-policy.yaml
│       ├── network-policy.yaml
│       └── pod-with-secret.yaml
├── ch5
│   └── code
│       ├── admin-user.yaml
│       ├── base
│       │   ├── hue-learn.yaml
│       │   └── kustomization.yaml
│       ├── cron-job.yaml
│       ├── factorial-job.yaml
│       ├── hue-collect-proxy-ds.yaml
│       ├── hue-finance-deployment.yaml
│       ├── hue-fitness-pod.yaml
│       ├── hue-learn-deployment-0.4.yaml
│       ├── hue-learn-deployment.yaml
│       ├── hue-learn
│       │   └── Dockerfile
│       ├── hue-music-pod.yaml
│       ├── hue-reminders-deployment-with-pod-affinity.yaml
│       ├── hue-reminders-deployment-with-spread-contraitns.yaml
│       ├── hue-reminders-deployment.yaml
│       ├── hue-reminders-service.yaml
│       ├── hue-reminders
│       │   ├── Dockerfile
│       │   └── main.go
│       ├── overlays
│       │   ├── production
│       │   │   ├── kustomization.yaml
│       │   │   └── namespace.yaml
│       │   └── staging
│       │       ├── hue-learn-patch.yaml
│       │       ├── kustomization.yaml
│       │       └── namespace.yaml
│       ├── parallel-job.yaml
│       ├── restricted-namespace.yaml
│       ├── test-labels.yaml
│       ├── trouble-deployment-us-central1.yaml
│       ├── trouble-deployment-us-west2.yaml
│       ├── trouble-service.yaml
│       └── trouble-shooter.yaml
├── ch6
│   └── code
│       ├── dir-persistent-volume-claim.yaml
│       ├── dir-persistent-volume.yaml
│       ├── hue-global-listener
│       │   ├── Dockerfile
│       │   └── build.sh
│       ├── hue-job-scheduler
│       │   ├── Dockerfile
│       │   └── build.sh
│       ├── hue-scheduler-in-memory.yaml
│       ├── hue-scheduler.yaml
│       ├── local-persistent-volume-claim.yaml
│       ├── local-storage-class.yaml
│       ├── local-volume.yaml
│       ├── pod-with-csi-ephemeral-volume.yaml
│       ├── pod-with-generic-ephemeral-volume.yaml
│       ├── pod-with-local-claim.yaml
│       ├── rook-cluster.yaml
│       ├── shell-pod.yaml
│       ├── shell-pod2.yaml
│       └── some-persistent-volume-claim.yaml
├── ch7
│   └── code
│       ├── cassandra-headless-service.yaml
│       ├── cassandra-stateful-set.yaml
│       ├── db-config-map.yaml
│       ├── nginx-headless-service.yaml
│       ├── nginx-stateful-set.yaml
│       └── pod-with-db.yaml
├── ch8
│   └── code
│       ├── bash-loop-deployment.yaml
│       ├── compute-quota.yaml
│       ├── hue-reminders-deployment.yaml
│       ├── hue-reminders-hpa.yaml
│       ├── limits.yaml
│       ├── nginx-deployment-with-resources.yaml
│       ├── nginx-deployment.yaml
│       ├── nginx-hpa.yaml
│       ├── object-count-quota.yaml
│       └── priority-class.yaml
└── ch9
    └── code
        ├── cool-chart
        │   ├── .helmignore
        │   ├── Chart.lock
        │   ├── Chart.yaml
        │   ├── templates
        │   │   ├── NOTES.txt
        │   │   ├── _helpers.tpl
        │   │   ├── deployment.yaml
        │   │   ├── hpa.yaml
        │   │   ├── ingress.yaml
        │   │   ├── service.yaml
        │   │   ├── serviceaccount.yaml
        │   │   └── tests
        │   │       └── test-connection.yaml
        │   └── values.yaml
        └── food-chart
            ├── .helmignore
            ├── Chart.yaml
            ├── templates
            │   └── config-map.yaml
            └── values.yaml
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2022 Packt
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Mastering Kubernetes 4th Edition
2 |
3 | This is the code repository for [Mastering Kubernetes, Fourth Edition](https://www.packtpub.com/product/kubernetes-basics-and-beyond-fourth-edition/9781804611395), published by Packt.
4 |
5 | The following chapters have no code samples:
6 | - Chapter 1
7 | - Chapter 11
8 | - Chapter 18
9 |
10 | **Dive into Kubernetes and learn how to create and operate world-class cloud-native systems**
11 |
12 | ## About the book
13 |
14 | The fourth edition of the bestseller Mastering Kubernetes includes the most recent tools and code to enable you to learn the latest features of Kubernetes 1.25. This book contains a thorough exploration of complex concepts and best practices to help you master the skills of designing and deploying large-scale distributed systems on Kubernetes clusters.
15 |
16 | You’ll learn how to run complex stateless and stateful microservices on Kubernetes, including advanced features such as horizontal pod autoscaling, rolling updates, resource quotas, and persistent storage backends. In addition, you’ll understand how to utilize serverless computing and service meshes.
17 |
18 | Further, two new chapters have been added. “Governing Kubernetes” covers the problem of policy management, how admission control addresses it, and how policy engines provide a powerful governance solution. “Running Kubernetes in Production” shows you what it takes to run Kubernetes at scale across multiple cloud providers, multiple geographical regions, and multiple clusters, and it also explains how to handle topics such as upgrades, capacity planning, dealing with cloud provider limits/quotas, and cost management.
19 |
20 | By the end of this Kubernetes book, you’ll have a strong understanding of, and hands-on experience with, a wide range of Kubernetes capabilities.
21 |
22 | ## What you will learn
23 |
24 | - Learn how to govern Kubernetes using policy engines
25 | - Learn what it takes to run Kubernetes in production and at scale
26 | - Build and run stateful applications and complex microservices
27 | - Master Kubernetes networking with services, Ingress objects, load balancers, and service meshes
28 | - Achieve high availability for your Kubernetes clusters
29 | - Improve Kubernetes observability with tools such as Prometheus, Grafana, and Jaeger
30 | - Extend Kubernetes with the Kubernetes API, plugins, and webhooks
31 |
32 | ## Table of Contents
33 | ### Chapters
34 | 1. Understanding Kubernetes Architecture
35 | 2. Creating Kubernetes Clusters
36 | 3. High Availability and Reliability
37 | 4. Securing Kubernetes
38 | 5. Using Kubernetes Resources in Practice
39 | 6. Managing Storage
40 | 7. Running Stateful Applications with Kubernetes
41 | 8. Deploying and Updating Applications
42 | 9. Packaging Applications
43 | 10. Exploring Kubernetes Networking
44 | 11. Running Kubernetes on Multiple Clusters
45 | 12. Serverless Computing on Kubernetes
46 | 13. Monitoring Kubernetes Clusters
47 | 14. Utilizing Service Meshes
48 | 15. Extending Kubernetes
49 | 16. Governing Kubernetes
50 | 17. Running Kubernetes in Production
51 | 18. The Future of Kubernetes
52 |
53 | > If you feel this book is for you, get your [copy](https://www.amazon.com/Kubernetes-operate-world-class-container-native-systems/dp/1804611395) today!
54 |
55 |
56 | ### Following is what you need for this book: ###
57 | To follow the examples in each chapter, you need a recent version of Docker and Kubernetes installed on your machine, ideally Kubernetes 1.25, the version this edition targets. If your operating system is Windows 10 Professional, you can enable the built-in hypervisor (Hyper-V); otherwise, you will need to install VirtualBox and use a Linux guest OS. If you use macOS, then you're good to go.
58 |
59 | ## Know more on the Discord server
60 |
61 | You can join the Discord server for the latest updates and discussions with the community at [https://packt.link/cloudanddevops](https://packt.link/cloudanddevops)
62 |
63 | ## Download a free PDF
64 |
65 | _If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost. Simply click on the link to claim your free PDF._
66 | [https://packt.link/free-ebook/9781804611395](https://packt.link/free-ebook/9781804611395)
67 |
68 | We also provide a PDF file that has color images of the screenshots/diagrams used in this book at [https://packt.link/gXMql](https://packt.link/gXMql).
69 |
70 | ## Get to Know the Author
71 |
72 | Gigi Sayfan has been developing software for 25+ years in domains as diverse as instant messaging, morphing, chip fabrication process control, embedded multimedia applications for game consoles, brain-inspired ML, custom browser development, web services for 3D distributed game platforms, IoT sensors, virtual reality, and genomics. He has written production code in languages such as Go, Python, C, C++, C#, Java, Delphi, JavaScript, and even Cobol and PowerBuilder for operating systems such as Windows (3.11 through 7), Linux, macOS, Lynx (embedded), and Sony PlayStation. His technical expertise includes databases, low-level networking, distributed systems, containers, unorthodox user interfaces, modern web applications, and general SDLC.
73 |
--------------------------------------------------------------------------------
/ch10/code/metal-lb/README.md:
--------------------------------------------------------------------------------
1 | ## Install MetalLB on minikube
2 |
3 | ```
4 | kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.8.3/manifests/metallb.yaml
5 | ```
6 |
7 | ## Apply the config
8 |
9 | Make sure the addresses contain $(minikube ip):
10 |
11 | ```
12 | kubectl apply -f metal-lb-config.yaml
13 | ```
14 |
--------------------------------------------------------------------------------
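The README above says the address pool must contain `$(minikube ip)`. A quick way to verify that before applying the config is a short Python check along these lines (an illustrative sketch, not part of the book's code; it assumes Python 3 with PyYAML installed, `minikube` on the PATH, and CIDR-style pool entries like the one in `metal-lb-config.yaml` below):

```python
# check_pool.py - illustrative sketch (not part of the repo).
# Assumes: PyYAML installed, minikube on the PATH, CIDR-style address entries.
import ipaddress
import subprocess

import yaml

# Ask minikube for its IP; per the README, the pool addresses must contain it.
minikube_ip = ipaddress.ip_address(
    subprocess.check_output(['minikube', 'ip']).decode().strip())

# Load the ConfigMap and parse the embedded MetalLB config document.
with open('metal-lb-config.yaml') as f:
    cm = yaml.safe_load(f)
mlb_config = yaml.safe_load(cm['data']['config'])

# Check every CIDR range in every address pool.
in_pool = any(
    minikube_ip in ipaddress.ip_network(cidr, strict=False)
    for pool in mlb_config['address-pools']
    for cidr in pool['addresses'])

print(f"minikube ip {minikube_ip} is {'inside' if in_pool else 'NOT inside'} the configured address pool")
```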
/ch10/code/metal-lb/metal-lb-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | namespace: metallb-system
5 | name: config
6 | data:
7 | config: |
8 | address-pools:
9 | - name: custom-ip-pool
10 | protocol: layer2
11 | addresses:
12 | - 192.168.64.2/32
--------------------------------------------------------------------------------
/ch10/code/nginx/README.md:
--------------------------------------------------------------------------------
1 | ## Enable the ingress controller on minikube
2 |
3 | ```
4 | minikube addons enable ingress
5 | ```
--------------------------------------------------------------------------------
/ch10/code/pod-with-dns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: maker
5 | spec:
6 | selector:
7 | app: py-kube
8 | clusterIP: None # headless service
9 | ---
10 | apiVersion: v1
11 | kind: Pod
12 | metadata:
13 | name: py-kube1
14 | labels:
15 | app: py-kube
16 | spec:
17 | hostname: trouble
18 | subdomain: maker
19 | containers:
20 | - image: g1g1/py-kube:0.3
21 | command:
22 | - sleep
23 | - "9999"
24 | name: trouble
25 | ---
26 | apiVersion: v1
27 | kind: Pod
28 | metadata:
29 | name: py-kube2
30 | labels:
31 | app: py-kube
32 | spec:
33 | hostname: trouble2
34 | subdomain: maker
35 | containers:
36 | - image: g1g1/py-kube:0.3
37 | command:
38 | - sleep
39 | - "9999"
40 | name: trouble
41 |
--------------------------------------------------------------------------------
/ch10/code/social_graph/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: social-graph-manager-config
5 | namespace: default
6 | data:
7 | DELINKCIOUS_MUTUAL_AUTH: "false"
8 |
9 |
10 |
--------------------------------------------------------------------------------
/ch10/code/social_graph/db.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: social-graph-db
5 | labels:
6 | svc: social-graph
7 | app: postgres
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | svc: social-graph
13 | app: postgres
14 | template:
15 | metadata:
16 | labels:
17 | svc: social-graph
18 | app: postgres
19 | spec:
20 | containers:
21 | - name: postgres
22 | image: postgres:11.1-alpine
23 | ports:
24 | - containerPort: 5432
25 | env:
26 | - name: POSTGRES_DB
27 | value: social_graph_manager
28 | - name: POSTGRES_USER
29 | value: postgres
30 | - name: POSTGRES_PASSWORD
31 | value: postgres
32 | ---
33 | apiVersion: v1
34 | kind: Service
35 | metadata:
36 | name: social-graph-db
37 | spec:
38 | ports:
39 | - port: 5432
40 | selector:
41 | svc: social-graph
42 | app: postgres
43 |
--------------------------------------------------------------------------------
/ch10/code/social_graph/ingress.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 |   name: social-graph-manager
5 | spec:
6 |   rules:
7 |   - host: k8s.cluster
8 |     http:
9 |       paths:
10 |       - path: /
11 |         pathType: Prefix
12 |         backend:
13 |           service:
14 |             name: social-graph-manager
15 |             port:
16 |               number: 9090
17 |
--------------------------------------------------------------------------------
/ch10/code/social_graph/social-graph-manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: social-graph-manager
5 | labels:
6 | svc: social-graph
7 | app: manager
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | svc: social-graph
13 | app: manager
14 | template:
15 | metadata:
16 | labels:
17 | svc: social-graph
18 | app: manager
19 | spec:
20 | containers:
21 | - name: social-graph-manager
22 | image: g1g1/delinkcious-social-graph:0.3
23 | imagePullPolicy: Always
24 | ports:
25 | - containerPort: 9090
26 | envFrom:
27 | - configMapRef:
28 | name: social-graph-manager-config
29 | resources:
30 | requests:
31 | cpu: 100m
32 | ---
33 | apiVersion: v1
34 | kind: Service
35 | metadata:
36 | name: social-graph-manager
37 | spec:
38 | ports:
39 | - port: 9090
40 | selector:
41 | svc: social-graph
42 | app: manager
43 |
--------------------------------------------------------------------------------
/ch12/code/fission/yeah.py:
--------------------------------------------------------------------------------
1 | def main():
2 | return 'Yeah, it works!!!'
--------------------------------------------------------------------------------
/ch12/code/knative/service-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: serving.knative.dev/v1 # Current version of Knative
2 | kind: Service
3 | metadata:
4 | name: helloworld-go # The name of the app
5 | namespace: default # The namespace the app will use
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - image: gcr.io/knative-samples/helloworld-go # The URL to the image of the app
11 | env:
12 | - name: TARGET # The environment variable printed out by the sample app
13 | value: "Yeah, it still works - version 2 !!!"
--------------------------------------------------------------------------------
/ch12/code/knative/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: serving.knative.dev/v1 # Current version of Knative
2 | kind: Service
3 | metadata:
4 | name: helloworld-go # The name of the app
5 | namespace: default # The namespace the app will use
6 | spec:
7 | template:
8 | spec:
9 | containers:
10 | - image: gcr.io/knative-samples/helloworld-go # The URL to the image of the app
11 | env:
12 | - name: TARGET # The environment variable printed out by the sample app
13 | value: "Yeah, it works!!!"
14 |
--------------------------------------------------------------------------------
/ch12/code/openfass/.gitignore:
--------------------------------------------------------------------------------
1 | template
2 | build
3 |
--------------------------------------------------------------------------------
/ch12/code/openfass/openfaas-go.yml:
--------------------------------------------------------------------------------
1 | version: 1.0
2 | provider:
3 | name: openfaas
4 | gateway: http://127.0.0.1:8080
5 | functions:
6 | openfaas-go:
7 | lang: golang-http
8 | handler: ./openfaas-go
9 | image: docker.io/g1g1/openfaas-go:latest
10 |
11 |
--------------------------------------------------------------------------------
/ch12/code/openfass/openfaas-go/go.mod:
--------------------------------------------------------------------------------
1 | module handler/function
2 |
3 | go 1.18
4 |
--------------------------------------------------------------------------------
/ch12/code/openfass/openfaas-go/handler.go:
--------------------------------------------------------------------------------
1 | package function
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 |
7 | handler "github.com/openfaas/templates-sdk/go-http"
8 | )
9 |
10 | // Handle a function invocation
11 | func Handle(req handler.Request) (handler.Response, error) {
12 | var err error
13 |
14 | message := fmt.Sprintf("Body: %s", string(req.Body))
15 |
16 | return handler.Response{
17 | Body: []byte(message),
18 | StatusCode: http.StatusOK,
19 | }, err
20 | }
21 |
--------------------------------------------------------------------------------
/ch13/code/jaeger.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: jaegertracing.io/v1
2 | kind: Jaeger
3 | metadata:
4 | name: simplest
--------------------------------------------------------------------------------
/ch13/code/now-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: now
5 | spec:
6 | containers:
7 | - name: now
8 | image: g1g1/py-kube:0.3
9 | command: ["/bin/bash", "-c", "while true; do sleep 10; date; done"]
10 |
--------------------------------------------------------------------------------
/ch14/code/httpbin-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: httpbin
5 | labels:
6 | app: httpbin
7 | spec:
8 | ports:
9 | - name: http
10 | port: 8000
11 | targetPort: 80
12 | selector:
13 | app: httpbin
--------------------------------------------------------------------------------
/ch14/code/httpbin-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: httpbin-v1
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: httpbin
10 | version: v1
11 | template:
12 | metadata:
13 | labels:
14 | app: httpbin
15 | version: v1
16 | spec:
17 | containers:
18 | - image: docker.io/kennethreitz/httpbin
19 | imagePullPolicy: IfNotPresent
20 | name: httpbin
21 | command: ["gunicorn", "--access-logfile", "-", "-b", "0.0.0.0:80", "httpbin:app"]
22 | ports:
23 | - containerPort: 80
--------------------------------------------------------------------------------
/ch14/code/httpbin-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: httpbin-v2
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: httpbin
10 | version: v2
11 | template:
12 | metadata:
13 | labels:
14 | app: httpbin
15 | version: v2
16 | spec:
17 | containers:
18 | - image: docker.io/kennethreitz/httpbin
19 | imagePullPolicy: IfNotPresent
20 | name: httpbin
21 | command: ["gunicorn", "--access-logfile", "-", "-b", "0.0.0.0:80", "httpbin:app"]
22 | ports:
23 | - containerPort: 80
24 |
--------------------------------------------------------------------------------
/ch14/code/logging-stack.yaml:
--------------------------------------------------------------------------------
1 | # Logging Namespace. All below are a part of this namespace.
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: logging
6 | ---
7 | # Elasticsearch Service
8 | apiVersion: v1
9 | kind: Service
10 | metadata:
11 | name: elasticsearch
12 | namespace: logging
13 | labels:
14 | app: elasticsearch
15 | spec:
16 | ports:
17 | - port: 9200
18 | protocol: TCP
19 | targetPort: db
20 | selector:
21 | app: elasticsearch
22 | ---
23 | # Elasticsearch Deployment
24 | apiVersion: apps/v1
25 | kind: Deployment
26 | metadata:
27 | name: elasticsearch
28 | namespace: logging
29 | labels:
30 | app: elasticsearch
31 | spec:
32 | replicas: 1
33 | selector:
34 | matchLabels:
35 | app: elasticsearch
36 | template:
37 | metadata:
38 | labels:
39 | app: elasticsearch
40 | annotations:
41 | sidecar.istio.io/inject: "false"
42 | spec:
43 | containers:
44 | - image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.1.1
45 | name: elasticsearch
46 | resources:
47 | # need more cpu upon initialization, therefore burstable class
48 | limits:
49 | cpu: 1000m
50 | requests:
51 | cpu: 100m
52 | env:
53 | - name: discovery.type
54 | value: single-node
55 | ports:
56 | - containerPort: 9200
57 | name: db
58 | protocol: TCP
59 | - containerPort: 9300
60 | name: transport
61 | protocol: TCP
62 | volumeMounts:
63 | - name: elasticsearch
64 | mountPath: /data
65 | volumes:
66 | - name: elasticsearch
67 | emptyDir: {}
68 | ---
69 | # Fluentd Service
70 | apiVersion: v1
71 | kind: Service
72 | metadata:
73 | name: fluentd-es
74 | namespace: logging
75 | labels:
76 | app: fluentd-es
77 | spec:
78 | ports:
79 | - name: fluentd-tcp
80 | port: 24224
81 | protocol: TCP
82 | targetPort: 24224
83 | - name: fluentd-udp
84 | port: 24224
85 | protocol: UDP
86 | targetPort: 24224
87 | selector:
88 | app: fluentd-es
89 | ---
90 | # Fluentd Deployment
91 | apiVersion: apps/v1
92 | kind: Deployment
93 | metadata:
94 | name: fluentd-es
95 | namespace: logging
96 | labels:
97 | app: fluentd-es
98 | spec:
99 | replicas: 1
100 | selector:
101 | matchLabels:
102 | app: fluentd-es
103 | template:
104 | metadata:
105 | labels:
106 | app: fluentd-es
107 | annotations:
108 | sidecar.istio.io/inject: "false"
109 | spec:
110 | containers:
111 | - name: fluentd-es
112 | image: gcr.io/google-containers/fluentd-elasticsearch:v2.0.1
113 | env:
114 | - name: FLUENTD_ARGS
115 | value: --no-supervisor -q
116 | resources:
117 | limits:
118 | memory: 500Mi
119 | requests:
120 | cpu: 100m
121 | memory: 200Mi
122 | volumeMounts:
123 | - name: config-volume
124 | mountPath: /etc/fluent/config.d
125 | terminationGracePeriodSeconds: 30
126 | volumes:
127 | - name: config-volume
128 | configMap:
129 | name: fluentd-es-config
130 | ---
131 | # Fluentd ConfigMap, contains config files.
132 | kind: ConfigMap
133 | apiVersion: v1
134 | data:
135 | forward.input.conf: |-
136 | # Takes the messages sent over TCP
137 |     <source>
138 | type forward
139 |     </source>
140 | output.conf: |-
141 |     <match **>
142 | type elasticsearch
143 | log_level info
144 | include_tag_key true
145 | host elasticsearch
146 | port 9200
147 | logstash_format true
148 | # Set the chunk limits.
149 | buffer_chunk_limit 2M
150 | buffer_queue_limit 8
151 | flush_interval 5s
152 | # Never wait longer than 5 minutes between retries.
153 | max_retry_wait 30
154 | # Disable the limit on the number of retries (retry forever).
155 | disable_retry_limit
156 | # Use multiple threads for processing.
157 | num_threads 2
158 |     </match>
159 | metadata:
160 | name: fluentd-es-config
161 | namespace: logging
162 | ---
163 | # Kibana Service
164 | apiVersion: v1
165 | kind: Service
166 | metadata:
167 | name: kibana
168 | namespace: logging
169 | labels:
170 | app: kibana
171 | spec:
172 | ports:
173 | - port: 5601
174 | protocol: TCP
175 | targetPort: ui
176 | selector:
177 | app: kibana
178 | ---
179 | # Kibana Deployment
180 | apiVersion: apps/v1
181 | kind: Deployment
182 | metadata:
183 | name: kibana
184 | namespace: logging
185 | labels:
186 | app: kibana
187 | spec:
188 | replicas: 1
189 | selector:
190 | matchLabels:
191 | app: kibana
192 | template:
193 | metadata:
194 | labels:
195 | app: kibana
196 | annotations:
197 | sidecar.istio.io/inject: "false"
198 | spec:
199 | containers:
200 | - name: kibana
201 | image: docker.elastic.co/kibana/kibana-oss:6.1.1
202 | resources:
203 | # need more cpu upon initialization, therefore burstable class
204 | limits:
205 | cpu: 1000m
206 | requests:
207 | cpu: 100m
208 | env:
209 | - name: ELASTICSEARCH_URL
210 | value: http://elasticsearch:9200
211 | ports:
212 | - containerPort: 5601
213 | name: ui
214 | protocol: TCP
215 | ---
216 |
--------------------------------------------------------------------------------
/ch14/code/tracing.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: install.istio.io/v1alpha1
2 | kind: IstioOperator
3 | spec:
4 | meshConfig:
5 | enableTracing: true
6 | defaultConfig:
7 | tracing:
8 | sampling: 100
9 |
--------------------------------------------------------------------------------
/ch15/code/.python-version:
--------------------------------------------------------------------------------
1 | 3.10
2 |
--------------------------------------------------------------------------------
/ch15/code/__pycache__/k.cpython-39.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PacktPublishing/Mastering-Kubernetes-4th-Edition/d0f1a7362dba979568a8c2a39e10c70dded58d27/ch15/code/__pycache__/k.cpython-39.pyc
--------------------------------------------------------------------------------
/ch15/code/candy-crd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | # name must match the spec fields below, and be in the form: .
5 | name: candies.awesome.corp.com
6 | spec:
7 | # group name to use for REST API: /apis//
8 | group: awesome.corp.com
9 | # version name to use for REST API: /apis//
10 | versions:
11 | - name: v1
12 | # Each version can be enabled/disabled by Served flag.
13 | served: true
14 | # One and only one version must be marked as the storage version.
15 | storage: true
16 | schema:
17 | openAPIV3Schema:
18 | type: object
19 | properties:
20 | spec:
21 | type: object
22 | properties:
23 | flavor:
24 | type: string
25 | # either Namespaced or Cluster
26 | scope: Namespaced
27 | names:
28 | # plural name to be used in the URL: /apis///
29 | plural: candies
30 | # singular name to be used as an alias on the CLI and for display
31 | singular: candy
32 | # kind is normally the CamelCased singular type. Your resource manifests use this.
33 | kind: Candy
34 | # shortNames allow shorter string to match your resource on the CLI
35 | shortNames:
36 | - cn
--------------------------------------------------------------------------------
/ch15/code/candy-with-flavor-crd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | # name must match the spec fields below, and be in the form: .
5 | name: candies.awesome.corp.com
6 | spec:
7 | # group name to use for REST API: /apis//
8 | group: awesome.corp.com
9 | # version name to use for REST API: /apis//
10 | versions:
11 | - name: v1
12 | # Each version can be enabled/disabled by Served flag.
13 | served: true
14 | # One and only one version must be marked as the storage version.
15 | storage: true
16 | schema:
17 | openAPIV3Schema:
18 | type: object
19 | properties:
20 | spec:
21 | type: object
22 | properties:
23 | flavor:
24 | type: string
25 | additionalPrinterColumns:
26 | - name: Flavor
27 | type: string
28 | description: The flavor of the candy
29 | jsonPath: .spec.flavor
30 | - name: Age
31 | type: date
32 | jsonPath: .metadata.creationTimestamp
33 | # either Namespaced or Cluster
34 | scope: Namespaced
35 | names:
36 | # plural name to be used in the URL: /apis///
37 | plural: candies
38 | # singular name to be used as an alias on the CLI and for display
39 | singular: candy
40 | # kind is normally the CamelCased singular type. Your resource manifests use this.
41 | kind: Candy
42 | # shortNames allow shorter string to match your resource on the CLI
43 | shortNames:
44 | - cn
--------------------------------------------------------------------------------
/ch15/code/candy-with-unknown-fields-crd.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apiextensions.k8s.io/v1
2 | kind: CustomResourceDefinition
3 | metadata:
4 | # name must match the spec fields below, and be in the form: .
5 | name: candies.awesome.corp.com
6 | spec:
7 | # group name to use for REST API: /apis//
8 | group: awesome.corp.com
9 | # version name to use for REST API: /apis//
10 | versions:
11 | - name: v1
12 | # Each version can be enabled/disabled by Served flag.
13 | served: true
14 | # One and only one version must be marked as the storage version.
15 | storage: true
16 | schema:
17 | openAPIV3Schema:
18 | type: object
19 | properties:
20 | spec:
21 | type: object
22 | x-kubernetes-preserve-unknown-fields: true
23 | properties:
24 | flavor:
25 | type: string
26 | # either Namespaced or Cluster
27 | scope: Namespaced
28 | names:
29 | # plural name to be used in the URL: /apis///
30 | plural: candies
31 | # singular name to be used as an alias on the CLI and for display
32 | singular: candy
33 | # kind is normally the CamelCased singular type. Your resource manifests use this.
34 | kind: Candy
35 | # shortNames allow shorter string to match your resource on the CLI
36 | shortNames:
37 | - cn
--------------------------------------------------------------------------------
/ch15/code/chocolate-with-finalizers.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: awesome.corp.com/v1
2 | kind: Candy
3 | metadata:
4 | name: chocolate
5 | finalizers:
6 | - eat-me
7 | - drink-me
8 | spec:
9 | flavor: sweeeeeeet
10 |
11 |
--------------------------------------------------------------------------------
/ch15/code/chocolate.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: awesome.corp.com/v1
2 | kind: Candy
3 | metadata:
4 | name: chocolate
5 | spec:
6 | flavor: sweeeeeeet
7 |
8 |
--------------------------------------------------------------------------------
/ch15/code/create_nginx_deployment.py:
--------------------------------------------------------------------------------
1 | from os import path
2 |
3 | import yaml
4 | from kubernetes import client, config
5 |
6 |
7 | def main():
8 | # Configs can be set in Configuration class directly or using
9 | # helper utility. If no argument provided, the config will be
10 | # loaded from default location.
11 | config.load_kube_config()
12 |
13 | with open(path.join(path.dirname(__file__),
14 | 'nginx-deployment.yaml')) as f:
15 | dep = yaml.safe_load(f)
16 | k8s = client.AppsV1Api()
17 | dep = k8s.create_namespaced_deployment(body=dep,
18 | namespace="default")
19 | print(f"Deployment created. status='{dep.status}'")
20 |
21 |
22 | if __name__ == '__main__':
23 | main()
24 |
--------------------------------------------------------------------------------
/ch15/code/custom_scheduler.py:
--------------------------------------------------------------------------------
1 | from kubernetes import client, config, watch
2 |
3 |
4 | def schedule_pod(cli, name):
5 | target = client.V1ObjectReference()
6 | target.kind = 'Node'
7 | target.apiVersion = 'v1'
8 | target.name = 'k3d-k3s-default-agent-0'
9 | meta = client.V1ObjectMeta()
10 | meta.name = name
11 | body = client.V1Binding(metadata=meta, target=target)
12 | print('Scheduling pod:', name)
13 | cli.api_client.configuration.client_side_validation = False
14 |
15 | # There is an open bug (https://github.com/kubernetes-client/python/issues/1616)
16 | # that causes this to raise an exception even after the operation succeeds
17 |     # to work around it, just silently catch the exception
18 | try:
19 | cli.create_namespaced_binding('default', body)
20 | except:
21 | pass
22 |
23 |
24 | def main():
25 | config.load_kube_config()
26 | cli = client.CoreV1Api()
27 | print('Waiting for pending pods...')
28 | w = watch.Watch()
29 | for event in w.stream(cli.list_namespaced_pod, 'default'):
30 | o = event['object']
31 | if event['type'] != 'ADDED' or o.status.phase != 'Pending' or o.spec.scheduler_name != 'custom-scheduler':
32 | continue
33 |
34 | schedule_pod(cli, o.metadata.name)
35 |
36 |
37 | if __name__ == '__main__':
38 | main()
39 |
--------------------------------------------------------------------------------
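custom_scheduler.py above only binds pods that opt in with `schedulerName: custom-scheduler` (the YAML version of such a pod is some-pod-with-custom-scheduler.yaml further down). As a companion sketch (illustrative only, using the same kubernetes Python client and assuming the kubeconfig points at the cluster the scheduler watches), the same pod can be created from Python so the watch loop picks it up:

```python
# create_custom_scheduled_pod.py - illustrative sketch (not part of the repo).
# Assumes the kubernetes Python client is installed and the kubeconfig
# points at the same cluster custom_scheduler.py watches.
from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()

pod = client.V1Pod(
    metadata=client.V1ObjectMeta(name='some-pod-with-custom-scheduler'),
    spec=client.V1PodSpec(
        # The pod stays Pending until custom_scheduler.py binds it to a node.
        scheduler_name='custom-scheduler',
        containers=[client.V1Container(name='some-container',
                                       image='registry.k8s.io/pause:3.8')]))

v1.create_namespaced_pod(namespace='default', body=pod)
print('Pod created; waiting for the custom scheduler to bind it.')
```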
/ch15/code/gummy-bear.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: awesome.corp.com/v1
2 | kind: Candy
3 | metadata:
4 | name: gummy-bear
5 | spec:
6 | flavor: delicious
7 | texture: rubbery
8 |
9 |
10 |
--------------------------------------------------------------------------------
/ch15/code/k.py:
--------------------------------------------------------------------------------
1 | from subprocess import check_output
2 | import json
3 |
4 |
5 | def k(*args, use_json=False):
6 | cmd = ['kubectl'] + list(args)
7 | if use_json:
8 | cmd += ['-o', 'json']
9 | out = check_output(cmd).decode('utf-8')
10 | if use_json:
11 | out = json.loads(out)
12 |
13 | return out
14 |
15 |
--------------------------------------------------------------------------------
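k.py is a thin wrapper around the kubectl CLI. A short usage sketch (assuming kubectl is on the PATH and configured for a running cluster; the namespace is just an example):

```python
# Usage sketch for the k() helper in k.py (illustrative, not part of the repo).
from k import k

# Plain-text output, exactly as kubectl prints it.
print(k('get', 'pods', '-n', 'kube-system'))

# JSON output: k() appends '-o json' and returns the parsed dict.
pods = k('get', 'pods', '-n', 'kube-system', use_json=True)
for item in pods['items']:
    print(item['metadata']['name'])
```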
/ch15/code/kubectl-show-stale_replica_sets:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | import sh
3 |
4 |
5 | def main():
6 |     """Show replica sets with 0 replicas and the deployment that owns them."""
7 |     o = '-o custom-columns=NAME:.metadata.name,DEPLOYMENT:.metadata.ownerReferences[0].name,REPLICAS:.spec.replicas'
8 | all_rs = sh.kubectl.get.rs(o.split()).stdout.decode('utf-8').split('\n')
9 | all_rs = [r.split() for r in all_rs if r]
10 | results = ((name, deployment) for (name, deployment, replicas) in all_rs[1:] if replicas == '0')
11 |
12 | for name, deployment in results:
13 | print(name, deployment)
14 |
15 |
16 | if __name__ == '__main__':
17 | main()
18 |
--------------------------------------------------------------------------------
/ch15/code/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | app: nginx
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx:1.17.8
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/ch15/code/nginx-pod.json:
--------------------------------------------------------------------------------
1 | {
2 | "kind": "Pod",
3 | "apiVersion": "v1",
4 | "metadata":{
5 | "name": "nginx",
6 | "namespace": "default",
7 | "labels": {
8 | "name": "nginx"
9 | }
10 | },
11 | "spec": {
12 | "containers": [{
13 | "name": "nginx",
14 | "image": "nginx",
15 | "ports": [{"containerPort": 80}]
16 | }]
17 | }
18 | }
--------------------------------------------------------------------------------
/ch15/code/some-pod-manual-scheduling.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: some-pod-manual-scheduling
5 | spec:
6 | containers:
7 | - name: some-container
8 | image: registry.k8s.io/pause:3.8
9 | nodeName: k3d-k3s-default-agent-1
10 | #schedulerName: no-such-scheduler
--------------------------------------------------------------------------------
/ch15/code/some-pod-with-custom-scheduler.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: some-pod-with-custom-scheduler
5 | spec:
6 | containers:
7 | - name: some-container
8 | image: registry.k8s.io/pause:3.8
9 | schedulerName: custom-scheduler
--------------------------------------------------------------------------------
/ch15/code/some-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: some-pod
5 | spec:
6 | containers:
7 | - name: some-container
8 | image: registry.k8s.io/pause:3.8
--------------------------------------------------------------------------------
/ch15/code/watch_demo.py:
--------------------------------------------------------------------------------
1 | from kubernetes import client, config, watch
2 |
3 | # Configs can be set in Configuration class directly or using helper utility
4 | config.load_kube_config()
5 | v1 = client.CoreV1Api()
6 |
7 | count = 10
8 | w = watch.Watch()
9 | for event in w.stream(v1.list_namespace, _request_timeout=60):
10 | print(f"Event: {event['type']} {event['object'].metadata.name}")
11 | count -= 1
12 | if count == 0:
13 | w.stop()
14 | print('Done.')
--------------------------------------------------------------------------------
/ch16/code/disallow-some-roles-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: disallow-some-roles
5 | spec:
6 | background: false
7 | validationFailureAction: enforce
8 | rules:
9 | - name: some-rule
10 | match:
11 | all:
12 | - resources:
13 | kinds:
14 | - Service
15 | names:
16 | - "service-1"
17 | clusterRoles:
18 | - some-cluster-role
19 | validate:
20 | message: >-
21 | cluster roles: "{{ request.clusterRoles }}"
22 | deny: {}
23 |
--------------------------------------------------------------------------------
/ch16/code/disallow-some-services-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: disallow-some-services
5 | spec:
6 | validationFailureAction: enforce
7 | rules:
8 | - name: some-rule
9 | match:
10 | any:
11 | - resources:
12 | kinds:
13 | - Service
14 | names:
15 | - "service-1"
16 | - "service-2"
17 | - resources:
18 | kinds:
19 | - Service
20 | namespaces:
21 | - "ns-1"
22 | validate:
23 | message: >-
24 | services named service-1 and service-2 and
25 | any service in namespace ns-1 are not allowed
26 | deny: {}
27 |
--------------------------------------------------------------------------------
/ch16/code/exclude-services-namespace-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: exclude-services-namespace
5 | spec:
6 | validationFailureAction: enforce
7 | rules:
8 | - name: some-rule
9 | match:
10 | any:
11 | - resources:
12 | kinds:
13 | - Service
14 | - resources:
15 | kinds:
16 | - Service
17 | exclude:
18 | any:
19 | - resources:
20 | namespaces:
21 | - "ns-1"
22 | validate:
23 | message: >-
24 | services are not allowed, except in the ns-1 namespace
25 | deny: {}
26 |
--------------------------------------------------------------------------------
/ch16/code/mutate-image-pull-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: set-image-pull-policy
5 | spec:
6 | rules:
7 | - name: set-image-pull-policy
8 | match:
9 | any:
10 | - resources:
11 | kinds:
12 | - Pod
13 | mutate:
14 | patchStrategicMerge:
15 | spec:
16 | containers:
17 | # match images which end with :latest
18 | - (image): "*:latest"
19 | # set the imagePullPolicy to "IfNotPresent"
20 | imagePullPolicy: "IfNotPresent"
--------------------------------------------------------------------------------
/ch16/code/set-request-limit-ratio.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: set-request-limit-ratio
5 | spec:
6 | rules:
7 | - name: set-request-limit-ratio
8 | match:
9 | any:
10 | - resources:
11 | kinds:
12 | - Pod
13 | preconditions:
14 | - key: "{{ request.operation || 'BACKGROUND' }}"
15 | operator: AnyIn
16 | value:
17 | - CREATE
18 | - UPDATE
19 | mutate:
20 | patchStrategicMerge:
21 | spec:
22 | containers:
23 | - resources:
24 | +(limits):
25 | +(cpu): "{{ multiply('{{ request.object.spec.containers[0].resources.requests.cpu || \'0\' }}', '5') }}"
26 | +(memory): "{{ multiply('{{ request.object.spec.containers[0].resources.requests.memory || \'0\' }}', '5') }}"
27 |
--------------------------------------------------------------------------------
/ch16/code/set-request-limit-ratio2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kyverno.io/v1
2 | kind: ClusterPolicy
3 | metadata:
4 | name: set-request-limit-ratio
5 | spec:
6 | rules:
7 | - name: set-request-limit-ratio
8 | match:
9 | any:
10 | - resources:
11 | kinds:
12 | - Pod
13 | preconditions:
14 | - key: "{{ request.operation || 'BACKGROUND' }}"
15 | operator: AnyIn
16 | value:
17 | - CREATE
18 | - UPDATE
19 | mutate:
20 | patchesJson6902: |-
21 |         - path: "/spec/containers/0/resources/limits/cpu"
22 |           op: add
23 | value: "{{ multiply('{{ request.object.spec.containers[0].resources.requests.cpu || \'0\' }}', '5') }}"
24 |         - path: "/spec/containers/0/resources/limits/memory"
25 |           op: add
26 | value: "{{ multiply('{{ request.object.spec.containers[0].resources.requests.memory || \'0\' }}', '5') }}"
27 |
--------------------------------------------------------------------------------
/ch16/code/some-cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: some-cluster-role
5 | rules:
6 | - apiGroups:
7 | - '*'
8 | resources:
9 |   - services
10 | verbs:
11 | - '*'
12 | ---
13 | apiVersion: rbac.authorization.k8s.io/v1
14 | kind: ClusterRoleBinding
15 | metadata:
16 | name: some-cluster-role-binding
17 | roleRef:
18 | apiGroup: rbac.authorization.k8s.io
19 | kind: ClusterRole
20 | name: some-cluster-role
21 | subjects:
22 | - apiGroup: rbac.authorization.k8s.io
23 | kind: Group
24 | name: system:masters
25 |
--------------------------------------------------------------------------------
/ch16/code/some-pod-with-only-resource-requests.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: some-pod-with-only-resource-requests
5 | spec:
6 | containers:
7 | - name: some-container
8 | image: g1g1/py-kube:latest
9 | resources:
10 | requests:
11 | cpu: "1"
12 | memory: 1Gi
13 | limits:
14 | cpu: "1"
15 | memory: 1Gi
16 | command:
17 | - sleep
18 | - "9999"
19 |
--------------------------------------------------------------------------------
/ch16/code/some-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: some-pod
5 | spec:
6 | containers:
7 | - name: some-container
8 | image: g1g1/py-kube:latest
9 | command:
10 | - sleep
11 | - "9999"
12 |
--------------------------------------------------------------------------------
/ch16/code/tests/kyverno-test.yaml:
--------------------------------------------------------------------------------
1 | name: test-some-rule
2 | policies:
3 | - ../disallow-some-services-policy.yaml # disallow names service-1 and service-2 and disallow namespace ns-1
4 | resources:
5 | - test-service-ok.yaml
6 | - test-service-bad-name.yaml # name is service-1
7 | - test-service-bad-namespace.yaml # namespace is ns-1
8 | results:
9 | - policy: disallow-some-services
10 | rule: some-rule
11 | resources:
12 | - service-ok
13 | result: skip
14 | - policy: disallow-some-services
15 | rule: some-rule
16 | resources:
17 | - service-1
18 | kind: Service
19 | result: fail
20 | - policy: disallow-some-services
21 | rule: some-rule
22 | resources:
23 | - service-in-ns-1
24 | kind: Service
25 | namespace: ns-1
26 | result: fail
27 |
--------------------------------------------------------------------------------
/ch16/code/tests/test-service-bad-name.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: service-1
6 | name: service-1
7 | namespace: ns-2
8 | spec:
9 | ports:
10 | - name: https
11 | port: 443
12 | targetPort: https
13 | selector:
14 | app: some-app
15 |
--------------------------------------------------------------------------------
/ch16/code/tests/test-service-bad-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: some-app
6 | name: service-in-ns-1
7 | namespace: ns-1
8 | spec:
9 | ports:
10 | - name: https
11 | port: 443
12 | targetPort: https
13 | selector:
14 | app: some-app
15 |
--------------------------------------------------------------------------------
/ch16/code/tests/test-service-ok.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: some-app
6 | name: service-ok
7 | namespace: ns-2
8 | spec:
9 | ports:
10 | - name: https
11 | port: 443
12 | targetPort: https
13 | selector:
14 | app: some-app
15 |
--------------------------------------------------------------------------------
/ch17/code/cluster-config.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | name: trouble
4 | nodes:
5 | - role: control-plane
6 | featureGates:
7 | "PodSchedulingReadiness": true
--------------------------------------------------------------------------------
/ch17/code/deployment-bad-readiness-probe.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: bad-readiness-probe
5 | namespace: trouble
6 | labels:
7 | app: bad-readiness-probe
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: bad-readiness-probe
13 | template:
14 | metadata:
15 | name: bad-readiness-probe
16 | labels:
17 | app: bad-readiness-probe
18 | spec:
19 | containers:
20 | - name: pause
21 | image: registry.k8s.io/pause:3.8
22 | readinessProbe:
23 | httpGet:
24 | path: /
25 | port: 80
26 | failureThreshold: 3
27 | periodSeconds: 10
28 | initialDelaySeconds: 5
29 | timeoutSeconds: 2
30 |
--------------------------------------------------------------------------------
/ch17/code/deployment-bad-startup-probe.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: bad-startup-probe
5 | namespace: trouble
6 | labels:
7 | app: bad-startup-probe
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: bad-startup-probe
13 | template:
14 | metadata:
15 | name: bad-startup-probe
16 | labels:
17 | app: bad-startup-probe
18 | spec:
19 | containers:
20 | - name: pause
21 | image: registry.k8s.io/pause:3.8
22 | startupProbe:
23 | httpGet:
24 | path: /
25 | port: 80
26 | failureThreshold: 3
27 | periodSeconds: 10
28 | initialDelaySeconds: 5
29 | timeoutSeconds: 2
30 |
--------------------------------------------------------------------------------
/ch17/code/deployment-err-image-pull.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: bad-image-deployment
5 | namespace: trouble
6 | labels:
7 | app: the-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: the-app
13 | template:
14 | metadata:
15 | labels:
16 | app: the-app
17 | spec:
18 | containers:
19 | - name: no-such-image
20 | image: no-such-image:6.6.6
21 |
--------------------------------------------------------------------------------
/ch17/code/deployment-hung-init-container.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: infinite-init
5 | namespace: trouble
6 | labels:
7 | app: infinite-init
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: infinite-init
13 | template:
14 | metadata:
15 | name: infinite-init
16 | labels:
17 | app: infinite-init
18 | spec:
19 | initContainers:
20 | - name: pause
21 | image: registry.k8s.io/pause:3.8
22 | containers:
23 | - name: pause
24 | image: registry.k8s.io/pause:3.8
25 |
26 |
--------------------------------------------------------------------------------
/ch17/code/deployment-pending-namespace-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: some-deployment
5 | namespace: trouble
6 | labels:
7 | app: the-app
8 | spec:
9 | replicas: 3
10 | selector:
11 | matchLabels:
12 | app: the-app
13 | template:
14 | metadata:
15 | labels:
16 | app: the-app
17 | spec:
18 | containers:
19 | - name: pause
20 | image: registry.k8s.io/pause:3.8
21 | resources:
22 | requests:
23 | cpu: "0.5"
24 |
--------------------------------------------------------------------------------
/ch17/code/deployment-pending-pod-requests.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: some-deployment
5 | namespace: trouble
6 | labels:
7 | app: the-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: the-app
13 | template:
14 | metadata:
15 | labels:
16 | app: the-app
17 | spec:
18 | containers:
19 | - name: pause
20 | image: registry.k8s.io/pause:3.8
21 | resources:
22 | requests:
23 | cpu: "666"
24 | memory: 1Gi
25 |
26 |
--------------------------------------------------------------------------------
/ch17/code/deployment-scheduling-gated.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: some-deployment
5 | namespace: trouble
6 | labels:
7 | app: the-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: the-app
13 | template:
14 | metadata:
15 | name: some-deployment
16 | labels:
17 | app: the-app
18 | spec:
19 | schedulingGates:
20 | - name: no-schedule-yet
21 | containers:
22 | - name: pause
23 | image: registry.k8s.io/pause:3.8
24 |
25 |
--------------------------------------------------------------------------------
/ch17/code/hpa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2beta2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: kyverno
5 | namespace: kyverno
6 | spec:
7 | maxReplicas: 3
8 | metrics:
9 | - resource:
10 | name: cpu
11 | target:
12 | averageUtilization: 80
13 | type: Utilization
14 | type: Resource
15 | minReplicas: 1
16 | scaleTargetRef:
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | name: kyverno
20 |
--------------------------------------------------------------------------------
/ch17/code/hpa_converted.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v1
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | creationTimestamp: null
5 | name: kyverno
6 | namespace: kyverno
7 | spec:
8 | maxReplicas: 3
9 | minReplicas: 1
10 | scaleTargetRef:
11 | apiVersion: apps/v1
12 | kind: Deployment
13 | name: kyverno
14 | targetCPUUtilizationPercentage: 80
15 | status:
16 | currentReplicas: 0
17 | desiredReplicas: 0
18 |
--------------------------------------------------------------------------------
/ch17/code/pod-run-container-error.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: run-container-error
5 | namespace: trouble
6 | spec:
7 | containers:
8 | - name: run-container-error
9 | image: bash
10 | command:
11 | - exit
12 | - "1"
13 |
--------------------------------------------------------------------------------
/ch17/code/trouble-ns-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: cpu-requests
5 | spec:
6 | hard:
7 | requests.cpu: "1"
--------------------------------------------------------------------------------
/ch2/code/kind-ha-multi-node-config.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | name: ha-multi-node-cluster
4 | nodes:
5 | - role: control-plane
6 | - role: control-plane
7 | - role: control-plane
8 | - role: worker
9 | - role: worker
10 |
--------------------------------------------------------------------------------
/ch2/code/kind-multi-node-config.yaml:
--------------------------------------------------------------------------------
1 | kind: Cluster
2 | apiVersion: kind.x-k8s.io/v1alpha4
3 | name: multi-node-cluster
4 | nodes:
5 | - role: control-plane
6 | - role: worker
7 | - role: worker
8 |
--------------------------------------------------------------------------------
/ch3/code/eks-cluster-autoscaler.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | k8s-addon: cluster-autoscaler.addons.k8s.io
6 | k8s-app: cluster-autoscaler
7 | name: cluster-autoscaler
8 | namespace: kube-system
9 | ---
10 | apiVersion: rbac.authorization.k8s.io/v1
11 | kind: ClusterRole
12 | metadata:
13 | name: cluster-autoscaler
14 | labels:
15 | k8s-addon: cluster-autoscaler.addons.k8s.io
16 | k8s-app: cluster-autoscaler
17 | rules:
18 | - apiGroups: [""]
19 | resources: ["events","endpoints"]
20 | verbs: ["create", "patch"]
21 | - apiGroups: [""]
22 | resources: ["pods/eviction"]
23 | verbs: ["create"]
24 | - apiGroups: [""]
25 | resources: ["pods/status"]
26 | verbs: ["update"]
27 | - apiGroups: [""]
28 | resources: ["endpoints"]
29 | resourceNames: ["cluster-autoscaler"]
30 | verbs: ["get","update"]
31 | - apiGroups: [""]
32 | resources: ["nodes"]
33 | verbs: ["watch","list","get","update"]
34 | - apiGroups: [""]
35 | resources: ["pods","services","replicationcontrollers","persistentvolumeclaims","persistentvolumes"]
36 | verbs: ["watch","list","get"]
37 |   - apiGroups: ["apps", "extensions"]
38 | resources: ["replicasets","daemonsets"]
39 | verbs: ["watch","list","get"]
40 | - apiGroups: ["policy"]
41 | resources: ["poddisruptionbudgets"]
42 | verbs: ["watch","list"]
43 | - apiGroups: ["apps"]
44 | resources: ["statefulsets"]
45 | verbs: ["watch","list","get"]
46 | - apiGroups: ["storage.k8s.io"]
47 | resources: ["storageclasses"]
48 | verbs: ["watch","list","get"]
49 |
50 | ---
51 | apiVersion: rbac.authorization.k8s.io/v1
52 | kind: Role
53 | metadata:
54 | name: cluster-autoscaler
55 | namespace: kube-system
56 | labels:
57 | k8s-addon: cluster-autoscaler.addons.k8s.io
58 | k8s-app: cluster-autoscaler
59 | rules:
60 | - apiGroups: [""]
61 | resources: ["configmaps"]
62 | verbs: ["create"]
63 | - apiGroups: [""]
64 | resources: ["configmaps"]
65 | resourceNames: ["cluster-autoscaler-status"]
66 | verbs: ["delete","get","update"]
67 |
68 | ---
69 | apiVersion: rbac.authorization.k8s.io/v1
70 | kind: ClusterRoleBinding
71 | metadata:
72 | name: cluster-autoscaler
73 | labels:
74 | k8s-addon: cluster-autoscaler.addons.k8s.io
75 | k8s-app: cluster-autoscaler
76 | roleRef:
77 | apiGroup: rbac.authorization.k8s.io
78 | kind: ClusterRole
79 | name: cluster-autoscaler
80 | subjects:
81 | - kind: ServiceAccount
82 | name: cluster-autoscaler
83 | namespace: kube-system
84 |
85 | ---
86 | apiVersion: rbac.authorization.k8s.io/v1
87 | kind: RoleBinding
88 | metadata:
89 | name: cluster-autoscaler
90 | namespace: kube-system
91 | labels:
92 | k8s-addon: cluster-autoscaler.addons.k8s.io
93 | k8s-app: cluster-autoscaler
94 | roleRef:
95 | apiGroup: rbac.authorization.k8s.io
96 | kind: Role
97 | name: cluster-autoscaler
98 | subjects:
99 | - kind: ServiceAccount
100 | name: cluster-autoscaler
101 | namespace: kube-system
102 |
103 | ---
104 | apiVersion: apps/v1
105 | kind: Deployment
106 | metadata:
107 | name: cluster-autoscaler
108 | namespace: kube-system
109 | labels:
110 | app: cluster-autoscaler
111 | spec:
112 | replicas: 1
113 | selector:
114 | matchLabels:
115 | app: cluster-autoscaler
116 | template:
117 | metadata:
118 | labels:
119 | app: cluster-autoscaler
120 | spec:
121 | serviceAccountName: cluster-autoscaler
122 | containers:
123 | - image: k8s.gcr.io/cluster-autoscaler:v1.2.2
124 | name: cluster-autoscaler
125 | resources:
126 | limits:
127 | cpu: 100m
128 | memory: 300Mi
129 | requests:
130 | cpu: 100m
131 | memory: 300Mi
132 | command:
133 | - ./cluster-autoscaler
134 | - --v=4
135 | - --stderrthreshold=info
136 | - --cloud-provider=aws
137 | - --skip-nodes-with-local-storage=false
138 | - --nodes=2:5:eksctl-project-nodegroup-ng-name-NodeGroup-suffix
139 | env:
140 | - name: AWS_REGION
141 | value: us-east-1
142 | volumeMounts:
143 | - name: ssl-certs
144 | mountPath: /etc/ssl/certs/ca-certificates.crt
145 | readOnly: true
146 | imagePullPolicy: "Always"
147 | volumes:
148 | - name: ssl-certs
149 | hostPath:
150 | path: "/etc/ssl/certs/ca-bundle.crt"
--------------------------------------------------------------------------------
/ch3/code/etcd-cluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "etcd.database.coreos.com/v1beta2"
2 | kind: "EtcdCluster"
3 | metadata:
4 | name: "etcd-cluster"
5 | spec:
6 | size: 3
7 | version: "3.2.13"
8 |
--------------------------------------------------------------------------------
/ch3/code/helm-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: tiller
5 | namespace: kube-system
6 | ---
7 | apiVersion: rbac.authorization.k8s.io/v1
8 | kind: ClusterRoleBinding
9 | metadata:
10 | name: tiller
11 | roleRef:
12 | apiGroup: rbac.authorization.k8s.io
13 | kind: ClusterRole
14 | name: cluster-admin
15 | subjects:
16 | - kind: ServiceAccount
17 | name: tiller
18 | namespace: kube-system
--------------------------------------------------------------------------------
/ch3/code/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx:1.7.9
20 | ports:
21 | - containerPort: 80
22 |
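Usage sketch (not part of the repo): a minimal rollout of the deployment above:
  $ kubectl apply -f nginx-deployment.yaml
  $ kubectl rollout status deployment/nginx-deployment
  $ kubectl get pods -l app=nginx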
--------------------------------------------------------------------------------
/ch4/code/cool-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: cool-pod
5 | namespace: the-namespace
6 | spec:
7 | containers:
8 | - name: cool-container
9 | image: cool/app:v1
10 | imagePullSecrets:
11 | - name: the-registry-secret
12 |
--------------------------------------------------------------------------------
/ch4/code/custom-namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: custom-namespace
5 |
--------------------------------------------------------------------------------
/ch4/code/custom-service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: custom-service-account
5 |
--------------------------------------------------------------------------------
/ch4/code/deny-all-network-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: deny-all
5 | spec:
6 | podSelector: {}
7 | policyTypes:
8 | - Ingress
9 | - Egress
--------------------------------------------------------------------------------
/ch4/code/deny-egress-network-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: policy
3 | metadata:
4 | name: default-deny-egress
5 | spec:
6 | order: 999
7 | egress:
8 | - action: deny
9 | destination:
10 | net: 1.2.3.4
11 | source: {}
--------------------------------------------------------------------------------
/ch4/code/network-policy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: NetworkPolicy
3 | metadata:
4 | name: the-network-policy
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | role: db
10 | ingress:
11 | - from:
12 | - namespaceSelector:
13 | matchLabels:
14 | project: cool-project
15 | - podSelector:
16 | matchLabels:
17 | role: frontend
18 | ports:
19 | - protocol: TCP
20 | port: 6379
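Usage sketch (not part of the repo): the policy above is only enforced when the cluster's network plugin implements NetworkPolicy (for example Calico or Cilium); a minimal check:
  $ kubectl apply -f network-policy.yaml
  $ kubectl describe networkpolicy the-network-policy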
--------------------------------------------------------------------------------
/ch4/code/pod-with-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod-with-secret
5 | spec:
6 | containers:
7 | - name: container-with-secret
8 | image: g1g1/py-kube:0.3
9 | command: ["/bin/bash", "-c", "while true ; do sleep 10 ; done"]
10 | volumeMounts:
11 | - name: secret-volume
12 | mountPath: "/mnt/hush-hush"
13 | readOnly: true
14 | volumes:
15 | - name: secret-volume
16 | secret:
17 | secretName: hush-hush
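Usage sketch (not part of the repo): the pod above mounts a Secret named hush-hush, which must exist first; a minimal sketch with a made-up key/value:
  $ kubectl create secret generic hush-hush --from-literal=token=top-secret
  $ kubectl apply -f pod-with-secret.yaml
  $ kubectl exec pod-with-secret -- ls /mnt/hush-hush    # each secret key shows up as a file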
--------------------------------------------------------------------------------
/ch5/code/admin-user.yaml:
--------------------------------------------------------------------------------
1 | # See: https://github.com/kubernetes/dashboard/wiki/Creating-sample-user
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: admin-user
6 | namespace: kube-system
7 | ---
8 | apiVersion: rbac.authorization.k8s.io/v1
9 | kind: ClusterRoleBinding
10 | metadata:
11 | name: admin-user
12 | roleRef:
13 | apiGroup: rbac.authorization.k8s.io
14 | kind: ClusterRole
15 | name: cluster-admin
16 | subjects:
17 | - kind: ServiceAccount
18 | name: admin-user
19 | namespace: kube-system
--------------------------------------------------------------------------------
/ch5/code/base/hue-learn.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: hue-learner
5 | labels:
6 | tier: internal-service
7 | spec:
8 | containers:
9 | - name: hue-learner
10 | image: g1g1/hue-learn:0.3
11 | resources:
12 | requests:
13 | cpu: 200m
14 | memory: 256Mi
15 | env:
16 | - name: DISCOVER_QUEUE
17 | value: dns
18 | - name: DISCOVER_STORE
19 | value: dns
20 |
--------------------------------------------------------------------------------
/ch5/code/base/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | commonLabels:
4 | app: hue
5 |
6 | resources:
7 | - hue-learn.yaml
--------------------------------------------------------------------------------
/ch5/code/cron-job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: cron-demo
5 | spec:
6 | schedule: "*/1 * * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | metadata:
11 | labels:
12 | cronjob-name: cron-demo
13 | spec:
14 | containers:
15 | - name: cron-demo
16 | image: g1g1/py-kube:0.3
17 | args:
18 | - python
19 | - -c
20 | - from datetime import datetime; print(f'[{datetime.now()}] CronJob demo here...remember to stretch')
21 | restartPolicy: OnFailure
22 |
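Usage sketch (not part of the repo): a minimal run of the CronJob above and a look at its output:
  $ kubectl apply -f cron-job.yaml
  $ kubectl get jobs --watch                        # a new job appears every minute
  $ kubectl logs -l cronjob-name=cron-demo --tail=5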
--------------------------------------------------------------------------------
/ch5/code/factorial-job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: factorial5
5 | spec:
6 | template:
7 | metadata:
8 | name: factorial5
9 | spec:
10 | containers:
11 | - name: factorial5
12 | image: g1g1/py-kube:0.3
13 | command: ["python",
14 | "-c",
15 | "import math; print(math.factorial(5))"]
16 | restartPolicy: Never
17 |
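Usage sketch (not part of the repo): a minimal run of the job above to completion:
  $ kubectl apply -f factorial-job.yaml
  $ kubectl wait --for=condition=complete job/factorial5
  $ kubectl logs job/factorial5    # prints 120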
--------------------------------------------------------------------------------
/ch5/code/hue-collect-proxy-ds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: hue-collect-proxy
5 | labels:
6 | tier: stats
7 | app: hue-collect-proxy
8 | spec:
9 | selector:
10 | matchLabels:
11 | tier: stats
12 | app: hue-collect-proxy
13 | template:
14 | metadata:
15 | labels:
16 | tier: stats
17 | app: hue-collect-proxy
18 | spec:
19 | hostPID: true
20 | hostIPC: true
21 | hostNetwork: true
22 | containers:
23 | - name: hue-collect-proxy
24 | image: busybox
25 |
26 |
--------------------------------------------------------------------------------
/ch5/code/hue-finance-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-finance
5 | labels:
6 | app: hue
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: hue
12 | template:
13 | metadata:
14 | labels:
15 | app: hue
16 | spec:
17 | containers:
18 | - name: hue-finance
19 | image: g1g1/hue-learn:0.3
20 |
--------------------------------------------------------------------------------
/ch5/code/hue-fitness-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: hue-fitness
5 | spec:
6 | containers:
7 | - name: hue-fitness
8 | image: busybox
9 | initContainers:
10 | - name: install
11 | image: busybox
12 |
--------------------------------------------------------------------------------
/ch5/code/hue-learn-deployment-0.4.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-learn
5 | labels:
6 | app: hue
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: hue
12 | template:
13 | metadata:
14 | labels:
15 | app: hue
16 | spec:
17 | containers:
18 | - name: hue-learner
19 | image: g1g1/hue-learn:0.4
20 | resources:
21 | requests:
22 | cpu: 200m
23 | memory: 256Mi
24 | env:
25 | - name: DISCOVER_QUEUE
26 | value: dns
27 | - name: DISCOVER_STORE
28 | value: dns
29 |
--------------------------------------------------------------------------------
/ch5/code/hue-learn-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-learn
5 | labels:
6 | app: hue
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: hue
12 | template:
13 | metadata:
14 | labels:
15 | app: hue
16 | spec:
17 | containers:
18 | - name: hue-learner
19 | image: g1g1/hue-learn:0.3
20 | resources:
21 | requests:
22 | cpu: 200m
23 | memory: 256Mi
24 | env:
25 | - name: DISCOVER_QUEUE
26 | value: dns
27 | - name: DISCOVER_STORE
28 | value: dns
29 |
--------------------------------------------------------------------------------
/ch5/code/hue-learn/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM busybox
2 |
3 | CMD ash -c "echo 'Started...'; while true ; do sleep 10 ; done"
--------------------------------------------------------------------------------
/ch5/code/hue-music-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | labels:
5 | app: music
6 | service: music
7 | name: hue-music
8 | spec:
9 | containers:
10 | - name: hue-music
11 | image: busybox
12 | livenessProbe:
13 | httpGet:
14 | path: /pulse
15 | port: 8888
16 | httpHeaders:
17 | - name: X-Custom-Header
18 | value: ItsAlive
19 | initialDelaySeconds: 30
20 | timeoutSeconds: 1
21 |
--------------------------------------------------------------------------------
/ch5/code/hue-reminders-deployment-with-pod-affinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-reminders
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hue
10 | service: reminders
11 | template:
12 | metadata:
13 | name: hue-reminders
14 | labels:
15 | app: hue
16 | service: reminders
17 | spec:
18 | affinity:
19 | podAffinity:
20 | requiredDuringSchedulingIgnoredDuringExecution:
21 | - labelSelector:
22 | matchExpressions:
23 | - key: role
24 | operator: In
25 | values:
26 | - "trouble-shooter"
27 | topologyKey: topology.kubernetes.io/zone # for clusters on cloud providers only
28 | containers:
29 | - name: hue-reminders
30 | image: g1g1/hue-reminders:3.0
31 | ports:
32 | - containerPort: 80
33 |
--------------------------------------------------------------------------------
/ch5/code/hue-reminders-deployment-with-spread-contraitns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-reminders
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | app: hue
10 | service: reminders
11 | template:
12 | metadata:
13 | name: hue-reminders
14 | labels:
15 | app: hue
16 | service: reminders
17 | spec:
18 | topologySpreadConstraints:
19 | - maxSkew: 1
20 | topologyKey: node.kubernetes.io/instance-type
21 | whenUnsatisfiable: DoNotSchedule
22 | labelSelector:
23 | matchLabels:
24 | app: hue
25 |             service: reminders
26 | containers:
27 | - name: hue-reminders
28 | image: g1g1/hue-reminders:3.0
29 | ports:
30 | - containerPort: 80
31 |
--------------------------------------------------------------------------------
/ch5/code/hue-reminders-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-reminders
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hue
10 | service: reminders
11 | template:
12 | metadata:
13 | name: hue-reminders
14 | labels:
15 | app: hue
16 | service: reminders
17 | spec:
18 | containers:
19 | - name: hue-reminders
20 | image: g1g1/hue-reminders:3.0
21 | ports:
22 | - containerPort: 80
23 |
--------------------------------------------------------------------------------
/ch5/code/hue-reminders-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: hue-reminders
5 | labels:
6 | app: hue
7 | service: reminders
8 | spec:
9 | ports:
10 | - port: 8080
11 | selector:
12 | app: hue
13 | service: reminders
14 |
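Usage sketch (not part of the repo): the service above targets port 8080, where the Go server in hue-reminders/main.go listens; a minimal local check:
  $ kubectl port-forward svc/hue-reminders 8080:8080 &
  $ curl http://localhost:8080/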
--------------------------------------------------------------------------------
/ch5/code/hue-reminders/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.12 AS builder
2 | ADD ./main.go main.go
3 |
4 | # Fetch dependencies
5 | RUN go get -d -v
6 | # Build image as a truly static Go binary
7 | RUN CGO_ENABLED=0 GOOS=linux go build -o /hue-reminders -a -tags netgo -ldflags '-s -w'
8 |
9 | FROM scratch
10 | MAINTAINER Gigi Sayfan
11 | COPY --from=builder /hue-reminders /hue-reminders
12 | EXPOSE 8080
13 | ENTRYPOINT ["/hue-reminders"]
14 |
15 |
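Usage sketch (not part of the repo): a minimal build-and-push of the image referenced by the hue-reminders deployments, assuming push access to the g1g1 registry account:
  $ docker build -t g1g1/hue-reminders:3.0 .
  $ docker push g1g1/hue-reminders:3.0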
--------------------------------------------------------------------------------
/ch5/code/hue-reminders/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "log"
6 | "net/http"
7 | "strings"
8 | )
9 |
10 | func handler(w http.ResponseWriter, r *http.Request) {
11 | reminders := []string{
12 | "Dentist appointment at 3pm",
13 | "Dinner at 7pm",
14 | }
15 | fmt.Fprint(w, strings.Join(reminders, "\n"), r.URL.Path[1:])
16 | }
17 |
18 | func main() {
19 | http.HandleFunc("/", handler)
20 | log.Println("hue-reminders is listening on port 8080...")
21 | log.Fatal(http.ListenAndServe(":8080", nil))
22 | }
23 |
--------------------------------------------------------------------------------
/ch5/code/overlays/production/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: production
4 | commonLabels:
5 | environment: production
6 | bases:
7 | - ../../base
8 |
--------------------------------------------------------------------------------
/ch5/code/overlays/production/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: production
--------------------------------------------------------------------------------
/ch5/code/overlays/staging/hue-learn-patch.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: hue-learner
5 | spec:
6 | containers:
7 | - name: hue-learner
8 | image: g1g1/hue-learn:0.4
9 |
--------------------------------------------------------------------------------
/ch5/code/overlays/staging/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | namespace: staging
4 | commonLabels:
5 | environment: staging
6 | bases:
7 | - ../../base
8 |
9 | patchesStrategicMerge:
10 | - hue-learn-patch.yaml
11 |
12 | resources:
13 | - namespace.yaml
--------------------------------------------------------------------------------
/ch5/code/overlays/staging/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: staging
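Usage sketch (not part of the repo): with the base and the staging overlay above in place, a minimal way to render and apply the overlay:
  $ kubectl kustomize overlays/staging     # render without applying
  $ kubectl apply -k overlays/staging
  $ kubectl get pods -n staging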
--------------------------------------------------------------------------------
/ch5/code/parallel-job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: sleep20
5 | spec:
6 | completions: 3
7 | parallelism: 6
8 | template:
9 | metadata:
10 | name: sleep20
11 | spec:
12 | containers:
13 | - name: sleep20
14 | image: g1g1/py-kube:0.3
15 | command: ["python",
16 | "-c",
17 | "import time; print('started...');
18 | time.sleep(20); print('done.')"]
19 | restartPolicy: Never
--------------------------------------------------------------------------------
/ch5/code/restricted-namespace.yaml:
--------------------------------------------------------------------------------
1 | kind: Namespace
2 | apiVersion: v1
3 | metadata:
4 | name: restricted
5 | labels:
6 | name: restricted
7 |
--------------------------------------------------------------------------------
/ch5/code/test-labels.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: test-labels
5 | labels:
6 | app: test-labels
7 | env: staging
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: test-labels
13 | another: label
14 | template:
15 | metadata:
16 | name: test-labels
17 | labels:
18 | app: test-labels
19 | service: test-labels
20 | another: label
21 | spec:
22 | containers:
23 | - name: test-labels
24 | image: nginx:1.13
25 | command: ["bash"]
26 | args: ["-c", "echo started...; while true ; do sleep 1 ; done"]
27 |
--------------------------------------------------------------------------------
/ch5/code/trouble-deployment-us-central1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: trouble-us-central1
5 | labels:
6 | app: trouble
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: trouble
12 | template:
13 | metadata:
14 | labels:
15 | app: trouble
16 | region: us-central1
17 | spec:
18 | containers:
19 | - name: trouble
20 | image: nginx
21 |
22 |
--------------------------------------------------------------------------------
/ch5/code/trouble-deployment-us-west2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: trouble-us-west2
5 | labels:
6 | app: trouble
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: trouble
12 | template:
13 | metadata:
14 | labels:
15 | app: trouble
16 | region: us-west2
17 | spec:
18 | containers:
19 | - name: trouble
20 | image: nginx
21 |
22 |
--------------------------------------------------------------------------------
/ch5/code/trouble-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: trouble
5 | labels:
6 | app: trouble
7 | app2: trouble2
8 | spec:
9 | ports:
10 | - port: 8080
11 | selector:
12 | app: trouble
13 |
14 |
15 |
--------------------------------------------------------------------------------
/ch5/code/trouble-shooter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: trouble-shooter
5 | labels:
6 | role: trouble-shooter
7 | spec:
8 | nodeSelector:
9 | kubernetes.io/hostname: k3d-k3s-default-agent-1
10 | containers:
11 | - name: trouble-shooter
12 | image: g1g1/py-kube:0.3
13 | command: ["bash"]
14 | args: ["-c", "echo started...; while true ; do sleep 1 ; done"]
15 |
16 |
--------------------------------------------------------------------------------
/ch6/code/dir-persistent-volume-claim.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: dir-pvc
5 | spec:
6 | storageClassName: dir
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 1Gi
12 |
--------------------------------------------------------------------------------
/ch6/code/dir-persistent-volume.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolume
2 | apiVersion: v1
3 | metadata:
4 | name: dir-pv
5 | spec:
6 | storageClassName: dir
7 | capacity:
8 | storage: 1Gi
9 | accessModes:
10 | - ReadWriteMany
11 | hostPath:
12 | path: "/tmp/data"
13 |
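Usage sketch (not part of the repo): a minimal creation of the PV/PVC pair above and a check that the claim binds:
  $ kubectl apply -f dir-persistent-volume.yaml
  $ kubectl apply -f dir-persistent-volume-claim.yaml
  $ kubectl get pv,pvc    # dir-pvc should report STATUS Bound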
--------------------------------------------------------------------------------
/ch6/code/hue-global-listener/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM busybox
2 |
3 | CMD ash -c "echo 'Started...'; while true ; do sleep 10 ; done"
--------------------------------------------------------------------------------
/ch6/code/hue-global-listener/build.sh:
--------------------------------------------------------------------------------
1 | $ docker build . -t g1g1/hue-global-listener:1.0
--------------------------------------------------------------------------------
/ch6/code/hue-job-scheduler/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM busybox
2 |
3 | CMD ash -c "echo 'Started...'; while true ; do sleep 10 ; done"
--------------------------------------------------------------------------------
/ch6/code/hue-job-scheduler/build.sh:
--------------------------------------------------------------------------------
1 | $ docker build . -t g1g1/hue-job-scheduler:1.0
--------------------------------------------------------------------------------
/ch6/code/hue-scheduler-in-memory.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: hue-scheduler
5 | spec:
6 | containers:
7 | - image: g1g1/hue-global-listener:1.0
8 | name: hue-global-listener
9 | volumeMounts:
10 | - mountPath: /notifications
11 | name: shared-volume
12 | - image: g1g1/hue-job-scheduler:1.0
13 | name: hue-job-scheduler
14 | volumeMounts:
15 | - mountPath: /incoming
16 | name: shared-volume
17 | volumes:
18 | - name: shared-volume
19 | emptyDir: {}
20 |
--------------------------------------------------------------------------------
/ch6/code/hue-scheduler.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: hue-scheduler
5 | spec:
6 | containers:
7 | - image: g1g1/hue-global-listener:1.0
8 | name: hue-global-listener
9 | volumeMounts:
10 | - mountPath: /notifications
11 | name: shared-volume
12 | - image: g1g1/hue-job-scheduler:1.0
13 | name: hue-job-scheduler
14 | volumeMounts:
15 | - mountPath: /incoming
16 | name: shared-volume
17 | volumes:
18 | - name: shared-volume
19 | emptyDir: {}
20 |
--------------------------------------------------------------------------------
/ch6/code/local-persistent-volume-claim.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: local-storage-claim
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | resources:
9 | requests:
10 | storage: 8Gi
11 | storageClassName: local-storage
12 | selector:
13 | matchLabels:
14 | release: stable
15 | matchExpressions:
16 | - {key: capacity, operator: In, values: [8Gi, 10Gi]}
17 |
--------------------------------------------------------------------------------
/ch6/code/local-storage-class.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: local-storage
5 | provisioner: kubernetes.io/no-provisioner
6 | volumeBindingMode: WaitForFirstConsumer
--------------------------------------------------------------------------------
/ch6/code/local-volume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: local-pv
5 | labels:
6 | release: stable
7 | capacity: 10Gi
8 | spec:
9 | capacity:
10 | storage: 10Gi
11 | volumeMode: Filesystem
12 | accessModes:
13 | - ReadWriteOnce
14 | persistentVolumeReclaimPolicy: Delete
15 | storageClassName: local-storage
16 | local:
17 | path: /mnt/disks/disk-1
18 | nodeAffinity:
19 | required:
20 | nodeSelectorTerms:
21 | - matchExpressions:
22 | - key: kubernetes.io/hostname
23 | operator: In
24 | values:
25 | - k3d-k3s-default-agent-1
26 |
--------------------------------------------------------------------------------
/ch6/code/pod-with-csi-ephemeral-volume.yaml:
--------------------------------------------------------------------------------
1 | kind: Pod
2 | apiVersion: v1
3 | metadata:
4 | name: the-pod
5 | spec:
6 | containers:
7 | - name: the-container
8 | image: g1g1/py-kube:0.3
9 | volumeMounts:
10 | - mountPath: "/data"
11 | name: the-volume
12 | command: [ "sleep", "1000000" ]
13 | volumes:
14 | - name: the-volume
15 | csi:
16 | driver: inline.storage.kubernetes.io
17 | volumeAttributes:
18 | key: value
--------------------------------------------------------------------------------
/ch6/code/pod-with-generic-ephemeral-volume.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: generic-storage
5 | provisioner: kubernetes.io/no-provisioner
6 | reclaimPolicy: Delete
7 | volumeBindingMode: WaitForFirstConsumer
8 | ---
9 | kind: Pod
10 | apiVersion: v1
11 | metadata:
12 | name: the-pod
13 | spec:
14 | containers:
15 | - name: the-container
16 | image: g1g1/py-kube:0.3
17 | volumeMounts:
18 | - mountPath: "/data"
19 | name: the-volume
20 | command: [ "sleep", "1000000" ]
21 | volumes:
22 | - name: the-volume
23 | ephemeral:
24 | volumeClaimTemplate:
25 | metadata:
26 | labels:
27 | type: generic-ephemeral-volume
28 | spec:
29 | accessModes: [ "ReadWriteOnce" ]
30 | storageClassName: generic-storage
31 | resources:
32 | requests:
33 | storage: 1Gi
34 |
--------------------------------------------------------------------------------
/ch6/code/pod-with-local-claim.yaml:
--------------------------------------------------------------------------------
1 | kind: Pod
2 | apiVersion: v1
3 | metadata:
4 | name: the-pod
5 | spec:
6 | containers:
7 | - name: the-container
8 | image: g1g1/py-kube:0.3
9 | command: ["bash"]
10 | args: ["-c", "echo started...; while true ; do sleep 1 ; done"]
11 | volumeMounts:
12 | - mountPath: "/mnt/data"
13 | name: persistent-volume
14 | volumes:
15 | - name: persistent-volume
16 | persistentVolumeClaim:
17 | claimName: local-storage-claim
--------------------------------------------------------------------------------
/ch6/code/rook-cluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ceph.rook.io/v1
2 | kind: CephCluster
3 | metadata:
4 | name: rook-ceph
5 | namespace: rook-ceph # namespace:cluster
6 | spec:
7 | cephVersion:
8 | image: quay.io/ceph/ceph:v17.2.5
9 | allowUnsupported: false
10 | dataDirHostPath: /var/lib/rook
11 | skipUpgradeChecks: false
12 | continueUpgradeAfterChecksEvenIfNotHealthy: false
13 | waitTimeoutForHealthyOSDInMinutes: 10
14 | mon:
15 | count: 3
16 | allowMultiplePerNode: false
17 | mgr:
18 | count: 2
19 | allowMultiplePerNode: false
20 | modules:
21 | - name: pg_autoscaler
22 | enabled: true
23 | dashboard:
24 | enabled: true
25 | ssl: true
26 | monitoring:
27 | enabled: false
28 | network:
29 | connections:
30 | encryption:
31 | enabled: false
32 | compression:
33 | enabled: false
34 | crashCollector:
35 | disable: false
36 | logCollector:
37 | enabled: true
38 | periodicity: daily # one of: hourly, daily, weekly, monthly
39 | maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M.
40 | cleanupPolicy:
41 | confirmation: ""
42 | sanitizeDisks:
43 | method: quick
44 | dataSource: zero
45 | iteration: 1
46 | allowUninstallWithVolumes: false
47 | annotations:
48 | labels:
49 | resources:
50 | removeOSDsIfOutAndSafeToRemove: false
51 | priorityClassNames:
52 | mon: system-node-critical
53 | osd: system-node-critical
54 | mgr: system-cluster-critical
55 | storage: # cluster level storage configuration and selection
56 | useAllNodes: true
57 | useAllDevices: true
58 | config:
59 | onlyApplyOSDPlacement: false
60 | disruptionManagement:
61 | managePodBudgets: true
62 | osdMaintenanceTimeout: 30
63 | pgHealthCheckTimeout: 0
64 | manageMachineDisruptionBudgets: false
65 | machineDisruptionBudgetNamespace: openshift-machine-api
66 |
67 | healthCheck:
68 | daemonHealth:
69 | mon:
70 | disabled: false
71 | interval: 45s
72 | osd:
73 | disabled: false
74 | interval: 60s
75 | status:
76 | disabled: false
77 | interval: 60s
78 | livenessProbe:
79 | mon:
80 | disabled: false
81 | mgr:
82 | disabled: false
83 | osd:
84 | disabled: false
85 | startupProbe:
86 | mon:
87 | disabled: false
88 | mgr:
89 | disabled: false
90 | osd:
91 | disabled: false
--------------------------------------------------------------------------------
/ch6/code/shell-pod.yaml:
--------------------------------------------------------------------------------
1 | kind: Pod
2 | apiVersion: v1
3 | metadata:
4 | name: just-a-shell
5 | labels:
6 | name: just-a-shell
7 | spec:
8 | containers:
9 | - name: a-shell
10 | image: g1g1/py-kube:0.3
11 | command: ["sleep", "10000"]
12 | volumeMounts:
13 | - mountPath: "/data"
14 | name: pv
15 | - name: another-shell
16 | image: g1g1/py-kube:0.3
17 | command: ["sleep", "10000"]
18 | volumeMounts:
19 | - mountPath: "/another-data"
20 | name: pv
21 | volumes:
22 | - name: pv
23 | persistentVolumeClaim:
24 | claimName: dir-pvc
25 |
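Usage sketch (not part of the repo): a minimal check that both containers in the pod above see the same PVC-backed volume:
  $ kubectl apply -f shell-pod.yaml
  $ kubectl exec just-a-shell -c a-shell -- touch /data/hello
  $ kubectl exec just-a-shell -c another-shell -- ls /another-data    # hello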
--------------------------------------------------------------------------------
/ch6/code/shell-pod2.yaml:
--------------------------------------------------------------------------------
1 | kind: Pod
2 | apiVersion: v1
3 | metadata:
4 | name: just-a-shell
5 | labels:
6 | name: just-a-shell
7 | spec:
8 | containers:
9 | - name: a-shell
10 | image: g1g1/py-kube:0.2
11 | command: ["/bin/bash", "-c", "while true ; do sleep 10 ; done"]
12 | - name: another-shell
13 | image: g1g1/py-kube:0.2
14 | command: ["/bin/bash", "-c", "while true ; do sleep 10 ; done"]
15 |
--------------------------------------------------------------------------------
/ch6/code/some-persistent-volume-claim.yaml:
--------------------------------------------------------------------------------
1 | kind: PersistentVolumeClaim
2 | apiVersion: v1
3 | metadata:
4 | name: some-pvc
5 | spec:
6 | accessModes:
7 | - ReadWriteMany
8 | resources:
9 | requests:
10 | storage: 1Gi
11 |
--------------------------------------------------------------------------------
/ch7/code/cassandra-headless-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: cassandra
6 | name: cassandra
7 | spec:
8 | clusterIP: None
9 | ports:
10 | - port: 9042
11 | selector:
12 |     app: cassandra
13 |
--------------------------------------------------------------------------------
/ch7/code/cassandra-stateful-set.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: cassandra
5 | labels:
6 | app: cassandra
7 | spec:
8 | serviceName: cassandra
9 | replicas: 3
10 | selector:
11 | matchLabels:
12 | app: cassandra
13 | template:
14 | metadata:
15 | labels:
16 | app: cassandra
17 | spec:
18 | terminationGracePeriodSeconds: 1800
19 | containers:
20 | - name: cassandra
21 | image: gcr.io/google-samples/cassandra:v14
22 | imagePullPolicy: Always
23 | ports:
24 | - containerPort: 7000
25 | name: intra-node
26 | - containerPort: 7001
27 | name: tls-intra-node
28 | - containerPort: 7199
29 | name: jmx
30 | - containerPort: 9042
31 | name: cql
32 | resources:
33 | limits:
34 | cpu: "500m"
35 | memory: 1Gi
36 | requests:
37 | cpu: "500m"
38 | memory: 1Gi
39 | securityContext:
40 | capabilities:
41 | add:
42 | - IPC_LOCK
43 | lifecycle:
44 | preStop:
45 | exec:
46 | command:
47 | - /bin/sh
48 | - -c
49 | - nodetool drain
50 | env:
51 | - name: MAX_HEAP_SIZE
52 | value: 512M
53 | - name: HEAP_NEWSIZE
54 | value: 100M
55 | - name: CASSANDRA_SEEDS
56 | value: "cassandra-0.cassandra.default.svc.cluster.local"
57 | - name: CASSANDRA_CLUSTER_NAME
58 | value: "K8Demo"
59 | - name: CASSANDRA_DC
60 | value: "DC1-K8Demo"
61 | - name: CASSANDRA_RACK
62 | value: "Rack1-K8Demo"
63 | - name: CASSANDRA_SEED_PROVIDER
64 | value: io.k8s.cassandra.KubernetesSeedProvider
65 | - name: POD_IP
66 | valueFrom:
67 | fieldRef:
68 | fieldPath: status.podIP
69 | readinessProbe:
70 | exec:
71 | command:
72 | - /bin/bash
73 | - -c
74 | - /ready-probe.sh
75 | initialDelaySeconds: 15
76 | timeoutSeconds: 5
77 | volumeMounts:
78 | - name: cassandra-data
79 | mountPath: /var/lib/cassandra
80 | volumeClaimTemplates:
81 | - metadata:
82 | name: cassandra-data
83 | annotations:
84 | volume.beta.kubernetes.io/storage-class: fast
85 | spec:
86 | accessModes: [ "ReadWriteOnce" ]
87 | resources:
88 | requests:
89 | storage: 1Gi
90 |
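Usage sketch (not part of the repo): a minimal bring-up of the ring and a membership check with nodetool (the same tool the preStop hook uses), assuming a storage class named fast exists:
  $ kubectl apply -f cassandra-headless-service.yaml
  $ kubectl apply -f cassandra-stateful-set.yaml
  $ kubectl get pods -l app=cassandra -w    # cassandra-0, -1, -2 start in order
  $ kubectl exec cassandra-0 -- nodetool status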
--------------------------------------------------------------------------------
/ch7/code/db-config-map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: db-config
5 | data:
6 | db-ip-addresses: 1.2.3.4,5.6.7.8
7 |
--------------------------------------------------------------------------------
/ch7/code/nginx-headless-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | ports:
9 | - port: 80
10 | name: web
11 | clusterIP: None
12 | selector:
13 | app: nginx
14 |
--------------------------------------------------------------------------------
/ch7/code/nginx-stateful-set.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | serviceName: nginx
9 | replicas: 3
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | terminationGracePeriodSeconds: 1800
19 | containers:
20 | - name: nginx
21 | image: nginx
22 | imagePullPolicy: Always
23 | ports:
24 | - containerPort: 80
25 | name: web
26 | volumeMounts:
27 | - name: www
28 | mountPath: /usr/share/nginx/html
29 | volumeClaimTemplates:
30 | - metadata:
31 | name: www
32 | spec:
33 | accessModes: [ "ReadWriteOnce" ]
34 | resources:
35 | requests:
36 | storage: 10Mi
37 |
--------------------------------------------------------------------------------
/ch7/code/pod-with-db.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: some-pod
5 | spec:
6 | containers:
7 | - name: some-container
8 | image: busybox
9 | command: ["/bin/sh", "-c", "env"]
10 | env:
11 | - name: DB_IP_ADDRESSES
12 | valueFrom:
13 | configMapKeyRef:
14 | name: db-config
15 | key: db-ip-addresses
16 | restartPolicy: Never
17 |
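Usage sketch (not part of the repo): a minimal check that the pod above sees the ConfigMap value as an environment variable:
  $ kubectl apply -f db-config-map.yaml
  $ kubectl apply -f pod-with-db.yaml
  $ kubectl logs some-pod | grep DB_IP_ADDRESSES    # DB_IP_ADDRESSES=1.2.3.4,5.6.7.8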
--------------------------------------------------------------------------------
/ch8/code/bash-loop-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: bash-loop
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | name: bash-loop
10 | template:
11 | metadata:
12 | labels:
13 | name: bash-loop
14 | spec:
15 | containers:
16 | - name: bash-loop
17 | image: g1g1/py-kube:0.3
18 | resources:
19 | requests:
20 | cpu: 100m
21 | command: ["/bin/bash", "-c", "while true; do sleep 10; done"]
22 |
--------------------------------------------------------------------------------
/ch8/code/compute-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: compute-quota
5 | spec:
6 | hard:
7 | pods: 2
8 | requests.cpu: 10
9 | requests.memory: 200Mi
10 | limits.cpu: 20
11 | limits.memory: 2Gi
12 |
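Usage sketch (not part of the repo): ResourceQuota is namespaced, so the quota above applies to whichever namespace it is created in; a minimal sketch with a hypothetical namespace name:
  $ kubectl create namespace quota-demo
  $ kubectl apply -f compute-quota.yaml -n quota-demo
  $ kubectl describe quota compute-quota -n quota-demo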
--------------------------------------------------------------------------------
/ch8/code/hue-reminders-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: hue-reminders
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: hue
10 | service: reminders
11 | template:
12 | metadata:
13 | name: hue-reminders
14 | labels:
15 | app: hue
16 | service: reminders
17 | spec:
18 | containers:
19 | - name: hue-reminders
20 | image: g1g1/hue-reminders:2.2
21 | resources:
22 | requests:
23 | cpu: 100m
24 | ports:
25 | - containerPort: 80
26 |
--------------------------------------------------------------------------------
/ch8/code/hue-reminders-hpa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v1
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: hue-reminders
5 | spec:
6 | maxReplicas: 15
7 | minReplicas: 10
8 | targetCPUUtilizationPercentage: 90
9 | scaleTargetRef:
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | name: hue-reminders
--------------------------------------------------------------------------------
/ch8/code/limits.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: LimitRange
3 | metadata:
4 | name: limits
5 | spec:
6 | limits:
7 | - default:
8 | cpu: 400m
9 | memory: 50Mi
10 | defaultRequest:
11 | cpu: 400m
12 | memory: 50Mi
13 | type: Container
14 |
15 |
--------------------------------------------------------------------------------
/ch8/code/nginx-deployment-with-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | run: nginx
10 | template:
11 | metadata:
12 | labels:
13 | run: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx
18 | resources:
19 | requests:
20 | cpu: 400m
21 | memory: 60Mi
22 | limits:
23 | cpu: 400m
24 | memory: 60Mi
25 | ports:
26 | - containerPort: 80
27 |
--------------------------------------------------------------------------------
/ch8/code/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | spec:
6 | replicas: 3
7 | selector:
8 | matchLabels:
9 | run: nginx
10 | template:
11 | metadata:
12 | labels:
13 | run: nginx
14 | spec:
15 | containers:
16 | - name: nginx
17 | image: nginx
18 | resources:
19 | requests:
20 | cpu: 400m
21 | ports:
22 | - containerPort: 80
23 |
--------------------------------------------------------------------------------
/ch8/code/nginx-hpa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: nginx
5 | spec:
6 | maxReplicas: 4
7 | minReplicas: 2
8 | scaleTargetRef:
9 | apiVersion: apps/v1
10 | kind: Deployment
11 | name: nginx
12 | metrics:
13 | - type: Resource
14 | resource:
15 | name: cpu
16 | target:
17 | type: Utilization
18 | averageUtilization: 90
19 |
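Usage sketch (not part of the repo): the HPA above needs a metrics source (typically metrics-server) before CPU utilization is reported; a minimal observation loop:
  $ kubectl apply -f nginx-deployment.yaml
  $ kubectl apply -f nginx-hpa.yaml
  $ kubectl get hpa nginx --watch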
--------------------------------------------------------------------------------
/ch8/code/object-count-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: object-counts-quota
5 | spec:
6 | hard:
7 | count/configmaps: 10
8 | count/persistentvolumeclaims: 4
9 | count/jobs.batch: 20
10 | count/secrets: 3
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/ch8/code/priority-class.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: scheduling.k8s.io/v1
2 | kind: PriorityClass
3 | metadata:
4 | name: high-priority-no-preempt
5 | value: 1000000
6 | preemptionPolicy: Never
7 | globalDefault: false
8 | description: "Don't preempt other pods."
--------------------------------------------------------------------------------
/ch9/code/cool-chart/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/Chart.lock:
--------------------------------------------------------------------------------
1 | dependencies:
2 | - name: kube-state-metrics
3 | repository: https://prometheus-community.github.io/helm-charts
4 | version: 4.13.0
5 | digest: sha256:1a415d114693cb32b88360f46f5cf224ce5feaf84d1f6861e15f5533ce3dc534
6 | generated: "2022-08-08T00:04:01.014988-07:00"
7 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: cool-chart
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.1.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.0"
25 |
26 | dependencies:
27 | - name: kube-state-metrics
28 | version: "4.13.*"
29 | repository: https://prometheus-community.github.io/helm-charts
30 | condition: kubeStateMetrics.enabled
31 |
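Usage sketch (not part of the repo): because the chart declares a dependency (pinned in Chart.lock), a minimal way to fetch it and sanity-check the chart:
  $ helm dependency update cool-chart
  $ helm lint cool-chart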
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | 1. Get the application URL by running these commands:
2 | {{- if .Values.ingress.enabled }}
3 | {{- range $host := .Values.ingress.hosts }}
4 | {{- range .paths }}
5 | http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
6 | {{- end }}
7 | {{- end }}
8 | {{- else if contains "NodePort" .Values.service.type }}
9 | export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "cool-chart.fullname" . }})
10 | export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
11 | echo http://$NODE_IP:$NODE_PORT
12 | {{- else if contains "LoadBalancer" .Values.service.type }}
13 | NOTE: It may take a few minutes for the LoadBalancer IP to be available.
14 | You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "cool-chart.fullname" . }}'
15 | export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "cool-chart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
16 | echo http://$SERVICE_IP:{{ .Values.service.port }}
17 | {{- else if contains "ClusterIP" .Values.service.type }}
18 | export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "cool-chart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
19 | export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
20 | echo "Visit http://127.0.0.1:8080 to use your application"
21 | kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
22 | {{- end }}
23 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "cool-chart.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "cool-chart.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "cool-chart.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "cool-chart.labels" -}}
37 | helm.sh/chart: {{ include "cool-chart.chart" . }}
38 | {{ include "cool-chart.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "cool-chart.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "cool-chart.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Create the name of the service account to use
55 | */}}
56 | {{- define "cool-chart.serviceAccountName" -}}
57 | {{- if .Values.serviceAccount.create }}
58 | {{- default (include "cool-chart.fullname" .) .Values.serviceAccount.name }}
59 | {{- else }}
60 | {{- default "default" .Values.serviceAccount.name }}
61 | {{- end }}
62 | {{- end }}
63 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ include "cool-chart.fullname" . }}
5 | labels:
6 | {{- include "cool-chart.labels" . | nindent 4 }}
7 | spec:
8 | {{- if not .Values.autoscaling.enabled }}
9 | replicas: {{ .Values.replicaCount }}
10 | {{- end }}
11 | selector:
12 | matchLabels:
13 | {{- include "cool-chart.selectorLabels" . | nindent 6 }}
14 | template:
15 | metadata:
16 | {{- with .Values.podAnnotations }}
17 | annotations:
18 | {{- toYaml . | nindent 8 }}
19 | {{- end }}
20 | labels:
21 | {{- include "cool-chart.selectorLabels" . | nindent 8 }}
22 | spec:
23 | {{- with .Values.imagePullSecrets }}
24 | imagePullSecrets:
25 | {{- toYaml . | nindent 8 }}
26 | {{- end }}
27 | serviceAccountName: {{ include "cool-chart.serviceAccountName" . }}
28 | securityContext:
29 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
30 | containers:
31 | - name: {{ .Chart.Name }}
32 | securityContext:
33 | {{- toYaml .Values.securityContext | nindent 12 }}
34 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
35 | imagePullPolicy: {{ .Values.image.pullPolicy }}
36 | ports:
37 | - name: http
38 | containerPort: 80
39 | protocol: TCP
40 | livenessProbe:
41 | httpGet:
42 | path: /
43 | port: http
44 | readinessProbe:
45 | httpGet:
46 | path: /
47 | port: http
48 | resources:
49 | {{- toYaml .Values.resources | nindent 12 }}
50 | {{- with .Values.nodeSelector }}
51 | nodeSelector:
52 | {{- toYaml . | nindent 8 }}
53 | {{- end }}
54 | {{- with .Values.affinity }}
55 | affinity:
56 | {{- toYaml . | nindent 8 }}
57 | {{- end }}
58 | {{- with .Values.tolerations }}
59 | tolerations:
60 | {{- toYaml . | nindent 8 }}
61 | {{- end }}
62 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/hpa.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.autoscaling.enabled }}
2 | apiVersion: autoscaling/v2beta1
3 | kind: HorizontalPodAutoscaler
4 | metadata:
5 | name: {{ include "cool-chart.fullname" . }}
6 | labels:
7 | {{- include "cool-chart.labels" . | nindent 4 }}
8 | spec:
9 | scaleTargetRef:
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | name: {{ include "cool-chart.fullname" . }}
13 | minReplicas: {{ .Values.autoscaling.minReplicas }}
14 | maxReplicas: {{ .Values.autoscaling.maxReplicas }}
15 | metrics:
16 | {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
17 | - type: Resource
18 | resource:
19 | name: cpu
20 | targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
21 | {{- end }}
22 | {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
23 | - type: Resource
24 | resource:
25 | name: memory
26 | targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
27 | {{- end }}
28 | {{- end }}
29 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/ingress.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.ingress.enabled -}}
2 | {{- $fullName := include "cool-chart.fullname" . -}}
3 | {{- $svcPort := .Values.service.port -}}
4 | {{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
5 | {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
6 | {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
7 | {{- end }}
8 | {{- end }}
9 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
10 | apiVersion: networking.k8s.io/v1
11 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
12 | apiVersion: networking.k8s.io/v1beta1
13 | {{- else -}}
14 | apiVersion: extensions/v1beta1
15 | {{- end }}
16 | kind: Ingress
17 | metadata:
18 | name: {{ $fullName }}
19 | labels:
20 | {{- include "cool-chart.labels" . | nindent 4 }}
21 | {{- with .Values.ingress.annotations }}
22 | annotations:
23 | {{- toYaml . | nindent 4 }}
24 | {{- end }}
25 | spec:
26 | {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
27 | ingressClassName: {{ .Values.ingress.className }}
28 | {{- end }}
29 | {{- if .Values.ingress.tls }}
30 | tls:
31 | {{- range .Values.ingress.tls }}
32 | - hosts:
33 | {{- range .hosts }}
34 | - {{ . | quote }}
35 | {{- end }}
36 | secretName: {{ .secretName }}
37 | {{- end }}
38 | {{- end }}
39 | rules:
40 | {{- range .Values.ingress.hosts }}
41 | - host: {{ .host | quote }}
42 | http:
43 | paths:
44 | {{- range .paths }}
45 | - path: {{ .path }}
46 | {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
47 | pathType: {{ .pathType }}
48 | {{- end }}
49 | backend:
50 | {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
51 | service:
52 | name: {{ $fullName }}
53 | port:
54 | number: {{ $svcPort }}
55 | {{- else }}
56 | serviceName: {{ $fullName }}
57 | servicePort: {{ $svcPort }}
58 | {{- end }}
59 | {{- end }}
60 | {{- end }}
61 | {{- end }}
62 |
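The Ingress template above is rendered only when ingress.enabled is true, and it picks the apiVersion that matches the cluster's Kubernetes version. A minimal sketch of values that would enable it, assuming a hypothetical host name and TLS Secret:

    ingress:
      enabled: true
      className: nginx
      hosts:
        - host: cool.example.com
          paths:
            - path: /
              pathType: Prefix
      tls:
        - secretName: cool-example-tls
          hosts:
            - cool.example.com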
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "cool-chart.fullname" . }}
5 | labels:
6 | {{- include "cool-chart.labels" . | nindent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | {{- include "cool-chart.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "cool-chart.serviceAccountName" . }}
6 | labels:
7 | {{- include "cool-chart.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/ch9/code/cool-chart/templates/tests/test-connection.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: "{{ include "cool-chart.fullname" . }}-test-connection"
5 | labels:
6 | {{- include "cool-chart.labels" . | nindent 4 }}
7 | annotations:
8 | "helm.sh/hook": test
9 | spec:
10 | containers:
11 | - name: wget
12 | image: busybox
13 | command: ['wget']
14 | args: ['{{ include "cool-chart.fullname" . }}:{{ .Values.service.port }}']
15 | restartPolicy: Never
16 |
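The "helm.sh/hook": test annotation marks this Pod as a Helm test hook: it is not created on install, only when the release is tested. A typical invocation, assuming the release is named cool (the name is arbitrary):

    helm install cool ./cool-chart
    helm test cool

The test passes if the wget container exits successfully, i.e. if the chart's Service responds on its configured port.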
--------------------------------------------------------------------------------
/ch9/code/cool-chart/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for cool-chart.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | replicaCount: 1
6 |
7 | image:
8 | repository: nginx
9 | pullPolicy: IfNotPresent
10 | # Overrides the image tag whose default is the chart appVersion.
11 | tag: ""
12 |
13 | imagePullSecrets: []
14 | nameOverride: ""
15 | fullnameOverride: ""
16 |
17 | serviceAccount:
18 | # Specifies whether a service account should be created
19 | create: true
20 | # Annotations to add to the service account
21 | annotations: {}
22 | # The name of the service account to use.
23 | # If not set and create is true, a name is generated using the fullname template
24 | name: ""
25 |
26 | podAnnotations: {}
27 |
28 | podSecurityContext: {}
29 | # fsGroup: 2000
30 |
31 | securityContext: {}
32 | # capabilities:
33 | # drop:
34 | # - ALL
35 | # readOnlyRootFilesystem: true
36 | # runAsNonRoot: true
37 | # runAsUser: 1000
38 |
39 | service:
40 | type: ClusterIP
41 | port: 80
42 |
43 | ingress:
44 | enabled: false
45 | className: ""
46 | annotations: {}
47 | # kubernetes.io/ingress.class: nginx
48 | # kubernetes.io/tls-acme: "true"
49 | hosts:
50 | - host: chart-example.local
51 | paths:
52 | - path: /
53 | pathType: ImplementationSpecific
54 | tls: []
55 | # - secretName: chart-example-tls
56 | # hosts:
57 | # - chart-example.local
58 |
59 | resources: {}
60 | # We usually recommend not to specify default resources and to leave this as a conscious
61 | # choice for the user. This also increases the chance that the chart runs in environments
62 | # with limited resources, such as Minikube. If you do want to specify resources, uncomment
63 | # the following lines, adjust them as necessary, and remove the curly braces after 'resources:'.
64 | # limits:
65 | # cpu: 100m
66 | # memory: 128Mi
67 | # requests:
68 | # cpu: 100m
69 | # memory: 128Mi
70 |
71 | autoscaling:
72 | enabled: false
73 | minReplicas: 1
74 | maxReplicas: 100
75 | targetCPUUtilizationPercentage: 80
76 | # targetMemoryUtilizationPercentage: 80
77 |
78 | nodeSelector: {}
79 |
80 | tolerations: []
81 |
82 | affinity: {}
83 |
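Any of these defaults can be overridden at install or upgrade time, either with --set flags or with an extra values file. A small sketch, using a hypothetical release name and override file:

    helm install cool ./cool-chart \
      --set replicaCount=3 \
      --set service.type=NodePort \
      --set autoscaling.enabled=true

    # or equivalently, with overrides collected in a file
    helm install cool ./cool-chart -f my-values.yaml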
--------------------------------------------------------------------------------
/ch9/code/food-chart/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/ch9/code/food-chart/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: food-chart
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.1.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.0"
25 |
--------------------------------------------------------------------------------
/ch9/code/food-chart/templates/config-map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: {{ .Release.Name }}-configmap
5 | data:
6 | greeting: "Hello World"
7 | drink: {{ .Values.favorite.drink | repeat 3 | quote }}
8 | food: {{ .Values.favorite.food | upper }}
--------------------------------------------------------------------------------
/ch9/code/food-chart/values.yaml:
--------------------------------------------------------------------------------
1 | favorite:
2 | drink: coffee
3 | food: pizza
4 |
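Rendering the chart locally shows how the template functions in config-map.yaml transform these values: repeat 3 triples the drink string, quote wraps it in double quotes, and upper upper-cases the food value. A sketch using helm template with a hypothetical release name:

    helm template my-food ./food-chart

    # expected ConfigMap data for the defaults above:
    data:
      greeting: "Hello World"
      drink: "coffeecoffeecoffee"
      food: PIZZA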
--------------------------------------------------------------------------------