├── .github
│   ├── CODEOWNERS
│   └── ISSUE_TEMPLATE
│       └── issue-template.md
├── docker
│   └── getstarted
│       ├── README.md
│       ├── lab3
│       │   └── docker_compose.yml
│       ├── lab1
│       │   ├── Dockerfile
│       │   └── app.py
│       ├── lab2
│       │   └── docker_compose.yml
│       └── lab4
│           └── docker_compose.yml
├── manual-installation
│   ├── README.md
│   ├── init.sh
│   ├── json
│   │   ├── front-proxy-ca-csr.json
│   │   ├── front-proxy-client-csr.json
│   │   ├── etcd-ca-csr.json
│   │   ├── ca-csr.json
│   │   ├── admin-csr.json
│   │   ├── apiserver-csr.json
│   │   ├── kubelet-csr.json
│   │   ├── ca-config.json
│   │   ├── etcd-csr.json
│   │   ├── kube-proxy-csr.json
│   │   ├── scheduler-csr.json
│   │   └── manager-csr.json
│   ├── conf
│   │   ├── kubelet.service
│   │   ├── etcd.service
│   │   ├── 10-calico.conf
│   │   ├── etcd.conf
│   │   ├── 10-kubelet.conf
│   │   ├── calico-node.service
│   │   ├── kube-proxy.yml.conf
│   │   ├── calico-controller.yml.conf
│   │   └── kube-dns.yml.conf
│   └── manifests
│       ├── scheduler.yml
│       ├── manager.yml
│       └── apiserver.yml
├── load-balancing
│   ├── run.sh
│   ├── module
│   │   ├── Makefile
│   │   └── xt_statistic.c
│   ├── services
│   │   ├── service
│   │   │   └── nginx-cluster.yml
│   │   └── deployment
│   │       └── server.yml
│   ├── input.sh
│   ├── client.py
│   └── Vagrantfile
├── addons
│   ├── dns
│   │   ├── namespace.yml
│   │   ├── externaldns
│   │   │   ├── sa.yml
│   │   │   ├── rbac.yml
│   │   │   └── deployment.yml
│   │   ├── etcd
│   │   │   ├── service.yml
│   │   │   └── deployment.yml
│   │   └── coredns
│   │       ├── service-udp.yml
│   │       ├── service-tcp.yml
│   │       ├── configmap.yml
│   │       └── deployment.yml
│   ├── monitoring
│   │   ├── namespace.yml
│   │   ├── grafana
│   │   │   ├── grafana-sa.yml
│   │   │   ├── grafana-admin-secret.yml
│   │   │   ├── grafana-svc.yml
│   │   │   ├── grafana-source.yml
│   │   │   ├── grafana-datasources.yml
│   │   │   └── grafana-dp.yml
│   │   ├── operator
│   │   │   ├── operator-sa.yml
│   │   │   ├── operator-svc.yml
│   │   │   ├── operator-dp.yml
│   │   │   └── operator-rbac.yml
│   │   ├── prometheus
│   │   │   ├── prometheus-sa.yml
│   │   │   ├── prometheus-svc.yml
│   │   │   ├── prometheus-main.yml
│   │   │   └── prometheus-rbac.yml
│   │   ├── alertmanater
│   │   │   ├── alertmanager-main-sa.yml
│   │   │   ├── alertmanager-main-svc.yml
│   │   │   ├── alertmanager-main.yml
│   │   │   └── alertmanager-main-secret.yml
│   │   ├── node-exporter
│   │   │   ├── node-exporter-sa.yml
│   │   │   ├── node-exporter-svc.yml
│   │   │   ├── node-exporter-rbac.yml
│   │   │   └── node-exporter-ds.yml
│   │   ├── kube-state-metrics
│   │   │   ├── kube-state-metrics-sa.yml
│   │   │   ├── kube-state-metrics-svc.yml
│   │   │   ├── kube-state-metrics-rbac.yml
│   │   │   └── kube-state-metrics-dp.yml
│   │   ├── prometheus-adapter
│   │   │   ├── prometheus-adapter-sa.yml
│   │   │   ├── prometheus-adapter-svc.yml
│   │   │   ├── prometheus-adapter-apiservice.yml
│   │   │   ├── prometheus-adapter-cm.yml
│   │   │   ├── prometheus-adapter-dp.yml
│   │   │   └── prometheus-adapter-rbac.yml
│   │   ├── servicemonitor
│   │   │   ├── grafana-sm.yml
│   │   │   ├── prometheus-sm.yml
│   │   │   ├── alertmanager-sm.yml
│   │   │   ├── prometheus-operator-sm.yml
│   │   │   ├── gpu-exporter-sm.yml
│   │   │   ├── kube-scheduler-sm.yml
│   │   │   ├── coredns-sm.yml
│   │   │   ├── node-exporter-sm.yml
│   │   │   ├── kube-controller-manager-sm.yml
│   │   │   ├── kube-state-metrics-sm.yml
│   │   │   ├── kube-apiserver-sm.yml
│   │   │   └── kubelet-sm.yml
│   │   ├── gpu-exporter
│   │   │   ├── gpu-exporter-svc.yml
│   │   │   └── gpu-exporter-ds.yml
│   │   ├── service-discovery
│   │   │   ├── kube-scheduler-svc.yml
│   │   │   └── kube-controller-manager-svc.yml
│   │   └── README.md
│   ├── logging
│   │   ├── README.md
│   │   ├── es-service.yaml
│   │   ├── kibana-service.yaml
│   │   ├── rbac.yml
│   │   ├── kibana-deployment.yaml
│   │   ├── fluentd-es-ds.yaml
│   │   └── es-statefulset.yaml
│   ├── metrics-server
│   │   ├── auth-delegator.yaml
│   │   ├── metrics-apiservice.yaml
│   │   ├── metrics-server-service.yaml
│   │   ├── auth-reader.yaml
│   │   ├── aggregated-metrics-reader.yaml
│   │   ├── resource-reader.yaml
│   │   └── metrics-server-deployment.yaml
│   └── ingress-controller
│       ├── service.yml
│       └── ingress-controller.yml
├── .gitignore
├── practical-k8s
│   ├── beginners-exercise
│   │   ├── lab3-tcp-nc
│   │   │   ├── Dockerfile
│   │   │   ├── tcpdump-ds.yaml
│   │   │   └── nc-listener-deploy.yaml
│   │   ├── lab2-jobs
│   │   │   ├── pi-job.yaml
│   │   │   └── hello-cjob.yaml
│   │   └── lab1-nginx
│   │       └── nginx-deploy.yaml
│   └── practical-apps
│       ├── lab2-redis
│       │   ├── js_source_code
│       │   │   └── Dockerfile
│       │   ├── reduser-svc.yml
│       │   └── reduser-deploy.yml
│       ├── lab1-wordpress
│       │   ├── 03_mysql-svc.yml
│       │   ├── 01_mysql-pvc.yml
│       │   ├── 04_wordpress-pvc.yml
│       │   ├── 06_wordpress-svc.yml
│       │   ├── 00_pv.yml
│       │   ├── 02_mysql-pod.yml
│       │   ├── 05_wordpress-deploy.yml
│       │   └── all-in-one
│       │       ├── mysql-all-in-one.yml
│       │       └── wordpress-all-in-one.yml
│       ├── lab3-tensorflow
│       │   ├── worker-svc.yaml
│       │   ├── client-svc.yaml
│       │   ├── simple-task.py
│       │   ├── worker.yaml
│       │   ├── client.yaml
│       │   └── worker-gpu.yaml
│       ├── lab5-voting
│       │   ├── persistent-volume.yml
│       │   ├── vote-hpa.yml
│       │   └── deployment.yml
│       ├── lab6-cheese
│       │   ├── cheese-ing.yml
│       │   ├── cheese-svc.yml
│       │   └── cheese-dp.yml
│       └── lab4-prometheus
│           ├── rbac-setup.yaml
│           ├── node-exporter.yaml
│           ├── grafana-deployment.yaml
│           ├── prometheus-deploy.yaml
│           ├── grafana-zabbix.yaml
│           └── prometheus-config-map.yaml
├── multi-cluster
│   ├── kubeadm
│   │   ├── pdb
│   │   │   ├── nginx-pdb.yml
│   │   │   ├── nginx-service.yml
│   │   │   └── nginx-deployment.yml
│   │   ├── harbor
│   │   │   ├── test-pod.yml
│   │   │   ├── run.sh
│   │   │   └── ca.crt
│   │   ├── Vagrantfile
│   │   └── README.md
│   └── kube-ansible
│       └── README.md
├── kubeflow
│   ├── examples
│   │   ├── notebooks
│   │   │   └── hello-world.py
│   │   ├── fairing
│   │   │   └── README.md
│   │   └── dist-mnist
│   │       ├── tfjob.yml
│   │       └── pytorchjob.yml
│   ├── single-node
│   │   └── README.md
│   └── multi-node
│       └── README.md
├── README.md
├── minikube-lab
│   ├── concept-terms
│   │   ├── pod.yml
│   │   ├── service.yml
│   │   └── deploy.yml
│   └── workshop
│       ├── lab1
│       │   ├── redis_pod.yml
│       │   └── web_pod.yml
│       ├── lab4
│       │   ├── redis_deploy.yml
│       │   ├── local-pvc.yml
│       │   └── web_deploy.yml
│       ├── build
│       │   ├── Dockerfile
│       │   └── app.py
│       ├── README.md
│       ├── lab2
│       │   └── web_deploy.yml
│       └── lab3
│           └── web_deploy.yml
└── harbor
    ├── Vagrantfile
    ├── config
    │   ├── certs
    │   │   ├── ca.crt
    │   │   ├── harbor-registry.crt
    │   │   ├── ca.key
    │   │   └── harbor-registry.key
    │   └── harbor.yml
    └── README.md
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | * @kairen
2 | load-balancing/ @hwchiu
--------------------------------------------------------------------------------
/docker/getstarted/README.md:
--------------------------------------------------------------------------------
1 | # Get Started with Docker!
2 |
--------------------------------------------------------------------------------
/manual-installation/README.md:
--------------------------------------------------------------------------------
1 | # Manual Installation
2 | > Deprecated.
--------------------------------------------------------------------------------
/load-balancing/run.sh:
--------------------------------------------------------------------------------
1 | for i in `seq 1 20`; do python3 client.py; done
2 |
--------------------------------------------------------------------------------
/manual-installation/init.sh:
--------------------------------------------------------------------------------
1 | sudo mv ./* /root/
2 | sudo rm /root/init.sh
3 |
--------------------------------------------------------------------------------
/addons/dns/namespace.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: ddns
--------------------------------------------------------------------------------
/addons/monitoring/namespace.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: monitoring
--------------------------------------------------------------------------------
/manual-installation/json/front-proxy-ca-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"kubernetes","key":{"algo":"rsa","size":2048}}
--------------------------------------------------------------------------------
/manual-installation/json/front-proxy-client-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"front-proxy-client","key":{"algo":"rsa","size":2048}}
--------------------------------------------------------------------------------
/addons/dns/externaldns/sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: external-dns
5 | namespace: ddns
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | vagrant/
3 | .vagrant/
4 | ansible.cfg
5 | hosts.ini
6 |
7 | harbor/harbor/common
8 | harbor/harbor/docker-compose.yml
--------------------------------------------------------------------------------
/addons/monitoring/grafana/grafana-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 |
--------------------------------------------------------------------------------
/addons/monitoring/operator/operator-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus-operator
5 | namespace: monitoring
--------------------------------------------------------------------------------
/addons/monitoring/prometheus/prometheus-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus-k8s
5 | namespace: monitoring
6 |
--------------------------------------------------------------------------------
/addons/monitoring/alertmanater/alertmanager-main-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: alertmanager-main
5 | namespace: monitoring
--------------------------------------------------------------------------------
/addons/monitoring/node-exporter/node-exporter-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: node-exporter
5 | namespace: monitoring
6 |
--------------------------------------------------------------------------------
/manual-installation/json/etcd-ca-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"etcd","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"etcd","OU":"Etcd Security"}]}
--------------------------------------------------------------------------------
/addons/monitoring/kube-state-metrics/kube-state-metrics-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: kube-state-metrics
5 | namespace: monitoring
--------------------------------------------------------------------------------
/addons/monitoring/prometheus-adapter/prometheus-adapter-sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus-adapter
5 | namespace: monitoring
--------------------------------------------------------------------------------
/manual-installation/json/ca-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"kubernetes","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"Kubernetes","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/manual-installation/json/admin-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"admin","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"system:masters","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/manual-installation/json/apiserver-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"kube-apiserver","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"Kubernetes","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/manual-installation/json/kubelet-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"system:node:$NODE","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","L":"Taipei","ST":"Taipei","O":"system:nodes","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/addons/logging/README.md:
--------------------------------------------------------------------------------
1 | # EFK for Kubernetes
2 |
3 | ```
4 | $ kubectl apply -f ./
5 | ```
6 |
7 | Access https://192.16.35.12:6443/api/v1/namespaces/kube-system/services/kibana-logging/proxy/.
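8 | 
9 | If the API-server proxy URL above is not reachable from your machine, a port-forward is a quick alternative (a sketch; assumes kubectl access to the cluster and the `kibana-logging` Service from this directory):
10 | 
11 | ```
12 | $ kubectl -n kube-system port-forward svc/kibana-logging 5601:5601
13 | ```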
--------------------------------------------------------------------------------
/manual-installation/json/ca-config.json:
--------------------------------------------------------------------------------
1 | {"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
--------------------------------------------------------------------------------
/manual-installation/json/etcd-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"etcd","hosts":["127.0.0.1","172.16.35.12"],"key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"etcd","OU":"Etcd Security"}]}
--------------------------------------------------------------------------------
/manual-installation/json/kube-proxy-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"system:kube-proxy","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"system:kube-proxy","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/manual-installation/json/scheduler-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"system:kube-scheduler","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"system:kube-scheduler","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/practical-k8s/beginners-exercise/lab3-tcp-nc/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM 192.168.41.65:5000/centos:7
2 | COPY CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo
3 | RUN yum update -y
4 | RUN yum install -y tcpdump netcat
5 |
--------------------------------------------------------------------------------
/manual-installation/json/manager-csr.json:
--------------------------------------------------------------------------------
1 | {"CN":"system:kube-controller-manager","key":{"algo":"rsa","size":2048},"names":[{"C":"TW","ST":"Taipei","L":"Taipei","O":"system:kube-controller-manager","OU":"Kubernetes-manual"}]}
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/pdb/nginx-pdb.yml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | name: nginx-pdb
5 | spec:
6 | maxUnavailable: 1
7 | selector:
8 | matchLabels:
9 | app: nginx
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab2-redis/js_source_code/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node
2 |
3 | RUN mkdir -p /usr/src/app/
4 | WORKDIR /usr/src/app/
5 |
6 | COPY ./* /usr/src/app/
7 | RUN npm install
8 |
9 | CMD npm start
10 |
--------------------------------------------------------------------------------
/kubeflow/examples/notebooks/hello-world.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | hello = tf.constant('Hello, TensorFlow!')
4 | sess = tf.Session()
5 | sess.run(hello)
6 |
7 | a = tf.constant(10)
8 | b = tf.constant(32)
9 | sess.run(a+b)
--------------------------------------------------------------------------------
/load-balancing/module/Makefile:
--------------------------------------------------------------------------------
1 | obj-m += xt_statistic.o
2 |
3 | all:
4 | make -C /lib/modules/$(shell uname -r)/build M=$(shell pwd) modules
5 |
6 | clean:
7 | make -C /lib/modules/$(shell uname -r)/build M=$(shell pwd) clean
8 |
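9 | # After a successful build, the module can be loaded for testing with e.g.
10 | # "sudo insmod xt_statistic.ko" (assumes headers matching the running kernel).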
--------------------------------------------------------------------------------
/addons/monitoring/grafana/grafana-admin-secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 |   user: "YWRtaW4="          # base64 for "admin"
4 |   password: "cEBzc3cwcmQ="  # base64 for "p@ssw0rd"
5 | kind: Secret
6 | metadata:
7 | name: grafana-credentials
8 | namespace: monitoring
9 | type: Opaque
10 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kubernetes Course
2 | The purpose of this course series is to get you up to speed with Docker and Kubernetes. We will guide you through setting up clusters with minikube and kubeadm, and through deploying applications with tools such as kubectl, helm, or kustomize.
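3 | 
4 | For example, a local playground cluster can be brought up as follows (a minimal sketch; assumes minikube and kubectl are installed):
5 | 
6 | ```sh
7 | $ minikube start
8 | $ kubectl get nodes
9 | ```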
--------------------------------------------------------------------------------
/minikube-lab/concept-terms/pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: ct-nginx
5 | labels:
6 | app: concept-terms
7 | spec:
8 | containers:
9 | - name: web
10 | image: nginx
11 | ports:
12 | - containerPort: 80
13 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/03_mysql-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress-mysql
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 3306
10 | selector:
11 | app: wordpress
12 | tier: mysql
13 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab1/redis_pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 |
4 | metadata:
5 | name: redis
6 | labels:
7 | name: redis
8 | app: demo
9 |
10 | spec:
11 | containers:
12 | - name: redis
13 | image: redis
14 | ports:
15 | - containerPort: 6379
16 |
--------------------------------------------------------------------------------
/minikube-lab/concept-terms/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ct-nginx-svc
5 | spec:
6 | ports:
7 | - port: 80
8 | name: http
9 | targetPort: 80
10 | protocol: TCP
11 | selector:
12 | app: concept-terms
13 | type: NodePort
14 |
15 |
--------------------------------------------------------------------------------
/load-balancing/services/service/nginx-cluster.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: k8s-udpserver-cluster
5 | labels:
6 | run: k8s-udpserver-cluster
7 | spec:
8 | ports:
9 | - port: 20001
10 | protocol: UDP
11 | selector:
12 | run: k8s-udpserver
13 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab2-redis/reduser-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: reduser-svc
5 | labels:
6 | app: reduser
7 | spec:
8 | ports:
9 | - port: 3000
10 | nodePort: 33300
11 | selector:
12 | app: reduser
13 | type: NodePort
14 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab3-tensorflow/worker-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: tf-worker-svc
5 | spec:
6 | selector:
7 | app: tf-worker
8 | type: NodePort
9 | ports:
10 | - port: 2222
11 | targetPort: 2222
12 | nodePort: 31235
13 |
14 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/grafana-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | spec:
7 | endpoints:
8 | - interval: 15s
9 | port: http
10 | selector:
11 | matchLabels:
12 | app: grafana
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab3-tensorflow/client-svc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: tf-client-svc
5 | spec:
6 | selector:
7 | app: tf-client
8 | type: NodePort
9 | ports:
10 | - port: 8888
11 | targetPort: 8888
12 | nodePort: 31234
13 |
14 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/01_mysql-pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: mysql-pv-claim
5 | labels:
6 | app: wordpress
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | resources:
11 | requests:
12 | storage: 10Gi
13 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/04_wordpress-pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: wp-pv-claim
5 | labels:
6 | app: wordpress
7 | spec:
8 | accessModes:
9 | - ReadWriteOnce
10 | resources:
11 | requests:
12 | storage: 10Gi
13 |
--------------------------------------------------------------------------------
/manual-installation/conf/kubelet.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=kubelet: The Kubernetes Node Agent
3 | Documentation=http://kubernetes.io/docs/
4 |
5 | [Service]
6 | ExecStart=/usr/local/bin/kubelet
7 | Restart=on-failure
8 | StartLimitInterval=0
9 | RestartSec=10
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/pdb/nginx-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: nginx
6 | name: nginx
7 | spec:
8 | type: LoadBalancer
9 | externalIPs:
10 | - 192.16.35.12
11 | ports:
12 | - name: http
13 | port: 80
14 | targetPort: 80
15 | selector:
16 | app: nginx
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/06_wordpress-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 80
10 | nodePort: 32100
11 | selector:
12 | app: wordpress
13 | tier: frontend
14 | type: NodePort
15 |
--------------------------------------------------------------------------------
/addons/monitoring/grafana/grafana-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | spec:
7 | type: LoadBalancer
8 | externalIPs:
9 | - 192.16.35.12
10 | ports:
11 | - name: http
12 | port: 3000
13 | targetPort: http
14 | selector:
15 | app: grafana
16 |
--------------------------------------------------------------------------------
/addons/dns/etcd/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: coredns-etcd
6 | name: coredns-etcd
7 | namespace: ddns
8 | spec:
9 | ports:
10 | - name: etcd-http
11 | port: 2379
12 | - name: etcd-peer
13 | port: 2380
14 | selector:
15 | k8s-app: coredns-etcd
16 | type: ClusterIP
17 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/prometheus-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: prometheus
6 | name: prometheus
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - interval: 30s
11 | port: web
12 | selector:
13 | matchLabels:
14 | prometheus: k8s
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/harbor/test-pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test
5 | spec:
6 | imagePullSecrets:
7 | - name: regcred
8 | containers:
9 | - name: alpine
10 | image: 192.16.35.99/library/alpine:3.7
11 | command: ["/bin/sh", "-c"]
12 | args:
13 | - "while :; do sleep 1; done"
14 |
15 |
16 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab5-voting/persistent-volume.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: postgres-pv
5 | spec:
6 | capacity:
7 | storage: 10Gi
8 | accessModes:
9 | - ReadWriteOnce
10 | persistentVolumeReclaimPolicy: Recycle
11 | nfs:
12 | path: /nfs-data/db
13 | server: 172.22.132.11
14 |
--------------------------------------------------------------------------------
/addons/monitoring/node-exporter/node-exporter-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: node-exporter
6 | name: node-exporter
7 | namespace: monitoring
8 | spec:
9 | clusterIP: None
10 | ports:
11 | - name: https
12 | port: 9100
13 | targetPort: https
14 | selector:
15 | app: node-exporter
--------------------------------------------------------------------------------
/addons/monitoring/prometheus-adapter/prometheus-adapter-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | name: prometheus-adapter
6 | name: prometheus-adapter
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: https
11 | port: 443
12 | targetPort: 6443
13 | selector:
14 | name: prometheus-adapter
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/harbor/run.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Setup kubernetes to pull image from the private registry.
4 | #
5 |
6 | set -ex
7 |
8 | kubectl create secret docker-registry regcred \
9 | --docker-server="192.16.35.99" \
10 | --docker-username=admin \
11 | --docker-password=r00tme \
12 | --docker-email=admin@example.com
13 |
--------------------------------------------------------------------------------
/addons/monitoring/alertmanater/alertmanager-main-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: alertmanager-main
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: web
11 | port: 9093
12 | targetPort: web
13 | selector:
14 | alertmanager: main
15 | app: alertmanager
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/alertmanager-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: alertmanager
6 | name: alertmanager
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - interval: 30s
11 | port: web
12 | selector:
13 | matchLabels:
14 | alertmanager: main
--------------------------------------------------------------------------------
/addons/monitoring/operator/operator-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: prometheus-operator
6 | name: prometheus-operator
7 | namespace: monitoring
8 | spec:
9 | clusterIP: None
10 | ports:
11 | - name: http
12 | port: 8080
13 | targetPort: http
14 | selector:
15 | k8s-app: prometheus-operator
--------------------------------------------------------------------------------
/addons/monitoring/prometheus/prometheus-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | name: prometheus-k8s
7 | namespace: monitoring
8 | spec:
9 | sessionAffinity: ClientIP
10 | ports:
11 | - name: web
12 | port: 9090
13 | targetPort: web
14 | selector:
15 | app: prometheus
16 | prometheus: k8s
--------------------------------------------------------------------------------
/addons/dns/coredns/service-udp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: coredns
6 | name: coredns-udp
7 | namespace: ddns
8 | spec:
9 | type: LoadBalancer
10 | externalIPs:
11 | - 192.16.35.12
12 | ports:
13 | - name: dns-udp
14 | port: 53
15 | protocol: UDP
16 | selector:
17 | k8s-app: coredns
18 |
19 |
--------------------------------------------------------------------------------
/practical-k8s/beginners-exercise/lab2-jobs/pi-job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: pi
5 | spec:
6 | template:
7 | metadata:
8 | name: pi
9 | spec:
10 | containers:
11 | - name: pi
12 | image: perl
13 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
14 | restartPolicy: Never
15 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/prometheus-operator-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: prometheus-operator
6 | name: prometheus-operator
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - honorLabels: true
11 | port: http
12 | selector:
13 | matchLabels:
14 | k8s-app: prometheus-operator
--------------------------------------------------------------------------------
/addons/metrics-server/auth-delegator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1beta1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: metrics-server:system:auth-delegator
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: system:auth-delegator
10 | subjects:
11 | - kind: ServiceAccount
12 | name: metrics-server
13 | namespace: kube-system
14 |
--------------------------------------------------------------------------------
/addons/metrics-server/metrics-apiservice.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiregistration.k8s.io/v1beta1
3 | kind: APIService
4 | metadata:
5 | name: v1beta1.metrics.k8s.io
6 | spec:
7 | service:
8 | name: metrics-server
9 | namespace: kube-system
10 | group: metrics.k8s.io
11 | version: v1beta1
12 | insecureSkipTLSVerify: true
13 | groupPriorityMinimum: 100
14 | versionPriority: 100
15 |
--------------------------------------------------------------------------------
/addons/metrics-server/metrics-server-service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: metrics-server
6 | namespace: kube-system
7 | labels:
8 | kubernetes.io/name: "Metrics-server"
9 | kubernetes.io/cluster-service: "true"
10 | spec:
11 | selector:
12 | k8s-app: metrics-server
13 | ports:
14 | - port: 443
15 | protocol: TCP
16 | targetPort: 443
17 |
--------------------------------------------------------------------------------
/addons/monitoring/prometheus-adapter/prometheus-adapter-apiservice.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apiregistration.k8s.io/v1
2 | kind: APIService
3 | metadata:
4 | name: v1beta1.metrics.k8s.io
5 | spec:
6 | group: metrics.k8s.io
7 | groupPriorityMinimum: 100
8 | insecureSkipTLSVerify: true
9 | service:
10 | name: prometheus-adapter
11 | namespace: monitoring
12 | version: v1beta1
13 | versionPriority: 100
--------------------------------------------------------------------------------
/docker/getstarted/lab3/docker_compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | web:
4 | image: get_started
5 | deploy:
6 | replicas: 5
7 | restart_policy:
8 | condition: on-failure
9 | resources:
10 | limits:
11 | cpus: "0.1"
12 | memory: 50M
13 | ports:
14 | - "80:80"
15 | networks:
16 | - webnet
17 | networks:
18 | webnet:
19 |
--------------------------------------------------------------------------------
/addons/metrics-server/auth-reader.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1beta1
3 | kind: RoleBinding
4 | metadata:
5 | name: metrics-server-auth-reader
6 | namespace: kube-system
7 | roleRef:
8 | apiGroup: rbac.authorization.k8s.io
9 | kind: Role
10 | name: extension-apiserver-authentication-reader
11 | subjects:
12 | - kind: ServiceAccount
13 | name: metrics-server
14 | namespace: kube-system
15 |
--------------------------------------------------------------------------------
/addons/monitoring/gpu-exporter/gpu-exporter-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: node-gpu-exporter
5 | labels:
6 | app: node-gpu-exporter
7 | k8s-app: node-gpu-exporter
8 | namespace: monitoring
9 | spec:
10 | type: ClusterIP
11 | clusterIP: None
12 | ports:
13 | - name: http-metrics
14 | port: 9445
15 | protocol: TCP
16 | selector:
17 | app: node-gpu-exporter
18 |
--------------------------------------------------------------------------------
/manual-installation/conf/etcd.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Etcd Service
3 | After=network.target
4 |
5 | [Service]
6 | Environment=ETCD_DATA_DIR=/var/lib/etcd/default
7 | EnvironmentFile=-/etc/etcd/etcd.conf
8 | Type=notify
9 | User=etcd
10 | PermissionsStartOnly=true
11 | ExecStart=/usr/local/bin/etcd
12 | Restart=on-failure
13 | RestartSec=10
14 | LimitNOFILE=65536
15 |
16 | [Install]
17 | WantedBy=multi-user.target
18 |
--------------------------------------------------------------------------------
/minikube-lab/concept-terms/deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: ct-nginx-deploy
5 | labels:
6 | app: concept-terms
7 | spec:
8 | replicas: 3
9 | template:
10 | metadata:
11 | labels:
12 | app: concept-terms
13 | spec:
14 | containers:
15 | - name: web
16 | image: nginx
17 | ports:
18 |         - containerPort: 80   # nginx listens on 80, matching service.yml's targetPort
19 |
20 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab1/web_pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 |
4 | metadata:
5 | name: web
6 | labels:
7 | name: web
8 | app: demo
9 |
10 | spec:
11 | containers:
12 | - name: web
13 | image: "k8s101web"
14 | imagePullPolicy: IfNotPresent
15 | env:
16 | - name: "REDIS_HOST"
17 |           value: "redis_pod_ip" # replace with the actual redis Pod IP
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab4/redis_deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 |
4 | metadata:
5 | name: redis
6 |
7 | spec:
8 |
9 | template:
10 |
11 | metadata:
12 | name: redis
13 | labels:
14 | name: redis
15 | app: demo
16 |
17 | spec:
18 | containers:
19 | - name: redis
20 | image: redis
21 | ports:
22 | - containerPort: 6379
23 |
--------------------------------------------------------------------------------
/addons/dns/coredns/service-tcp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: coredns
6 | name: coredns-tcp
7 | namespace: ddns
8 | spec:
9 | type: LoadBalancer
10 | externalIPs:
11 | - 192.16.35.12
12 | ports:
13 | - name: dns-tcp
14 | port: 53
15 | protocol: TCP
16 | - name: metrics
17 | port: 9153
18 | protocol: TCP
19 | selector:
20 | k8s-app: coredns
21 |
22 |
--------------------------------------------------------------------------------
/docker/getstarted/lab1/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Python runtime as a parent image
2 | FROM python
3 |
4 | # Set the working directory
5 | WORKDIR /home/app/
6 | COPY ./* /home/app/
7 |
8 | # Install needed packages
9 | RUN pip install Flask
10 | RUN pip install Redis
11 |
12 | # Make port 80 available to the world outside of this container
13 | EXPOSE 80
14 |
15 | # Run app.py when the container launches
16 | CMD python app.py
17 |
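18 | # Build and run (a sketch; the lab2/lab3 compose files expect the image tag "get_started"):
19 | #   docker build -t get_started .
20 | #   docker run -p 80:80 get_started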
--------------------------------------------------------------------------------
/addons/monitoring/service-discovery/kube-scheduler-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: kube-scheduler
6 | name: kube-scheduler
7 | namespace: kube-system
8 | spec:
9 | type: ClusterIP
10 | clusterIP: None
11 | ports:
12 | - name: http-metrics
13 | port: 10251
14 | protocol: TCP
15 | targetPort: 10251
16 | selector:
17 | component: kube-scheduler
18 | tier: control-plane
19 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/gpu-exporter-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: node-gpu-exporter
5 | labels:
6 | k8s-app: node-gpu-exporter
7 | namespace: monitoring
8 | spec:
9 | selector:
10 | matchLabels:
11 | k8s-app: node-gpu-exporter
12 | namespaceSelector:
13 | matchNames:
14 | - monitoring
15 | endpoints:
16 | - port: http-metrics
17 | interval: 30s
18 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab5-voting/vote-hpa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v2beta2
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | name: vote-app
5 | spec:
6 | scaleTargetRef:
7 | apiVersion: apps/v1
8 | kind: Deployment
9 | name: vote
10 | minReplicas: 2
11 | maxReplicas: 10
12 | metrics:
13 | - type: Resource
14 | resource:
15 | name: cpu
16 | target:
17 | type: Utilization
18 | averageUtilization: 50
--------------------------------------------------------------------------------
/addons/monitoring/kube-state-metrics/kube-state-metrics-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: kube-state-metrics
6 | name: kube-state-metrics
7 | namespace: monitoring
8 | spec:
9 | clusterIP: None
10 | ports:
11 | - name: https-main
12 | port: 8443
13 | targetPort: https-main
14 | - name: https-self
15 | port: 9443
16 | targetPort: https-self
17 | selector:
18 | app: kube-state-metrics
--------------------------------------------------------------------------------
/minikube-lab/workshop/build/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use an official Python runtime as a parent image
2 | FROM python
3 |
4 | # Set the working directory
5 | WORKDIR /home/app/
6 | COPY ./* /home/app/
7 | RUN mkdir /home/redis/
8 |
9 | # Install needed packages
10 | RUN pip install Flask
11 | RUN pip install Redis
12 |
13 | # Make port 80 available to the world outside of this container
14 | EXPOSE 80
15 |
16 | # Run app.py when the container launches
17 | CMD python app.py
18 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/kube-scheduler-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kube-scheduler
6 | name: kube-scheduler
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - interval: 30s
11 | port: http-metrics
12 | jobLabel: k8s-app
13 | namespaceSelector:
14 | matchNames:
15 | - kube-system
16 | selector:
17 | matchLabels:
18 | k8s-app: kube-scheduler
--------------------------------------------------------------------------------
/minikube-lab/workshop/README.md:
--------------------------------------------------------------------------------
1 | The Docker get_started tutorial (https://docs.docker.com/get-started/) deployed on Kubernetes.
2 | 
3 | - No visualizer.
4 | - Lab 3: the total count is preserved at minikube:/webvol/count.txt (container: /home/app/count.txt) and shown in the web page.
5 | - Lab 4: the total count is preserved at kube-master:/tmp/data/pv-1/count.txt (container: /home/app/count.txt) and shown in the web page.
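6 | 
7 | To check the persisted count after the labs (a sketch; paths as above):
8 | 
9 |     minikube ssh "cat /webvol/count.txt"             # lab 3
10 |     ssh kube-master "cat /tmp/data/pv-1/count.txt"   # lab 4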
--------------------------------------------------------------------------------
/docker/getstarted/lab2/docker_compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | web:
4 | # replace username/repo:tag with your name and image details
5 | image: get_started
6 | deploy:
7 | replicas: 5
8 | restart_policy:
9 | condition: on-failure
10 | resources:
11 | limits:
12 | cpus: "0.1"
13 | memory: 50M
14 | ports:
15 | - "80:80"
16 | networks:
17 | - webnet
18 | networks:
19 | webnet:
20 |
--------------------------------------------------------------------------------
/addons/logging/es-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: elasticsearch-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: elasticsearch-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | kubernetes.io/name: "Elasticsearch"
11 | spec:
12 | ports:
13 | - port: 9200
14 | protocol: TCP
15 | targetPort: db
16 | selector:
17 | k8s-app: elasticsearch-logging
18 |
--------------------------------------------------------------------------------
/addons/metrics-server/aggregated-metrics-reader.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: system:aggregated-metrics-reader
5 | labels:
6 | rbac.authorization.k8s.io/aggregate-to-view: "true"
7 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
8 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
9 | rules:
10 | - apiGroups: ["metrics.k8s.io"]
11 | resources: ["pods"]
12 | verbs: ["get", "list", "watch"]
13 |
--------------------------------------------------------------------------------
/addons/monitoring/README.md:
--------------------------------------------------------------------------------
1 | # Monitoring for Kubernetes
2 |
3 | ```sh
4 | $ kubectl apply -f namespace.yml
5 | $ kubectl apply -f operator
6 | $ kubectl apply -f service-discovery
7 | $ kubectl apply -f alertmanater
8 | $ kubectl apply -f prometheus
9 | $ kubectl apply -f prometheus-adapter
10 | $ kubectl apply -f node-exporter
11 | # $ kubectl apply -f gpu-exporter
12 | $ kubectl apply -f kube-state-metrics
13 | $ kubectl apply -f servicemonitor
14 | $ kubectl apply -f grafana
15 | ```
16 |
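17 | After everything is applied, a quick sanity check (a sketch; `servicemonitors` resolves once the operator's CRDs are installed):
18 | 
19 | ```sh
20 | $ kubectl -n monitoring get pods
21 | $ kubectl -n monitoring get servicemonitors
22 | ```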
--------------------------------------------------------------------------------
/addons/monitoring/service-discovery/kube-controller-manager-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: kube-controller-manager
6 | name: kube-controller-manager
7 | namespace: kube-system
8 | spec:
9 | type: ClusterIP
10 | clusterIP: None
11 | ports:
12 | - name: http-metrics
13 | port: 10252
14 | protocol: TCP
15 | targetPort: 10252
16 | selector:
17 | component: kube-controller-manager
18 | tier: control-plane
19 |
--------------------------------------------------------------------------------
/kubeflow/examples/fairing/README.md:
--------------------------------------------------------------------------------
1 | # Fairing for Kubeflow
2 |
3 | 1. Create a new terminal from Jupyter notebook.
4 | 2. Clone the fairing repo in the terminal:
5 |
6 | ```
7 | $ bash
8 | $ git clone https://github.com/kubeflow/fairing
9 | ```
10 |
11 | 3. In the terminal, run the following commands:
12 |
13 | ```
14 | $ cd fairing/examples/prediction
15 | $ pip3 install -r requirements.txt
16 | ```
17 |
18 | 4. Browse to the fairing/examples/prediction directory. Click on `xgboost-high-level-apis.ipynb`.
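19 | 
20 | 5. Alternatively, run the notebook headlessly from the same terminal (a sketch; assumes nbconvert is available in the notebook image):
21 | 
22 | ```
23 | $ jupyter nbconvert --to notebook --execute xgboost-high-level-apis.ipynb
24 | ```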
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/issue-template.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Issue template
3 | about: The template for reporting issues.
4 | title: Issue brief description
5 | labels:
6 | assignees:
7 |
8 | ---
9 |
10 |
11 |
12 | **Is this a BUG REPORT or FEATURE REQUEST?**:
13 |
14 | **What happened**:
15 |
16 | **What you expected to happen**:
17 |
18 | **How to reproduce it (as minimally and precisely as possible)**:
19 |
20 | **Anything else we need to know?**:
21 |
--------------------------------------------------------------------------------
/practical-k8s/beginners-exercise/lab2-jobs/hello-cjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1beta1
2 | kind: CronJob
3 | metadata:
4 | name: hello
5 | spec:
6 | schedule: "*/1 * * * *"
7 | jobTemplate:
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: hello
13 | image: busybox
14 | args:
15 | - /bin/sh
16 | - -c
17 | - date; echo Hello from the Kubernetes cluster
18 | restartPolicy: OnFailure
19 |
--------------------------------------------------------------------------------
/addons/monitoring/alertmanater/alertmanager-main.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Alertmanager
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: main
7 | namespace: monitoring
8 | spec:
9 | baseImage: quay.io/prometheus/alertmanager
10 | nodeSelector:
11 | beta.kubernetes.io/os: linux
12 | replicas: 3
13 | securityContext:
14 | fsGroup: 2000
15 | runAsNonRoot: true
16 | runAsUser: 1000
17 | serviceAccountName: alertmanager-main
18 | version: v0.16.0
19 |
--------------------------------------------------------------------------------
/addons/logging/kibana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: kibana-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kibana-logging
8 | kubernetes.io/cluster-service: "true"
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | kubernetes.io/name: "Kibana"
11 | spec:
12 | type: LoadBalancer
13 | externalIPs:
14 | - 192.16.35.12
15 | ports:
16 | - port: 5601
17 | protocol: TCP
18 | targetPort: ui
19 | selector:
20 | k8s-app: kibana-logging
21 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/coredns-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kube-dns
6 | name: coredns
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | interval: 15s
12 | port: http-metrics
13 | jobLabel: k8s-app
14 | namespaceSelector:
15 | matchNames:
16 | - kube-system
17 | selector:
18 | matchLabels:
19 | k8s-app: kube-dns
20 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/node-exporter-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: node-exporter
6 | name: node-exporter
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | interval: 30s
12 | port: https
13 | scheme: https
14 | tlsConfig:
15 | insecureSkipVerify: true
16 | jobLabel: k8s-app
17 | selector:
18 | matchLabels:
19 | k8s-app: node-exporter
--------------------------------------------------------------------------------
/load-balancing/services/deployment/server.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: k8s-udpserver
5 | spec:
6 | selector:
7 | matchLabels:
8 | run: k8s-udpserver
9 | replicas: 3
10 | template:
11 | metadata:
12 | labels:
13 | run: k8s-udpserver
14 | spec:
15 | containers:
16 | - name: k8s-udpserver
17 | imagePullPolicy: IfNotPresent
18 | image: hwchiu/pythontest
19 | ports:
20 | - containerPort: 20001
21 | protocol: UDP
22 |
23 |
--------------------------------------------------------------------------------
/load-balancing/input.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | CONTENT=${1?need desired content}
4 | TARGET_POD=${2?target PodIP}
5 | CLUSTER_IP=$(kubectl get svc -o json | jq -r '.items[] | select(.metadata.name == "k8s-udpserver-cluster").spec.clusterIP')
6 | PODIPs=$(kubectl get ep -o json | jq -r '.items[] | select(.metadata.name == "k8s-udpserver-cluster").subsets[].addresses[].ip' | tr '\n' ',')
7 |
8 | echo "${CLUSTER_IP}" | sudo tee /proc/k8s/clusterIP
9 | echo "${CONTENT},${TARGET_POD}" | sudo tee /proc/k8s/http
10 | echo "${PODIPs}" | sudo tee /proc/k8s/podIP
11 |
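12 | # Example usage (hypothetical values; exact matching semantics are defined by the
13 | # xt_statistic kernel module behind /proc/k8s):
14 | #   ./input.sh hello 10.244.1.3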
--------------------------------------------------------------------------------
/practical-k8s/beginners-exercise/lab1-nginx/nginx-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deploy
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 3
9 | template:
10 | metadata:
11 | labels:
12 | app: nginx
13 | spec:
14 | containers:
15 | - name: nginx-container
16 | image: nginx
17 | ports:
18 | - containerPort: 80
19 | resources:
20 | requests:
21 | cpu: 50m
22 | limits:
23 | cpu: 100m
24 |
--------------------------------------------------------------------------------
/addons/monitoring/alertmanater/alertmanager-main-secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | alertmanager.yaml: Imdsb2JhbCI6IAogICJyZXNvbHZlX3RpbWVvdXQiOiAiNW0iCiJyZWNlaXZlcnMiOiAKLSAibmFtZSI6ICJudWxsIgoicm91dGUiOiAKICAiZ3JvdXBfYnkiOiAKICAtICJqb2IiCiAgImdyb3VwX2ludGVydmFsIjogIjVtIgogICJncm91cF93YWl0IjogIjMwcyIKICAicmVjZWl2ZXIiOiAibnVsbCIKICAicmVwZWF0X2ludGVydmFsIjogIjEyaCIKICAicm91dGVzIjogCiAgLSAibWF0Y2giOiAKICAgICAgImFsZXJ0bmFtZSI6ICJEZWFkTWFuc1N3aXRjaCIKICAgICJyZWNlaXZlciI6ICJudWxsIg==
4 | kind: Secret
5 | metadata:
6 | name: alertmanager-main
7 | namespace: monitoring
8 | type: Opaque
--------------------------------------------------------------------------------
/addons/monitoring/grafana/grafana-source.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | dashboards.yaml: |-
4 | {
5 | "apiVersion": 1,
6 | "providers": [
7 | {
8 | "folder": "",
9 | "name": "0",
10 | "options": {
11 | "path": "/grafana-dashboard-definitions/0"
12 | },
13 | "orgId": 1,
14 | "type": "file"
15 | }
16 | ]
17 | }
18 | kind: ConfigMap
19 | metadata:
20 | name: grafana-dashboards
21 | namespace: monitoring
22 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/00_pv.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: local-pv-1
5 | labels:
6 | type: local
7 | spec:
8 | capacity:
9 | storage: 10Gi
10 | accessModes:
11 | - ReadWriteOnce
12 | hostPath:
13 | path: /local-pv/pv-1
14 |
15 | ---
16 |
17 | apiVersion: v1
18 | kind: PersistentVolume
19 | metadata:
20 | name: local-pv-2
21 | labels:
22 | type: local
23 | spec:
24 | capacity:
25 | storage: 10Gi
26 | accessModes:
27 | - ReadWriteOnce
28 | hostPath:
29 | path: /local-pv/pv-2
30 |
--------------------------------------------------------------------------------
/load-balancing/client.py:
--------------------------------------------------------------------------------
1 | import socket
2 |
3 | msgFromClient = input("Content\n")
4 | bytesToSend = str.encode(msgFromClient)
5 | serverAddressPort = ("127.0.0.1", 20001)
6 | bufferSize = 1024
7 |
8 | # Create a UDP socket at client side
9 | UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
10 | # Send to server using created UDP socket
11 | msg = UDPClientSocket.sendto(bytesToSend, serverAddressPort)
12 | msgFromServer = UDPClientSocket.recvfrom(bufferSize)
13 | msg = "Message from Server {}".format(msgFromServer[0])
14 | print(msg)
15 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab4/local-pvc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-1
5 | labels:
6 | type: local
7 | spec:
8 | capacity:
9 | storage: 20Gi
10 | accessModes:
11 | - ReadWriteMany
12 | nfs:
13 |     server: "nfs_server_ip" # replace with the actual NFS server IP
14 | path: /tmp/data/pv-1
15 |
16 | ---
17 |
18 | apiVersion: v1
19 | kind: PersistentVolumeClaim
20 | metadata:
21 | name: pvc-1
22 | spec:
23 | accessModes:
24 | - ReadWriteMany
25 | resources:
26 | requests:
27 | storage: 8Gi
28 | volumeName: pv-1
29 |
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/pdb/nginx-deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 2
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx
16 | spec:
17 | containers:
18 | - name: nginx-container
19 | image: nginx
20 | ports:
21 | - containerPort: 80
22 | resources:
23 | requests:
24 | cpu: 50m
25 | limits:
26 | cpu: 100m
27 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab2-redis/reduser-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: reduser-deploy
5 | labels:
6 | app: reduser
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: reduser
12 | template:
13 | metadata:
14 | labels:
15 | app: reduser
16 | spec:
17 | containers:
18 | - name: reduser
19 | image: kourse/reduser-env_redis:v0.1.0
20 | env:
21 | - name: REDIS_POD_IP
22 | value: "REDIS_POD_IP" # replace with the actual Redis pod IP
23 | ports:
24 | - containerPort: 3000
25 |
--------------------------------------------------------------------------------
/addons/monitoring/grafana/grafana-datasources.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | prometheus.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0=
4 | kind: Secret
5 | metadata:
6 | name: grafana-datasources
7 | namespace: monitoring
8 | type: Opaque
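9 | # Added note (not in the original file): the base64 payload above decodes to a
10 | # Grafana datasource definition pointing at http://prometheus-k8s.monitoring.svc:9090.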
--------------------------------------------------------------------------------
/manual-installation/conf/10-calico.conf:
--------------------------------------------------------------------------------
1 | {
2 | "name": "calico-k8s-network",
3 | "cniVersion": "0.1.0",
4 | "type": "calico",
5 | "etcd_endpoints": "https://172.16.35.12:2379",
6 | "etcd_ca_cert_file": "/etc/etcd/ssl/etcd-ca.pem",
7 | "etcd_cert_file": "/etc/etcd/ssl/etcd.pem",
8 | "etcd_key_file": "/etc/etcd/ssl/etcd-key.pem",
9 | "log_level": "info",
10 | "ipam": {
11 | "type": "calico-ipam"
12 | },
13 | "policy": {
14 | "type": "k8s"
15 | },
16 | "kubernetes": {
17 | "kubeconfig": "/etc/kubernetes/kubelet.conf"
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/kubeflow/examples/dist-mnist/tfjob.yml:
--------------------------------------------------------------------------------
1 | apiVersion: "kubeflow.org/v1beta1"
2 | kind: "TFJob"
3 | metadata:
4 | name: "dist-mnist-for-e2e-test"
5 | spec:
6 | tfReplicaSpecs:
7 | PS:
8 | replicas: 2
9 | restartPolicy: Never
10 | template:
11 | spec:
12 | containers:
13 | - name: tensorflow
14 | image: kourse/tf-dist-mnist-test:v1.0
15 | Worker:
16 | replicas: 3
17 | restartPolicy: Never
18 | template:
19 | spec:
20 | containers:
21 | - name: tensorflow
22 | image: kourse/tf-dist-mnist-test:v1.0
--------------------------------------------------------------------------------
/addons/ingress-controller/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: ingress-nginx
5 | namespace: ingress-nginx
6 | labels:
7 | app.kubernetes.io/name: ingress-nginx
8 | app.kubernetes.io/part-of: ingress-nginx
9 | spec:
10 | type: LoadBalancer
11 | externalIPs:
12 | - 192.16.35.12
13 | ports:
14 | - name: http
15 | port: 80
16 | targetPort: 80
17 | protocol: TCP
18 | - name: https
19 | port: 443
20 | targetPort: 443
21 | protocol: TCP
22 | selector:
23 | app.kubernetes.io/name: ingress-nginx
24 | app.kubernetes.io/part-of: ingress-nginx
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/kube-controller-manager-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kube-controller-manager
6 | name: kube-controller-manager
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - interval: 30s
11 | metricRelabelings:
12 | - action: drop
13 | regex: etcd_(debugging|disk|request|server).*
14 | sourceLabels:
15 | - __name__
16 | port: http-metrics
17 | jobLabel: k8s-app
18 | namespaceSelector:
19 | matchNames:
20 | - kube-system
21 | selector:
22 | matchLabels:
23 | k8s-app: kube-controller-manager
--------------------------------------------------------------------------------
/harbor/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.require_version ">= 1.7.0"
2 |
3 | $bridge_eth = (ENV['BRIDGE_ETH'] || "enp8s0").to_sym
4 |
5 | Vagrant.configure("2") do |config|
6 | config.vm.provider "virtualbox"
7 | config.vm.box = "k2r2bai/kubeadm-ubuntu18"
8 | config.vm.define "k8s-harbor" do |n|
9 | n.vm.hostname = "k8s-harbor"
10 | n.vm.network :private_network, ip: "192.16.35.99", auto_config: true
11 | n.vm.network "public_network", bridge: $bridge_eth
12 | n.vm.provider :virtualbox do |vb|
13 | vb.name = "#{n.vm.hostname}"
14 | vb.gui = false
15 | vb.memory = 2048
16 | vb.cpus = 1
17 | end
18 | end
19 | end
--------------------------------------------------------------------------------
/addons/dns/coredns/configmap.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: coredns
5 | namespace: ddns
6 | data:
7 | Corefile: |-
8 | .:53 {
9 | log . {
10 | class denial
11 | }
12 | cache
13 | errors
14 | loadbalance round_robin
15 | whoami
16 | health
17 | prometheus 0.0.0.0:9153
18 | proxy . 8.8.8.8:53
19 | }
20 |
21 | k8s.local {
22 | etcd k8s.local {
23 | stubzones
24 | path /skydns
25 | endpoint http://coredns-etcd:2379
26 | upstream 8.8.8.8:53 8.8.4.4:53
27 | debug
28 | }
29 | errors
30 | log
31 | }
32 |
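33 | # Added note (not in the original file): names under k8s.local resolve from the
34 | # etcd backend that external-dns populates; a record can be checked with, e.g.:
35 | #   dig @<coredns-service-ip> some-service.k8s.local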
--------------------------------------------------------------------------------
/addons/metrics-server/resource-reader.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: system:metrics-server
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - pods
11 | - nodes
12 | - nodes/stats
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | ---
18 | apiVersion: rbac.authorization.k8s.io/v1
19 | kind: ClusterRoleBinding
20 | metadata:
21 | name: system:metrics-server
22 | roleRef:
23 | apiGroup: rbac.authorization.k8s.io
24 | kind: ClusterRole
25 | name: system:metrics-server
26 | subjects:
27 | - kind: ServiceAccount
28 | name: metrics-server
29 | namespace: kube-system
30 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/02_mysql-pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: mysql-pod
5 | labels:
6 | app: wordpress
7 | spec:
8 | containers:
9 | - image: mysql:5.6
10 | name: mysql
11 | env:
12 | - name: MYSQL_ROOT_PASSWORD
13 | valueFrom:
14 | secretKeyRef:
15 | name: mysql-pass
16 | key: password
17 | ports:
18 | - containerPort: 3306
19 | name: mysql
20 | volumeMounts:
21 | - name: mysql-persistent-storage
22 | mountPath: /var/lib/mysql
23 | volumes:
24 | - name: mysql-persistent-storage
25 | persistentVolumeClaim:
26 | claimName: mysql-pv-claim
27 |
--------------------------------------------------------------------------------
/addons/logging/rbac.yml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: kibana-anonymous-proxy
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - "services/proxy"
10 | resourceNames:
11 | - "kibana-logging"
12 | verbs:
13 | - get
14 | - create
15 | - update
16 | ---
17 | apiVersion: rbac.authorization.k8s.io/v1
18 | kind: ClusterRoleBinding
19 | metadata:
20 | name: kibana-anonymous-proxy-binding
21 | namespace: ""
22 | roleRef:
23 | apiGroup: rbac.authorization.k8s.io
24 | kind: ClusterRole
25 | name: kibana-anonymous-proxy
26 | subjects:
27 | - apiGroup: rbac.authorization.k8s.io
28 | kind: User
29 | name: system:anonymous
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab6-cheese/cheese-ing.yml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1beta1
2 | kind: Ingress
3 | metadata:
4 | name: cheese
5 | spec:
6 | rules:
7 | - host: stilton.example.k8s.local
8 | http:
9 | paths:
10 | - path: /
11 | backend:
12 | serviceName: stilton
13 | servicePort: http
14 | - host: cheddar.example.k8s.local
15 | http:
16 | paths:
17 | - path: /
18 | backend:
19 | serviceName: cheddar
20 | servicePort: http
21 | - host: wensleydale.example.k8s.local
22 | http:
23 | paths:
24 | - path: /
25 | backend:
26 | serviceName: wensleydale
27 | servicePort: http
28 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab6-cheese/cheese-svc.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: stilton
5 | spec:
6 | ports:
7 | - name: http
8 | targetPort: 80
9 | port: 80
10 | selector:
11 | app: cheese
12 | task: stilton
13 | ---
14 | apiVersion: v1
15 | kind: Service
16 | metadata:
17 | name: cheddar
18 | spec:
19 | ports:
20 | - name: http
21 | targetPort: 80
22 | port: 80
23 | selector:
24 | app: cheese
25 | task: cheddar
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: wensleydale
31 | spec:
32 | ports:
33 | - name: http
34 | targetPort: 80
35 | port: 80
36 | selector:
37 | app: cheese
38 | task: wensleydale
39 |
--------------------------------------------------------------------------------
/addons/monitoring/node-exporter/node-exporter-rbac.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: node-exporter
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 | ---
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | kind: ClusterRoleBinding
21 | metadata:
22 | name: node-exporter
23 | roleRef:
24 | apiGroup: rbac.authorization.k8s.io
25 | kind: ClusterRole
26 | name: node-exporter
27 | subjects:
28 | - kind: ServiceAccount
29 | name: node-exporter
30 | namespace: monitoring
31 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab2/web_deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 |
4 | metadata:
5 | name: web-deploy
6 |
7 | spec:
8 |
9 | strategy:
10 | type: RollingUpdate
11 | rollingUpdate:
12 | maxSurge: 3
13 | maxUnavailable: 3
14 |
15 | replicas: 5
16 |
17 | template:
18 |
19 | metadata:
20 | name: web
21 | labels:
22 | name: web
23 | app: demo
24 |
25 | spec:
26 |
27 | containers:
28 | - name: web
29 | image: k8s101web
30 | imagePullPolicy: IfNotPresent
31 | env:
32 | - name: REDIS_HOST
33 | value: "redis_pod_ip" # replace with the actual Redis pod IP
34 | ports:
35 | - containerPort: 80
36 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab3-tensorflow/simple-task.py:
--------------------------------------------------------------------------------
1 | # coding=utf-8
2 | import tensorflow as tf
3 |
4 | # Target session (the remote TensorFlow worker service)
5 | server_target = "grpc://172.16.35.9"  # worker service; the worker itself listens on port 2222 (see worker.yaml)
6 | logs_path = '/tmp/train'
7 |
8 | # Pin worker task 0's computation to the CPU
9 | with tf.device("/job:worker/task:0"):
10 | with tf.device("/cpu:0"):
11 | a = tf.constant([1.5, 6.0], name='a')
12 | b = tf.Variable([1.5, 3.2], name='b')
13 | c = (a * b) + (a / b)
14 | d = c * a
15 | y = tf.assign(b, d)
16 |
17 | # Run the graph
18 | with tf.Session(server_target) as sess:
19 | sess.run(tf.global_variables_initializer())
20 | writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())
21 | print(sess.run(y))
22 |
23 |
--------------------------------------------------------------------------------
/practical-k8s/beginners-exercise/lab3-tcp-nc/tcpdump-ds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: extensions/v1beta1
2 | kind: DaemonSet
3 | metadata:
4 | name: tcpdump-ds
5 | labels:
6 | app: tcpdump
7 | spec:
8 | selector:
9 | matchLabels:
10 | app: tcpdump
11 | template:
12 | metadata:
13 | labels:
14 | app: tcpdump
15 | spec:
16 | tolerations:
17 | - key: node-role.kubernetes.io/master
18 | effect: NoSchedule
19 | hostNetwork: true
20 | containers:
21 | - name: tcpdump-container
22 | image: kourse/tcp-nc:v0.1.0
23 | imagePullPolicy: Always
24 | command:
25 | - "/bin/bash"
26 | - "-c"
27 | - "tcpdump -s 0 -#U -w - | nc NC_LISTENER_IP 8080" # replace NC_LISTENER_IP with the address of the nc listener (see nc-listener-deploy.yaml)
28 |
29 |
--------------------------------------------------------------------------------
/docker/getstarted/lab1/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from redis import Redis, RedisError
3 | import os
4 | import socket
5 |
6 | # Connect to Redis
7 | redis = Redis(host="redis", db=0, socket_connect_timeout=2, socket_timeout=2)
8 |
9 | app = Flask(__name__)
10 |
11 | @app.route("/")
12 | def hello():
13 | try:
14 | visits = redis.incr("counter")
15 | except RedisError:
16 | visits = "cannot connect to Redis, counter disabled"
17 |
18 | html = "<h3>Hello {name}!</h3>" \
19 | "<b>Hostname:</b> {hostname}<br/>" \
20 | "<b>Visits:</b> {visits}"
21 |
22 | return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits)
23 |
24 | if __name__ == "__main__":
25 | app.run(host='0.0.0.0', port=80)
26 |
--------------------------------------------------------------------------------
/addons/dns/externaldns/rbac.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: external-dns
5 | rules:
6 | - apiGroups: [""]
7 | resources: ["services"]
8 | verbs: ["get","watch","list"]
9 | - apiGroups: [""]
10 | resources: ["pods"]
11 | verbs: ["get","watch","list"]
12 | - apiGroups: ["extensions"]
13 | resources: ["ingresses"]
14 | verbs: ["get","watch","list"]
15 | - apiGroups: [""]
16 | resources: ["nodes"]
17 | verbs: ["list"]
18 | ---
19 | apiVersion: rbac.authorization.k8s.io/v1
20 | kind: ClusterRoleBinding
21 | metadata:
22 | name: external-dns-viewer
23 | roleRef:
24 | apiGroup: rbac.authorization.k8s.io
25 | kind: ClusterRole
26 | name: external-dns
27 | subjects:
28 | - kind: ServiceAccount
29 | name: external-dns
30 | namespace: ddns
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/kube-state-metrics-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kube-state-metrics
6 | name: kube-state-metrics
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | honorLabels: true
12 | interval: 30s
13 | port: https-main
14 | scheme: https
15 | scrapeTimeout: 30s
16 | tlsConfig:
17 | insecureSkipVerify: true
18 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
19 | interval: 30s
20 | port: https-self
21 | scheme: https
22 | tlsConfig:
23 | insecureSkipVerify: true
24 | jobLabel: k8s-app
25 | selector:
26 | matchLabels:
27 | k8s-app: kube-state-metrics
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab4-prometheus/rbac-setup.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus
5 | rules:
6 | - apiGroups: [""]
7 | resources:
8 | - nodes
9 | - nodes/proxy
10 | - services
11 | - endpoints
12 | - pods
13 | verbs: ["get", "list", "watch"]
14 | - nonResourceURLs: ["/metrics"]
15 | verbs: ["get"]
16 | ---
17 | apiVersion: v1
18 | kind: ServiceAccount
19 | metadata:
20 | name: prometheus
21 | namespace: default
22 | ---
23 | apiVersion: rbac.authorization.k8s.io/v1beta1
24 | kind: ClusterRoleBinding
25 | metadata:
26 | name: prometheus
27 | roleRef:
28 | apiGroup: rbac.authorization.k8s.io
29 | kind: ClusterRole
30 | name: prometheus
31 | subjects:
32 | - kind: ServiceAccount
33 | name: prometheus
34 | namespace: default
35 |
--------------------------------------------------------------------------------
/addons/monitoring/prometheus/prometheus-main.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | name: k8s
7 | namespace: monitoring
8 | spec:
9 | alerting:
10 | alertmanagers:
11 | - name: alertmanager-main
12 | namespace: monitoring
13 | port: web
14 | baseImage: quay.io/prometheus/prometheus
15 | nodeSelector:
16 | beta.kubernetes.io/os: linux
17 | replicas: 2
18 | resources:
19 | requests:
20 | memory: 400Mi
21 | ruleSelector:
22 | matchLabels:
23 | prometheus: k8s
24 | role: alert-rules
25 | securityContext:
26 | fsGroup: 2000
27 | runAsNonRoot: true
28 | runAsUser: 1000
29 | serviceAccountName: prometheus-k8s
30 | serviceMonitorNamespaceSelector: {}
31 | serviceMonitorSelector: {}
32 | version: v2.5.0
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab3-tensorflow/worker.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: tf-worker
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: tf-worker
10 | template:
11 | metadata:
12 | labels:
13 | app: tf-worker
14 | role: worker
15 | spec:
16 | containers:
17 | - name: tf-worker
18 | image: tensorflow/tensorflow
19 | ports:
20 | - containerPort: 2222
21 | command: ["/bin/sh", "-c"]
22 | args: ["
23 | echo '# coding=utf-8\nimport tensorflow as tf\ncluster = tf.train.ClusterSpec({\"worker\": [\"localhost:2222\"]})\nserver = tf.train.Server(cluster,job_name=\"worker\",task_index=0)\nserver.join()' > /opt/basic_server.py;
24 | python /opt/basic_server.py;
25 | "]
26 |
27 |
--------------------------------------------------------------------------------
/practical-k8s/beginners-exercise/lab3-tcp-nc/nc-listener-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 | metadata:
4 | name: nc-deploy
5 | labels:
6 | app: nc-listener
7 | spec:
8 | replicas: 5
9 | template:
10 | metadata:
11 | labels:
12 | app: nc-listener
13 | spec:
14 | nodeName: master1
15 | tolerations:
16 | - key: node-role.kubernetes.io/master
17 | effect: NoSchedule
18 | containers:
19 | - name: nc-listener-container
20 | image: kourse/tcp-nc:v0.1.0
21 | imagePullPolicy: Always
22 | command:
23 | - "/bin/bash"
24 | - "-c"
25 | - "nc -vvvlk -p 8080 > /temp/${HOSTNAME}.tcpdump.nc"
26 | volumeMounts:
27 | - name: tcpnc
28 | mountPath: /temp/
29 | volumes:
30 | - name: tcpnc
31 | hostPath:
32 | path: /root/nc-log/
33 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab3/web_deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 |
4 | metadata:
5 | name: web
6 |
7 | spec:
8 |
9 | strategy:
10 | type: RollingUpdate
11 | rollingUpdate:
12 | maxSurge: 3
13 | maxUnavailable: 3
14 |
15 | replicas: 5
16 |
17 | template:
18 |
19 | metadata:
20 | name: web
21 | labels:
22 | name: web
23 | app: demo
24 |
25 | spec:
26 |
27 | containers:
28 | - name: web
29 | image: k8s101web
30 | imagePullPolicy: IfNotPresent
31 | env:
32 | - name: REDIS_HOST
33 | value: "redis_pod_ip" # replace with the actual Redis pod IP
34 | ports:
35 | - containerPort: 80
36 | volumeMounts:
37 | - name: vol
38 | mountPath: /home/redis
39 |
40 | volumes:
41 | - name: vol
42 | hostPath:
43 | path: /webvol
44 |
45 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/lab4/web_deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1beta1
2 | kind: Deployment
3 |
4 | metadata:
5 | name: web
6 |
7 | spec:
8 |
9 | strategy:
10 | type: RollingUpdate
11 | rollingUpdate:
12 | maxSurge: 3
13 | maxUnavailable: 3
14 |
15 | replicas: 5
16 |
17 | template:
18 |
19 | metadata:
20 | name: web
21 | labels:
22 | name: web
23 | app: demo
24 |
25 | spec:
26 |
27 | containers:
28 | - name: web
29 | image: k8s101web
30 | imagePullPolicy: IfNotPresent
31 | env:
32 | - name: "REDIS_HOST"
33 | value: "redis_pod_ip" # replace with the actual Redis pod IP
34 | ports:
35 | - containerPort: 80
36 | volumeMounts:
37 | - name: pv-1
38 | mountPath: /home/redis
39 |
40 | volumes:
41 | - name: pv-1
42 | persistentVolumeClaim:
43 | claimName: pvc-1
44 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab4-prometheus/node-exporter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | prometheus.io/scrape: 'true'
6 | labels:
7 | app: node-exporter
8 | name: node-exporter
9 | name: node-exporter
10 | spec:
11 | clusterIP: None
12 | ports:
13 | - name: scrape
14 | port: 9100
15 | protocol: TCP
16 | selector:
17 | app: node-exporter
18 | type: ClusterIP
19 | ---
20 | apiVersion: extensions/v1beta1
21 | kind: DaemonSet
22 | metadata:
23 | name: node-exporter
24 | spec:
25 | template:
26 | metadata:
27 | labels:
28 | app: node-exporter
29 | name: node-exporter
30 | spec:
31 | containers:
32 | - image: prom/node-exporter
33 | name: node-exporter
34 | ports:
35 | - containerPort: 9100
36 | hostPort: 9100
37 | name: scrape
38 | hostNetwork: true
39 | hostPID: true
40 |
--------------------------------------------------------------------------------
/manual-installation/conf/etcd.conf:
--------------------------------------------------------------------------------
1 | # [member]
2 | ETCD_NAME=master1
3 | ETCD_DATA_DIR=/var/lib/etcd
4 | ETCD_LISTEN_PEER_URLS=https://0.0.0.0:2380
5 | ETCD_LISTEN_CLIENT_URLS=https://0.0.0.0:2379
6 | ETCD_PROXY=off
7 |
8 | # [cluster]
9 | ETCD_ADVERTISE_CLIENT_URLS=https://172.16.35.12:2379
10 | ETCD_INITIAL_ADVERTISE_PEER_URLS=https://172.16.35.12:2380
11 | ETCD_INITIAL_CLUSTER=master1=https://172.16.35.12:2380
12 | ETCD_INITIAL_CLUSTER_STATE=new
13 | ETCD_INITIAL_CLUSTER_TOKEN=etcd-k8s-cluster
14 |
15 | # [security]
16 | ETCD_CERT_FILE="/etc/etcd/ssl/etcd.pem"
17 | ETCD_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
18 | ETCD_CLIENT_CERT_AUTH="true"
19 | ETCD_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
20 | ETCD_AUTO_TLS="true"
21 | ETCD_PEER_CERT_FILE="/etc/etcd/ssl/etcd.pem"
22 | ETCD_PEER_KEY_FILE="/etc/etcd/ssl/etcd-key.pem"
23 | ETCD_PEER_CLIENT_CERT_AUTH="true"
24 | ETCD_PEER_TRUSTED_CA_FILE="/etc/etcd/ssl/etcd-ca.pem"
25 | ETCD_PEER_AUTO_TLS="true"
26 |
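27 | # Added note (not in the original file): with the TLS settings above, cluster
28 | # health can be checked via the etcd v2 client, for example:
29 | #   etcdctl --ca-file /etc/etcd/ssl/etcd-ca.pem --cert-file /etc/etcd/ssl/etcd.pem \
30 | #     --key-file /etc/etcd/ssl/etcd-key.pem \
31 | #     --endpoints https://172.16.35.12:2379 cluster-health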
--------------------------------------------------------------------------------
/multi-cluster/kube-ansible/README.md:
--------------------------------------------------------------------------------
1 | # Kube-ansible
2 | In this section you will deploy a cluster using vagrant and [kube-ansible](https://github.com/kairen/kube-ansible).
3 |
4 | **Prerequisites**:
5 | * Ansible version: *v2.5 (or newer)*.
6 | * [Vagrant](https://www.vagrantup.com/downloads.html): >= 2.0.0.
7 | * [VirtualBox](https://www.virtualbox.org/wiki/Downloads): >= 5.0.0.
8 | * Mac OS X needs the `sshpass` tool installed:
9 |
10 | ```sh
11 | $ brew install http://git.io/sshpass.rb
12 | ```
13 |
14 | The getting started guide will use Vagrant with VirtualBox to deploy a Kubernetes cluster onto virtual machines. You can deploy the cluster with a single command:
15 | ```sh
16 | $ ./hack/setup-vms
17 | Cluster Size: 1 master, 2 worker.
18 | VM Size: 1 vCPU, 2048 MB
19 | VM Info: ubuntu16, virtualbox
20 | CNI binding iface: eth1
21 | Start to deploy?(y):
22 | ```
23 | > * You can also use the `sudo ./hack/setup-vms -p libvirt -i eth1` command to deploy a cluster onto KVM.
24 |
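25 | Once provisioning completes, a quick sanity check against the new cluster
26 | (plain kubectl commands, not part of kube-ansible itself):
27 | ```sh
28 | $ kubectl get node
29 | $ kubectl -n kube-system get po
30 | ```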
--------------------------------------------------------------------------------
/kubeflow/examples/dist-mnist/pytorchjob.yml:
--------------------------------------------------------------------------------
1 | apiVersion: kubeflow.org/v1beta1
2 | kind: PyTorchJob
3 | metadata:
4 | name: pytorch-tcp-dist-mnist
5 | spec:
6 | cleanPodPolicy: None
7 | pytorchReplicaSpecs:
8 | Master:
9 | replicas: 1
10 | restartPolicy: OnFailure
11 | template:
12 | metadata:
13 | creationTimestamp: null
14 | spec:
15 | containers:
16 | - image: kourse/pytorch-dist-mnist-test:v1.0
17 | name: pytorch
18 | ports:
19 | - containerPort: 23456
20 | name: pytorchjob-port
21 | Worker:
22 | replicas: 3
23 | restartPolicy: OnFailure
24 | template:
25 | metadata:
26 | creationTimestamp: null
27 | spec:
28 | containers:
29 | - image: kourse/pytorch-dist-mnist-test:v1.0
30 | name: pytorch
31 | ports:
32 | - containerPort: 23456
33 | name: pytorchjob-port
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab3-tensorflow/client.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: tf-client
5 | labels:
6 | app: tf-client
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: tf-client
12 | template:
13 | metadata:
14 | labels:
15 | app: tf-client
16 | spec:
17 | nodeName: k8s-1
18 | containers:
19 | - name: tf-client
20 | image: tensorflow/tensorflow
21 | ports:
22 | - containerPort: 8888 # Jupyter's default port is 8888
23 | env:
24 | - name: TOKEN
25 | value: "p@ssw0rd"
26 | command: ["/bin/sh", "-c"]
27 | args: [" rm -r /notebooks/*;
28 | /run_jupyter.sh --NotebookApp.token=${TOKEN}
29 | --allow-root;"]
30 | volumeMounts:
31 | - name: tmp
32 | mountPath: /tmp/train
33 | volumes:
34 | - name: tmp
35 | hostPath:
36 | path: /root/tf-demo/data
37 |
38 |
--------------------------------------------------------------------------------
/docker/getstarted/lab4/docker_compose.yml:
--------------------------------------------------------------------------------
1 | version: "3"
2 | services:
3 | web:
4 | image: docker-lab
5 | deploy:
6 | replicas: 5
7 | restart_policy:
8 | condition: on-failure
9 | resources:
10 | limits:
11 | cpus: "0.1"
12 | memory: 50M
13 | ports:
14 | - "80:80"
15 | networks:
16 | - webnet
17 | visualizer:
18 | image: dockersamples/visualizer:stable
19 | ports:
20 | - "8080:8080"
21 | volumes:
22 | - "/var/run/docker.sock:/var/run/docker.sock"
23 | deploy:
24 | placement:
25 | constraints: [node.role == manager]
26 | networks:
27 | - webnet
28 | redis:
29 | image: redis:latest
30 | ports:
31 | - "6379:6379"
32 | volumes:
33 | - ~/data:/data
34 | deploy:
35 | replicas: 2
36 | restart_policy:
37 | condition: on-failure
38 | placement:
39 | constraints: [node.role == manager]
40 | networks:
41 | - webnet
42 | networks:
43 | webnet:
44 |
--------------------------------------------------------------------------------
/addons/monitoring/gpu-exporter/gpu-exporter-ds.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: node-gpu-exporter
5 | namespace: monitoring
6 | spec:
7 | selector:
8 | matchLabels:
9 | app: node-gpu-exporter
10 | template:
11 | metadata:
12 | labels:
13 | app: node-gpu-exporter
14 | spec:
15 | affinity:
16 | nodeAffinity:
17 | requiredDuringSchedulingIgnoredDuringExecution:
18 | nodeSelectorTerms:
19 | - matchExpressions:
20 | - key: node-gpu-exporter
21 | operator: In
22 | values:
23 | - "true"
24 | hostNetwork: true
25 | hostPID: true
26 | containers:
27 | - name: node-gpu-exporter
28 | image: kairen/gpu-prometheus-exporter:v0.1.0
29 | ports:
30 | - containerPort: 9445
31 | hostPort: 9445
32 | resources:
33 | requests:
34 | memory: 30Mi
35 | cpu: 100m
36 | limits:
37 | memory: 50Mi
38 | cpu: 200m
39 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/05_wordpress-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: wordpress
5 | labels:
6 | app: wordpress
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: wordpress
12 | tier: frontend
13 | template:
14 | metadata:
15 | labels:
16 | app: wordpress
17 | tier: frontend
18 | spec:
19 | containers:
20 | - image: wordpress:4.9.6-php5.6-apache
21 | name: wordpress
22 | env:
23 | - name: WORDPRESS_DB_HOST
24 | value: "MYSQL_POD_IP" # replace with the actual MySQL pod IP
25 | - name: WORDPRESS_DB_PASSWORD
26 | valueFrom:
27 | secretKeyRef:
28 | name: mysql-pass
29 | key: password
30 | ports:
31 | - containerPort: 80
32 | name: wordpress
33 | volumeMounts:
34 | - name: wordpress-persistent-storage
35 | mountPath: /var/www/html
36 | volumes:
37 | - name: wordpress-persistent-storage
38 | persistentVolumeClaim:
39 | claimName: wp-pv-claim
40 |
41 |
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/kube-apiserver-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: apiserver
6 | name: kube-apiserver
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | interval: 30s
12 | metricRelabelings:
13 | - action: drop
14 | regex: etcd_(debugging|disk|request|server).*
15 | sourceLabels:
16 | - __name__
17 | - action: drop
18 | regex: apiserver_admission_controller_admission_latencies_seconds_.*
19 | sourceLabels:
20 | - __name__
21 | - action: drop
22 | regex: apiserver_admission_step_admission_latencies_seconds_.*
23 | sourceLabels:
24 | - __name__
25 | port: https
26 | scheme: https
27 | tlsConfig:
28 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
29 | serverName: kubernetes
30 | jobLabel: component
31 | namespaceSelector:
32 | matchNames:
33 | - default
34 | selector:
35 | matchLabels:
36 | component: apiserver
37 | provider: kubernetes
--------------------------------------------------------------------------------
/addons/metrics-server/metrics-server-deployment.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: metrics-server
6 | namespace: kube-system
7 | ---
8 | apiVersion: extensions/v1beta1
9 | kind: Deployment
10 | metadata:
11 | name: metrics-server
12 | namespace: kube-system
13 | labels:
14 | k8s-app: metrics-server
15 | spec:
16 | selector:
17 | matchLabels:
18 | k8s-app: metrics-server
19 | template:
20 | metadata:
21 | name: metrics-server
22 | labels:
23 | k8s-app: metrics-server
24 | spec:
25 | serviceAccountName: metrics-server
26 | volumes:
27 | # mount in tmp so we can safely use from-scratch images and/or read-only containers
28 | - name: tmp-dir
29 | emptyDir: {}
30 | containers:
31 | - name: metrics-server
32 | image: k8s.gcr.io/metrics-server-amd64:v0.3.3
33 | command:
34 | - /metrics-server
35 | - --kubelet-insecure-tls
36 | - --kubelet-preferred-address-types=InternalIP
37 | imagePullPolicy: Always
38 | volumeMounts:
39 | - name: tmp-dir
40 | mountPath: /tmp
41 |
42 |
--------------------------------------------------------------------------------
/kubeflow/single-node/README.md:
--------------------------------------------------------------------------------
1 | # Kubeflow
2 | Instructions for installing Kubeflow on an existing Kubernetes cluster, with a list of supported options.
3 |
4 | ## Minikube
5 |
6 | First, create a Kubernetes cluster for Kubeflow:
7 | ```sh
8 | $ minikube start --memory=16384 --cpus=4 --kubernetes-version=v1.14.5
9 | ```
10 |
11 | ## Installing command line tools
12 | The following information is useful if you need or prefer to use command line tools for deploying and managing Kubeflow:
13 |
14 | ```sh
15 | $ wget https://github.com/kubeflow/kubeflow/releases/download/v0.5.1/kfctl_v0.5.1_linux.tar.gz
16 | $ sudo tar -C /usr/local/bin -xzf kfctl_v0.5.1_linux.tar.gz
17 | ```
18 |
19 | Run the following commands to set up and deploy Kubeflow. If you did not place the `kfctl` binary on your PATH in the previous step, use the full path to the binary each time you run it:
20 |
21 | ```sh
22 | $ kfctl init kubeflow -V
23 | $ cd kubeflow && kfctl generate all -V
24 | $ kfctl apply all -V
25 | ```
26 |
27 | Expose service:
28 |
29 | ```sh
30 | $ kubectl -n kubeflow port-forward svc/ambassador 8080:80 --address 0.0.0.0
31 | ```
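32 |
33 | The Kubeflow dashboard should then be reachable through the forwarded port,
34 | for example (assuming the port-forward above is still running):
35 | ```sh
36 | $ curl http://localhost:8080
37 | ```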
--------------------------------------------------------------------------------
/addons/monitoring/servicemonitor/kubelet-sm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kubelet
6 | name: kubelet
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | honorLabels: true
12 | interval: 30s
13 | port: https-metrics
14 | scheme: https
15 | tlsConfig:
16 | insecureSkipVerify: true
17 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
18 | honorLabels: true
19 | interval: 30s
20 | metricRelabelings:
21 | - action: drop
22 | regex: container_([a-z_]+);
23 | sourceLabels:
24 | - __name__
25 | - image
26 | - action: drop
27 | regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
28 | sourceLabels:
29 | - __name__
30 | path: /metrics/cadvisor
31 | port: https-metrics
32 | scheme: https
33 | tlsConfig:
34 | insecureSkipVerify: true
35 | jobLabel: k8s-app
36 | namespaceSelector:
37 | matchNames:
38 | - kube-system
39 | selector:
40 | matchLabels:
41 | k8s-app: kubelet
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/all-in-one/mysql-all-in-one.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress-mysql
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 3306
10 | selector:
11 | app: wordpress
12 | tier: mysql
13 |
14 | ---
15 |
16 | apiVersion: v1
17 | kind: PersistentVolumeClaim
18 | metadata:
19 | name: mysql-pv-claim
20 | labels:
21 | app: wordpress
22 | spec:
23 | accessModes:
24 | - ReadWriteOnce
25 | resources:
26 | requests:
27 | storage: 10Gi
28 |
29 | ---
30 |
31 | apiVersion: v1
32 | kind: Pod
33 | metadata:
34 | name: mysql-pod
35 | labels:
36 | app: wordpress
37 | spec:
38 | containers:
39 | - image: mysql:5.6
40 | name: mysql
41 | env:
42 | - name: MYSQL_ROOT_PASSWORD
43 | valueFrom:
44 | secretKeyRef:
45 | name: mysql-pass
46 | key: password
47 | ports:
48 | - containerPort: 3306
49 | name: mysql
50 | volumeMounts:
51 | - name: mysql-persistent-storage
52 | mountPath: /var/lib/mysql
53 | volumes:
54 | - name: mysql-persistent-storage
55 | persistentVolumeClaim:
56 | claimName: mysql-pv-claim
57 |
--------------------------------------------------------------------------------
/addons/monitoring/operator/operator-dp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | k8s-app: prometheus-operator
6 | name: prometheus-operator
7 | namespace: monitoring
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | k8s-app: prometheus-operator
13 | template:
14 | metadata:
15 | labels:
16 | k8s-app: prometheus-operator
17 | spec:
18 | serviceAccountName: prometheus-operator
19 | containers:
20 | - name: prometheus-operator
21 | image: quay.io/coreos/prometheus-operator:v0.29.0
22 | args:
23 | - --kubelet-service=kube-system/kubelet
24 | - --logtostderr=true
25 | - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1
26 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.29.0
27 | ports:
28 | - containerPort: 8080
29 | name: http
30 | resources:
31 | limits:
32 | cpu: 200m
33 | memory: 100Mi
34 | requests:
35 | cpu: 100m
36 | memory: 50Mi
37 | securityContext:
38 | allowPrivilegeEscalation: false
39 | readOnlyRootFilesystem: true
40 | securityContext:
41 | runAsNonRoot: true
42 | runAsUser: 65534
43 |
--------------------------------------------------------------------------------
/manual-installation/conf/10-kubelet.conf:
--------------------------------------------------------------------------------
1 | [Service]
2 | Environment="KUBELET_KUBECONFIG_ARGS=--address=0.0.0.0 --port=10250 --kubeconfig=/etc/kubernetes/kubelet.conf"
3 | Environment="KUBE_LOGTOSTDERR=--logtostderr=true --v=0"
4 | Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true --anonymous-auth=false"
5 | Environment="KUBELET_POD_CONTAINER=--pod-infra-container-image=gcr.io/google_containers/pause:3.0"
6 | Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
7 | Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local"
8 | Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.pem"
9 | Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port=0"
10 | Environment="KUBELET_CERTIFICATE_ARGS=--rotate-certificates=true --cert-dir=/var/lib/kubelet/pki"
11 | Environment="KUBELET_EXTRA_ARGS=--fail-swap-on=false --serialize-image-pulls=false"
12 | Environment="KUBE_NODE_LABEL=--node-labels=node-role.kubernetes.io/master=true"
13 | ExecStart=
14 | ExecStart=/usr/local/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBE_LOGTOSTDERR $KUBELET_POD_CONTAINER $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_EXTRA_ARGS $KUBE_NODE_LABEL
15 |
--------------------------------------------------------------------------------
/minikube-lab/workshop/build/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from redis import Redis, RedisError
3 | import os
4 | import socket
5 |
6 | # Connect to Redis
7 | redis = Redis(host=os.environ.get('REDIS_HOST', 'redis'), port=6379)
8 |
9 | app = Flask(__name__)
10 |
11 | @app.route("/")
12 | def hello():
13 |
14 | redisCount = 0
15 | visits = 0
16 |
17 | path = "/home/redis/count.txt"
18 |
19 | if os.path.isfile(path):
20 | with open(path,'r') as f:
21 | visits = int(f.readline())
22 | else:
23 | with open(path,'w') as f:
24 | f.write('0\n')
25 | print('file written')
26 |
27 | try:
28 | redisCount = redis.incr("counter")
29 | with open(path,'w') as f:
30 | visits += 1
31 | f.write('%d\n' % visits)
32 | print('file written')
33 |
34 | except RedisError:
35 | visits = "cannot connect to Redis, counter disabled"
36 | redisCount = "not connected to Redis"
37 |
38 |
39 | html = "<h3>Hello {name}!</h3>" \
40 | "<b>Hostname:</b> {hostname}<br/>" \
41 | "<b>Total Visits:</b> {visits}<br/>" \
42 | "<b>Visits since connected to Redis:</b> {count}"
43 |
44 | return html.format(name=os.getenv("NAME", "world"), hostname=socket.gethostname(), visits=visits, count=redisCount)
45 |
46 | if __name__ == "__main__":
47 | app.run(host='0.0.0.0', port=80)
48 |
--------------------------------------------------------------------------------
/addons/monitoring/prometheus-adapter/prometheus-adapter-cm.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | config.yaml: |
4 | resourceRules:
5 | cpu:
6 | containerQuery: sum(rate(container_cpu_usage_seconds_total{<<.LabelMatchers>>}[1m])) by (<<.GroupBy>>)
7 | nodeQuery: sum(1 - rate(node_cpu_seconds_total{mode="idle"}[1m]) * on(namespace, pod) group_left(node) node_namespace_pod:kube_pod_info:{<<.LabelMatchers>>}) by (<<.GroupBy>>)
8 | resources:
9 | overrides:
10 | node:
11 | resource: node
12 | namespace:
13 | resource: namespace
14 | pod_name:
15 | resource: pod
16 | containerLabel: container_name
17 | memory:
18 | containerQuery: sum(container_memory_working_set_bytes{<<.LabelMatchers>>}) by (<<.GroupBy>>)
19 | nodeQuery: sum(node:node_memory_bytes_total:sum{<<.LabelMatchers>>} - node:node_memory_bytes_available:sum{<<.LabelMatchers>>}) by (<<.GroupBy>>)
20 | resources:
21 | overrides:
22 | node:
23 | resource: node
24 | namespace:
25 | resource: namespace
26 | pod_name:
27 | resource: pod
28 | containerLabel: container_name
29 | window: 1m
30 | kind: ConfigMap
31 | metadata:
32 | name: adapter-config
33 | namespace: monitoring
--------------------------------------------------------------------------------
/manual-installation/conf/calico-node.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=calico node
3 | After=docker.service
4 | Requires=docker.service
5 |
6 | [Service]
7 | User=root
8 | PermissionsStartOnly=true
9 | ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \
10 | -e ETCD_ENDPOINTS=https://172.16.35.12:2379 \
11 | -e ETCD_CA_CERT_FILE=/etc/etcd/ssl/etcd-ca.pem \
12 | -e ETCD_CERT_FILE=/etc/etcd/ssl/etcd.pem \
13 | -e ETCD_KEY_FILE=/etc/etcd/ssl/etcd-key.pem \
14 | -e NODENAME=${HOSTNAME} \
15 | -e IP= \
16 | -e NO_DEFAULT_POOLS= \
17 | -e AS= \
18 | -e CALICO_LIBNETWORK_ENABLED=true \
19 | -e IP6= \
20 | -e CALICO_NETWORKING_BACKEND=bird \
21 | -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \
22 | -e FELIX_HEALTHENABLED=true \
23 | -e CALICO_IPV4POOL_CIDR=10.244.0.0/16 \
24 | -e CALICO_IPV4POOL_IPIP=always \
25 | -e IP_AUTODETECTION_METHOD=interface=enp0s8 \
26 | -e IP6_AUTODETECTION_METHOD=interface=enp0s8 \
27 | -v /etc/etcd/ssl:/etc/etcd/ssl \
28 | -v /var/run/calico:/var/run/calico \
29 | -v /lib/modules:/lib/modules \
30 | -v /run/docker/plugins:/run/docker/plugins \
31 | -v /var/run/docker.sock:/var/run/docker.sock \
32 | -v /var/log/calico:/var/log/calico \
33 | quay.io/calico/node:v2.6.2
34 | ExecStop=/usr/bin/docker rm -f calico-node
35 | Restart=on-failure
36 | RestartSec=10
37 |
38 | [Install]
39 | WantedBy=multi-user.target
40 |
--------------------------------------------------------------------------------
/manual-installation/manifests/scheduler.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | annotations:
5 | scheduler.alpha.kubernetes.io/critical-pod: ""
6 | labels:
7 | component: kube-scheduler
8 | tier: control-plane
9 | name: kube-scheduler
10 | namespace: kube-system
11 | spec:
12 | hostNetwork: true
13 | containers:
14 | - name: kube-scheduler
15 | image: gcr.io/google_containers/kube-scheduler-amd64:v1.8.2
16 | command:
17 | - kube-scheduler
18 | - --v=0
19 | - --logtostderr=true
20 | - --address=127.0.0.1
21 | - --leader-elect=true
22 | - --kubeconfig=/etc/kubernetes/scheduler.conf
23 | livenessProbe:
24 | failureThreshold: 8
25 | httpGet:
26 | host: 127.0.0.1
27 | path: /healthz
28 | port: 10251
29 | scheme: HTTP
30 | initialDelaySeconds: 15
31 | timeoutSeconds: 15
32 | resources:
33 | requests:
34 | cpu: 100m
35 | volumeMounts:
36 | - mountPath: /etc/kubernetes/pki
37 | name: k8s-certs
38 | readOnly: true
39 | - mountPath: /etc/kubernetes/scheduler.conf
40 | name: kubeconfig
41 | readOnly: true
42 | volumes:
43 | - hostPath:
44 | path: /etc/kubernetes/pki
45 | type: DirectoryOrCreate
46 | name: k8s-certs
47 | - hostPath:
48 | path: /etc/kubernetes/scheduler.conf
49 | type: FileOrCreate
50 | name: kubeconfig
51 |
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.require_version ">= 1.7.0"
2 |
3 | def set_vbox(vb, config)
4 | vb.gui = false
5 | vb.memory = 2048
6 | vb.cpus = 1
7 | config.vm.box = "k2r2bai/kubeadm-ubuntu18"
8 | end
9 |
10 | Vagrant.configure("2") do |config|
11 | config.vm.provider "virtualbox"
12 | master = 1
13 | node = 2
14 |
15 | private_count = 10
16 | (1..(master + node)).each do |mid|
17 | name = (mid <= node) ? "n" : "m"
18 | id = (mid <= node) ? mid : (mid - node)
19 |
20 | config.vm.define "k8s-#{name}#{id}" do |n|
21 | n.vm.hostname = "k8s-#{name}#{id}"
22 | ip_addr = "192.16.35.#{private_count}"
23 | n.vm.network :private_network, ip: "#{ip_addr}", auto_config: true
24 | n.vm.provider :virtualbox do |vb, override|
25 | vb.name = "#{n.vm.hostname}"
26 | set_vbox(vb, override)
27 | end
28 | private_count += 1
29 | n.vm.provision "shell", inline: "sudo swapoff -a && sudo sysctl -w vm.swappiness=0"
30 | n.vm.provision "shell", inline: <<-SHELL
31 | echo "127.0.0.1 localhost
32 |
33 | 192.16.35.10 k8s-n1
34 | 192.16.35.11 k8s-n2
35 | 192.16.35.12 k8s-m1
36 | 192.16.35.99 k8s-harbor
37 |
38 | # The following lines are desirable for IPv6 capable hosts
39 | ::1 localhost ip6-localhost ip6-loopback
40 | ff02::1 ip6-allnodes
41 | ff02::2 ip6-allrouters" | sudo tee /etc/hosts
42 | SHELL
43 | end
44 | end
45 | end
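46 |
47 | # Added note (not in the original file): this defines k8s-n1, k8s-n2 and k8s-m1
48 | # with private IPs 192.16.35.10-12; bring them up with `vagrant up` and log in
49 | # with, for example, `vagrant ssh k8s-m1`.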
--------------------------------------------------------------------------------
/addons/logging/kibana-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: kibana-logging
5 | namespace: kube-system
6 | labels:
7 | k8s-app: kibana-logging
8 | addonmanager.kubernetes.io/mode: Reconcile
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | k8s-app: kibana-logging
14 | template:
15 | metadata:
16 | labels:
17 | k8s-app: kibana-logging
18 | annotations:
19 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
20 | spec:
21 | containers:
22 | - name: kibana-logging
23 | image: docker.elastic.co/kibana/kibana-oss:7.2.0
24 | resources:
25 | # need more cpu upon initialization, therefore burstable class
26 | limits:
27 | cpu: 1000m
28 | requests:
29 | cpu: 100m
30 | env:
31 | - name: ELASTICSEARCH_HOSTS
32 | value: http://elasticsearch-logging:9200
33 | - name: SERVER_NAME
34 | value: kibana-logging
35 | ports:
36 | - containerPort: 5601
37 | name: ui
38 | protocol: TCP
39 | livenessProbe:
40 | httpGet:
41 | path: /api/status
42 | port: ui
43 | initialDelaySeconds: 5
44 | timeoutSeconds: 10
45 | readinessProbe:
46 | httpGet:
47 | path: /api/status
48 | port: ui
49 | initialDelaySeconds: 5
50 | timeoutSeconds: 10
51 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab4-prometheus/grafana-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: grafana
5 | #namespace: prometheus
6 | labels:
7 | app: grafana
8 | spec:
9 | ports:
10 | - port: 80
11 | targetPort: 3000
12 | nodePort: 30000
13 | selector:
14 | app: grafana
15 | type: LoadBalancer
16 | ---
17 | apiVersion: extensions/v1beta1
18 | kind: Deployment
19 | metadata:
20 | name: grafana
21 | #namespace: prometheus
22 | labels:
23 | app: grafana
24 | spec:
25 | replicas: 1
26 | template:
27 | metadata:
28 | labels:
29 | app: grafana
30 | spec:
31 | containers:
32 | - image: grafana/grafana:3.1.0
33 | name: grafana
34 | ports:
35 | - containerPort: 3000
36 | hostPort: 3000
37 | resources:
38 | limits:
39 | cpu: 100m
40 | memory: 100Mi
41 | requests:
42 | cpu: 100m
43 | memory: 100Mi
44 | env:
45 | - name: GF_AUTH_BASIC_ENABLED
46 | value: "false"
47 | - name: GF_AUTH_ANONYMOUS_ENABLED
48 | value: "true"
49 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE
50 | value: Admin
51 | - name: GF_SERVER_ROOT_URL
52 | value: /api/v1/proxy/namespaces/prometheus/services/grafana/
53 | volumeMounts:
54 | - name: grafana-persistent-storage
55 | mountPath: /var/lib/grafana
56 | volumes:
57 | - name: grafana-persistent-storage
58 | emptyDir: {}
59 |
--------------------------------------------------------------------------------
/addons/monitoring/operator/operator-rbac.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus-operator
5 | rules:
6 | - apiGroups:
7 | - apiextensions.k8s.io
8 | resources:
9 | - customresourcedefinitions
10 | verbs:
11 | - '*'
12 | - apiGroups:
13 | - monitoring.coreos.com
14 | resources:
15 | - alertmanagers
16 | - prometheuses
17 | - prometheuses/finalizers
18 | - alertmanagers/finalizers
19 | - servicemonitors
20 | - prometheusrules
21 | verbs:
22 | - '*'
23 | - apiGroups:
24 | - apps
25 | resources:
26 | - statefulsets
27 | verbs:
28 | - '*'
29 | - apiGroups:
30 | - ""
31 | resources:
32 | - configmaps
33 | - secrets
34 | verbs:
35 | - '*'
36 | - apiGroups:
37 | - ""
38 | resources:
39 | - pods
40 | verbs:
41 | - list
42 | - delete
43 | - apiGroups:
44 | - ""
45 | resources:
46 | - services
47 | - services/finalizers
48 | - endpoints
49 | verbs:
50 | - get
51 | - create
52 | - update
53 | - delete
54 | - apiGroups:
55 | - ""
56 | resources:
57 | - nodes
58 | verbs:
59 | - list
60 | - watch
61 | - apiGroups:
62 | - ""
63 | resources:
64 | - namespaces
65 | verbs:
66 | - get
67 | - list
68 | - watch
69 | ---
70 | apiVersion: rbac.authorization.k8s.io/v1
71 | kind: ClusterRoleBinding
72 | metadata:
73 | name: prometheus-operator
74 | roleRef:
75 | apiGroup: rbac.authorization.k8s.io
76 | kind: ClusterRole
77 | name: prometheus-operator
78 | subjects:
79 | - kind: ServiceAccount
80 | name: prometheus-operator
81 | namespace: monitoring
82 |
--------------------------------------------------------------------------------
/addons/monitoring/prometheus-adapter/prometheus-adapter-dp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: prometheus-adapter
5 | namespace: monitoring
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | name: prometheus-adapter
11 | strategy:
12 | rollingUpdate:
13 | maxSurge: 1
14 | maxUnavailable: 0
15 | template:
16 | metadata:
17 | labels:
18 | name: prometheus-adapter
19 | spec:
20 | containers:
21 | - args:
22 | - --cert-dir=/var/run/serving-cert
23 | - --config=/etc/adapter/config.yaml
24 | - --logtostderr=true
25 | - --metrics-relist-interval=1m
26 | - --prometheus-url=http://prometheus-k8s.monitoring.svc:9090/
27 | - --secure-port=6443
28 | image: quay.io/coreos/k8s-prometheus-adapter-amd64:v0.4.1
29 | name: prometheus-adapter
30 | ports:
31 | - containerPort: 6443
32 | volumeMounts:
33 | - mountPath: /tmp
34 | name: tmpfs
35 | readOnly: false
36 | - mountPath: /var/run/serving-cert
37 | name: volume-serving-cert
38 | readOnly: false
39 | - mountPath: /etc/adapter
40 | name: config
41 | readOnly: false
42 | nodeSelector:
43 | beta.kubernetes.io/os: linux
44 | serviceAccountName: prometheus-adapter
45 | volumes:
46 | - emptyDir: {}
47 | name: tmpfs
48 | - emptyDir: {}
49 | name: volume-serving-cert
50 | - configMap:
51 | name: adapter-config
52 | name: config
--------------------------------------------------------------------------------
/addons/dns/externaldns/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: external-dns
5 | namespace: ddns
6 | labels:
7 | k8s-app: external-dns
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | k8s-app: external-dns
13 | template:
14 | metadata:
15 | annotations:
16 | scheduler.alpha.kubernetes.io/critical-pod: ""
17 | labels:
18 | k8s-app: external-dns
19 | spec:
20 | serviceAccountName: external-dns
21 | affinity:
22 | nodeAffinity:
23 | requiredDuringSchedulingIgnoredDuringExecution:
24 | nodeSelectorTerms:
25 | - matchExpressions:
26 | - key: node-role.kubernetes.io/master
27 | operator: In
28 | values:
29 | - ''
30 | tolerations:
31 | - key: CriticalAddonsOnly
32 | operator: Exists
33 | - effect: NoSchedule
34 | key: node-role.kubernetes.io/master
35 | containers:
36 | - name: external-dns
37 | image: registry.opensource.zalan.do/teapot/external-dns:v0.5.17
38 | env:
39 | - name: ETCD_URLS
40 | value: "http://coredns-etcd:2379"
41 | args:
42 | - --provider=coredns
43 | - --source=service
44 | - --source=ingress
45 | - --registry=txt
46 | - --interval=10s
47 | - --log-level=debug
48 | # - --fqdn-template="{{.Namespace}}.k8s.local"
49 | # - --domain-filter=k8s.local
50 | # - --namespace=default
51 | # - --txt-owner-id=default.k8s.local
52 | # - --metrics-address=":7979"
53 |
--------------------------------------------------------------------------------
/addons/monitoring/prometheus-adapter/prometheus-adapter-rbac.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus-adapter
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - nodes
10 | - namespaces
11 | - pods
12 | - services
13 | verbs:
14 | - get
15 | - list
16 | - watch
17 | ---
18 | apiVersion: rbac.authorization.k8s.io/v1
19 | kind: ClusterRole
20 | metadata:
21 | name: resource-metrics-server-resources
22 | rules:
23 | - apiGroups:
24 | - metrics.k8s.io
25 | resources:
26 | - '*'
27 | verbs:
28 | - '*'
29 | ---
30 | apiVersion: rbac.authorization.k8s.io/v1
31 | kind: ClusterRoleBinding
32 | metadata:
33 | name: prometheus-adapter
34 | roleRef:
35 | apiGroup: rbac.authorization.k8s.io
36 | kind: ClusterRole
37 | name: prometheus-adapter
38 | subjects:
39 | - kind: ServiceAccount
40 | name: prometheus-adapter
41 | namespace: monitoring
42 | ---
43 | apiVersion: rbac.authorization.k8s.io/v1
44 | kind: ClusterRoleBinding
45 | metadata:
46 | name: resource-metrics:system:auth-delegator
47 | roleRef:
48 | apiGroup: rbac.authorization.k8s.io
49 | kind: ClusterRole
50 | name: system:auth-delegator
51 | subjects:
52 | - kind: ServiceAccount
53 | name: prometheus-adapter
54 | namespace: monitoring
55 | ---
56 | apiVersion: rbac.authorization.k8s.io/v1
57 | kind: RoleBinding
58 | metadata:
59 | name: resource-metrics-auth-reader
60 | namespace: kube-system
61 | roleRef:
62 | apiGroup: rbac.authorization.k8s.io
63 | kind: Role
64 | name: extension-apiserver-authentication-reader
65 | subjects:
66 | - kind: ServiceAccount
67 | name: prometheus-adapter
68 | namespace: monitoring
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab1-wordpress/all-in-one/wordpress-all-in-one.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: wordpress
5 | labels:
6 | app: wordpress
7 | spec:
8 | ports:
9 | - port: 80
10 | nodePort: 32100
11 | selector:
12 | app: wordpress
13 | tier: frontend
14 | type: NodePort
15 |
16 | ---
17 |
18 | apiVersion: v1
19 | kind: PersistentVolumeClaim
20 | metadata:
21 | name: wp-pv-claim
22 | labels:
23 | app: wordpress
24 | spec:
25 | accessModes:
26 | - ReadWriteOnce
27 | resources:
28 | requests:
29 | storage: 10Gi
30 |
31 | ---
32 |
33 | apiVersion: apps/v1
34 | kind: Deployment
35 | metadata:
36 | name: wordpress
37 | labels:
38 | app: wordpress
39 | spec:
40 | replicas: 3
41 | selector:
42 | matchLabels:
43 | app: wordpress
44 | tier: frontend
45 | template:
46 | metadata:
47 | labels:
48 | app: wordpress
49 | tier: frontend
50 | spec:
51 | containers:
52 | - image: wordpress:4.9.6-php5.6-apache
53 | name: wordpress
54 | env:
55 | - name: WORDPRESS_DB_HOST
56 | value: "MYSQL_POD_IP" # replace with the actual MySQL pod IP
57 | - name: WORDPRESS_DB_PASSWORD
58 | valueFrom:
59 | secretKeyRef:
60 | name: mysql-pass
61 | key: password
62 | ports:
63 | - containerPort: 80
64 | name: wordpress
65 | volumeMounts:
66 | - name: wordpress-persistent-storage
67 | mountPath: /var/www/html
68 | volumes:
69 | - name: wordpress-persistent-storage
70 | persistentVolumeClaim:
71 | claimName: wp-pv-claim
72 |
73 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab4-prometheus/prometheus-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | annotations:
5 | prometheus.io/scrape: 'true'
6 | labels:
7 | name: prometheus
8 | name: prometheus
9 | spec:
10 | selector:
11 | app: prometheus-server
12 | type: NodePort
13 | ports:
14 | - name: prometheus
15 | protocol: TCP
16 | port: 9090
17 | nodePort: 30008
18 | ---
19 | apiVersion: extensions/v1beta1
20 | kind: Deployment
21 | metadata:
22 | name: prometheus-deployment
23 | spec:
24 | replicas: 1
25 | template:
26 | metadata:
27 | labels:
28 | app: prometheus-server
29 | spec:
30 | serviceAccountName: prometheus
31 | containers:
32 | - name: prometheus
33 | image: prom/prometheus:v1.8.2
34 | args:
35 | - "-config.file=/etc/prometheus/prometheus.yml"
36 | - "-storage.local.path=/prometheus/"
37 | - "-storage.local.retention=8760h"
38 | #- "-storage.local.memory-chunks=500000"
39 | #- "-query.staleness-delta=30s"
40 | ports:
41 | - containerPort: 9090
42 | volumeMounts:
43 | - name: prometheus-config-volume
44 | mountPath: /etc/prometheus/
45 | - name: prometheus-storage-volume
46 | mountPath: /prometheus/
47 | volumes:
48 | - name: prometheus-config-volume
49 | configMap:
50 | defaultMode: 420
51 | name: prometheus-server-conf
52 | - name: prometheus-storage-volume
53 | emptyDir: {}
54 | #nfs:
55 | #server: 10.241.1.11
56 | #path: "/nfs/prometheus"
57 | #nodeSelector:
58 | #release: production
59 | #gputype: v100
60 |
--------------------------------------------------------------------------------
/addons/dns/etcd/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | k8s-app: coredns-etcd
6 | name: coredns-etcd
7 | namespace: ddns
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | k8s-app: coredns-etcd
13 | template:
14 | metadata:
15 | annotations:
16 | scheduler.alpha.kubernetes.io/critical-pod: ""
17 | labels:
18 | k8s-app: coredns-etcd
19 | spec:
20 | affinity:
21 | nodeAffinity:
22 | requiredDuringSchedulingIgnoredDuringExecution:
23 | nodeSelectorTerms:
24 | - matchExpressions:
25 | - key: node-role.kubernetes.io/master
26 | operator: In
27 | values:
28 | - ''
29 | tolerations:
30 | - key: CriticalAddonsOnly
31 | operator: Exists
32 | - effect: NoSchedule
33 | key: node-role.kubernetes.io/master
34 | containers:
35 | - name: coredns-etcd
36 | image: quay.io/coreos/etcd:v3.3.8
37 | command: ["/usr/local/bin/etcd"]
38 | args:
39 | - --listen-client-urls=http://0.0.0.0:2379
40 | - --advertise-client-urls=http://0.0.0.0:2379
41 | - --data-dir=/var/lib/etcd
42 | livenessProbe:
43 | failureThreshold: 5
44 | tcpSocket:
45 | port: 2379
46 | initialDelaySeconds: 60
47 | periodSeconds: 10
48 | successThreshold: 1
49 | timeoutSeconds: 5
50 | ports:
51 | - name: etcd-http
52 | containerPort: 2379
53 | protocol: TCP
54 | - name: etcd-peer
55 | containerPort: 2380
56 | protocol: TCP
57 | resources:
58 | limits:
59 | cpu: 500m
60 | memory: 512Mi
61 | requests:
62 | cpu: 100m
63 | memory: 128Mi
64 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab3-tensorflow/worker-gpu.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: tf-worker
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: tf-worker
10 | template:
11 | metadata:
12 | labels:
13 | app: tf-worker
14 | role: worker
15 | spec:
16 | nodeName: k8s-4
17 | containers:
18 | - name: tf-worker
19 | image: tensorflow/tensorflow:latest-gpu
20 | resources:
21 | limits:
22 | nvidia.com/gpu: 1
23 | ports:
24 | - containerPort: 2222
25 | command: ["/bin/sh", "-c"]
26 | args: ["
27 | echo '# coding=utf-8\nimport tensorflow as tf\ncluster = tf.train.ClusterSpec({\"worker\": [\"localhost:2222\"]})\nserver = tf.train.Server(cluster,job_name=\"worker\",task_index=0)\nserver.join()' > /opt/basic_server.py;
28 | python /opt/basic_server.py;
29 | export CUDA_VISIBLE_DEVICES=0;"]
30 | volumeMounts:
31 | - name: nvidia-driver-396-26
32 | mountPath: /usr/local/nvidia
33 | readOnly: true
34 | - name: libcuda-so
35 | mountPath: /usr/lib/x86_64-linux-gnu/libcuda.so
36 | - name: libcuda-so-1
37 | mountPath: /usr/lib/x86_64-linux-gnu/libcuda.so.1
38 | - name: libcuda-so-396-26
39 | mountPath: /usr/lib/x86_64-linux-gnu/libcuda.so.396.26
40 | volumes:
41 | - name: nvidia-driver-396-26
42 | hostPath:
43 | path: /var/lib/nvidia-docker/volumes/nvidia_driver/396.26
44 | - name: libcuda-so
45 | hostPath:
46 | path: /usr/lib/x86_64-linux-gnu/libcuda.so
47 | - name: libcuda-so-1
48 | hostPath:
49 | path: /usr/lib/x86_64-linux-gnu/libcuda.so.1
50 | - name: libcuda-so-396-26
51 | hostPath:
52 | path: /usr/lib/x86_64-linux-gnu/libcuda.so.396.26
53 |
54 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab4-prometheus/grafana-zabbix.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: grafana
5 | labels:
6 | app: grafana
7 | spec:
8 | ports:
9 | - port: 80
10 | targetPort: 3000
11 | nodePort: 30009
12 | selector:
13 | name: grafana
14 | type: LoadBalancer
15 | ---
16 | apiVersion: extensions/v1beta1
17 | kind: Deployment
18 | metadata:
19 | name: grafana
20 | labels:
21 | app: grafana
22 | spec:
23 | replicas: 1
24 | template:
25 | metadata:
26 | labels:
27 | name: grafana
28 | app: grafana
29 | spec:
30 | containers:
31 | - name: grafana
32 | image: monitoringartist/grafana-xxl
33 | #image: 10.241.1.11:30001/grafana-xxl:joe
34 | ports:
35 | - containerPort: 3000
36 | volumeMounts:
37 | - name: grafana-storage
38 | mountPath: /var/lib/grafana
39 | env:
40 | - name: INFLUXDB_HOST
41 | value: monitoring-influxdb
42 | - name: GRAFANA_PORT
43 | value: "3000"
44 | # The following env variables are required to make Grafana accessible via
45 | # the kubernetes api-server proxy. On production clusters, we recommend
46 | # removing these env variables, setup auth for grafana, and expose the grafana
47 | # service using a LoadBalancer or a public IP.
48 | - name: GF_AUTH_BASIC_ENABLED
49 | value: "false"
50 | - name: GF_AUTH_ANONYMOUS_ENABLED
51 | value: "true"
52 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE
53 | value: Admin
54 | - name: GF_SERVER_ROOT_URL
55 | # If you're only using the API Server proxy, set this value instead:
56 | # value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
57 | value: /
58 | volumes:
59 | - name: grafana-storage
60 | emptyDir: {}
61 | #nfs:
62 | #server: 10.241.1.11
63 | #path: "/nfs/grafana"
64 | #nodeSelector:
65 | #release: production
66 |
--------------------------------------------------------------------------------
/manual-installation/conf/kube-proxy.yml.conf:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: kube-proxy
5 | labels:
6 | k8s-app: kube-proxy
7 | kubernetes.io/cluster-service: "true"
8 | addonmanager.kubernetes.io/mode: Reconcile
9 | namespace: kube-system
10 | ---
11 | apiVersion: extensions/v1beta1
12 | kind: DaemonSet
13 | metadata:
14 | name: kube-proxy
15 | labels:
16 | k8s-app: kube-proxy
17 | kubernetes.io/cluster-service: "true"
18 | addonmanager.kubernetes.io/mode: Reconcile
19 | namespace: kube-system
20 | spec:
21 | selector:
22 | matchLabels:
23 | k8s-app: kube-proxy
24 | templateGeneration: 1
25 | updateStrategy:
26 | rollingUpdate:
27 | maxUnavailable: 1
28 | type: RollingUpdate
29 | template:
30 | metadata:
31 | labels:
32 | k8s-app: kube-proxy
33 | annotations:
34 | scheduler.alpha.kubernetes.io/critical-pod: ''
35 | spec:
36 | serviceAccountName: kube-proxy
37 | hostNetwork: true
38 | containers:
39 | - name: kube-proxy
40 | image: gcr.io/google_containers/kube-proxy-amd64:v1.8.2
41 | command:
42 | - kube-proxy
43 | - --v=0
44 | - --logtostderr=true
45 | - --kubeconfig=/run/kube-proxy.conf
46 | - --cluster-cidr=10.244.0.0/16
47 | - --proxy-mode=iptables
48 | imagePullPolicy: IfNotPresent
49 | securityContext:
50 | privileged: true
51 | volumeMounts:
52 | - mountPath: /run/kube-proxy.conf
53 | name: kubeconfig
54 | readOnly: true
55 | - mountPath: /etc/kubernetes/pki
56 | name: k8s-certs
57 | readOnly: true
58 | dnsPolicy: ClusterFirst
59 | restartPolicy: Always
60 | terminationGracePeriodSeconds: 30
61 | volumes:
62 | - hostPath:
63 | path: /etc/kubernetes/kube-proxy.conf
64 | type: FileOrCreate
65 | name: kubeconfig
66 | - hostPath:
67 | path: /etc/kubernetes/pki
68 | type: DirectoryOrCreate
69 | name: k8s-certs
70 |
--------------------------------------------------------------------------------
/harbor/config/certs/ca.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFtTCCA52gAwIBAgIUZDMmC7qs4nxXNMN8FhKTfgyoitMwDQYJKoZIhvcNAQEL
3 | BQAwajELMAkGA1UEBhMCVFcxEzARBgNVBAgMCk5ldyBUYWlwZWkxEzARBgNVBAcM
4 | Ck5ldyBUYWlwZWkxFTATBgNVBAoMDHRlc3RfY29tcGFueTELMAkGA1UECwwCSVQx
5 | DTALBgNVBAMMBHRlc3QwHhcNMTkwODEyMTU0NzQwWhcNMjAwODExMTU0NzQwWjBq
6 | MQswCQYDVQQGEwJUVzETMBEGA1UECAwKTmV3IFRhaXBlaTETMBEGA1UEBwwKTmV3
7 | IFRhaXBlaTEVMBMGA1UECgwMdGVzdF9jb21wYW55MQswCQYDVQQLDAJJVDENMAsG
8 | A1UEAwwEdGVzdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4Ezpja
9 | qwY3B9yYZhk/wGoFbHRMNV2BXHxvqV/G35K2ZVH/XwZQGAZxe9UfoisU9/6MsdA0
10 | lKymEaAG2dTJvlgqIzfA4p2wfOcghGjV4vCWPMP+RqIRAVze5c590JEod5VaErYh
11 | eJ20MuBvMIW74Xva0WPykFo1TIEhytpalX5JBL4VWXxPcCVRG2w6tRPNYKLpFvJf
12 | kFMYWGVphRsuK//O2ItzAQhPlp1YskpCGTSxbvBkk0U1Uwxdg8zrTP4DZokD3GWs
13 | Kwp7ghix99PLebAUsUelZ6Smx2PNdgoj60dufqayIObq0tsVDEyUF1qien7k6RS9
14 | SUIVPmZYY89qBHgBTYRHtYNupVUpIMVHfWYCAFhO29d++2YQT/9Xao0i66PN1dAu
15 | c2SjTcdYpUKKal5E6YZKQc3GjFIlBIpgtYtD1d70TJ1wiuQrN6LE2rSIXcsgCu2O
16 | 5aJBCDKRmD71N6ZEsK++79aWkYJBGh/sisOjPO+kch5GEa8BF4WjIh7vaJuh8msI
17 | yMQX2iX6B9JeZiFVkucldQ/6vonj8T0DR8lmUObyZ5aw8ZrWXII9WDSLW2rqMDO9
18 | P7BFYjuRgI5GYVqV1bP503qzvOu+QaMUp++MYurhqTQ/EY+Pl2pl9bislbFuz9WI
19 | wWH19Pv5EZoVVvFIYLXOWdM5/mTyPZ4+gyDhAgMBAAGjUzBRMB0GA1UdDgQWBBQB
20 | Sx9PSJ49TvEwayvZuWj/bAlREjAfBgNVHSMEGDAWgBQBSx9PSJ49TvEwayvZuWj/
21 | bAlREjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQB7WTXzS7s4
22 | zCHtJZ+YX99ula2usy0ACRBB/OWjuJPKVwIFq8Y2iHu/282p5B/lgKa0K8l9q5CV
23 | DFaUntM0AXL9gviZl4S+Oc8Bpkesn/wIVxNi/ga/G52qE3EiEOmqEvWB1cf6uNTx
24 | rwRHumBkcgS2LyWRiOdlqpCvuE4HbUu59SaQACc1R/52EzA3pR5QPsNfDcI/Ecme
25 | F9Rmhvpmfb4EGq+WIK6xge2L5X8GQhDZQru5sUAFEDjodtXaFWoNqCCvNL73G+6v
26 | qP5YWpH614deUWAVJiOCZOzF4B95dm1NGqiCYCJw0lNzlrG227/N5fKQoWIVFxxp
27 | +QWrHoBGDkg1QCtFJmmQoXnDuAI4Z7tqzS1AFZu8SelNyedXt3R+TqT2jrdnJMS/
28 | YgQX0tcQQbo7/MVo6EWzuqN/LEaYCYX+O+DukVe4vWDrzRWvq8hNJ55OjwMZZXn7
29 | iEV9xyO/OHhmE4U6+3IsVJsdP5y9R6RIGGdaTX/nxZ4p9FqMSK4BQbQ1ZswT1aCL
30 | 6cTg70BOzmXMcz37fgP/FQwNp0pacU//u4X16P4WHi3PMyA+qE6RqsQIhKul1ao/
31 | zHg2BQ5wdx/2DFcYvxUQF8XRlSr0hap/d4JavkfJEdbfOg2QntZ0H5Kro0OkDA/B
32 | HHsc3m8NSWY+fEMH9YtGsXQlgH8WRJ3d6g==
33 | -----END CERTIFICATE-----
34 |
--------------------------------------------------------------------------------
/manual-installation/conf/calico-controller.yml.conf:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: calico-kube-controllers
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: calico-kube-controllers
9 | subjects:
10 | - kind: ServiceAccount
11 | name: calico-kube-controllers
12 | namespace: kube-system
13 | ---
14 | kind: ClusterRole
15 | apiVersion: rbac.authorization.k8s.io/v1beta1
16 | metadata:
17 | name: calico-kube-controllers
18 | namespace: kube-system
19 | rules:
20 | - apiGroups:
21 | - ""
22 | - extensions
23 | resources:
24 | - pods
25 | - namespaces
26 | - networkpolicies
27 | verbs:
28 | - watch
29 | - list
30 | ---
31 | apiVersion: v1
32 | kind: ServiceAccount
33 | metadata:
34 | name: calico-kube-controllers
35 | namespace: kube-system
36 | ---
37 | apiVersion: extensions/v1beta1
38 | kind: Deployment
39 | metadata:
40 | name: calico-policy-controller
41 | namespace: kube-system
42 | labels:
43 | k8s-app: calico-policy
44 | spec:
45 | strategy:
46 | type: Recreate
47 | template:
48 | metadata:
49 | name: calico-policy-controller
50 | namespace: kube-system
51 | labels:
52 | k8s-app: calico-policy
53 | spec:
54 | hostNetwork: true
55 | serviceAccountName: calico-kube-controllers
56 | containers:
57 | - name: calico-policy-controller
58 | image: quay.io/calico/kube-controllers:v1.0.0
59 | env:
60 | - name: ETCD_ENDPOINTS
61 | value: "https://172.16.35.12:2379"
62 | - name: ETCD_CA_CERT_FILE
63 | value: "/etc/etcd/ssl/etcd-ca.pem"
64 | - name: ETCD_CERT_FILE
65 | value: "/etc/etcd/ssl/etcd.pem"
66 | - name: ETCD_KEY_FILE
67 | value: "/etc/etcd/ssl/etcd-key.pem"
68 | volumeMounts:
69 | - mountPath: /etc/etcd/ssl
70 | name: etcd-ca-certs
71 | readOnly: true
72 | volumes:
73 | - hostPath:
74 | path: /etc/etcd/ssl
75 | type: DirectoryOrCreate
76 | name: etcd-ca-certs
77 |
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/harbor/ca.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFtTCCA52gAwIBAgIUZDMmC7qs4nxXNMN8FhKTfgyoitMwDQYJKoZIhvcNAQEL
3 | BQAwajELMAkGA1UEBhMCVFcxEzARBgNVBAgMCk5ldyBUYWlwZWkxEzARBgNVBAcM
4 | Ck5ldyBUYWlwZWkxFTATBgNVBAoMDHRlc3RfY29tcGFueTELMAkGA1UECwwCSVQx
5 | DTALBgNVBAMMBHRlc3QwHhcNMTkwODEyMTU0NzQwWhcNMjAwODExMTU0NzQwWjBq
6 | MQswCQYDVQQGEwJUVzETMBEGA1UECAwKTmV3IFRhaXBlaTETMBEGA1UEBwwKTmV3
7 | IFRhaXBlaTEVMBMGA1UECgwMdGVzdF9jb21wYW55MQswCQYDVQQLDAJJVDENMAsG
8 | A1UEAwwEdGVzdDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4Ezpja
9 | qwY3B9yYZhk/wGoFbHRMNV2BXHxvqV/G35K2ZVH/XwZQGAZxe9UfoisU9/6MsdA0
10 | lKymEaAG2dTJvlgqIzfA4p2wfOcghGjV4vCWPMP+RqIRAVze5c590JEod5VaErYh
11 | eJ20MuBvMIW74Xva0WPykFo1TIEhytpalX5JBL4VWXxPcCVRG2w6tRPNYKLpFvJf
12 | kFMYWGVphRsuK//O2ItzAQhPlp1YskpCGTSxbvBkk0U1Uwxdg8zrTP4DZokD3GWs
13 | Kwp7ghix99PLebAUsUelZ6Smx2PNdgoj60dufqayIObq0tsVDEyUF1qien7k6RS9
14 | SUIVPmZYY89qBHgBTYRHtYNupVUpIMVHfWYCAFhO29d++2YQT/9Xao0i66PN1dAu
15 | c2SjTcdYpUKKal5E6YZKQc3GjFIlBIpgtYtD1d70TJ1wiuQrN6LE2rSIXcsgCu2O
16 | 5aJBCDKRmD71N6ZEsK++79aWkYJBGh/sisOjPO+kch5GEa8BF4WjIh7vaJuh8msI
17 | yMQX2iX6B9JeZiFVkucldQ/6vonj8T0DR8lmUObyZ5aw8ZrWXII9WDSLW2rqMDO9
18 | P7BFYjuRgI5GYVqV1bP503qzvOu+QaMUp++MYurhqTQ/EY+Pl2pl9bislbFuz9WI
19 | wWH19Pv5EZoVVvFIYLXOWdM5/mTyPZ4+gyDhAgMBAAGjUzBRMB0GA1UdDgQWBBQB
20 | Sx9PSJ49TvEwayvZuWj/bAlREjAfBgNVHSMEGDAWgBQBSx9PSJ49TvEwayvZuWj/
21 | bAlREjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQB7WTXzS7s4
22 | zCHtJZ+YX99ula2usy0ACRBB/OWjuJPKVwIFq8Y2iHu/282p5B/lgKa0K8l9q5CV
23 | DFaUntM0AXL9gviZl4S+Oc8Bpkesn/wIVxNi/ga/G52qE3EiEOmqEvWB1cf6uNTx
24 | rwRHumBkcgS2LyWRiOdlqpCvuE4HbUu59SaQACc1R/52EzA3pR5QPsNfDcI/Ecme
25 | F9Rmhvpmfb4EGq+WIK6xge2L5X8GQhDZQru5sUAFEDjodtXaFWoNqCCvNL73G+6v
26 | qP5YWpH614deUWAVJiOCZOzF4B95dm1NGqiCYCJw0lNzlrG227/N5fKQoWIVFxxp
27 | +QWrHoBGDkg1QCtFJmmQoXnDuAI4Z7tqzS1AFZu8SelNyedXt3R+TqT2jrdnJMS/
28 | YgQX0tcQQbo7/MVo6EWzuqN/LEaYCYX+O+DukVe4vWDrzRWvq8hNJ55OjwMZZXn7
29 | iEV9xyO/OHhmE4U6+3IsVJsdP5y9R6RIGGdaTX/nxZ4p9FqMSK4BQbQ1ZswT1aCL
30 | 6cTg70BOzmXMcz37fgP/FQwNp0pacU//u4X16P4WHi3PMyA+qE6RqsQIhKul1ao/
31 | zHg2BQ5wdx/2DFcYvxUQF8XRlSr0hap/d4JavkfJEdbfOg2QntZ0H5Kro0OkDA/B
32 | HHsc3m8NSWY+fEMH9YtGsXQlgH8WRJ3d6g==
33 | -----END CERTIFICATE-----
34 |
--------------------------------------------------------------------------------
/addons/dns/coredns/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | k8s-app: coredns
6 | name: coredns
7 | namespace: ddns
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | k8s-app: coredns
13 | template:
14 | metadata:
15 | annotations:
16 | scheduler.alpha.kubernetes.io/critical-pod: ""
17 | labels:
18 | k8s-app: coredns
19 | spec:
20 | affinity:
21 | nodeAffinity:
22 | requiredDuringSchedulingIgnoredDuringExecution:
23 | nodeSelectorTerms:
24 | - matchExpressions:
25 | - key: node-role.kubernetes.io/master
26 | operator: In
27 | values:
28 | - ''
29 | tolerations:
30 | - key: CriticalAddonsOnly
31 | operator: Exists
32 | - effect: NoSchedule
33 | key: node-role.kubernetes.io/master
34 | containers:
35 | - name: coredns
36 | image: coredns/coredns:1.3.0
37 | args:
38 | - -conf
39 | - /etc/coredns/Corefile
40 | livenessProbe:
41 | failureThreshold: 5
42 | httpGet:
43 | path: /health
44 | port: 8080
45 | scheme: HTTP
46 | initialDelaySeconds: 60
47 | periodSeconds: 10
48 | successThreshold: 1
49 | timeoutSeconds: 5
50 | ports:
51 | - containerPort: 53
52 | name: dns-udp
53 | protocol: UDP
54 | - containerPort: 53
55 | name: dns-tcp
56 | protocol: TCP
57 | - containerPort: 9153
58 | name: metrics
59 | protocol: TCP
60 | resources:
61 | limits:
62 | cpu: 1
63 | memory: 512Mi
64 | requests:
65 | cpu: 100m
66 | memory: 128Mi
67 | volumeMounts:
68 | - mountPath: /etc/coredns
69 | name: config-volume
70 | volumes:
71 | - configMap:
72 | defaultMode: 420
73 | items:
74 | - key: Corefile
75 | path: Corefile
76 | name: coredns
77 | name: config-volume
78 |
--------------------------------------------------------------------------------
/harbor/config/certs/harbor-registry.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIF1zCCA7+gAwIBAgIUf1tBzOWj/+M7rGaNf/QHBkroV+gwDQYJKoZIhvcNAQEN
3 | BQAwajELMAkGA1UEBhMCVFcxEzARBgNVBAgMCk5ldyBUYWlwZWkxEzARBgNVBAcM
4 | Ck5ldyBUYWlwZWkxFTATBgNVBAoMDHRlc3RfY29tcGFueTELMAkGA1UECwwCSVQx
5 | DTALBgNVBAMMBHRlc3QwHhcNMTkwODEyMTU1MDAwWhcNMjkwODA5MTU1MDAwWjBy
6 | MQswCQYDVQQGEwJUVzETMBEGA1UECAwKTmV3IFRhaXBlaTETMBEGA1UEBwwKTmV3
7 | IFRhaXBlaTEVMBMGA1UECgwMdGVzdF9jb21wYW55MQswCQYDVQQLDAJJVDEVMBMG
8 | A1UEAwwMMTkyLjE2LjM1Ljk5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC
9 | AgEAzM2lag0XGkVeLvSL66fR9Tx3rRXUSRVu9uDyeEh7PZpQk8HlRyrkXbv1YzdY
10 | GBMLsz/R+XGkXMWduoEhquFRabJ4ZsqcMdghZj2X7ZiBRd8DIRG08OK0hpjEV3ON
11 | rNnK63PXMxx9Mp8cA8Ll0s2R046dbnpLxfLQ/S6wYAq58KNGv5tK+CuyV3YOMgov
12 | 82u7udPnI2mMnczbjCwoUWat+8Xi5QGXe3gR2f4TNhDAseo+QaXNM+jANBuN5KZi
13 | o6u7l+koWckTJe1+olQQYLUTWAJ+J0VjEuLfIFt5/kM2wXmn/m43Z8Bf8nRuja5U
14 | j/mC9XTSTi7lWydVxQ2tdsVHlsXwdL+fwu3JUHKIjF1IbBhn7085N9nO+JbARJSP
15 | BGKs6KxzbikQ9XtZCAZgOvLHthzuAEtcIEvI0IH9fXvQ1a6yJYe1N22T3j2+6wul
16 | nJTDoB3Ndxga01e1mKhhOqDDUVQJ/89LvUfR1w6X2XbpEdB2gaZ6fFDpegiX/AYq
17 | e+1sMqvuGlf3sYF/E3fRbKfetLv2SKbDHVWsnFCL46SNywz1A5te7mESh6zYrIdE
18 | yiKHQAZ2huCxNbbP6x8bOJNwGkYOLa6hPn9cXOxtLfZ++SMWOLh+2ns5FNzKPwxv
19 | QdZXxXg0/eks7i/c5cvvicoh6CfPuMamJ2RLsgt6NfbVGBsCAwEAAaNtMGswHwYD
20 | VR0jBBgwFoAUAUsfT0iePU7xMGsr2blo/2wJURIwCQYDVR0TBAIwADALBgNVHQ8E
21 | BAMCBPAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGwYDVR0RBBQwEoIKazhzLWhhcmJv
22 | cocEwBAjYzANBgkqhkiG9w0BAQ0FAAOCAgEAh6lKs1fHffWRTjAxC5tJN2kNgS91
23 | wQ+OqbYDGZ9YS7+MKvTItuJ0ShY8ribENs4vlSVn9UEmIV21YgFAh/4yIHBMN150
24 | ZT57m+je/onYPaFEcrt18h8oyVvXcqtYwC6RuXHfD0TnoB1VV2vGUedy8Mn9rJfH
25 | 5KaLH05Yrw3/uH83MPIrATxJfXhQ7QUDBCYQET9IiayfZxBE6g1QTH1xdpn4VuP/
26 | UiDhGxy3JyCZS8PgrUrE6x0PvMy1w/s926l7OqRcsdXKcLHNI0iDL6jfyhmU2Hjr
27 | se8UMQAxQ8qCVLPrWDWZdtTOBj+zQNpBxc19Tq1FV7Gs86DERt+WdQPe0EL4/uTE
28 | 8yDgrdcPwnUPSCXAY5ouUtbt/bsFpp95nMs11p15T8QrxbwXRN4ptTrhORNuy1at
29 | E4i4PfR/cd+zOLSGQoeTHxSn4l/K5vkQVpOrQ+VpsbZhOYA5mbviczzcOgvbq7JQ
30 | g/N8ASKhS7XiqAoJdRSX4aNUHuz06+/Qgk9caZRxtcfWou27ynZsl2F02bRWYj5F
31 | PsHn0Z6Oa4G3bEWFLmx4wTAZl0uvw92T+P3rXdrrUqMvlvaXW2ObTzHgB06umfJS
32 | pxPkuuOcBoq6dIpx2A74AqPUv1gyEY3KoleIVBQ8oI43G9kw+o9gfbEZSJQH8POn
33 | pYHgRphO5jxi7lc=
34 | -----END CERTIFICATE-----
35 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab6-cheese/cheese-dp.yml:
--------------------------------------------------------------------------------
1 | kind: Deployment
2 | apiVersion: apps/v1
3 | metadata:
4 | name: stilton
5 | labels:
6 | app: cheese
7 | cheese: stilton
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: cheese
13 | task: stilton
14 | template:
15 | metadata:
16 | labels:
17 | app: cheese
18 | task: stilton
19 | version: v0.0.1
20 | spec:
21 | containers:
22 | - name: cheese
23 | image: errm/cheese:stilton
24 | resources:
25 | requests:
26 | cpu: 100m
27 | memory: 50Mi
28 | limits:
29 | cpu: 100m
30 | memory: 50Mi
31 | ports:
32 | - containerPort: 80
33 | ---
34 | kind: Deployment
35 | apiVersion: apps/v1
36 | metadata:
37 | name: cheddar
38 | labels:
39 | app: cheese
40 | cheese: cheddar
41 | spec:
42 | replicas: 1
43 | selector:
44 | matchLabels:
45 | app: cheese
46 | task: cheddar
47 | template:
48 | metadata:
49 | labels:
50 | app: cheese
51 | task: cheddar
52 | version: v0.0.1
53 | spec:
54 | containers:
55 | - name: cheese
56 | image: errm/cheese:cheddar
57 | resources:
58 | requests:
59 | cpu: 100m
60 | memory: 50Mi
61 | limits:
62 | cpu: 100m
63 | memory: 50Mi
64 | ports:
65 | - containerPort: 80
66 | ---
67 | kind: Deployment
68 | apiVersion: apps/v1
69 | metadata:
70 | name: wensleydale
71 | labels:
72 | app: cheese
73 | cheese: wensleydale
74 | spec:
75 | replicas: 1
76 | selector:
77 | matchLabels:
78 | app: cheese
79 | task: wensleydale
80 | template:
81 | metadata:
82 | labels:
83 | app: cheese
84 | task: wensleydale
85 | version: v0.0.1
86 | spec:
87 | containers:
88 | - name: cheese
89 | image: errm/cheese:wensleydale
90 | resources:
91 | requests:
92 | cpu: 100m
93 | memory: 50Mi
94 | limits:
95 | cpu: 100m
96 | memory: 50Mi
97 | ports:
98 | - containerPort: 80
99 |
--------------------------------------------------------------------------------
/manual-installation/manifests/manager.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | annotations:
5 | scheduler.alpha.kubernetes.io/critical-pod: ""
6 | labels:
7 | component: kube-controller-manager
8 | tier: control-plane
9 | name: kube-controller-manager
10 | namespace: kube-system
11 | spec:
12 | hostNetwork: true
13 | containers:
14 | - name: kube-controller-manager
15 | image: gcr.io/google_containers/kube-controller-manager-amd64:v1.8.2
16 | command:
17 | - kube-controller-manager
18 | - --v=0
19 | - --logtostderr=true
20 | - --address=127.0.0.1
21 | - --root-ca-file=/etc/kubernetes/pki/ca.pem
22 | - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem
23 | - --cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem
24 | - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
25 | - --kubeconfig=/etc/kubernetes/controller-manager.conf
26 | - --leader-elect=true
27 | - --use-service-account-credentials=true
28 | - --node-monitor-grace-period=40s
29 | - --node-monitor-period=5s
30 | - --pod-eviction-timeout=2m0s
31 | - --controllers=*,bootstrapsigner,tokencleaner
32 | - --allocate-node-cidrs=true
33 | - --cluster-cidr=10.244.0.0/16
34 | - --node-cidr-mask-size=24
35 | livenessProbe:
36 | failureThreshold: 8
37 | httpGet:
38 | host: 127.0.0.1
39 | path: /healthz
40 | port: 10252
41 | scheme: HTTP
42 | initialDelaySeconds: 15
43 | timeoutSeconds: 15
44 | resources:
45 | requests:
46 | cpu: 200m
47 | volumeMounts:
48 | - mountPath: /etc/kubernetes/pki
49 | name: k8s-certs
50 | readOnly: true
51 | - mountPath: /etc/ssl/certs
52 | name: ca-certs
53 | readOnly: true
54 | - mountPath: /etc/kubernetes/controller-manager.conf
55 | name: kubeconfig
56 | readOnly: true
57 | - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
58 | name: flexvolume-dir
59 | volumes:
60 | - hostPath:
61 | path: /etc/kubernetes/pki
62 | type: DirectoryOrCreate
63 | name: k8s-certs
64 | - hostPath:
65 | path: /etc/ssl/certs
66 | type: DirectoryOrCreate
67 | name: ca-certs
68 | - hostPath:
69 | path: /etc/kubernetes/controller-manager.conf
70 | type: FileOrCreate
71 | name: kubeconfig
72 | - hostPath:
73 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec
74 | type: DirectoryOrCreate
75 | name: flexvolume-dir
76 |
--------------------------------------------------------------------------------
/harbor/README.md:
--------------------------------------------------------------------------------
1 | # Install Harbor
2 |
3 | ## Docker Compose
4 |
5 | ```sh
6 | $ curl -L https://github.com/docker/compose/releases/download/1.24.1/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose
7 | $ chmod +x /usr/local/bin/docker-compose
8 | ```
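
To verify the binary is installed and on your `PATH` (a quick sanity check, not part of the original steps):

```sh
$ docker-compose --version
```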
9 |
10 | ## Harbor
11 |
12 | ### Download Harbor
13 |
14 | ```sh
15 | $ wget https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.2-rc2.tgz
16 | $ tar xvf harbor-offline-installer-v1.8.2-rc2.tgz
17 | $ cd harbor
18 | $ cp -rp /vagrant/config/harbor.yml ./
19 | ```
20 |
21 | Load Harbor images:
22 |
23 | ```sh
24 | $ docker load < harbor.v1.8.2.tar.gz
25 | ```
26 |
27 | ### Get the Certificate Authority
28 |
29 | Copy keys and certs to `/data/cert/`:
30 |
31 | ```sh
32 | $ mkdir -p /data/cert/
33 | $ cp -rp /vagrant/config/certs/* /data/cert/
34 | $ chmod 777 -R /data/cert/
35 | ```
36 |
37 | ### Deploy Harbor
38 |
39 | Prepare config and deploy:
40 | ```sh
41 | $ ./prepare
42 | $ docker-compose up -d
43 | ```
44 |
45 | Alternatively, install with Notary and Clair:
46 |
47 | ```sh
48 | $ sudo ./install.sh --with-notary --with-clair
49 | ```
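
Once the installer finishes, you can confirm the Harbor containers are up (run from the `harbor` directory that holds the generated `docker-compose.yml`):

```sh
$ docker-compose ps
```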
50 |
51 | ### Push Image to Harbor
52 |
53 | ```sh
54 | $ mkdir -p /etc/docker/certs.d/192.16.35.99
55 | $ cp /vagrant/config/certs/ca.crt /etc/docker/certs.d/192.16.35.99/
56 | ```
57 |
58 | Log in to Harbor with Docker:
59 |
60 | ```sh
61 | $ docker login 192.16.35.99
62 | ```
63 |
64 | Pull an image and tag it under `192.16.35.99/library/`:
65 |
66 | ```sh
67 | $ docker pull alpine:3.7
68 | $ docker tag alpine:3.7 192.16.35.99/library/alpine:3.7
69 | ```
70 |
71 | Push image to Harbor:
72 |
73 | ```sh
74 | $ docker push 192.16.35.99/library/alpine:3.7
75 | ```
76 |
77 | Access the portal at `https://192.16.35.99`.
78 |
79 | ## Pull an image from Harbor to the Kubernetes cluster
80 |
81 | First, copy `ca.crt` to `/etc/docker/certs.d/192.16.35.99` on each node:
82 |
83 | ```sh
84 | $ mkdir -p /etc/docker/certs.d/192.16.35.99
85 | $ cp /vagrant/harbor/ca.crt /etc/docker/certs.d/192.16.35.99/
86 | ```
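
If the project you push to is private, Kubernetes also needs registry credentials to pull. A minimal sketch, assuming the same account used for `docker login` above; the secret name `harbor-cred` and the Pod name are illustrative:

```sh
$ kubectl create secret docker-registry harbor-cred \
    --docker-server=192.16.35.99 \
    --docker-username=<username> \
    --docker-password=<password>

$ cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: alpine-from-harbor
spec:
  imagePullSecrets:
  - name: harbor-cred
  containers:
  - name: alpine
    image: 192.16.35.99/library/alpine:3.7
    command: ["sleep", "3600"]
EOF
```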
87 |
88 | ## Content trust
89 |
90 | ```sh
91 | $ mkdir -p $HOME/.docker/tls/192.16.35.99:4443/
92 | $ cp /vagrant/config/certs/ca.crt $HOME/.docker/tls/192.16.35.99:4443/
93 | # $ cp /vagrant/harbor/ca.crt $HOME/.docker/tls/192.16.35.99:4443/
94 |
95 | $ export DOCKER_CONTENT_TRUST=1
96 | $ export DOCKER_CONTENT_TRUST_SERVER=https://192.16.35.99:4443
97 | $ docker tag alpine:3.7 192.16.35.99/trust/alpine:3.7
98 | $ docker push 192.16.35.99/trust/alpine:3.7
99 | ```
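
With `DOCKER_CONTENT_TRUST=1` still exported, pulling the image back verifies its signature against the Notary server (a quick check, not part of the original steps):

```sh
$ docker pull 192.16.35.99/trust/alpine:3.7
```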
100 |
--------------------------------------------------------------------------------
/addons/monitoring/kube-state-metrics/kube-state-metrics-rbac.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: kube-state-metrics
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - configmaps
10 | - secrets
11 | - nodes
12 | - pods
13 | - services
14 | - resourcequotas
15 | - replicationcontrollers
16 | - limitranges
17 | - persistentvolumeclaims
18 | - persistentvolumes
19 | - namespaces
20 | - endpoints
21 | verbs:
22 | - list
23 | - watch
24 | - apiGroups:
25 | - extensions
26 | resources:
27 | - daemonsets
28 | - deployments
29 | - replicasets
30 | verbs:
31 | - list
32 | - watch
33 | - apiGroups:
34 | - apps
35 | resources:
36 | - statefulsets
37 | - daemonsets
38 | - deployments
39 | - replicasets
40 | verbs:
41 | - list
42 | - watch
43 | - apiGroups:
44 | - batch
45 | resources:
46 | - cronjobs
47 | - jobs
48 | verbs:
49 | - list
50 | - watch
51 | - apiGroups:
52 | - autoscaling
53 | resources:
54 | - horizontalpodautoscalers
55 | verbs:
56 | - list
57 | - watch
58 | - apiGroups:
59 | - authentication.k8s.io
60 | resources:
61 | - tokenreviews
62 | verbs:
63 | - create
64 | - apiGroups:
65 | - authorization.k8s.io
66 | resources:
67 | - subjectaccessreviews
68 | verbs:
69 | - create
70 | - apiGroups:
71 | - policy
72 | resources:
73 | - poddisruptionbudgets
74 | verbs:
75 | - list
76 | - watch
77 | ---
78 | apiVersion: rbac.authorization.k8s.io/v1
79 | kind: ClusterRoleBinding
80 | metadata:
81 | name: kube-state-metrics
82 | roleRef:
83 | apiGroup: rbac.authorization.k8s.io
84 | kind: ClusterRole
85 | name: kube-state-metrics
86 | subjects:
87 | - kind: ServiceAccount
88 | name: kube-state-metrics
89 | namespace: monitoring
90 | ---
91 | apiVersion: rbac.authorization.k8s.io/v1
92 | kind: Role
93 | metadata:
94 | name: kube-state-metrics
95 | namespace: monitoring
96 | rules:
97 | - apiGroups:
98 | - ""
99 | resources:
100 | - pods
101 | verbs:
102 | - get
103 | - apiGroups:
104 | - extensions
105 | resourceNames:
106 | - kube-state-metrics
107 | resources:
108 | - deployments
109 | verbs:
110 | - get
111 | - update
112 | - apiGroups:
113 | - apps
114 | resourceNames:
115 | - kube-state-metrics
116 | resources:
117 | - deployments
118 | verbs:
119 | - get
120 | - update
121 | ---
122 | apiVersion: rbac.authorization.k8s.io/v1
123 | kind: RoleBinding
124 | metadata:
125 | name: kube-state-metrics
126 | namespace: monitoring
127 | roleRef:
128 | apiGroup: rbac.authorization.k8s.io
129 | kind: Role
130 | name: kube-state-metrics
131 | subjects:
132 | - kind: ServiceAccount
133 | name: kube-state-metrics
--------------------------------------------------------------------------------
/load-balancing/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure("2") do |config|
5 | config.vm.box = "bento/ubuntu-18.04"
6 | config.vm.hostname = 'k8s-dev'
7 | config.vm.define vm_name = 'k8s'
8 |
9 | config.vm.provision "shell", privileged: false, inline: <<-SHELL
10 | set -e -x -u
11 | export DEBIAN_FRONTEND=noninteractive
12 |
13 | # Update the package index
14 | sudo apt-get update
15 | sudo apt-get install -y vim git cmake build-essential tcpdump tig jq
16 | # Install ntp
17 | sudo apt-get install -y ntp
18 | # Install Docker
19 | # Docker 18.06 is the newest version validated for Kubernetes 1.13
20 | export DOCKER_VERSION="18.06.3~ce~3-0~ubuntu"
21 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
22 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
23 | sudo apt-get update
24 | sudo apt-get install -y docker-ce=${DOCKER_VERSION}
25 |
26 | echo 3 | sudo tee /proc/sys/net/ipv4/tcp_fastopen
27 | git clone https://github.com/hwchiu/kubeDemo
28 | git clone https://github.com/hwchiu/k8s-course
29 | sudo apt-get install -y linux-headers-$(uname -r)
30 | cd ~/k8s-course/load-balancing/module
31 | sudo make
32 | sudo cp xt_statistic.ko /lib/modules/$(uname -r)/kernel/net/netfilter/xt_statistic.ko
33 |
34 | # Install Kubernetes
35 | export KUBE_VERSION="1.13.5"
36 | export NET_IF_NAME="enp0s8"
37 | sudo apt-get install -y apt-transport-https curl
38 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
39 | echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee --append /etc/apt/sources.list.d/kubernetes.list
40 | sudo apt-get update
41 | sudo apt-get install -y kubeadm=${KUBE_VERSION}-00 kubelet=${KUBE_VERSION}-00 kubectl=${KUBE_VERSION}-00 kubernetes-cni=0.7.5-00
42 | # Disable swap
43 | sudo swapoff -a && sudo sysctl -w vm.swappiness=0
44 | sudo sed '/swap.img/d' -i /etc/fstab
45 | sudo kubeadm init --kubernetes-version v${KUBE_VERSION} --apiserver-advertise-address=172.17.8.101 --pod-network-cidr=10.244.0.0/16
46 | mkdir -p $HOME/.kube
47 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
48 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
49 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml
50 | kubectl taint nodes --all node-role.kubernetes.io/master-
51 |
52 |
53 | SHELL
54 |
55 | config.vm.network :private_network, ip: "172.17.8.101"
56 | config.vm.provider :virtualbox do |v|
57 | v.customize ["modifyvm", :id, "--cpus", 2]
58 | v.customize ["modifyvm", :id, "--memory", 4096]
59 | v.customize ['modifyvm', :id, '--nicpromisc1', 'allow-all']
60 | end
61 | end
62 |
--------------------------------------------------------------------------------
/kubeflow/multi-node/README.md:
--------------------------------------------------------------------------------
1 | # Kubeflow
2 | Instructions for installing Kubeflow on your existing Kubernetes cluster, with a list of supported options.
3 |
4 | ## NFS Persistent Volumes
5 |
6 | ### NFS Server
7 | If an NFS volume is not available to your cluster, you can transform one of the cluster’s nodes into an NFS server with the following commands:
8 |
9 | ```sh
10 | $ sudo apt-get install -y nfs-common nfs-kernel-server
11 | $ sudo mkdir /nfsroot
12 | ```
13 |
14 | Then configure `/etc/exports` to share that directory:
15 |
16 | ```sh
17 | $ echo "/nfsroot *(rw,no_root_squash,no_subtree_check)" | sudo tee -a /etc/exports
18 | ```
19 |
20 | Restart your NFS server:
21 |
22 | ```sh
23 | $ sudo systemctl restart nfs-kernel-server
24 | ```
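
Before wiring the export into Kubernetes, you can verify it is visible (both commands come with the packages installed above; replace `<nfs-server-ip>` with your server's address):

```sh
$ sudo exportfs -v
$ showmount -e <nfs-server-ip>
```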
25 |
26 | ### NFS Client
27 | Each node of the cluster must be able to establish a connection to the NFS server. To enable this, install the NFS client library on each node:
28 |
29 | ```sh
30 | $ sudo apt-get -y install nfs-common
31 | ```
32 |
33 | ### NFS Provisioner
34 |
35 | #### Install Helm
36 |
37 | Install Helm on the master node:
38 |
39 | ```sh
40 | $ wget https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz
41 | $ tar xvf helm-v2.14.3-linux-amd64.tar.gz
42 | $ mv linux-amd64/helm /usr/local/bin/
43 | ```
44 |
45 | Initialize the Helm server (Tiller) on the cluster:
46 |
47 | ```sh
48 | $ kubectl -n kube-system create sa tiller
49 | $ kubectl create clusterrolebinding tiller \
50 | --clusterrole cluster-admin \
51 | --serviceaccount=kube-system:tiller
52 | $ helm init --service-account tiller
53 | ```
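
Tiller runs as a Deployment in `kube-system`; a quick way to confirm it is ready before installing charts:

```sh
$ kubectl -n kube-system rollout status deployment/tiller-deploy
$ helm version
```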
54 |
55 | #### Install NFS Provisioner using Helm
56 | You can install the NFS Client Provisioner with Helm:
57 |
58 | ```sh
59 | $ helm install \
60 | --name nfs-client-provisioner \
61 | --set nfs.server=192.16.35.12 \
62 | --set nfs.path=/nfsroot/kubeflow \
63 | --set storageClass.name=nfs \
64 | --set storageClass.defaultClass=true \
65 | --namespace=kube-system \
66 | stable/nfs-client-provisioner
67 | ```
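
To confirm dynamic provisioning works, you can create a small test claim against the `nfs` storage class configured above (the claim name is illustrative); it should reach the `Bound` state within a few seconds:

```sh
$ cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-claim
spec:
  storageClassName: nfs
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
EOF
$ kubectl get pvc nfs-test-claim
```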
68 |
69 | ## Installing command line tools
70 | If you need or prefer to use command line tools for deploying and managing Kubeflow, download the `kfctl` binary:
71 |
72 | ```sh
73 | $ wget https://github.com/kubeflow/kubeflow/releases/download/v0.6.1/kfctl_v0.6.1_linux.tar.gz
74 | $ sudo tar -C /usr/local/bin -xzf kfctl_v0.6.1_linux.tar.gz
75 | ```
76 |
77 | Run the following commands to set up and deploy Kubeflow. Because the `kfctl` binary was extracted to `/usr/local/bin` above, it is already on your path; if you extract it elsewhere, you must use the full path to the `kfctl` binary each time you run it:
78 |
79 | ```sh
80 | $ export CONFIG="https://raw.githubusercontent.com/kubeflow/kubeflow/master/bootstrap/config/kfctl_k8s_istio.yaml"
81 | $ kfctl init kubeflow --config=${CONFIG} -V
82 | $ cd kubeflow
83 | $ kfctl generate all -V
84 | $ kfctl apply all -V
85 | ```
86 | > If you want to delete the Kubeflow deployment, you can use this command: `kfctl delete all -V`.
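
Deployment can take several minutes. You can watch the components come up with (assuming the default `kubeflow` namespace):

```sh
$ kubectl -n kubeflow get pods
```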
--------------------------------------------------------------------------------
/addons/monitoring/node-exporter/node-exporter-ds.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | labels:
5 | app: node-exporter
6 | name: node-exporter
7 | namespace: monitoring
8 | spec:
9 | selector:
10 | matchLabels:
11 | app: node-exporter
12 | template:
13 | metadata:
14 | labels:
15 | app: node-exporter
16 | spec:
17 | hostNetwork: true
18 | hostPID: true
19 | nodeSelector:
20 | beta.kubernetes.io/os: linux
21 | securityContext:
22 | runAsNonRoot: true
23 | runAsUser: 65534
24 | serviceAccountName: node-exporter
25 | tolerations:
26 | - effect: NoExecute
27 | operator: Exists
28 | - effect: NoSchedule
29 | operator: Exists
30 | containers:
31 | - name: node-exporter
32 | image: quay.io/prometheus/node-exporter:v0.17.0
33 | args:
34 | - --web.listen-address=127.0.0.1:9100
35 | - --path.procfs=/host/proc
36 | - --path.sysfs=/host/sys
37 | - --path.rootfs=/host/root
38 | - --collector.filesystem.ignored-mount-points=^/(dev|proc|sys|var/lib/docker/.+)($|/)
39 | - --collector.filesystem.ignored-fs-types=^(autofs|binfmt_misc|cgroup|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|mqueue|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|sysfs|tracefs)$
40 | resources:
41 | limits:
42 | cpu: 250m
43 | memory: 180Mi
44 | requests:
45 | cpu: 102m
46 | memory: 180Mi
47 | volumeMounts:
48 | - mountPath: /host/proc
49 | name: proc
50 | readOnly: false
51 | - mountPath: /host/sys
52 | name: sys
53 | readOnly: false
54 | - mountPath: /host/root
55 | mountPropagation: HostToContainer
56 | name: root
57 | readOnly: true
58 | - name: kube-rbac-proxy
59 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1
60 | args:
61 | - --logtostderr
62 | - --secure-listen-address=$(IP):9100
63 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
64 | - --upstream=http://127.0.0.1:9100/
65 | env:
66 | - name: IP
67 | valueFrom:
68 | fieldRef:
69 | fieldPath: status.podIP
70 | ports:
71 | - containerPort: 9100
72 | hostPort: 9100
73 | name: https
74 | resources:
75 | limits:
76 | cpu: 20m
77 | memory: 40Mi
78 | requests:
79 | cpu: 10m
80 | memory: 20Mi
81 | volumes:
82 | - hostPath:
83 | path: /proc
84 | name: proc
85 | - hostPath:
86 | path: /sys
87 | name: sys
88 | - hostPath:
89 | path: /
90 | name: root
--------------------------------------------------------------------------------
/addons/logging/fluentd-es-ds.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: fluentd-es
5 | namespace: kube-system
6 | labels:
7 | k8s-app: fluentd-es
8 | addonmanager.kubernetes.io/mode: Reconcile
9 | ---
10 | kind: ClusterRole
11 | apiVersion: rbac.authorization.k8s.io/v1
12 | metadata:
13 | name: fluentd-es
14 | labels:
15 | k8s-app: fluentd-es
16 | addonmanager.kubernetes.io/mode: Reconcile
17 | rules:
18 | - apiGroups:
19 | - ""
20 | resources:
21 | - "namespaces"
22 | - "pods"
23 | verbs:
24 | - "get"
25 | - "watch"
26 | - "list"
27 | ---
28 | kind: ClusterRoleBinding
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | metadata:
31 | name: fluentd-es
32 | labels:
33 | k8s-app: fluentd-es
34 | addonmanager.kubernetes.io/mode: Reconcile
35 | subjects:
36 | - kind: ServiceAccount
37 | name: fluentd-es
38 | namespace: kube-system
39 | apiGroup: ""
40 | roleRef:
41 | kind: ClusterRole
42 | name: fluentd-es
43 | apiGroup: ""
44 | ---
45 | apiVersion: apps/v1
46 | kind: DaemonSet
47 | metadata:
48 | name: fluentd-es-v2.6.0
49 | namespace: kube-system
50 | labels:
51 | k8s-app: fluentd-es
52 | version: v2.6.0
53 | addonmanager.kubernetes.io/mode: Reconcile
54 | spec:
55 | selector:
56 | matchLabels:
57 | k8s-app: fluentd-es
58 | version: v2.6.0
59 | template:
60 | metadata:
61 | labels:
62 | k8s-app: fluentd-es
63 | version: v2.6.0
64 | # This annotation ensures that fluentd does not get evicted if the node
65 | # supports critical pod annotation based priority scheme.
66 | # Note that this does not guarantee admission on the nodes (#40573).
67 | annotations:
68 | scheduler.alpha.kubernetes.io/critical-pod: ''
69 | seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
70 | spec:
71 | priorityClassName: system-node-critical
72 | serviceAccountName: fluentd-es
73 | containers:
74 | - name: fluentd-es
75 | image: quay.io/fluentd_elasticsearch/fluentd:v2.6.0
76 | env:
77 | - name: FLUENTD_ARGS
78 | value: --no-supervisor -q
79 | resources:
80 | limits:
81 | memory: 500Mi
82 | requests:
83 | cpu: 100m
84 | memory: 200Mi
85 | volumeMounts:
86 | - name: varlog
87 | mountPath: /var/log
88 | - name: varlibdockercontainers
89 | mountPath: /var/lib/docker/containers
90 | readOnly: true
91 | - name: config-volume
92 | mountPath: /etc/fluent/config.d
93 | terminationGracePeriodSeconds: 30
94 | volumes:
95 | - name: varlog
96 | hostPath:
97 | path: /var/log
98 | - name: varlibdockercontainers
99 | hostPath:
100 | path: /var/lib/docker/containers
101 | - name: config-volume
102 | configMap:
103 | name: fluentd-es-config-v0.2.0
104 |
--------------------------------------------------------------------------------
/addons/logging/es-statefulset.yaml:
--------------------------------------------------------------------------------
1 | # RBAC authn and authz
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: elasticsearch-logging
6 | namespace: kube-system
7 | labels:
8 | k8s-app: elasticsearch-logging
9 | addonmanager.kubernetes.io/mode: Reconcile
10 | ---
11 | kind: ClusterRole
12 | apiVersion: rbac.authorization.k8s.io/v1
13 | metadata:
14 | name: elasticsearch-logging
15 | labels:
16 | k8s-app: elasticsearch-logging
17 | addonmanager.kubernetes.io/mode: Reconcile
18 | rules:
19 | - apiGroups:
20 | - ""
21 | resources:
22 | - "services"
23 | - "namespaces"
24 | - "endpoints"
25 | verbs:
26 | - "get"
27 | ---
28 | kind: ClusterRoleBinding
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | metadata:
31 | namespace: kube-system
32 | name: elasticsearch-logging
33 | labels:
34 | k8s-app: elasticsearch-logging
35 | addonmanager.kubernetes.io/mode: Reconcile
36 | subjects:
37 | - kind: ServiceAccount
38 | name: elasticsearch-logging
39 | namespace: kube-system
40 | apiGroup: ""
41 | roleRef:
42 | kind: ClusterRole
43 | name: elasticsearch-logging
44 | apiGroup: ""
45 | ---
46 | # Elasticsearch deployment itself
47 | apiVersion: apps/v1
48 | kind: StatefulSet
49 | metadata:
50 | name: elasticsearch-logging
51 | namespace: kube-system
52 | labels:
53 | k8s-app: elasticsearch-logging
54 | version: v7.2.0
55 | addonmanager.kubernetes.io/mode: Reconcile
56 | spec:
57 | serviceName: elasticsearch-logging
58 | replicas: 2
59 | selector:
60 | matchLabels:
61 | k8s-app: elasticsearch-logging
62 | version: v7.2.0
63 | template:
64 | metadata:
65 | labels:
66 | k8s-app: elasticsearch-logging
67 | version: v7.2.0
68 | spec:
69 | serviceAccountName: elasticsearch-logging
70 | containers:
71 | - image: quay.io/fluentd_elasticsearch/elasticsearch:v7.2.0
72 | name: elasticsearch-logging
73 | imagePullPolicy: Always
74 | resources:
75 | # need more cpu upon initialization, therefore burstable class
76 | limits:
77 | cpu: 1000m
78 | requests:
79 | cpu: 100m
80 | ports:
81 | - containerPort: 9200
82 | name: db
83 | protocol: TCP
84 | - containerPort: 9300
85 | name: transport
86 | protocol: TCP
87 | volumeMounts:
88 | - name: elasticsearch-logging
89 | mountPath: /data
90 | env:
91 | - name: "NAMESPACE"
92 | valueFrom:
93 | fieldRef:
94 | fieldPath: metadata.namespace
95 | volumes:
96 | - name: elasticsearch-logging
97 | emptyDir: {}
98 | # Elasticsearch requires vm.max_map_count to be at least 262144.
99 | # If your OS already sets up this number to a higher value, feel free
100 | # to remove this init container.
101 | initContainers:
102 | - image: alpine:3.6
103 | command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
104 | name: elasticsearch-logging-init
105 | securityContext:
106 | privileged: true
107 |
--------------------------------------------------------------------------------
/addons/monitoring/kube-state-metrics/kube-state-metrics-dp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: kube-state-metrics
6 | name: kube-state-metrics
7 | namespace: monitoring
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: kube-state-metrics
13 | template:
14 | metadata:
15 | labels:
16 | app: kube-state-metrics
17 | spec:
18 | serviceAccountName: kube-state-metrics
19 | containers:
20 | - name: kube-rbac-proxy-main
21 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1
22 | args:
23 | - --logtostderr
24 | - --secure-listen-address=:8443
25 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
26 | - --upstream=http://127.0.0.1:8081/
27 | ports:
28 | - containerPort: 8443
29 | name: https-main
30 | resources:
31 | limits:
32 | cpu: 20m
33 | memory: 40Mi
34 | requests:
35 | cpu: 10m
36 | memory: 20Mi
37 | - name: kube-rbac-proxy-self
38 | image: quay.io/coreos/kube-rbac-proxy:v0.4.1
39 | args:
40 | - --logtostderr
41 | - --secure-listen-address=:9443
42 | - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
43 | - --upstream=http://127.0.0.1:8082/
44 | ports:
45 | - containerPort: 9443
46 | name: https-self
47 | resources:
48 | limits:
49 | cpu: 20m
50 | memory: 40Mi
51 | requests:
52 | cpu: 10m
53 | memory: 20Mi
54 | - name: kube-state-metrics
55 | image: quay.io/coreos/kube-state-metrics:v1.5.0
56 | args:
57 | - --host=127.0.0.1
58 | - --port=8081
59 | - --telemetry-host=127.0.0.1
60 | - --telemetry-port=8082
61 | resources:
62 | limits:
63 | cpu: 100m
64 | memory: 150Mi
65 | requests:
66 | cpu: 100m
67 | memory: 150Mi
68 | - name: addon-resizer
69 | image: gcr.io/google-containers/addon-resizer-amd64:2.1
70 | command:
71 | - /pod_nanny
72 | - --container=kube-state-metrics
73 | - --cpu=100m
74 | - --extra-cpu=2m
75 | - --memory=150Mi
76 | - --extra-memory=30Mi
77 | - --acceptance-offset=5
78 | - --deployment=kube-state-metrics
79 | env:
80 | - name: MY_POD_NAME
81 | valueFrom:
82 | fieldRef:
83 | apiVersion: v1
84 | fieldPath: metadata.name
85 | - name: MY_POD_NAMESPACE
86 | valueFrom:
87 | fieldRef:
88 | apiVersion: v1
89 | fieldPath: metadata.namespace
90 | resources:
91 | limits:
92 | cpu: 50m
93 | memory: 30Mi
94 | requests:
95 | cpu: 10m
96 | memory: 30Mi
97 | securityContext:
98 | runAsNonRoot: true
99 | runAsUser: 65534
--------------------------------------------------------------------------------
/addons/monitoring/prometheus/prometheus-rbac.yml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus-k8s
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - nodes/metrics
10 | verbs:
11 | - get
12 | - nonResourceURLs:
13 | - /metrics
14 | verbs:
15 | - get
16 | ---
17 | apiVersion: rbac.authorization.k8s.io/v1
18 | kind: ClusterRoleBinding
19 | metadata:
20 | name: prometheus-k8s
21 | roleRef:
22 | apiGroup: rbac.authorization.k8s.io
23 | kind: ClusterRole
24 | name: prometheus-k8s
25 | subjects:
26 | - kind: ServiceAccount
27 | name: prometheus-k8s
28 | namespace: monitoring
29 | ---
30 | apiVersion: rbac.authorization.k8s.io/v1
31 | kind: Role
32 | metadata:
33 | name: prometheus-k8s
34 | namespace: monitoring
35 | rules:
36 | - apiGroups:
37 | - ""
38 | resources:
39 | - nodes
40 | - services
41 | - endpoints
42 | - pods
43 | verbs:
44 | - get
45 | - list
46 | - watch
47 | ---
48 | apiVersion: rbac.authorization.k8s.io/v1
49 | kind: RoleBinding
50 | metadata:
51 | name: prometheus-k8s
52 | namespace: monitoring
53 | roleRef:
54 | apiGroup: rbac.authorization.k8s.io
55 | kind: Role
56 | name: prometheus-k8s
57 | subjects:
58 | - kind: ServiceAccount
59 | name: prometheus-k8s
60 | namespace: monitoring
61 | ---
62 | apiVersion: rbac.authorization.k8s.io/v1
63 | kind: Role
64 | metadata:
65 | name: prometheus-k8s
66 | namespace: kube-system
67 | rules:
68 | - apiGroups:
69 | - ""
70 | resources:
71 | - nodes
72 | - services
73 | - endpoints
74 | - pods
75 | verbs:
76 | - get
77 | - list
78 | - watch
79 | ---
80 | apiVersion: rbac.authorization.k8s.io/v1
81 | kind: RoleBinding
82 | metadata:
83 | name: prometheus-k8s
84 | namespace: kube-system
85 | roleRef:
86 | apiGroup: rbac.authorization.k8s.io
87 | kind: Role
88 | name: prometheus-k8s
89 | subjects:
90 | - kind: ServiceAccount
91 | name: prometheus-k8s
92 | namespace: monitoring
93 | ---
94 | apiVersion: rbac.authorization.k8s.io/v1
95 | kind: Role
96 | metadata:
97 | name: prometheus-k8s
98 | namespace: default
99 | rules:
100 | - apiGroups:
101 | - ""
102 | resources:
103 | - nodes
104 | - services
105 | - endpoints
106 | - pods
107 | verbs:
108 | - get
109 | - list
110 | - watch
111 | ---
112 | apiVersion: rbac.authorization.k8s.io/v1
113 | kind: RoleBinding
114 | metadata:
115 | name: prometheus-k8s
116 | namespace: default
117 | roleRef:
118 | apiGroup: rbac.authorization.k8s.io
119 | kind: Role
120 | name: prometheus-k8s
121 | subjects:
122 | - kind: ServiceAccount
123 | name: prometheus-k8s
124 | namespace: monitoring
125 | ---
126 | apiVersion: rbac.authorization.k8s.io/v1
127 | kind: Role
128 | metadata:
129 | name: prometheus-k8s-config
130 | namespace: monitoring
131 | rules:
132 | - apiGroups:
133 | - ""
134 | resources:
135 | - configmaps
136 | verbs:
137 | - get
138 | ---
139 | apiVersion: rbac.authorization.k8s.io/v1
140 | kind: RoleBinding
141 | metadata:
142 | name: prometheus-k8s-config
143 | namespace: monitoring
144 | roleRef:
145 | apiGroup: rbac.authorization.k8s.io
146 | kind: Role
147 | name: prometheus-k8s-config
148 | subjects:
149 | - kind: ServiceAccount
150 | name: prometheus-k8s
151 | namespace: monitoring
152 |
--------------------------------------------------------------------------------
/harbor/config/certs/ca.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIJRAIBADANBgkqhkiG9w0BAQEFAASCCS4wggkqAgEAAoICAQC+BM6Y2qsGNwfc
3 | mGYZP8BqBWx0TDVdgVx8b6lfxt+StmVR/18GUBgGcXvVH6IrFPf+jLHQNJSsphGg
4 | BtnUyb5YKiM3wOKdsHznIIRo1eLwljzD/kaiEQFc3uXOfdCRKHeVWhK2IXidtDLg
5 | bzCFu+F72tFj8pBaNUyBIcraWpV+SQS+FVl8T3AlURtsOrUTzWCi6RbyX5BTGFhl
6 | aYUbLiv/ztiLcwEIT5adWLJKQhk0sW7wZJNFNVMMXYPM60z+A2aJA9xlrCsKe4IY
7 | sffTy3mwFLFHpWekpsdjzXYKI+tHbn6msiDm6tLbFQxMlBdaonp+5OkUvUlCFT5m
8 | WGPPagR4AU2ER7WDbqVVKSDFR31mAgBYTtvXfvtmEE//V2qNIuujzdXQLnNko03H
9 | WKVCimpeROmGSkHNxoxSJQSKYLWLQ9Xe9EydcIrkKzeixNq0iF3LIArtjuWiQQgy
10 | kZg+9TemRLCvvu/WlpGCQRof7IrDozzvpHIeRhGvAReFoyIe72ibofJrCMjEF9ol
11 | +gfSXmYhVZLnJXUP+r6J4/E9A0fJZlDm8meWsPGa1lyCPVg0i1tq6jAzvT+wRWI7
12 | kYCORmFaldWz+dN6s7zrvkGjFKfvjGLq4ak0PxGPj5dqZfW4rJWxbs/ViMFh9fT7
13 | +RGaFVbxSGC1zlnTOf5k8j2ePoMg4QIDAQABAoICAQCoeGiXeyAwtW/B9FhpQG2f
14 | Ukmy3rJdwci3BuUbZp5oXFB+n7SnSzHR3KHW9mH4YnHxfNpV55h9bguoXwqYr3mU
15 | f1+UGe6Rsp066kibDi9T8U/U0Ufcfsk/htwm9MOLP16uyxpV4l0nLVICTHKvz0D+
16 | Z6ZIQBId+K1ovAY3DCMIwUJ0KUeagG/EliGDFpXfBbMNHS3cHEBAfBA7cY+y5n/a
17 | IJE7Ke6baLz3qmEAl5PzdPEQL4umy71i9DJb567ucyNgjUzF1ByhAZn5vSgl3nx4
18 | UhBPnmXCxJ95a/ugx/Hc9ilSMBIdycrCtOmfp5kBIZcLlHbWjXA7J8yYPvvXNH7a
19 | QenAYAaU/NktEmM61WLuJlcUGg9oKT7BxswWCGOeOuty6eb6whrkihlB28BOSR9S
20 | /4aF//3CHIEPDIeNklRXLDUe6eShKwqumQAtjELpQ3dZvlVqAZqXlS/wXwUp9CiM
21 | UWIbvSCb0RlPY5rUULe40liYesp1K8W/cqZ8Iclf8jK4ja7CI411kA7vklqxwHFm
22 | 9CAvYDkWcWzp/TtQy/mP2Inyi/Dts8Iylr2y8xucj92zGbw7zLBeQ8USVb3sRTdH
23 | 38z2YWCuGaE0ofEe9tF75fa+KGbtN7+q8q1wwszgT1xUi4dqM/KFLtQXorEAGHHm
24 | 6KRKbpr+gyZA2nzPPAnwAQKCAQEA9BnQZKi1wuHHvIEaNWjknpeFlsqGPhAG8WS6
25 | 2EvljQEhnch/ecnEg29hh+q9UlPYvIxxvzWOSkWv+nz9+vgkn4vX/1oVtDno59MA
26 | WmxR60vzIyln5NGZhzQ/zFsLFrs3LO9Rixg40nz5YGsMFTwJGPVD1jOM7eDF/A2H
27 | XIMZ5FvapPBb7Wq2c8mPXVbZXHffCut0ih9j//Ts8uA9eySSSD+zAsw+WWelYLFo
28 | USaYUX+MYrOj9riNUSreYialy35J0g8KeBQxFWspInoivVu3EzVmYENomF3881w1
29 | zHc+3STuydnnE1iViVDbJa+tkzNJHk9aZ40hwyH5TSkVJAiCYQKCAQEAx0gXbtRD
30 | dER5zqAYOfeNbH9mpwu1M3pGLnD53mWm0i9ENsRWu4UDxDhGzYHZI6NM1tr3IK3k
31 | ftAbEWaEf5EvtSYgx318Kbjk75vRvJffWpHrfUZKXKt5cx7+msukjiTPGImmx0tr
32 | RItiQ/WOxi6U2j1CzNN1RiZLr6r187t4H/8dUtqgsz35WY0qGHQ1CmTCreB7sbWy
33 | s14gfWqxETtWg0qwiZDlZCK0/DUgyIj4YoF6yvgQKU447xmBXKjUWEbf3T4LtyQb
34 | sfsvSmIG51tvZPBmSiucDDmm4CHLkXj/uDyS50/80Mqo85yzzyMMpHXqlYL3EX8m
35 | e29vDBpZGkwugQKCAQA2JgvOaIZ5hrBSJqlzs6MTczVVL/P7jH6dyeSQ2rU/REL6
36 | x2FP2OxY47pU9kOtIaBwow8QhYbc/vx8qoGEzMsDAXiW0psB8d6ynmHzrRe3BJAY
37 | +rRrU/ukvHhykrexATLKKwSBwIvKVaLUOqrhgwHZoX68446drkD9GtsuMH7r41xr
38 | Pk6GKifzI4FECWIKWyUFPRoLEc+TFfgCILBCwugYUtkZA8nI927+NhyX8+HSImM3
39 | +fWjY2kl2kaeq0gYDhm+GUwPOxtwTuc/GKzgKJpCDpR5QjrPofnc2lWs178AO5Mk
40 | /8t8xSfW4DLc902f276alKvPUMVujfjJnxVret/BAoIBAQCxyiKF0UAA6dqWOoSC
41 | qtX0l1EY9TwLGiqk4kw6WwxS9rJv/gi3MbujguXAEPmNbvJtuVrDQADcFoQfqQvK
42 | 19ftUH/sGor8miU9lBGxpPkyz359/Ij5Vk57I4rYxZgg7LwK0zwFD8c6lvPrV5ey
43 | Pfq1HfSL0pgNJrtdlTPq6DUKrssVkVHPuBnPCD8nI/3yMOTyxZT1ffpZRj/MRLSG
44 | gB718VvAz5GoHcLERA3kezbGg2fuzxN0FoJ65pFf/ou72jd9Jg7DrKY8yrFtNjmc
45 | jRm7vzIzN+b3jTPRQLViKsZSM1CIp/cQu82x0fslcMqlq0HHc9lDNeI6O7eh35vg
46 | 91iBAoIBAQCYZzA8SJpkykqO5ygCrGn61vIQFNKQ941rm2bpQ8WIjVCcIfsr34Z3
47 | ha0GZZGPRVdC98QT8oJEFij+Gm/a+uLjHax0Og8L4YY6foswKQmxSiULgu/dYF52
48 | G18BTpVPk8zYxRbr2C70jsdqshswv+Km1EH5QUPoiaI7DtCg6WbNcfXTxH2Vg2Jp
49 | daZdP8mB90d21eed/fIXnrD102RFN8cl+X2EaLTGy05sM7dTlJuRmTOWgFjfQ6u9
50 | LTknmx9UJwMLe5dpKrXQott0jDF+zl/dNcry6uMRBl21CiIQ+RXvnoppkqit/Qfp
51 | BweFPSjJ3ff+ypIu/FQnkVqtKuOW2/aE
52 | -----END PRIVATE KEY-----
53 |
--------------------------------------------------------------------------------
/harbor/config/certs/harbor-registry.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDMzaVqDRcaRV4u
3 | 9Ivrp9H1PHetFdRJFW724PJ4SHs9mlCTweVHKuRdu/VjN1gYEwuzP9H5caRcxZ26
4 | gSGq4VFpsnhmypwx2CFmPZftmIFF3wMhEbTw4rSGmMRXc42s2crrc9czHH0ynxwD
5 | wuXSzZHTjp1uekvF8tD9LrBgCrnwo0a/m0r4K7JXdg4yCi/za7u50+cjaYydzNuM
6 | LChRZq37xeLlAZd7eBHZ/hM2EMCx6j5Bpc0z6MA0G43kpmKjq7uX6ShZyRMl7X6i
7 | VBBgtRNYAn4nRWMS4t8gW3n+QzbBeaf+bjdnwF/ydG6NrlSP+YL1dNJOLuVbJ1XF
8 | Da12xUeWxfB0v5/C7clQcoiMXUhsGGfvTzk32c74lsBElI8EYqzorHNuKRD1e1kI
9 | BmA68se2HO4AS1wgS8jQgf19e9DVrrIlh7U3bZPePb7rC6WclMOgHc13GBrTV7WY
10 | qGE6oMNRVAn/z0u9R9HXDpfZdukR0HaBpnp8UOl6CJf8Bip77Wwyq+4aV/exgX8T
11 | d9Fsp960u/ZIpsMdVaycUIvjpI3LDPUDm17uYRKHrNish0TKIodABnaG4LE1ts/r
12 | Hxs4k3AaRg4trqE+f1xc7G0t9n75IxY4uH7aezkU3Mo/DG9B1lfFeDT96SzuL9zl
13 | y++JyiHoJ8+4xqYnZEuyC3o19tUYGwIDAQABAoICAE+mav6juKhu8gIw+Xnc+rr1
14 | mVWnd7g0rcM2GRbOmZ+LtGrCrfjPpaEOyJCdpZU3UwWTfIYWvGWYNY+jSQS8PH1u
15 | +4R+p+B6vgKVUdEFzsDtyrnBuS01/h+aqkGYnzDqQxDvLHKkUcyuNKcB1X/2kxBE
16 | FtYwmeeSV9Ufo1i20znTRmCedLmvxNxe/YZYj+2LqKbM0sfi9N0qHviU89k0pdWV
17 | UHjf9Zn/SHIPbklImqiXAT9mzDez/YxGaxjR3Orqjzq1YwwIN3o4s1dOelAiHDPO
18 | RGHhinZ95WkgKoHIgjMcrUk2dDXVi3sv7Q8NDbYww83ScTgW5Pxef+zzNNNMRlqa
19 | vSGH57d4RlMrcNQm1lvZSjVB8UcZd/5IY9u/2BTYNe0iugsqofyC5RNkP1Ui/CCx
20 | dlw9yMUfb1N4dAJviXCgppq0A4RT8m66fybT3YauLGw4uBoWFRhsjn7EYEEUM4XL
21 | RraFTjKT8D8jkbDHb4e0HYFcSxMjGs8w/sCjA93T3KExo8bMHd6ERgfWnoX7pdd4
22 | sqsKM6VXv+R4WG8/PXWUVosMHwmKQOJMo3kaDMOOadF+KuqHSDuEZ6/luH3OEP5i
23 | uBf3QWKsIpaczlKaD5DXxjOYlQq5AG7sQfN2QKgkZMViLU2azn++Eca4UIrQkpo3
24 | WkY9GKTC/evUidmzOKqBAoIBAQDu4dBDzE2I1MMWFv9KKx4Nt+POjMkyk9uvmsSn
25 | HDKwPLJxAdpY3rr7+Omb4QYGMf5AjPiHsh2YwlagvsCadhM4l3Wudz9dghar0oeM
26 | N/0mfqqGVm/5EOAATgrL4WOJeMqAhdhgoag8M3CQn5jd/GtZqWgiLWSP02TjaZga
27 | Ra+2bPWlzW9gCXtMMFBr1YBQffxLjMOGQQfLhtA5NrtNgpNT2PhjCOFnA7cV0Lc7
28 | D1VFPegqKkahTSGbgh7UgiQVll/JP3XkobMyEF16/IxjdRlgwyrhgUgN7en7Tzfn
29 | 1SxHKah7Vu4IpnDEiZCFONpU4YUE8H0AZmCMWM+eVvYx+6JXAoIBAQDbeqwlmKXE
30 | HRhZ/iuckhvn6jvptS4adtEMFjD8MdkWIHMNP/D5XWLI9icAFYfS3BG31B3mU1zF
31 | diKzXpu9ZgHWdBWgsUUgEmVAJZXtoHWw7hNFJDzQdpnQLwicETjA8oWNfBnQAMhe
32 | Wjs5gHwgZN1jTcSRMXUkGZhSMt+lI5/IfS3GR1AC+N8/EJvecPL9GsiGKnykIxz6
33 | bS/3OR51xWyTi0Gr8OleO1064+cuQlY38wvtsJphRYRPqTHhvD0VZ9Nb9YAxPIL4
34 | ZI7L3cEmD2OfYZBXyaG2n3T/hfViHBvvyy7Bin+lGraA2C+7JoDXydc5fGzgIjBP
35 | ALd+b8Lum8XdAoIBAQCYWKM92dnI3gfc2qXNY1ozXYeHfvoBq5J7l7S67pzVEKCs
36 | uYXLD6EoeUJjC0uQP+nquPz3bXy6o5YjxgOHfq4bdVyz+5EJzaEvy4BuyHZGSpnu
37 | OsoGieCR0TPJeuDEF6jt9m5ckjx8yEGNunr/d6/u6r9irJhpsWOyNPGwL6k0KqEv
38 | MKlLEkD0AgwH+Y1vyClS9V7tDCQwhLXfByzE7fWPj9w5LLyzugbVftqca9bx5d1e
39 | pr+Mt2griPyogqcIrXJBIpvJR+bEkvNLMC1IcLHSqCV807pkR8rlX0xEKgunBsZQ
40 | gswj8CIS9biZffygDYOL9uOBkckZOLqQPN2eWuDZAoIBAQC7t8dG/LFC9DOgvPCm
41 | IJ13lPKbaFg/UEJqGQzalvP6pdRhj7/WOFZGVapzuvaJlaod1XGhTsl5Jhp12cRQ
42 | kM+fkXfyEcxyqukonT0HmgcvReD7cQwGrHnQAnpVqbEl7gfmRC97pT+622fuPYNs
43 | 3Xgw+BHYsTlkn/OU0mhQQRDnlJEzFklL0Xp34Qe/kFFzw5WgkvQ6GsItOngc9iCk
44 | UDAWXbPUkN+4tgN+W/CdsyU1A06/hB32dlO9IpREYxDHnx66d1amRgAq3S75FPif
45 | SehGKFchjpYesXGLHxMKT8phJSIfbQrRtvBDcSQZ6MuoRL0tMMuaRAOXfqByprpO
46 | BLONAoIBAH8Nn6uWmZJl+UzBpuFlTfTr3OqPJPC2NkBVPS+qUwPPJJqS+q1ou9Mo
47 | vnl1OaW2oDtINR7hhBE/5Nj3HF88uuQOE+OPZ0tO8JA6X4opWtCXHsSSY1W42Nlj
48 | YpmQWMIhtHMQD/ymDsxfWq4TS9ZVMPjEDVpy3eY4PrthaE+lxDx76p8jnQ0pPTuV
49 | 7MoRjaHaLV/h2C8DHwTPjm/s60rRKuOtsnxwmi8Fmf7NroKY4jGrQOZkmDLI0Niu
50 | sJWv6sy2mPE7QbeXfnH5rguHcPAwDkFEezfd7Njp3kOLzbyLzvTAOke/S5/IlNzQ
51 | y/7JY2pweeykZD1WY4CWZ6I6o6i4Oao=
52 | -----END PRIVATE KEY-----
53 |
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab5-voting/deployment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | labels:
6 | app: redis
7 | name: redis
8 | spec:
9 | clusterIP: None
10 | ports:
11 | - name: redis
12 | port: 6379
13 | targetPort: 6379
14 | selector:
15 | app: redis
16 | ---
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | metadata:
20 | name: redis
21 | labels:
22 | app: redis
23 | spec:
24 | selector:
25 | matchLabels:
26 | app: redis
27 | replicas: 1
28 | template:
29 | metadata:
30 | labels:
31 | app: redis
32 | spec:
33 | containers:
34 | - name: redis
35 | image: redis:5.0-alpine
36 | ports:
37 | - containerPort: 6379
38 | name: redis
39 | ---
40 | apiVersion: v1
41 | kind: Service
42 | metadata:
43 | labels:
44 | app: db
45 | name: db
46 | spec:
47 | clusterIP: None
48 | ports:
49 | - name: db
50 | port: 5432
51 | targetPort: 5432
52 | selector:
53 | app: db
54 | ---
55 | apiVersion: apps/v1
56 | kind: Deployment
57 | metadata:
58 | name: db
59 | labels:
60 | app: db
61 | spec:
62 | selector:
63 | matchLabels:
64 | app: db
65 | template:
66 | metadata:
67 | labels:
68 | app: db
69 | spec:
70 | containers:
71 | - name: db
72 | image: postgres:9.4
73 | env:
74 | - name: PGDATA
75 | value: /var/lib/postgresql/data/pgdata
76 | ports:
77 | - containerPort: 5432
78 | name: db
79 | volumeMounts:
80 | - name: db-data
81 | mountPath: /var/lib/postgresql/data
82 | volumes:
83 | - name: db-data
84 | persistentVolumeClaim:
85 | claimName: postgres-pv-claim
86 | ---
87 | apiVersion: v1
88 | kind: PersistentVolumeClaim
89 | metadata:
90 | name: postgres-pv-claim
91 | spec:
92 | accessModes:
93 | - ReadWriteOnce
94 | resources:
95 | requests:
96 | storage: 10Gi
97 | ---
98 | apiVersion: v1
99 | kind: Service
100 | metadata:
101 | name: result
102 | labels:
103 | app: result
104 | spec:
105 | type: LoadBalancer
106 | externalIPs:
107 | - 172.22.132.9
108 | ports:
109 | - port: 5001
110 | targetPort: 80
111 | name: result
112 | selector:
113 | app: result
114 | ---
115 | apiVersion: apps/v1
116 | kind: Deployment
117 | metadata:
118 | name: result
119 | labels:
120 | app: result
121 | spec:
122 | selector:
123 | matchLabels:
124 | app: result
125 | replicas: 1
126 | template:
127 | metadata:
128 | labels:
129 | app: result
130 | spec:
131 | containers:
132 | - name: result
133 | image: kairen/vote-result:v1.0.0
134 | ports:
135 | - containerPort: 80
136 | name: result
137 | ---
138 | apiVersion: v1
139 | kind: Service
140 | metadata:
141 | name: vote
142 | labels:
143 |     app: vote
144 | spec:
145 | type: LoadBalancer
146 | externalIPs:
147 | - 172.22.132.9
148 | ports:
149 | - port: 5000
150 | targetPort: 80
151 | name: vote
152 | selector:
153 | app: vote
154 | ---
155 | apiVersion: apps/v1
156 | kind: Deployment
157 | metadata:
158 | name: vote
159 | labels:
160 | app: vote
161 | spec:
162 | selector:
163 | matchLabels:
164 | app: vote
165 | replicas: 2
166 | template:
167 | metadata:
168 | labels:
169 | app: vote
170 | spec:
171 | containers:
172 | - name: vote
173 | image: kairen/vote:v1.0.0
174 | ports:
175 | - containerPort: 80
176 | name: vote
177 | resources:
178 | requests:
179 | cpu: "100m"
180 | limits:
181 | cpu: "1000m"
182 | ---
183 | apiVersion: v1
184 | kind: Service
185 | metadata:
186 | labels:
187 |     app: worker
188 | name: worker
189 | spec:
190 | clusterIP: None
191 | selector:
192 | app: worker
193 | ---
194 | apiVersion: apps/v1
195 | kind: Deployment
196 | metadata:
197 | labels:
198 | app: worker
199 | name: worker
200 | spec:
201 | selector:
202 | matchLabels:
203 | app: worker
204 | replicas: 1
205 | template:
206 | metadata:
207 | labels:
208 | app: worker
209 | spec:
210 | containers:
211 | - image: kairen/vote-worker:v1.0.0
212 | name: worker
213 |
--------------------------------------------------------------------------------
/manual-installation/manifests/apiserver.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | annotations:
5 | scheduler.alpha.kubernetes.io/critical-pod: ""
6 | labels:
7 | component: kube-apiserver
8 | tier: control-plane
9 | name: kube-apiserver
10 | namespace: kube-system
11 | spec:
12 | hostNetwork: true
13 |   containers:
14 | - name: kube-apiserver
15 | image: gcr.io/google_containers/kube-apiserver-amd64:v1.8.2
16 | command:
17 | - kube-apiserver
18 | - --v=0
19 | - --logtostderr=true
20 | - --allow-privileged=true
21 | - --bind-address=0.0.0.0
22 | - --secure-port=6443
23 | - --insecure-port=0
24 | - --advertise-address=172.16.35.12
25 | - --service-cluster-ip-range=10.96.0.0/12
26 | - --service-node-port-range=30000-32767
27 | - --etcd-servers=https://172.16.35.12:2379
28 | - --etcd-cafile=/etc/etcd/ssl/etcd-ca.pem
29 | - --etcd-certfile=/etc/etcd/ssl/etcd.pem
30 | - --etcd-keyfile=/etc/etcd/ssl/etcd-key.pem
31 | - --client-ca-file=/etc/kubernetes/pki/ca.pem
32 | - --tls-cert-file=/etc/kubernetes/pki/apiserver.pem
33 | - --tls-private-key-file=/etc/kubernetes/pki/apiserver-key.pem
34 | - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver.pem
35 | - --kubelet-client-key=/etc/kubernetes/pki/apiserver-key.pem
36 | - --service-account-key-file=/etc/kubernetes/pki/sa.pub
37 | - --token-auth-file=/etc/kubernetes/token.csv
38 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
39 | - --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
40 | - --authorization-mode=Node,RBAC
41 | - --enable-bootstrap-token-auth=true
42 | - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
43 | - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem
44 | - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem
45 | - --requestheader-allowed-names=aggregator
46 | - --requestheader-group-headers=X-Remote-Group
47 | - --requestheader-extra-headers-prefix=X-Remote-Extra-
48 | - --requestheader-username-headers=X-Remote-User
49 | - --audit-log-maxage=30
50 | - --audit-log-maxbackup=3
51 | - --audit-log-maxsize=100
52 | - --audit-log-path=/var/log/kubernetes/audit.log
53 | - --audit-policy-file=/etc/kubernetes/audit-policy.yml
54 | - --experimental-encryption-provider-config=/etc/kubernetes/encryption.yml
55 | - --event-ttl=1h
56 | livenessProbe:
57 | failureThreshold: 8
58 | httpGet:
59 | host: 127.0.0.1
60 | path: /healthz
61 | port: 6443
62 | scheme: HTTPS
63 | initialDelaySeconds: 15
64 | timeoutSeconds: 15
65 | resources:
66 | requests:
67 | cpu: 250m
68 | volumeMounts:
69 | - mountPath: /var/log/kubernetes
70 | name: k8s-audit-log
71 | - mountPath: /etc/kubernetes/pki
72 | name: k8s-certs
73 | readOnly: true
74 | - mountPath: /etc/ssl/certs
75 | name: ca-certs
76 | readOnly: true
77 | - mountPath: /etc/kubernetes/encryption.yml
78 | name: encryption-config
79 | readOnly: true
80 | - mountPath: /etc/kubernetes/audit-policy.yml
81 | name: audit-config
82 | readOnly: true
83 | - mountPath: /etc/kubernetes/token.csv
84 | name: token-csv
85 | readOnly: true
86 | - mountPath: /etc/etcd/ssl
87 | name: etcd-ca-certs
88 | readOnly: true
89 | volumes:
90 | - hostPath:
91 | path: /var/log/kubernetes
92 | type: DirectoryOrCreate
93 | name: k8s-audit-log
94 | - hostPath:
95 | path: /etc/kubernetes/pki
96 | type: DirectoryOrCreate
97 | name: k8s-certs
98 | - hostPath:
99 | path: /etc/kubernetes/encryption.yml
100 | type: FileOrCreate
101 | name: encryption-config
102 | - hostPath:
103 | path: /etc/kubernetes/audit-policy.yml
104 | type: FileOrCreate
105 | name: audit-config
106 | - hostPath:
107 | path: /etc/kubernetes/token.csv
108 | type: FileOrCreate
109 | name: token-csv
110 | - hostPath:
111 | path: /etc/ssl/certs
112 | type: DirectoryOrCreate
113 | name: ca-certs
114 | - hostPath:
115 | path: /etc/etcd/ssl
116 | type: DirectoryOrCreate
117 | name: etcd-ca-certs
118 |
--------------------------------------------------------------------------------
/multi-cluster/kubeadm/README.md:
--------------------------------------------------------------------------------
1 | # Kubeadm Multi-node Installation
2 |
3 | ## Create a cluster
4 | Create the nodes with the following commands:
5 |
6 | ```sh
7 | $ vagrant up
8 | $ vagrant status
9 | ```
10 |
11 | Modify `/etc/hosts` on all nodes by adding the following entries (a scripted sketch follows the block):
12 |
13 | ```
14 | 192.16.35.10 k8s-n1
15 | 192.16.35.11 k8s-n2
16 | 192.16.35.12 k8s-m1
17 | ```
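A scripted way to apply this on every node, assuming the Vagrant machine names above and that `vagrant ssh <name> -c` works from the project directory (a sketch, not part of the original repo):

```sh
for n in k8s-m1 k8s-n1 k8s-n2; do
  vagrant ssh "$n" -c 'cat <<EOF | sudo tee -a /etc/hosts
192.16.35.10 k8s-n1
192.16.35.11 k8s-n2
192.16.35.12 k8s-m1
EOF'
done
```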
18 |
19 | Now, SSH into `k8s-m1`, and then run the following commands:
20 |
21 | ```sh
22 | $ sudo kubeadm init --apiserver-advertise-address=192.16.35.12 \
23 | --pod-network-cidr=10.244.0.0/16 \
24 | --token rlag12.6sd1dhhery5r6fk2 \
25 | --ignore-preflight-errors=NumCPU
26 |
27 | # Copy kubeconfig
28 | $ mkdir -p $HOME/.kube && \
29 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config && \
30 | sudo chown $(id -u):$(id -g) $HOME/.kube/config
31 |
32 | # Create CNI
33 | $ kubectl apply -f /vagrant/calico.yaml
34 | ```
35 |
36 | SSH into the other nodes, and then run the following commands:
37 |
38 | ```sh
39 | # Use the join command printed by kubeadm init on the master
40 | $ sudo kubeadm join 192.16.35.12:6443 --token rlag12.6sd1dhhery5r6fk2 \
41 | --discovery-token-ca-cert-hash sha256:ea1f9e8a715c5fcaf1379073e4f9ed5ea34339398b1fab3bcc2bfe74cc07c6be
42 | ```
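If the `--discovery-token-ca-cert-hash` value was not saved from the `kubeadm init` output, it can be recomputed on the master with the standard kubeadm recipe:

```sh
$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | \
    openssl rsa -pubin -outform der 2>/dev/null | \
    openssl dgst -sha256 -hex | sed 's/^.* //'
```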
43 |
44 | ## Create an NGINX deployment and PDB
45 |
46 | Run the following command on the master:
47 |
48 | ```sh
49 | $ kubectl apply -f /vagrant/pdb/
50 | ```
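The manifests under `/vagrant/pdb/` are not reproduced here. As a rough sketch (the names and the `app: nginx` label are assumptions), a PodDisruptionBudget for an NGINX deployment on this cluster version could look like:

```yaml
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: nginx-pdb
spec:
  minAvailable: 1        # keep at least one pod up during voluntary disruptions
  selector:
    matchLabels:
      app: nginx         # must match the deployment's pod template labels
```

With such a budget in place, the `kubectl drain` steps below will wait rather than evict the last available replica.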
51 |
52 | ## Upgrade to v1.15.x
53 |
54 | ### Upgrading control plane nodes
55 |
56 | ```sh
57 | $ sudo apt-get update && sudo apt-get install -y kubeadm=1.15.2-00 && \
58 | sudo apt-mark hold kubeadm
59 | ```
60 |
61 | Verify that the download works and has the expected version:
62 |
63 | ```sh
64 | $ kubeadm version
65 | kubeadm version: &version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.2", GitCommit:"f6278300bebbb750328ac16ee6dd3aa7d3549568", GitTreeState:"clean", BuildDate:"2019-08-05T09:20:51Z", GoVersion:"go1.12.5", Compiler:"gc", Platform:"linux/amd64"}
66 | ```
67 |
68 | On the control plane node, run:
69 |
70 | ```sh
71 | $ sudo kubeadm upgrade plan
72 | $ sudo kubeadm upgrade apply v1.15.2
73 |
74 | # On any additional control plane nodes, run the following
75 | # instead of repeating `kubeadm upgrade apply`:
76 | $ sudo kubeadm upgrade node
77 | ```
78 |
79 | Upgrade the kubelet and kubectl on all control plane nodes:
80 |
81 | ```sh
82 | $ sudo apt-get update && sudo apt-get install -y kubelet=1.15.2-00 kubectl=1.15.2-00 && \
83 | sudo apt-mark hold kubelet kubectl
84 | ```
85 |
86 | Restart the kubelet:
87 |
88 | ```sh
89 | $ sudo systemctl restart kubelet
90 | ```
91 |
92 | ### Upgrading worker nodes
93 |
94 | Run the following command on the master to drain each node:
95 |
96 | ```sh
97 | $ kubectl drain $NODE --ignore-daemonsets
98 | ```
99 |
100 | Run the following commands on the worker nodes (`k8s-n1`, `k8s-n2`):
101 | ```sh
102 | $ sudo apt-get update && sudo apt-get install -y kubeadm=1.15.2-00 && \
103 | sudo apt-mark hold kubeadm
104 | ```
105 |
106 | Run the following command on each node to upgrade the kubelet configuration:
107 |
108 | ```sh
109 | $ sudo kubeadm upgrade node
110 | ```
111 |
112 | Upgrade the kubelet and kubectl on all nodes:
113 |
114 | ```sh
115 | $ sudo apt-get update && sudo apt-get install -y kubelet=1.15.2-00 && \
116 | sudo apt-mark hold kubelet
117 | ```
118 |
119 | Restart the kubelet:
120 |
121 | ```sh
122 | $ sudo systemctl restart kubelet
123 | ```
124 |
125 | Run the following command on the master to uncordon each node:
126 |
127 | ```sh
128 | $ kubectl uncordon $NODE
129 | ```
130 |
131 | ## Check cluster
132 |
133 | Check the versions on the control plane node:
134 |
135 | ```sh
136 | $ kubectl version
137 | Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.2", GitCommit:"f6278300bebbb750328ac16ee6dd3aa7d3549568", GitTreeState:"clean", BuildDate:"2019-08-05T09:23:26Z", GoVersion:"go1.12.5", Compiler:"gc", Platform:"linux/amd64"}
138 | Server Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.2", GitCommit:"f6278300bebbb750328ac16ee6dd3aa7d3549568", GitTreeState:"clean", BuildDate:"2019-08-05T09:15:22Z", GoVersion:"go1.12.5", Compiler:"gc", Platform:"linux/amd64"}
139 |
140 | $ kubectl get no
141 | NAME     STATUS   ROLES    AGE   VERSION
142 | k8s-m1   Ready    master   15m   v1.15.2
143 | k8s-n1   Ready    <none>   13m   v1.15.2
144 | k8s-n2   Ready    <none>   12m   v1.15.2
145 |
146 | $ kubectl get cs
147 | NAME                 STATUS    MESSAGE             ERROR
148 | controller-manager   Healthy   ok
149 | scheduler            Healthy   ok
150 | etcd-0               Healthy   {"health":"true"}
151 | ```
--------------------------------------------------------------------------------
/practical-k8s/practical-apps/lab4-prometheus/prometheus-config-map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: prometheus-server-conf
5 | labels:
6 | name: prometheus-server-conf
7 | namespace: default
8 | data:
9 | prometheus.yml: |-
10 | global:
11 | scrape_interval: 15s
12 | #evaluation_interval: 15s
13 |
14 | scrape_configs:
15 |
16 | - job_name: 'kubernetes-apiservers'
17 |
18 | kubernetes_sd_configs:
19 | - role: endpoints
20 | scheme: https
21 |
22 | tls_config:
23 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
24 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
25 |
26 | relabel_configs:
27 | - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
28 | action: keep
29 | regex: default;kubernetes;https
30 |
31 | - job_name: 'kubernetes-nodes'
32 |
33 | scheme: https
34 |
35 | tls_config:
36 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
37 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
38 |
39 | kubernetes_sd_configs:
40 | - role: node
41 |
42 | relabel_configs:
43 | - action: labelmap
44 | regex: __meta_kubernetes_node_label_(.+)
45 | - target_label: __address__
46 | replacement: kubernetes.default.svc:443
47 | - source_labels: [__meta_kubernetes_node_name]
48 | regex: (.+)
49 | target_label: __metrics_path__
50 | replacement: /api/v1/nodes/${1}/proxy/metrics
51 |
52 |
53 | - job_name: 'kubernetes-pods'
54 |
55 | kubernetes_sd_configs:
56 | - role: pod
57 |
58 | relabel_configs:
59 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
60 | action: keep
61 | regex: true
62 | - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
63 | action: replace
64 | target_label: __metrics_path__
65 | regex: (.+)
66 | - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
67 | action: replace
68 | regex: ([^:]+)(?::\d+)?;(\d+)
69 | replacement: $1:$2
70 | target_label: __address__
71 | - action: labelmap
72 | regex: __meta_kubernetes_pod_label_(.+)
73 | - source_labels: [__meta_kubernetes_namespace]
74 | action: replace
75 | target_label: kubernetes_namespace
76 | - source_labels: [__meta_kubernetes_pod_name]
77 | action: replace
78 | target_label: kubernetes_pod_name
79 |
80 | - job_name: 'kubernetes-cadvisor'
81 |
82 | scheme: https
83 |
84 | tls_config:
85 | ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
86 | bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
87 |
88 | kubernetes_sd_configs:
89 | - role: node
90 |
91 | relabel_configs:
92 | - action: labelmap
93 | regex: __meta_kubernetes_node_label_(.+)
94 | - target_label: __address__
95 | replacement: kubernetes.default.svc:443
96 | - source_labels: [__meta_kubernetes_node_name]
97 | regex: (.+)
98 | target_label: __metrics_path__
99 | replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
100 |
101 | - job_name: 'kubernetes-service-endpoints'
102 |
103 | kubernetes_sd_configs:
104 | - role: endpoints
105 |
106 | relabel_configs:
107 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
108 | action: keep
109 | regex: true
110 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
111 | action: replace
112 | target_label: __scheme__
113 | regex: (https?)
114 | - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
115 | action: replace
116 | target_label: __metrics_path__
117 | regex: (.+)
118 | - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
119 | action: replace
120 | target_label: __address__
121 | regex: ([^:]+)(?::\d+)?;(\d+)
122 | replacement: $1:$2
123 | - action: labelmap
124 | regex: __meta_kubernetes_service_label_(.+)
125 | - source_labels: [__meta_kubernetes_namespace]
126 | action: replace
127 | target_label: kubernetes_namespace
128 | - source_labels: [__meta_kubernetes_service_name]
129 | action: replace
130 | target_label: kubernetes_name
131 |
--------------------------------------------------------------------------------
/harbor/config/harbor.yml:
--------------------------------------------------------------------------------
1 | # Configuration file of Harbor
2 |
3 | # The IP address or hostname to access admin UI and registry service.
4 | # DO NOT use localhost or 127.0.0.1, because Harbor needs to be accessed by external clients.
5 | hostname: 192.16.35.99
6 |
7 | # http related config
8 | http:
9 |   # Port for HTTP; default is 80. If HTTPS is enabled, this port redirects to the HTTPS port.
10 | port: 80
11 |
12 | # https related config
13 | https:
14 | # https port for harbor, default is 443
15 | port: 443
16 | # The path of cert and key files for nginx
17 | certificate: /data/cert/harbor-registry.crt
18 | private_key: /data/cert/harbor-registry.key
19 |
20 | # Uncomment external_url if you want to enable an external proxy.
21 | # When it is enabled, the hostname above is no longer used.
22 | # external_url: https://reg.mydomain.com:8433
23 |
24 | # The initial password of the Harbor admin.
25 | # It only takes effect the first time Harbor is installed.
26 | # Remember to change the admin password from the UI after launching Harbor.
27 | harbor_admin_password: r00tme
28 |
29 | # Harbor DB configuration
30 | database:
31 | # The password for the root user of Harbor DB. Change this before any production use.
32 | password: root123
33 |
34 | # The default data volume
35 | data_volume: /data
36 |
37 | # By default, Harbor stores data under the /data directory on the local filesystem.
38 | # Uncomment the storage_service settings if you want to use external storage.
39 | # storage_service:
40 | # # ca_bundle is the path to the custom root ca certificate, which will be injected into the truststore
41 | #   # of registry's and chart repository's containers. This is usually needed when the user hosts an internal storage service with a self-signed certificate.
42 | # ca_bundle:
43 |
44 | # # storage backend, default is filesystem, options include filesystem, azure, gcs, s3, swift and oss
45 | #   # for more info about this configuration, please refer to https://docs.docker.com/registry/configuration/
46 | # filesystem:
47 | # maxthreads: 100
48 | #   # # set disabled to true when you want to disable registry redirect
49 | # redirect:
50 | # disabled: false
51 |
52 | # Clair configuration
53 | clair:
54 |   # The interval of the Clair updaters in hours; set to 0 to disable the updaters.
55 | updaters_interval: 12
56 |
57 |   # Configure an HTTP proxy for Clair, e.g. http://my.proxy.com:3128.
58 |   # Clair doesn't need an HTTP proxy to reach Harbor's internal components.
59 | http_proxy:
60 | https_proxy:
61 | no_proxy: 127.0.0.1,localhost,core,registry
62 |
63 | jobservice:
64 | # Maximum number of job workers in job service
65 | max_job_workers: 10
66 |
67 | chart:
68 |   # Set absolute_url to enabled to use absolute URLs in the chart repository.
69 | absolute_url: disabled
70 |
71 | # Log configurations
72 | log:
73 | # options are debug, info, warning, error, fatal
74 | level: info
75 | # Log files are rotated log_rotate_count times before being removed. If count is 0, old versions are removed rather than rotated.
76 | rotate_count: 50
77 | # Log files are rotated only if they grow bigger than log_rotate_size bytes. If size is followed by k, the size is assumed to be in kilobytes.
78 | # If the M is used, the size is in megabytes, and if G is used, the size is in gigabytes. So size 100, size 100k, size 100M and size 100G
79 | # are all valid.
80 | rotate_size: 200M
81 |   # The directory on your host that stores the logs.
82 | location: /var/log/harbor
83 |
84 | # This attribute is used by the migrator to detect the version of the configuration file. DO NOT MODIFY!
85 | _version: 1.8.0
86 |
87 | # Uncomment external_database if using external database.
88 | # external_database:
89 | # harbor:
90 | # host: harbor_db_host
91 | # port: harbor_db_port
92 | # db_name: harbor_db_name
93 | # username: harbor_db_username
94 | # password: harbor_db_password
95 | # ssl_mode: disable
96 | # clair:
97 | # host: clair_db_host
98 | # port: clair_db_port
99 | # db_name: clair_db_name
100 | # username: clair_db_username
101 | # password: clair_db_password
102 | # ssl_mode: disable
103 | # notary_signer:
104 | # host: notary_signer_db_host
105 | # port: notary_signer_db_port
106 | # db_name: notary_signer_db_name
107 | # username: notary_signer_db_username
108 | # password: notary_signer_db_password
109 | # ssl_mode: disable
110 | # notary_server:
111 | # host: notary_server_db_host
112 | # port: notary_server_db_port
113 | # db_name: notary_server_db_name
114 | # username: notary_server_db_username
115 | # password: notary_server_db_password
116 | # ssl_mode: disable
117 |
118 | # Uncomment external_redis if using external Redis server
119 | # external_redis:
120 | # host: redis
121 | # port: 6379
122 | # password:
123 | # # db_index 0 is for core, it's unchangeable
124 | # registry_db_index: 1
125 | # jobservice_db_index: 2
126 | # chartmuseum_db_index: 3
127 |
128 | # Uncomment uaa for trusting the certificate of uaa instance that is hosted via self-signed cert.
129 | # uaa:
130 | # ca_file: /path/to/ca
131 |
--------------------------------------------------------------------------------
/manual-installation/conf/kube-dns.yml.conf:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: kube-dns
5 | labels:
6 | k8s-app: kube-dns
7 | kubernetes.io/cluster-service: "true"
8 | addonmanager.kubernetes.io/mode: Reconcile
9 | namespace: kube-system
10 | ---
11 | apiVersion: v1
12 | kind: Service
13 | metadata:
14 | name: kube-dns
15 | namespace: kube-system
16 | labels:
17 | k8s-app: kube-dns
18 | kubernetes.io/cluster-service: "true"
19 | addonmanager.kubernetes.io/mode: Reconcile
20 | spec:
21 | selector:
22 | k8s-app: kube-dns
23 | clusterIP: 10.96.0.10
24 | ports:
25 | - name: dns
26 | port: 53
27 | protocol: UDP
28 | - name: dns-tcp
29 | port: 53
30 | protocol: TCP
31 | ---
32 | apiVersion: extensions/v1beta1
33 | kind: Deployment
34 | metadata:
35 | name: kube-dns
36 | namespace: kube-system
37 | labels:
38 | k8s-app: kube-dns
39 | kubernetes.io/cluster-service: "true"
40 | addonmanager.kubernetes.io/mode: Reconcile
41 | spec:
42 | strategy:
43 | rollingUpdate:
44 | maxSurge: 10%
45 | maxUnavailable: 0
46 | selector:
47 | matchLabels:
48 | k8s-app: kube-dns
49 | template:
50 | metadata:
51 | labels:
52 | k8s-app: kube-dns
53 | annotations:
54 | scheduler.alpha.kubernetes.io/critical-pod: ''
55 | spec:
56 | dnsPolicy: Default
57 | serviceAccountName: kube-dns
58 | tolerations:
59 | - key: "CriticalAddonsOnly"
60 | operator: "Exists"
61 | - key: node-role.kubernetes.io/master
62 | effect: NoSchedule
63 | volumes:
64 | - name: kube-dns-config
65 | configMap:
66 | name: kube-dns
67 | optional: true
68 | containers:
69 | - name: kubedns
70 | image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
71 | resources:
72 | limits:
73 | memory: 170Mi
74 | requests:
75 | cpu: 100m
76 | memory: 70Mi
77 | livenessProbe:
78 | httpGet:
79 | path: /healthcheck/kubedns
80 | port: 10054
81 | scheme: HTTP
82 | initialDelaySeconds: 60
83 | timeoutSeconds: 5
84 | successThreshold: 1
85 | failureThreshold: 5
86 | readinessProbe:
87 | httpGet:
88 | path: /readiness
89 | port: 8081
90 | scheme: HTTP
91 | initialDelaySeconds: 3
92 | timeoutSeconds: 5
93 | args:
94 | - "--domain=cluster.local"
95 | - --dns-port=10053
96 | - --v=2
97 | env:
98 | - name: PROMETHEUS_PORT
99 | value: "10055"
100 | ports:
101 | - containerPort: 10053
102 | name: dns-local
103 | protocol: UDP
104 | - containerPort: 10053
105 | name: dns-tcp-local
106 | protocol: TCP
107 | - containerPort: 10055
108 | name: metrics
109 | protocol: TCP
110 | volumeMounts:
111 | - name: kube-dns-config
112 | mountPath: /kube-dns-config
113 | - name: dnsmasq
114 | image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
115 | livenessProbe:
116 | httpGet:
117 | path: /healthcheck/dnsmasq
118 | port: 10054
119 | scheme: HTTP
120 | initialDelaySeconds: 60
121 | timeoutSeconds: 5
122 | successThreshold: 1
123 | failureThreshold: 5
124 | args:
125 | - "-v=2"
126 | - "-logtostderr"
127 | - "-configDir=/etc/k8s/dns/dnsmasq-nanny"
128 | - "-restartDnsmasq=true"
129 | - "--"
130 | - "-k"
131 | - "--cache-size=1000"
132 | - "--log-facility=-"
133 | - "--server=/cluster.local/127.0.0.1#10053"
134 | - "--server=/in-addr.arpa/127.0.0.1#10053"
135 | - "--server=/ip6.arpa/127.0.0.1#10053"
136 | ports:
137 | - containerPort: 53
138 | name: dns
139 | protocol: UDP
140 | - containerPort: 53
141 | name: dns-tcp
142 | protocol: TCP
143 | resources:
144 | requests:
145 | cpu: 150m
146 | memory: 20Mi
147 | volumeMounts:
148 | - name: kube-dns-config
149 | mountPath: /etc/k8s/dns/dnsmasq-nanny
150 | - name: sidecar
151 | image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
152 | livenessProbe:
153 | httpGet:
154 | path: /metrics
155 | port: 10054
156 | scheme: HTTP
157 | initialDelaySeconds: 60
158 | timeoutSeconds: 5
159 | successThreshold: 1
160 | failureThreshold: 5
161 | args:
162 | - "--v=2"
163 | - "--logtostderr"
164 | - "--probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A"
165 | - "--probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A"
166 | ports:
167 | - containerPort: 10054
168 | name: metrics
169 | protocol: TCP
170 | resources:
171 | requests:
172 | memory: 20Mi
173 | cpu: 10m
174 |
--------------------------------------------------------------------------------
/addons/monitoring/grafana/grafana-dp.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | app: grafana
6 | name: grafana
7 | namespace: monitoring
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: grafana
13 | template:
14 | metadata:
15 | labels:
16 | app: grafana
17 | spec:
18 | serviceAccountName: grafana
19 | nodeSelector:
20 | beta.kubernetes.io/os: linux
21 | containers:
22 | - name: grafana
23 | image: grafana/grafana:6.0.1
24 | env:
25 | - name: GF_SECURITY_ADMIN_USER
26 | valueFrom:
27 | secretKeyRef:
28 | name: grafana-credentials
29 | key: user
30 | - name: GF_SECURITY_ADMIN_PASSWORD
31 | valueFrom:
32 | secretKeyRef:
33 | name: grafana-credentials
34 | key: password
35 | ports:
36 | - containerPort: 3000
37 | name: http
38 | readinessProbe:
39 | httpGet:
40 | path: /api/health
41 | port: http
42 | resources:
43 | limits:
44 | cpu: 200m
45 | memory: 200Mi
46 | requests:
47 | cpu: 100m
48 | memory: 100Mi
49 | volumeMounts:
50 | - mountPath: /var/lib/grafana
51 | name: grafana-storage
52 | readOnly: false
53 | - mountPath: /etc/grafana/provisioning/datasources
54 | name: grafana-datasources
55 | readOnly: false
56 | - mountPath: /etc/grafana/provisioning/dashboards
57 | name: grafana-dashboards
58 | readOnly: false
59 | - mountPath: /grafana-dashboard-definitions/0/k8s-cluster-rsrc-use
60 | name: grafana-dashboard-k8s-cluster-rsrc-use
61 | readOnly: false
62 | - mountPath: /grafana-dashboard-definitions/0/k8s-node-rsrc-use
63 | name: grafana-dashboard-k8s-node-rsrc-use
64 | readOnly: false
65 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-cluster
66 | name: grafana-dashboard-k8s-resources-cluster
67 | readOnly: false
68 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-namespace
69 | name: grafana-dashboard-k8s-resources-namespace
70 | readOnly: false
71 | - mountPath: /grafana-dashboard-definitions/0/k8s-resources-pod
72 | name: grafana-dashboard-k8s-resources-pod
73 | readOnly: false
74 | - mountPath: /grafana-dashboard-definitions/0/nodes
75 | name: grafana-dashboard-nodes
76 | readOnly: false
77 | - mountPath: /grafana-dashboard-definitions/0/pods
78 | name: grafana-dashboard-pods
79 | readOnly: false
80 | - mountPath: /grafana-dashboard-definitions/0/statefulset
81 | name: grafana-dashboard-statefulset
82 | readOnly: false
83 | - mountPath: /grafana-dashboard-definitions/0/gpu-pod
84 | name: grafana-dashboard-gpu-pod
85 | readOnly: false
86 | - mountPath: /grafana-dashboard-definitions/0/gpu-node
87 | name: grafana-dashboard-gpu-node
88 | readOnly: false
89 | - mountPath: /grafana-dashboard-definitions/0/gpu-cluster
90 | name: grafana-dashboard-gpu-cluster
91 | readOnly: false
92 | - mountPath: /grafana-dashboard-definitions/0/k8s-cluster
93 | name: grafana-dashboard-k8s-cluster
94 | readOnly: false
95 | securityContext:
96 | runAsNonRoot: true
97 | runAsUser: 65534
98 | volumes:
99 | - emptyDir: {}
100 | name: grafana-storage
101 | - name: grafana-datasources
102 | secret:
103 | secretName: grafana-datasources
104 | - configMap:
105 | name: grafana-dashboards
106 | name: grafana-dashboards
107 | - configMap:
108 | name: grafana-dashboard-k8s-cluster-rsrc-use
109 | name: grafana-dashboard-k8s-cluster-rsrc-use
110 | - configMap:
111 | name: grafana-dashboard-k8s-node-rsrc-use
112 | name: grafana-dashboard-k8s-node-rsrc-use
113 | - configMap:
114 | name: grafana-dashboard-k8s-resources-cluster
115 | name: grafana-dashboard-k8s-resources-cluster
116 | - configMap:
117 | name: grafana-dashboard-k8s-resources-namespace
118 | name: grafana-dashboard-k8s-resources-namespace
119 | - configMap:
120 | name: grafana-dashboard-k8s-resources-pod
121 | name: grafana-dashboard-k8s-resources-pod
122 | - configMap:
123 | name: grafana-dashboard-nodes
124 | name: grafana-dashboard-nodes
125 | - configMap:
126 | name: grafana-dashboard-pods
127 | name: grafana-dashboard-pods
128 | - configMap:
129 | name: grafana-dashboard-statefulset
130 | name: grafana-dashboard-statefulset
131 | - configMap:
132 | name: grafana-dashboard-gpu-pod
133 | name: grafana-dashboard-gpu-pod
134 | - configMap:
135 | name: grafana-dashboard-gpu-node
136 | name: grafana-dashboard-gpu-node
137 | - configMap:
138 | name: grafana-dashboard-gpu-cluster
139 | name: grafana-dashboard-gpu-cluster
140 | - configMap:
141 | name: grafana-dashboard-k8s-cluster
142 | name: grafana-dashboard-k8s-cluster
143 |
--------------------------------------------------------------------------------
/addons/ingress-controller/ingress-controller.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: ingress-nginx
5 | labels:
6 | app.kubernetes.io/name: ingress-nginx
7 | app.kubernetes.io/part-of: ingress-nginx
8 | ---
9 | kind: ConfigMap
10 | apiVersion: v1
11 | metadata:
12 | name: nginx-configuration
13 | namespace: ingress-nginx
14 | labels:
15 | app.kubernetes.io/name: ingress-nginx
16 | app.kubernetes.io/part-of: ingress-nginx
17 | ---
18 | kind: ConfigMap
19 | apiVersion: v1
20 | metadata:
21 | name: tcp-services
22 | namespace: ingress-nginx
23 | labels:
24 | app.kubernetes.io/name: ingress-nginx
25 | app.kubernetes.io/part-of: ingress-nginx
26 | ---
27 | kind: ConfigMap
28 | apiVersion: v1
29 | metadata:
30 | name: udp-services
31 | namespace: ingress-nginx
32 | labels:
33 | app.kubernetes.io/name: ingress-nginx
34 | app.kubernetes.io/part-of: ingress-nginx
35 | ---
36 | apiVersion: v1
37 | kind: ServiceAccount
38 | metadata:
39 | name: nginx-ingress-serviceaccount
40 | namespace: ingress-nginx
41 | labels:
42 | app.kubernetes.io/name: ingress-nginx
43 | app.kubernetes.io/part-of: ingress-nginx
44 | ---
45 | apiVersion: rbac.authorization.k8s.io/v1
46 | kind: ClusterRole
47 | metadata:
48 | name: nginx-ingress-clusterrole
49 | labels:
50 | app.kubernetes.io/name: ingress-nginx
51 | app.kubernetes.io/part-of: ingress-nginx
52 | rules:
53 | - apiGroups:
54 | - ""
55 | resources:
56 | - configmaps
57 | - endpoints
58 | - nodes
59 | - pods
60 | - secrets
61 | verbs:
62 | - list
63 | - watch
64 | - apiGroups:
65 | - ""
66 | resources:
67 | - nodes
68 | verbs:
69 | - get
70 | - apiGroups:
71 | - ""
72 | resources:
73 | - services
74 | verbs:
75 | - get
76 | - list
77 | - watch
78 | - apiGroups:
79 | - "extensions"
80 | resources:
81 | - ingresses
82 | verbs:
83 | - get
84 | - list
85 | - watch
86 | - apiGroups:
87 | - ""
88 | resources:
89 | - events
90 | verbs:
91 | - create
92 | - patch
93 | - apiGroups:
94 | - "extensions"
95 | resources:
96 | - ingresses/status
97 | verbs:
98 | - update
99 | ---
100 | apiVersion: rbac.authorization.k8s.io/v1
101 | kind: Role
102 | metadata:
103 | name: nginx-ingress-role
104 | namespace: ingress-nginx
105 | labels:
106 | app.kubernetes.io/name: ingress-nginx
107 | app.kubernetes.io/part-of: ingress-nginx
108 | rules:
109 | - apiGroups:
110 | - ""
111 | resources:
112 | - configmaps
113 | - pods
114 | - secrets
115 | - namespaces
116 | verbs:
117 | - get
118 | - apiGroups:
119 | - ""
120 | resources:
121 | - configmaps
122 | resourceNames:
123 |       # Defaults to "<election-id>-<ingress-class>"
124 |       # Here: "<ingress-controller-leader>-<nginx>"
125 | # This has to be adapted if you change either parameter
126 | # when launching the nginx-ingress-controller.
127 | - "ingress-controller-leader-nginx"
128 | verbs:
129 | - get
130 | - update
131 | - apiGroups:
132 | - ""
133 | resources:
134 | - configmaps
135 | verbs:
136 | - create
137 | - apiGroups:
138 | - ""
139 | resources:
140 | - endpoints
141 | verbs:
142 | - get
143 | ---
144 | apiVersion: rbac.authorization.k8s.io/v1
145 | kind: RoleBinding
146 | metadata:
147 | name: nginx-ingress-role-nisa-binding
148 | namespace: ingress-nginx
149 | labels:
150 | app.kubernetes.io/name: ingress-nginx
151 | app.kubernetes.io/part-of: ingress-nginx
152 | roleRef:
153 | apiGroup: rbac.authorization.k8s.io
154 | kind: Role
155 | name: nginx-ingress-role
156 | subjects:
157 | - kind: ServiceAccount
158 | name: nginx-ingress-serviceaccount
159 | namespace: ingress-nginx
160 | ---
161 | apiVersion: rbac.authorization.k8s.io/v1
162 | kind: ClusterRoleBinding
163 | metadata:
164 | name: nginx-ingress-clusterrole-nisa-binding
165 | labels:
166 | app.kubernetes.io/name: ingress-nginx
167 | app.kubernetes.io/part-of: ingress-nginx
168 | roleRef:
169 | apiGroup: rbac.authorization.k8s.io
170 | kind: ClusterRole
171 | name: nginx-ingress-clusterrole
172 | subjects:
173 | - kind: ServiceAccount
174 | name: nginx-ingress-serviceaccount
175 | namespace: ingress-nginx
176 | ---
177 | apiVersion: apps/v1
178 | kind: Deployment
179 | metadata:
180 | name: nginx-ingress-controller
181 | namespace: ingress-nginx
182 | labels:
183 | app.kubernetes.io/name: ingress-nginx
184 | app.kubernetes.io/part-of: ingress-nginx
185 | spec:
186 | replicas: 1
187 | selector:
188 | matchLabels:
189 | app.kubernetes.io/name: ingress-nginx
190 | app.kubernetes.io/part-of: ingress-nginx
191 | template:
192 | metadata:
193 | labels:
194 | app.kubernetes.io/name: ingress-nginx
195 | app.kubernetes.io/part-of: ingress-nginx
196 | annotations:
197 | prometheus.io/port: "10254"
198 | prometheus.io/scrape: "true"
199 | spec:
200 | serviceAccountName: nginx-ingress-serviceaccount
201 | containers:
202 | - name: nginx-ingress-controller
203 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
204 | args:
205 | - /nginx-ingress-controller
206 | - --configmap=$(POD_NAMESPACE)/nginx-configuration
207 | - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
208 | - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
209 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx
210 | - --annotations-prefix=nginx.ingress.kubernetes.io
211 | securityContext:
212 | capabilities:
213 | drop:
214 | - ALL
215 | add:
216 | - NET_BIND_SERVICE
217 | # www-data -> 33
218 | runAsUser: 33
219 | env:
220 | - name: POD_NAME
221 | valueFrom:
222 | fieldRef:
223 | fieldPath: metadata.name
224 | - name: POD_NAMESPACE
225 | valueFrom:
226 | fieldRef:
227 | fieldPath: metadata.namespace
228 | ports:
229 | - name: http
230 | containerPort: 80
231 | - name: https
232 | containerPort: 443
233 | livenessProbe:
234 | failureThreshold: 3
235 | httpGet:
236 | path: /healthz
237 | port: 10254
238 | scheme: HTTP
239 | initialDelaySeconds: 10
240 | periodSeconds: 10
241 | successThreshold: 1
242 | timeoutSeconds: 1
243 | readinessProbe:
244 | failureThreshold: 3
245 | httpGet:
246 | path: /healthz
247 | port: 10254
248 | scheme: HTTP
249 | periodSeconds: 10
250 | successThreshold: 1
251 | timeoutSeconds: 1
--------------------------------------------------------------------------------
/load-balancing/module/xt_statistic.c:
--------------------------------------------------------------------------------
1 | /*
2 |  * Copyright (c) 2006 Patrick McHardy <kaber@trash.net>
3 | *
4 | * This program is free software; you can redistribute it and/or modify
5 | * it under the terms of the GNU General Public License version 2 as
6 | * published by the Free Software Foundation.
7 | *
8 |  * Based on ipt_random and ipt_nth by Fabrice MARIE <fabrice@netfilter.org>.
9 | */
10 |
11 | #include <linux/module.h>
12 | #include <linux/skbuff.h>
13 | #include <linux/slab.h>
14 | #include <linux/net.h>
15 | #include <linux/fs.h>
16 | #include <linux/proc_fs.h>
17 |
18 | #include <linux/uaccess.h>
19 | #include <linux/ip.h>
20 | #include <linux/udp.h>
21 | #include <linux/inet.h>
22 | #include <linux/netfilter/x_tables.h>
23 | #include <linux/netfilter/xt_statistic.h>
24 |
25 | struct xt_statistic_priv {
26 | atomic_t count;
27 | } ____cacheline_aligned_in_smp;
28 |
29 | MODULE_LICENSE("GPL");
30 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
31 | MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
32 | MODULE_ALIAS("ipt_statistic");
33 | MODULE_ALIAS("ip6t_statistic");
34 |
35 | #define LENGTH_IP 16
36 | #define LENGTH_DOMAIN 30
37 | #define MAX_POD 6
38 | static char clusterIP[LENGTH_IP];
39 | static char targetDomain[LENGTH_DOMAIN];
40 | static char targetIP[LENGTH_DOMAIN];
41 | static char podIP[MAX_POD][LENGTH_IP];
42 | static int curPodSize = 0;
43 |
44 | static ssize_t clusterIPwrite(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
45 | {
46 | int c;
47 | memset(clusterIP, 0, LENGTH_IP);
48 |
49 | if (*ppos > 0 || count > LENGTH_IP)
50 | return -EFAULT;
51 | if (copy_from_user(clusterIP, ubuf, count))
52 | return -EFAULT;
53 |
54 | c = strlen(clusterIP);
55 | clusterIP[c-1]=0; //remove new line
56 | printk(KERN_INFO "Read %s", clusterIP);
57 | *ppos = c;
58 | return c;
59 | }
60 |
61 | static ssize_t httpwrite(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
62 | {
63 | int c;
64 | char buf[LENGTH_DOMAIN + LENGTH_IP] = {0};
65 | char *token, *cur;
66 |
67 | memset(targetDomain, 0, LENGTH_DOMAIN);
68 | memset(targetIP, 0, LENGTH_IP);
69 | if (*ppos > 0 || count > (LENGTH_DOMAIN + LENGTH_IP))
70 | return -EFAULT;
71 | if (copy_from_user(buf, ubuf, count))
72 | return -EFAULT;
73 |
74 | c = strlen(buf);
75 | buf[c-1]=0; //remove new line
76 |
77 | printk(KERN_INFO "Read %s:%d", buf, c);
78 |
79 | 	// Use strsep to split the string; I don't know why sscanf doesn't work here...
80 | cur = buf;
81 | token = strsep(&cur, ",");
82 | memcpy(targetDomain, token, strlen(token));
83 | token = strsep(&cur, ",");
84 | memcpy(targetIP, token, strlen(token));
85 | printk(KERN_INFO "Read %s:%s", targetDomain, targetIP);
86 | *ppos = c;
87 | return c;
88 | }
89 |
90 | static ssize_t podIPwrite(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
91 | {
92 | int i;
93 | int c;
94 | char buf[MAX_POD * LENGTH_IP + 1] = {0};
95 | char *token, *cur;
96 |
97 | curPodSize = 0;
98 | 	// Initialize all podIP entries.
99 | for (i = 0; i < MAX_POD; i++) {
100 | memset(podIP[i], 0, LENGTH_IP);
101 | }
102 |
103 | if (*ppos > 0 || count > (MAX_POD * LENGTH_IP + 1))
104 | return -EFAULT;
105 | if (copy_from_user(buf, ubuf, count))
106 | return -EFAULT;
107 |
108 | c = strlen(buf);
109 | buf[c-1]=0; //remove new line
110 |
111 | cur = buf;
112 | 	while ((token = strsep(&cur, ",")) && curPodSize < MAX_POD) {
113 | 		strlcpy(podIP[curPodSize++], token, LENGTH_IP); /* bounded copy; also guards the array above */
114 | }
115 |
116 | *ppos = c;
117 | return c;
118 | }
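/*
 * Usage sketch (inferred from the three write handlers above, not part of
 * the original source): after loading the module, configure it from
 * userspace by writing to the procfs entries it creates, e.g.
 *
 *   echo "10.96.0.10"             > /proc/k8s/clusterIP
 *   echo "example.com,10.244.1.5" > /proc/k8s/http
 *   echo "10.244.1.5,10.244.2.7"  > /proc/k8s/podIP
 *
 * Each write replaces the previous value; podIP accepts up to MAX_POD
 * comma-separated addresses.
 */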
119 |
120 | //proc operations
121 | static struct file_operations clusterIPops =
122 | {
123 | .owner = THIS_MODULE,
124 | .write = clusterIPwrite,
125 | };
126 |
127 | static struct file_operations httpops =
128 | {
129 | .owner = THIS_MODULE,
130 | .write = httpwrite,
131 | };
132 |
133 | static struct file_operations podIPops =
134 | {
135 | .owner = THIS_MODULE,
136 | .write = podIPwrite,
137 | };
138 |
139 |
140 |
141 | static struct proc_dir_entry *entClusterIP;
142 | static struct proc_dir_entry *entHTTP;
143 | static struct proc_dir_entry *entPodIP;
144 |
145 | static unsigned int getDestinationIP(const struct sk_buff *skb)
146 | {
147 | struct iphdr *iph; /* IPv4 header */
148 | iph = ip_hdr(skb); /* get IP header */
149 |
150 | //We only check UDP
151 | if (iph->protocol != IPPROTO_UDP)
152 | return 0;
153 | return iph->daddr;
154 | }
155 |
156 |
157 | enum MATCH_RESULT {
158 | ROLL_BACK = 0,
159 | SUCCESS = 1,
160 | FAIL = 2,
161 | };
162 | /*
163 | * 0 -> roll back to probability
164 | * 1 -> match success
165 | * 2 -> match fail
166 | */
167 | static enum MATCH_RESULT checkL7LB(struct sk_buff *skb)
168 | {
169 | struct udphdr *udph; /* UDP header */
170 | unsigned char *user_data; /* UDP data begin pointer */
171 | enum MATCH_RESULT ret = ROLL_BACK;
172 |
173 | udph = udp_hdr(skb); /* get UDP header */
174 | /* Calculate pointers for begin and end of UDP packet data */
175 | user_data = (unsigned char *)((unsigned char *)udph + sizeof(struct udphdr));
176 |
177 | 	// Print out the pod IP currently selected for this packet.
178 | 	// We use the low 4 bits of skb->mark as a counter; k8s uses bits 12-16.
179 | printk(KERN_INFO "current IP is %s", podIP[(skb->mark&(0x000f))]);
180 |
181 | if (strlen(user_data) >= strlen(targetDomain)) {
182 | if (0 != (strncmp(user_data, targetDomain, strlen(targetDomain))))
183 | goto END;
184 |
185 | if (strlen(podIP[(skb->mark&(0x000f))]) != strlen(targetIP))
186 | {
187 | ret = FAIL;
188 | goto END;
189 | }
190 | 		// Return success if the current pod IP is the target IP.
191 | if (0 != (strncmp(podIP[(skb->mark&(0x000f))], targetIP, strlen(targetIP))))
192 | {
193 | ret = FAIL;
194 | goto END;
195 | }
196 |
197 | 		printk(KERN_INFO "Found the target!\n");
198 | ret = SUCCESS;
199 | }
200 |
201 | END:
202 | skb->mark++;
203 | return ret;
204 | }
205 |
206 | static bool
207 | statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
208 | {
209 | const struct xt_statistic_info *info = par->matchinfo;
210 | bool ret = info->flags & XT_STATISTIC_INVERT;
211 | int nval, oval;
212 |
213 | switch (info->mode) {
214 | case XT_STATISTIC_MODE_RANDOM:
215 | //If we set the clusterIP, try to match.
216 | if (strlen(clusterIP)!=0) {
217 | unsigned int cluster_ip = in_aton(clusterIP);
218 | unsigned int dest_ip = getDestinationIP(skb);
219 | enum MATCH_RESULT result;
220 |
221 | printk(KERN_INFO "try to match %d:%d", cluster_ip, dest_ip);
222 | if (cluster_ip == dest_ip) {
223 | printk(KERN_INFO "match the IP address");
224 | result = checkL7LB((struct sk_buff*)skb);
225 |
226 | if (result == SUCCESS)
227 | return true;
228 | else if (result == FAIL)
229 | return false;
230 | }
231 | }
232 | if ((prandom_u32() & 0x7FFFFFFF) < info->u.random.probability)
233 | ret = !ret;
234 | break;
235 | case XT_STATISTIC_MODE_NTH:
236 | do {
237 | oval = atomic_read(&info->master->count);
238 | nval = (oval == info->u.nth.every) ? 0 : oval + 1;
239 | } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
240 | if (nval == 0)
241 | ret = !ret;
242 | break;
243 | }
244 |
245 | return ret;
246 | }
247 |
248 | static int statistic_mt_check(const struct xt_mtchk_param *par)
249 | {
250 | struct xt_statistic_info *info = par->matchinfo;
251 |
252 | if (info->mode > XT_STATISTIC_MODE_MAX ||
253 | info->flags & ~XT_STATISTIC_MASK)
254 | return -EINVAL;
255 |
256 | info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
257 | if (info->master == NULL)
258 | return -ENOMEM;
259 | atomic_set(&info->master->count, info->u.nth.count);
260 |
261 | return 0;
262 | }
263 |
264 | static void statistic_mt_destroy(const struct xt_mtdtor_param *par)
265 | {
266 | const struct xt_statistic_info *info = par->matchinfo;
267 |
268 | kfree(info->master);
269 | }
270 |
271 | static struct xt_match xt_statistic_mt_reg __read_mostly = {
272 | .name = "statistic",
273 | .revision = 0,
274 | .family = NFPROTO_UNSPEC,
275 | .match = statistic_mt,
276 | .checkentry = statistic_mt_check,
277 | .destroy = statistic_mt_destroy,
278 | .matchsize = sizeof(struct xt_statistic_info),
279 | .usersize = offsetof(struct xt_statistic_info, master),
280 | .me = THIS_MODULE,
281 | };
282 |
283 | char *dirname="k8s";
284 | struct proc_dir_entry *parent;
285 | static int __init statistic_mt_init(void)
286 | {
287 | parent = proc_mkdir(dirname, NULL);
288 | if (parent == NULL)
289 | return -ENOMEM;
290 | entClusterIP = proc_create("clusterIP",0220, parent, &clusterIPops);
291 | if (entClusterIP == NULL)
292 | return -ENOMEM;
293 | entHTTP = proc_create("http",0220, parent, &httpops);
294 | if (entHTTP == NULL)
295 | return -ENOMEM;
296 | entPodIP = proc_create("podIP",0220, parent, &podIPops);
297 | if (entPodIP == NULL)
298 | return -ENOMEM;
299 | 	printk(KERN_INFO "created three procfs entries\n");
300 | return xt_register_match(&xt_statistic_mt_reg);
301 | }
302 |
303 | static void __exit statistic_mt_exit(void)
304 | {
305 | //Debug Message
306 | int i = 0;
307 | printk(KERN_INFO "exit module \n");
308 | printk(KERN_INFO "%s\n", clusterIP);
309 | printk(KERN_INFO "%s->%s\n", targetDomain,targetIP);
310 | printk(KERN_INFO "podIP size %d", curPodSize);
311 | for (i = 0; i < curPodSize; i++)
312 | printk(KERN_INFO "%s\n", podIP[i]);
313 |
314 | if (entClusterIP)
315 | proc_remove(entClusterIP);
316 | if (entHTTP)
317 | proc_remove(entHTTP);
318 | if (entPodIP)
319 | proc_remove(entPodIP);
320 | if (parent)
321 | proc_remove(parent);
322 | xt_unregister_match(&xt_statistic_mt_reg);
323 | }
324 |
325 | module_init(statistic_mt_init);
326 | module_exit(statistic_mt_exit);
327 |
--------------------------------------------------------------------------------