├── Basic_troubleshooting
├── README.md
├── hello-world-bad.yaml
└── hello-world-dep.yaml
├── Job&CronJob
├── commands.txt
├── mysql.sql
├── mysql.yaml
├── mysql1.sql
└── mysql_job-dataseed.yaml
├── README.md
├── Votingapp-Deployment
├── advanced
│ ├── commands.txt
│ ├── ingress-resource.yml
│ ├── namespace.yaml
│ ├── networkpolicy.yaml
│ ├── postgress-deploy.yml
│ ├── postgress-service.yml
│ ├── redis-deploy.yml
│ ├── redis-service.yml
│ ├── resourcequota.yaml
│ ├── result-app-service.yml
│ ├── resultapp-deploy.yml
│ ├── storageClass.yaml
│ ├── voting-app-deploy.yml
│ ├── voting-app-service.yml
│ └── worker-deploy.yml
└── normal-dep
│ ├── postgress-deploy.yml
│ ├── postgress-service.yml
│ ├── redis-deploy.yml
│ ├── redis-service.yml
│ ├── result-app-service.yml
│ ├── resultapp-deploy.yml
│ ├── voting-app-deploy.yml
│ ├── voting-app-service.yml
│ └── worker-deploy.yml
├── basic-objects
├── deployment
│ └── deployment.yaml
├── pod
│ └── pod.yaml
└── replica
│ └── replicaset.yaml
├── config-map-subpath
├── README.md
├── basic-config-map.yaml
├── basic-pod.yaml
├── config_map.yaml
├── mysql-config-subpath.yaml
├── mysql-with-config.yaml
└── mysql-without-config.yaml
├── datadump
├── mysql.sql
└── mysql1.sql
├── datree
├── k8s-demo.yaml
└── myapp
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── templates
│ ├── _helpers.tpl
│ ├── deployment.yaml
│ └── service.yaml
│ └── values.yaml
├── deamonsets
└── app.yaml
├── deployment-strategies
├── blue-green
│ └── single-service
│ │ ├── app-v1.yaml
│ │ └── app-v2.yaml
├── canary
│ └── native
│ │ ├── app-v1.yaml
│ │ └── app-v2.yaml
├── recreate
│ ├── app-v1.yaml
│ └── app-v2.yaml
└── rolling-update
│ ├── app-v1.yaml
│ └── app-v2.yaml
├── ingress
├── README.md
└── sample-example.yaml
├── kube_config
├── kubernetes-conatiner-patterns
├── Ambassador
│ ├── pod.yaml
│ └── service.yaml
├── adapter
│ ├── README.md
│ ├── adapter.yaml
│ ├── configmap.yaml
│ └── service.yaml
├── init-postgres-db
│ ├── commands.txt
│ ├── database.yaml
│ ├── deployment.yaml
│ ├── init-deployment.yaml
│ ├── secret.yaml
│ └── service.yaml
└── init-sidecar
│ ├── config_map_aws.yaml
│ ├── config_map_nginx.yaml
│ ├── nginx_dep.yaml
│ └── nginxservice.yaml
├── kubernetes-hpa
├── Dockerfile
├── README.md
├── hpa.yaml
├── hpa.yml
├── index.jsp
├── metric-server.yaml
└── php-apache.yaml
├── kubernetes-liveness-readiness
├── commands.txt
├── pod-deployment.yaml
├── pod-live-readiness-deployment.yaml
├── pod-liveness-deployment.yaml
├── pod-readiness-deplyment.yaml
└── probe-service.yaml
├── kubernetes-vpa
├── README.md
├── deployment.yaml
├── deployment_normal.yaml
├── vpa-updatemode-off.yaml
├── vpa-updatemode-on.yaml
└── vpa
│ ├── Dockerfile
│ ├── README.md
│ ├── app.go
│ ├── deployment.yaml
│ ├── traffic-generator.yaml
│ └── vpa.yaml
├── monitoring
├── kubernetes-elk
│ ├── curator-cronjob.yaml
│ ├── elasticsearch-ss.yaml
│ ├── filebeat-ds.yaml
│ ├── kibana-deployment.yaml
│ ├── logstash-deployment.yaml
│ └── metricbeat-ds.yaml
├── kubernetes-grafana
│ ├── README.md
│ ├── deployment.yaml
│ ├── grafana-datasource-config.yaml
│ └── service.yaml
└── kubernetes-prometheus
│ ├── README.md
│ ├── clusterRole.yaml
│ ├── config-map.yaml
│ ├── prometheus-deployment.yaml
│ ├── prometheus-ingress.yaml
│ └── prometheus-service.yaml
├── namespace
├── basic-resource-quota.yaml
├── deploy.yaml
├── limitrange
│ ├── limitrange.yaml
│ ├── pod.yaml
│ ├── pod_max.yaml
│ ├── pod_min.yaml
│ └── pod_no_rc.yaml
├── pod.yaml
├── rc-with-resources.yaml
├── resource-q1.yaml
└── resource-q2.yaml
├── network-policy
├── README.md
├── foo-allow-to-hello.yaml
├── hello-allow-from-foo.yaml
├── more-info
│ ├── 00-create-cluster.md
│ ├── 01-deny-all-traffic-to-an-application.md
│ ├── 02-limit-traffic-to-an-application.md
│ ├── 02a-allow-all-traffic-to-an-application.md
│ ├── 03-deny-all-non-whitelisted-traffic-in-the-namespace.md
│ ├── 04-deny-traffic-from-other-namespaces.md
│ ├── 05-allow-traffic-from-all-namespaces.md
│ ├── 06-allow-traffic-from-a-namespace.md
│ ├── 07-allow-traffic-from-some-pods-in-another-namespace.md
│ ├── 08-allow-external-traffic.md
│ ├── 09-allow-traffic-only-to-a-port.md
│ ├── 10-allowing-traffic-with-multiple-selectors.md
│ ├── 11-deny-egress-traffic-from-an-application.md
│ ├── 12-deny-all-non-whitelisted-traffic-from-the-namespace.md
│ ├── 14-deny-external-egress-traffic.md
│ ├── CONTRIBUTING.md
│ ├── LICENSE
│ ├── README.md
│ └── img
│ │ ├── 1.gif
│ │ ├── 2.gif
│ │ ├── 3.gif
│ │ ├── 4.gif
│ │ ├── 5.gif
│ │ ├── 6.gif
│ │ ├── 8.gif
│ │ └── 9.gif
└── web-allow-all-ns-monitoring.yaml
├── node-selector
├── node-affinity.yaml
└── pod.yml
├── operators
├── dotnet-application
│ ├── dockerfile
│ └── src
│ │ ├── Pages
│ │ ├── Error.cshtml
│ │ ├── Error.cshtml.cs
│ │ ├── Index.cshtml
│ │ ├── Index.cshtml.cs
│ │ ├── Privacy.cshtml
│ │ ├── Privacy.cshtml.cs
│ │ ├── Shared
│ │ │ ├── _CookieConsentPartial.cshtml
│ │ │ ├── _Layout.cshtml
│ │ │ └── _ValidationScriptsPartial.cshtml
│ │ ├── _ViewImports.cshtml
│ │ └── _ViewStart.cshtml
│ │ ├── Program.cs
│ │ ├── Properties
│ │ └── launchSettings.json
│ │ ├── Startup.cs
│ │ ├── appsettings.Development.json
│ │ ├── appsettings.json
│ │ ├── work.csproj
│ │ └── wwwroot
│ │ ├── css
│ │ └── site.css
│ │ ├── favicon.ico
│ │ ├── js
│ │ └── site.js
│ │ └── lib
│ │ ├── bootstrap
│ │ ├── LICENSE
│ │ └── dist
│ │ │ ├── css
│ │ │ ├── bootstrap-grid.css
│ │ │ ├── bootstrap-grid.css.map
│ │ │ ├── bootstrap-grid.min.css
│ │ │ ├── bootstrap-grid.min.css.map
│ │ │ ├── bootstrap-reboot.css
│ │ │ ├── bootstrap-reboot.css.map
│ │ │ ├── bootstrap-reboot.min.css
│ │ │ ├── bootstrap-reboot.min.css.map
│ │ │ ├── bootstrap.css
│ │ │ ├── bootstrap.css.map
│ │ │ ├── bootstrap.min.css
│ │ │ └── bootstrap.min.css.map
│ │ │ └── js
│ │ │ ├── bootstrap.bundle.js
│ │ │ ├── bootstrap.bundle.js.map
│ │ │ ├── bootstrap.bundle.min.js
│ │ │ ├── bootstrap.bundle.min.js.map
│ │ │ ├── bootstrap.js
│ │ │ ├── bootstrap.js.map
│ │ │ ├── bootstrap.min.js
│ │ │ └── bootstrap.min.js.map
│ │ ├── jquery-validation-unobtrusive
│ │ ├── LICENSE.txt
│ │ ├── jquery.validate.unobtrusive.js
│ │ └── jquery.validate.unobtrusive.min.js
│ │ ├── jquery-validation
│ │ ├── LICENSE.md
│ │ └── dist
│ │ │ ├── additional-methods.js
│ │ │ ├── additional-methods.min.js
│ │ │ ├── jquery.validate.js
│ │ │ └── jquery.validate.min.js
│ │ └── jquery
│ │ ├── LICENSE.txt
│ │ └── dist
│ │ ├── jquery.js
│ │ ├── jquery.min.js
│ │ └── jquery.min.map
├── go-application
│ ├── dockerfile
│ └── main.go
├── nodejs-application
│ ├── dockerfile
│ └── src
│ │ ├── package-lock.json
│ │ ├── package.json
│ │ └── server.js
├── prometheous
│ ├── 1.14.8
│ │ ├── alertmanager
│ │ │ ├── alertmanager.secret.yaml
│ │ │ ├── alertmanager.sericeaccount.yaml
│ │ │ ├── alertmanager.service.yaml
│ │ │ ├── alertmanager.servicemonitor.yaml
│ │ │ └── alertmanager.yaml
│ │ ├── grafana
│ │ │ ├── grafana-deployment.yaml
│ │ │ ├── grafana-service.yaml
│ │ │ ├── grafana.configmap.yaml
│ │ │ ├── grafana.dashboards.configmap.yaml
│ │ │ └── grafana.serviceaccount.yaml
│ │ ├── kube-state-metrics
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ └── service.yaml
│ │ ├── node-exporter
│ │ │ ├── node-exporter.yaml
│ │ │ └── service-monitor.yaml
│ │ ├── prometheus-cluster-monitoring
│ │ │ ├── apiserver.servicemonitor.yaml
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── kubelet.servicemonitor.yaml
│ │ │ ├── prometheus-alerts.yaml
│ │ │ ├── prometheus.rules.yaml
│ │ │ ├── prometheus.service.yaml
│ │ │ ├── prometheus.yaml
│ │ │ └── service-account.yaml
│ │ ├── prometheus-operator
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ └── service.yaml
│ │ ├── prometheus-standalone
│ │ │ ├── apps.service-monitor.yaml
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── prometheus.service.yaml
│ │ │ └── prometheus.yaml
│ │ └── readme.md
│ ├── 1.15-1.17
│ │ ├── alertmanager
│ │ │ ├── alertmanager-secret.yaml
│ │ │ ├── alertmanager-sericeaccount.yaml
│ │ │ ├── alertmanager-service.yaml
│ │ │ ├── alertmanager-servicemonitor.yaml
│ │ │ └── alertmanager.yaml
│ │ ├── grafana
│ │ │ ├── dashboard-nodeexporter-custom.yaml
│ │ │ ├── grafana-dashboardDatasources.yaml
│ │ │ ├── grafana-dashboardDefinitions.yaml
│ │ │ ├── grafana-dashboardSources.yaml
│ │ │ ├── grafana-deployment.yaml
│ │ │ ├── grafana-service.yaml
│ │ │ └── grafana-serviceAccount.yaml
│ │ ├── kube-state-metrics
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── role-binding.yaml
│ │ │ ├── role.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ └── service.yaml
│ │ ├── node-exporter
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── daemonset.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ └── service.yaml
│ │ ├── prometheus-cluster-monitoring
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── prometheus.rules.yaml
│ │ │ ├── prometheus.service.yaml
│ │ │ ├── prometheus.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── servicemonitor-apiserver.yaml
│ │ │ └── servicemonitor-kubelet.yaml
│ │ ├── prometheus-operator
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── crd-alertmanager.yaml
│ │ │ ├── crd-podmonitor.yaml
│ │ │ ├── crd-prometheus.yaml
│ │ │ ├── crd-prometheusrule.yaml
│ │ │ ├── crd-servicemonitor.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── namepace.yaml
│ │ │ ├── service-account.yaml
│ │ │ └── service.yaml
│ │ └── readme.md
│ ├── 1.18.4
│ │ ├── alertmanager
│ │ │ ├── alertmanager-secret.yaml
│ │ │ ├── alertmanager-sericeaccount.yaml
│ │ │ ├── alertmanager-service.yaml
│ │ │ ├── alertmanager-servicemonitor.yaml
│ │ │ └── alertmanager.yaml
│ │ ├── grafana
│ │ │ ├── dashboard-nodeexporter-custom.yaml
│ │ │ ├── grafana-dashboardDatasources.yaml
│ │ │ ├── grafana-dashboardDefinitions.yaml
│ │ │ ├── grafana-dashboardSources.yaml
│ │ │ ├── grafana-deployment.yaml
│ │ │ ├── grafana-service.yaml
│ │ │ └── grafana-serviceAccount.yaml
│ │ ├── kube-state-metrics
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ └── service.yaml
│ │ ├── node-exporter
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── daemonset.yaml
│ │ │ ├── service-account.yaml
│ │ │ ├── service-monitor.yaml
│ │ │ └── service.yaml
│ │ ├── prometheus-cluster-monitoring
│ │ │ ├── apiserver.servicemonitor.yaml
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── kubelet.servicemonitor.yaml
│ │ │ ├── prometheus-alerts.yaml
│ │ │ ├── prometheus.rules.yaml
│ │ │ ├── prometheus.service.yaml
│ │ │ ├── prometheus.yaml
│ │ │ └── service-account.yaml
│ │ ├── prometheus-operator
│ │ │ ├── cluster-role-binding.yaml
│ │ │ ├── cluster-role.yaml
│ │ │ ├── crd-alertmanager.yaml
│ │ │ ├── crd-podmonitor.yaml
│ │ │ ├── crd-prometheus.yaml
│ │ │ ├── crd-prometheusrule.yaml
│ │ │ ├── crd-servicemonitor.yaml
│ │ │ ├── crd-thanosruler.yaml
│ │ │ ├── deployment.yaml
│ │ │ ├── namepace.yaml
│ │ │ ├── service-account.yaml
│ │ │ └── service.yaml
│ │ └── readme.md
│ └── readme.md
└── python-application
│ ├── dockerfile
│ └── src
│ ├── requirements.txt
│ └── server.py
├── pod-affinity
├── redis.yaml
└── web-server.yaml
├── pod-distruption-budget
├── README.md
├── nginx-deploy.yaml
└── pdb.yaml
├── pod-security-policy
├── nginx-deployment-kubesystem.yaml
├── nginx-deployment.yaml
├── nginx-hostnetwork-deployment-kubesystem.yaml
├── nginx-hostnetwork-deployment-sa.yaml
├── nginx-hostnetwork-deployment.yaml
├── psp-permissive-clusterrole.yaml
├── psp-permissive-rolebinding.yaml
├── psp-permissive.yaml
├── psp-restrictive-clusterrole.yaml
├── psp-restrictive-rolebinding.yaml
├── psp-restrictive.yaml
├── psp
│ ├── README.md
│ ├── permissive-clusterrole.yaml
│ ├── permissive-psp.yaml
│ ├── privileged-pod.yaml
│ ├── restrictive-clusterrole.yaml
│ └── restrictive-psp.yaml
└── specialsa-psp-permissive.yaml
├── private-registry
├── hosts
├── private-registry.yaml
└── templates
│ ├── app.crt
│ └── daemon.json
├── scalling-jenkins
├── README.md
├── master-Persistent
│ ├── Dockerfile
│ ├── build.sh
│ ├── cluster-role.yaml
│ ├── jenkins-deployment.yaml
│ ├── jenkins-pv.yaml
│ └── jenkins-pvc.yaml
├── master
│ ├── Dockerfile
│ ├── jenkins-dep.yaml
│ └── jenkins-service.yaml
└── slave
│ ├── Dockerfile
│ └── jenkins-agent
├── secrets
├── command.txt
├── database.yaml
└── secret.yaml
├── services
├── cluster-ip
│ ├── app.yaml
│ ├── command.txt
│ └── service.yaml
├── loadbalancer
│ ├── app.yaml
│ └── service.yaml
├── mysql-headless-service
│ ├── README.md
│ ├── commands.txt
│ ├── mysql-configmap.yaml
│ ├── mysql-services.yaml
│ ├── mysql-statefulset.yaml
│ └── storageclass.yaml
└── nodeport
│ ├── app.yaml
│ └── service.yaml
├── special-usecase
├── Get-external-in-pod
│ ├── Dockerfile
│ ├── README.md
│ ├── kube.yml
│ ├── nodes.json
│ └── pods.json
├── out-of-rotation
│ ├── Dockerfile
│ └── app.yaml
└── prometheus-https-json
│ ├── kube.yaml
│ ├── output.json
│ ├── prometheus.txt
│ └── prometheus.yaml
├── taint-tolerations
├── README.md
├── pod-normal.yml
├── pod.yml
└── taint_affinity.yaml
└── volume
├── emptyDir
└── pod.yaml
├── hostpath
└── pod.yaml
├── nfs-pv
├── README.md
└── static-provision-deployment
│ ├── nfs-deployment.yaml
│ ├── nfs-pv.yaml
│ └── nfs-pvc.yaml
├── volume-aws
├── dynamic-provision
│ ├── command.txt
│ ├── pod.yaml
│ ├── pvc.yaml
│ └── storageclass.yaml
└── static-provision
│ ├── README.md
│ ├── pod.yaml
│ ├── pv.yaml
│ └── pvc.yaml
└── volume-gcp
├── README.md
├── dynamic-provision
├── deployment.yaml
├── pod.yaml
├── pvc.yaml
└── storageclass.yaml
└── static-provision
├── deployment
├── README.md
├── deployment.yaml
├── pod.yaml
├── pv.yaml
└── pvc.yaml
└── statefulset
└── pod.yaml
/Basic_troubleshooting/hello-world-bad.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: bad-helloworld-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: bad-helloworld
9 | replicas: 1 # tells deployment to run 1 pods matching the template
10 | template: # create pods using pod definition in this template
11 | metadata:
12 | labels:
13 | app: bad-helloworld
14 | spec:
15 | containers:
16 | - name: helloworld
17 | image: deekshithsn/unkown-pod:latest
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/Basic_troubleshooting/hello-world-dep.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: helloworld-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: helloworld
9 | replicas: 1 # tells deployment to run 1 pods matching the template
10 | template: # create pods using pod definition in this template
11 | metadata:
12 | labels:
13 | app: helloworld
14 | spec:
15 | containers:
16 | - name: helloworld
17 | image: busybox
18 | ports:
19 | - containerPort: 80
20 |
--------------------------------------------------------------------------------
/Job&CronJob/commands.txt:
--------------------------------------------------------------------------------
1 | mysql -h localhost -u root -padmin test
2 |
3 | select * from messages;
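4 | 
5 | # sketch: assuming the mysql Deployment from mysql.yaml is running, the same prompt can be reached without shelling into the pod first:
6 | kubectl exec -it deploy/mysql -- mysql -h localhost -u root -padmin test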
--------------------------------------------------------------------------------
/Job&CronJob/mysql.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE test.messages (message VARCHAR(250));
2 | INSERT INTO test.messages VALUES ('hello');
3 | INSERT INTO test.messages VALUES ('hey');
4 |
--------------------------------------------------------------------------------
/Job&CronJob/mysql.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: mysql
5 | spec:
6 | ports:
7 | - port: 3306
8 | selector:
9 | app: mysql
10 | clusterIP: None
11 | ---
12 | apiVersion: apps/v1
13 | kind: Deployment
14 | metadata:
15 | name: mysql
16 | spec:
17 | selector:
18 | matchLabels:
19 | app: mysql
20 | strategy:
21 | type: Recreate
22 | template:
23 | metadata:
24 | labels:
25 | app: mysql
26 | spec:
27 | containers:
28 | - image: mysql:5.6
29 | name: mysql
30 | env:
31 | # Use secret in real usage
32 | - name: MYSQL_USER
33 | value: deekshithsn
34 | - name: MYSQL_PASSWORD
35 | value: password
36 | - name: MYSQL_ROOT_PASSWORD
37 | value: admin
38 | ports:
39 | - containerPort: 3306
40 | name: mysql
41 |
--------------------------------------------------------------------------------
/Job&CronJob/mysql1.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO test.messages VALUES ('Devops');
2 | INSERT INTO test.messages VALUES ('DeekshithSN');
--------------------------------------------------------------------------------
/Job&CronJob/mysql_job-dataseed.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: my-sql-dump
5 | spec:
6 | ttlSecondsAfterFinished: 10
7 | template:
8 | metadata:
9 | name: my-sql-dump
10 | spec:
11 | containers:
12 | - name: my-sql-seed
13 | image: jlsrly/mysql-git
14 | command:
15 | - 'bash'
16 | - '-c'
17 | - |
18 | git clone https://github.com/DeekshithSN/kubernetes.git
19 | mysql -h mysql -u root -padmin -e 'CREATE DATABASE IF NOT EXISTS test';
20 | mysql -h mysql -u root -padmin test < kubernetes/datadump/mysql.sql
21 | sleep 20
22 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # kubernetes
2 |
3 | ## ephemeral containers
4 |
5 |
6 | ## ingress
7 |
8 | https://kubernetes.io/docs/concepts/services-networking/ingress/
9 |
10 | ## kubernetes commands link
11 |
12 | https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#create
13 |
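14 | For the "ephemeral containers" section above, a minimal sketch (pod and container names are placeholders; needs a recent cluster and kubectl):
15 | 
16 | ``` kubectl debug -it <pod-name> --image=busybox:1.28 --target=<container-name> ```
17 | 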
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl run postgresql-postgresql-client --rm --tty -i --restart='Never' --namespace votingapp --image bitnami/postgresql --env="PGPASSWORD=postgres" --command -- psql --host db -U postgres
2 |
3 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/ingress-resource.yml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: Ingress
3 | metadata:
4 | name: example-ingress
5 | annotations:
6 | nginx.ingress.kubernetes.io/rewrite-target: /$1
7 | spec:
8 | rules:
9 | - host: votingapp.com
10 | http:
11 | paths:
12 | - path: /vote
13 | pathType: Prefix
14 | backend:
15 | service:
16 | name: result-service
17 | port:
18 | number: 8080
19 | - path: /result
20 | pathType: Prefix
21 | backend:
22 | service:
23 | name: voting-service
24 | port:
25 | number: 8080
26 |
27 | #
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/namespace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: votingapp
5 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/networkpolicy.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | name: hello-allow-from-foo
5 | spec:
6 | policyTypes:
7 | - Ingress
8 | - Egress
9 | podSelector:
10 | matchLabels:
11 | name: postgress-pod
12 | app: demo-voting-app
13 | ingress:
14 | - from:
15 | - podSelector:
16 | matchLabels:
17 | name: worker-deploy
18 | app: demo-voting-app
19 | egress:
20 | - to:
21 | - podSelector:
22 | matchLabels:
23 | name: result-app-pod
24 | app: demo-voting-app
25 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/postgress-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: StatefulSet
3 | metadata:
4 | name: postgres-deploy
5 | labels:
6 | name: postgress-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: postgress-pod
13 | app: demo-voting-app
14 | serviceName: postgress-service
15 | template:
16 | metadata:
17 | name: postgres-pod
18 | labels:
19 | name: postgress-pod
20 | app: demo-voting-app
21 | spec:
22 | containers:
23 | - name: postgres
24 | image: postgres
25 | ports:
26 | - containerPort: 5432
27 | env:
28 | - name: POSTGRES_USER
29 | value: "postgres"
30 | - name: POSTGRES_PASSWORD
31 | value: "postgres"
32 | volumeMounts:
33 | - mountPath: /var/lib/postgresql/data
34 | name: postgredb
35 | subPath: postgres
36 | volumeClaimTemplates:
37 | - metadata:
38 | name: postgredb
39 | spec:
40 | storageClassName: manual
41 | accessModes: ["ReadWriteOnce"]
42 | resources:
43 | requests:
44 | storage: 10Gi
45 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/postgress-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: db
5 | labels:
6 | name: postgress-service
7 | app: demo-voting-app
8 | spec:
9 | ports:
10 | - port: 5432
11 | targetPort: 5432
12 | selector:
13 | name: postgress-pod
14 | app: demo-voting-app
15 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/redis-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: redis-deploy
5 | labels:
6 | name: redis-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 2
10 | selector:
11 | matchLabels:
12 | name: redis-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: redis-pod
17 | labels:
18 | name: redis-pod
19 | app: demo-voting-app
20 | spec:
21 | containers:
22 | - name: redis
23 | image: redis
24 | ports:
25 | - containerPort: 6379
26 | affinity:
27 | podAntiAffinity:
28 | requiredDuringSchedulingIgnoredDuringExecution:
29 | - labelSelector:
30 | matchExpressions:
31 | - key: name
32 | operator: In
33 | values:
34 | - redis-pod
35 | topologyKey: "kubernetes.io/hostname"
36 |
37 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/redis-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis
5 | labels:
6 | name: redis-service
7 | app: demo-voting-app
8 | spec:
9 | ports:
10 | - port: 6379
11 | targetPort: 6379
12 | selector:
13 | name: redis-pod
14 | app: demo-voting-app
15 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/resourcequota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: voting-app-rc
5 | spec:
6 | hard:
7 | configmaps: "10"
8 | persistentvolumeclaims: "4"
9 | pods: "20"
10 | replicationcontrollers: "20"
11 | secrets: "10"
12 | services: "20"
13 | services.loadbalancers: "2"
14 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/result-app-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: result-service
5 | labels:
6 | name: result-service
7 | app: demo-voting-app
8 | spec:
9 | type: NodePort
10 | ports:
11 | - port: 80
12 | targetPort: 80
13 | nodePort: 30005
14 | selector:
15 | name: result-app-pod
16 | app: demo-voting-app
17 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/resultapp-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: result-app-deploy
5 | labels:
6 | name: result-app-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: result-app-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: result-app-pod
17 | labels:
18 | name: result-app-pod
19 | app: demo-voting-app
20 | spec:
21 | containers:
22 | - name: result-app
23 | image: kodekloud/examplevotingapp_result:v1
24 | ports:
25 | - containerPort: 80
26 |
27 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/storageClass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: manual
5 | provisioner: kubernetes.io/gce-pd
6 | parameters:
7 | type: pd-standard
8 | fstype: ext4
9 | replication-type: none
10 | reclaimPolicy: Retain
11 | allowVolumeExpansion: true
12 | mountOptions:
13 | - debug
14 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/voting-app-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: voting-app-deploy
5 | labels:
6 | name: voting-app-deploy
7 | app: demo-voting-app
8 | spec:
9 | template:
10 | metadata:
11 | name: voting-app-pod
12 | labels:
13 | name: voting-app-pod
14 | app: demo-voting-app
15 | spec:
16 | containers:
17 | - name: voting-app
18 | image: kodekloud/examplevotingapp_vote:v1
19 | ports:
20 | - containerPort: 80
21 | selector:
22 | matchLabels:
23 | name: voting-app-pod
24 | app: demo-voting-app
25 | replicas: 1
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/voting-app-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: voting-service
5 | labels:
6 | name: voting-service
7 | app: demo-voting-app
8 | spec:
9 | type: NodePort
10 | ports:
11 | - port: 80
12 | targetPort: 80
13 | nodePort: 30004
14 | selector:
15 | name: voting-app-pod
16 | app: demo-voting-app
17 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/advanced/worker-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: worker-deploy
5 | labels:
6 | name: worker-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: worker-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: worker-pod
17 | labels:
18 | name: worker-pod
19 | app: demo-voting-app
20 | spec:
21 | initContainers:
22 | - name: check-db-ready
23 | image: postgres:9.6.5
24 | command: ['sh', '-c','until pg_isready -h db -p 5432; do echo waiting for database; sleep 2; done;']
25 | containers:
26 | - name: worker-app
27 | image: kodekloud/examplevotingapp_worker:v1
28 | affinity:
29 | podAffinity:
30 | requiredDuringSchedulingIgnoredDuringExecution:
31 | - labelSelector:
32 | matchExpressions:
33 | - key: name
34 | operator: In
35 | values:
36 | - redis-pod
37 | topologyKey: "kubernetes.io/hostname"
38 |
39 |
40 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/postgress-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: postgres-deploy
5 | labels:
6 | name: postgress-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: postgress-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: postgres-pod
17 | labels:
18 | name: postgress-pod
19 | app: demo-voting-app
20 | spec:
21 | containers:
22 | - name: postgres
23 | image: postgres
24 | ports:
25 | - containerPort: 5432
26 | env:
27 | - name: POSTGRES_USER
28 | value: "postgres"
29 | - name: POSTGRES_PASSWORD
30 | value: "postgres"
31 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/postgress-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: db
5 | labels:
6 | name: postgress-service
7 | app: demo-voting-app
8 | spec:
9 | ports:
10 | - port: 5432
11 | targetPort: 5432
12 | selector:
13 | name: postgress-pod
14 | app: demo-voting-app
15 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/redis-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: redis-deploy
5 | labels:
6 | name: redis-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: redis-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: redis-pod
17 | labels:
18 | name: redis-pod
19 | app: demo-voting-app
20 | spec:
21 | containers:
22 | - name: redis
23 | image: redis
24 | ports:
25 | - containerPort: 6379
26 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/redis-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: redis
5 | labels:
6 | name: redis-service
7 | app: demo-voting-app
8 | spec:
9 | ports:
10 | - port: 6379
11 | targetPort: 6379
12 | selector:
13 | name: redis-pod
14 | app: demo-voting-app
15 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/result-app-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: result-service
5 | labels:
6 | name: result-service
7 | app: demo-voting-app
8 | spec:
9 | type: NodePort
10 | ports:
11 | - port: 80
12 | targetPort: 80
13 | nodePort: 30005
14 | selector:
15 | name: result-app-pod
16 | app: demo-voting-app
17 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/resultapp-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: result-app-deploy
5 | labels:
6 | name: result-app-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: result-app-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: result-app-pod
17 | labels:
18 | name: result-app-pod
19 | app: demo-voting-app
20 | spec:
21 | containers:
22 | - name: result-app
23 | image: kodekloud/examplevotingapp_result:v1
24 | ports:
25 | - containerPort: 80
26 |
27 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/voting-app-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: voting-app-deploy
5 | labels:
6 | name: voting-app-deploy
7 | app: demo-voting-app
8 | spec:
9 | template:
10 | metadata:
11 | name: voting-app-pod
12 | labels:
13 | name: voting-app-pod
14 | app: demo-voting-app
15 | spec:
16 | containers:
17 | - name: voting-app
18 | image: kodekloud/examplevotingapp_vote:v1
19 | ports:
20 | - containerPort: 80
21 | selector:
22 | matchLabels:
23 | name: voting-app-pod
24 | app: demo-voting-app
25 | replicas: 1
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/voting-app-service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: voting-service
5 | labels:
6 | name: voting-service
7 | app: demo-voting-app
8 | spec:
9 | type: NodePort
10 | ports:
11 | - port: 80
12 | targetPort: 80
13 | nodePort: 30004
14 | selector:
15 | name: voting-app-pod
16 | app: demo-voting-app
17 |
--------------------------------------------------------------------------------
/Votingapp-Deployment/normal-dep/worker-deploy.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: worker-deploy
5 | labels:
6 | name: worker-deploy
7 | app: demo-voting-app
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | name: worker-pod
13 | app: demo-voting-app
14 | template:
15 | metadata:
16 | name: worker-pod
17 | labels:
18 | name: worker-pod
19 | app: demo-voting-app
20 | spec:
21 | containers:
22 | - name: worker-app
23 | image: kodekloud/examplevotingapp_worker:v1
24 |
25 |
26 |
27 |
--------------------------------------------------------------------------------
/basic-objects/deployment/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | labels:
6 | app: nginx
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: nginx
12 | template:
13 | metadata:
14 | labels:
15 | app: nginx
16 | spec:
17 | containers:
18 | - name: nginx
19 | image: nginx:1.14.2
20 | ports:
21 | - containerPort: 80
22 |
--------------------------------------------------------------------------------
/basic-objects/pod/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: my-first-app
5 | labels:
6 | type: front-end
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx
11 |
--------------------------------------------------------------------------------
/basic-objects/replica/replicaset.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: ReplicaSet
3 | metadata:
4 | name: my-rs
5 | labels:
6 | type: front-end
7 | spec:
8 | template:
9 | metadata:
10 | name: my-pod
11 | labels:
12 | type: front-end
13 | spec:
14 | containers:
15 | - name: nginx
16 | image: nginx
17 | replicas: 6
18 | selector:
19 | matchLabels:
20 | type: front-end
21 |
--------------------------------------------------------------------------------
/config-map-subpath/README.md:
--------------------------------------------------------------------------------
1 | ## kubectl exec --stdin --tty pod_name -- sh
2 | - ls /etc/mysql/conf.d
3 |
4 | Kubernetes took the key name mysql_binlog_format.cnf and presented it as a file with the contents that were stored in the data section of the ConfigMap.
5 | The problem, however, is that it laid that volume on top of the existing directory.
6 | The default configuration files for mysql are no longer present.
7 | I'd have to create all the mysql configuration files and store them into the configMap.
8 | Or, I can use a subPath.
9 |
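10 | A minimal sketch of the subPath approach (volume and key names taken from config_map.yaml and mysql-with-config.yaml; compare with mysql-config-subpath.yaml in this folder):
11 | 
12 | ```yaml
13 | volumeMounts:
14 | - name: mysql-configmap-volume
15 |   mountPath: /etc/mysql/conf.d/mysql_binlog_format.cnf
16 |   subPath: mysql_binlog_format.cnf
17 | ```
18 | 
19 | With subPath only that single file is projected into the existing conf.d directory, so the default mysql configuration files stay in place.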
--------------------------------------------------------------------------------
/config-map-subpath/basic-config-map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: game-demo
5 | data:
6 | # property-like keys; each key maps to a simple value
7 | player_initial_lives: "3"
8 | ui_properties_file_name: "user-interface.properties"
9 |
--------------------------------------------------------------------------------
/config-map-subpath/basic-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: configmap-demo-pod
5 | spec:
6 | containers:
7 | - name: demo
8 | image: alpine
9 | command: ["sleep", "3600"]
10 | env:
11 | - name: PLAYER_INITIAL_LIVES
12 | valueFrom:
13 | configMapKeyRef:
14 | name: game-demo
15 | key: player_initial_lives
16 | - name: UI_PROPERTIES_FILE_NAME
17 | valueFrom:
18 | configMapKeyRef:
19 | name: game-demo
20 | key: ui_properties_file_name
21 |
--------------------------------------------------------------------------------
/config-map-subpath/config_map.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: mysql-configmap
5 | labels:
6 | app: mysql
7 | data:
8 | mysql_binlog_format.cnf: |
9 | [mysqld]
10 | binlog-format=mixed
11 |
12 |
13 |
--------------------------------------------------------------------------------
/config-map-subpath/mysql-with-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: mysql
5 | spec:
6 | ports:
7 | - port: 3306
8 | selector:
9 | app: mysql
10 | clusterIP: None
11 | ---
12 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
13 | kind: Deployment
14 | metadata:
15 | name: mysql
16 | spec:
17 | selector:
18 | matchLabels:
19 | app: mysql
20 | strategy:
21 | type: Recreate
22 | template:
23 | metadata:
24 | labels:
25 | app: mysql
26 | spec:
27 | containers:
28 | - image: mysql:5.6
29 | name: mysql
30 | env:
31 | # Use secret in real usage
32 | - name: MYSQL_ROOT_PASSWORD
33 | value: password
34 | ports:
35 | - containerPort: 3306
36 | name: mysql
37 | volumeMounts:
38 | - name: mysql-configmap-volume
39 | mountPath: /etc/mysql/conf.d
40 | volumes:
41 | - name: mysql-configmap-volume
42 | configMap:
43 | name: mysql-configmap
44 |
--------------------------------------------------------------------------------
/config-map-subpath/mysql-without-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: mysql
5 | spec:
6 | ports:
7 | - port: 3306
8 | selector:
9 | app: mysql
10 | clusterIP: None
11 | ---
12 | apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
13 | kind: Deployment
14 | metadata:
15 | name: mysql
16 | spec:
17 | selector:
18 | matchLabels:
19 | app: mysql
20 | strategy:
21 | type: Recreate
22 | template:
23 | metadata:
24 | labels:
25 | app: mysql
26 | spec:
27 | containers:
28 | - image: mysql:5.6
29 | name: mysql
30 | env:
31 | # Use secret in real usage
32 | - name: MYSQL_ROOT_PASSWORD
33 | value: password
34 | ports:
35 | - containerPort: 3306
36 | name: mysql
37 |
--------------------------------------------------------------------------------
/datadump/mysql.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE test.messages (message VARCHAR(250));
2 | INSERT INTO test.messages VALUES ('hello');
3 | INSERT INTO test.messages VALUES ('hey');
4 |
--------------------------------------------------------------------------------
/datadump/mysql1.sql:
--------------------------------------------------------------------------------
1 | INSERT INTO test.messages VALUES ('Devops');
2 | INSERT INTO test.messages VALUES ('Docker');
3 |
--------------------------------------------------------------------------------
/datree/myapp/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/datree/myapp/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: myapp
3 | description: A Helm chart for Kubernetes
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.2.0
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | # It is recommended to use it with quotes.
24 | appVersion: "1.16.0"
25 |
--------------------------------------------------------------------------------
/datree/myapp/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "myapp.fullname" . }}
5 | labels:
6 | {{- include "myapp.labels" . | nindent 4 }}
7 | spec:
8 | type: {{ .Values.service.type }}
9 | ports:
10 | - port: {{ .Values.service.port }}
11 | targetPort: http
12 | protocol: TCP
13 | name: http
14 | selector:
15 | {{- include "myapp.selectorLabels" . | nindent 4 }}
16 |
--------------------------------------------------------------------------------
/datree/myapp/values.yaml:
--------------------------------------------------------------------------------
1 | # Default values for myapp.
2 | # This is a YAML-formatted file.
3 | # Declare variables to be passed into your templates.
4 |
5 | replicaCount: 2
6 |
7 | image:
8 | repository: tomcat
9 | pullPolicy: IfNotPresent
10 | # Overrides the image tag whose default is the chart appVersion.
11 | tag: 1.21
12 |
13 | service:
14 | type: NodePort
15 | port: 8080
16 |
17 |
--------------------------------------------------------------------------------
/deamonsets/app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: DaemonSet
3 | metadata:
4 | name: fluentd-elasticsearch
5 | namespace: kube-system
6 | labels:
7 | k8s-app: fluentd-logging
8 | spec:
9 | selector:
10 | matchLabels:
11 | name: fluentd-elasticsearch
12 | template:
13 | metadata:
14 | labels:
15 | name: fluentd-elasticsearch
16 | spec:
17 | tolerations:
18 | # this toleration is to have the daemonset runnable on master nodes
19 | # remove it if your masters can't run pods
20 | - key: node-role.kubernetes.io/master
21 | operator: Exists
22 | effect: NoSchedule
23 | containers:
24 | - name: fluentd-elasticsearch
25 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
26 | resources:
27 | limits:
28 | memory: 200Mi
29 | requests:
30 | cpu: 100m
31 | memory: 200Mi
32 | volumeMounts:
33 | - name: varlog
34 | mountPath: /var/log
35 | - name: varlibdockercontainers
36 | mountPath: /var/lib/docker/containers
37 | readOnly: true
38 | terminationGracePeriodSeconds: 30
39 | volumes:
40 | - name: varlog
41 | hostPath:
42 | path: /var/log
43 | - name: varlibdockercontainers
44 | hostPath:
45 | path: /var/lib/docker/containers
46 |
47 |
--------------------------------------------------------------------------------
/deployment-strategies/blue-green/single-service/app-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | port: 80
12 | targetPort: http
13 |
14 | # Note here that we match both the app and the version
15 | selector:
16 | app: my-app
17 | version: v1.0.0
18 | ---
19 | apiVersion: apps/v1
20 | kind: Deployment
21 | metadata:
22 | name: my-app-v1
23 | labels:
24 | app: my-app
25 | spec:
26 | replicas: 3
27 | selector:
28 | matchLabels:
29 | app: my-app
30 | version: v1.0.0
31 | template:
32 | metadata:
33 | labels:
34 | app: my-app
35 | version: v1.0.0
36 | spec:
37 | containers:
38 | - name: my-app
39 | image: deekshithsn/k8s-deployment-strategies
40 | ports:
41 | - name: http
42 | containerPort: 8080
43 | - name: probe
44 | containerPort: 8086
45 | env:
46 | - name: VERSION
47 | value: v1.0.0
48 | livenessProbe:
49 | httpGet:
50 | path: /live
51 | port: probe
52 | initialDelaySeconds: 5
53 | periodSeconds: 5
54 | readinessProbe:
55 | httpGet:
56 | path: /ready
57 | port: probe
58 | periodSeconds: 5
59 |
--------------------------------------------------------------------------------
/deployment-strategies/blue-green/single-service/app-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app-v2
5 | labels:
6 | app: my-app
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | app: my-app
12 | version: v2.0.0
13 | template:
14 | metadata:
15 | labels:
16 | app: my-app
17 | version: v2.0.0
18 | spec:
19 | containers:
20 | - name: my-app
21 | image: deekshithsn/k8s-deployment-strategies
22 | ports:
23 | - name: http
24 | containerPort: 8080
25 | - name: probe
26 | containerPort: 8086
27 | env:
28 | - name: VERSION
29 | value: v2.0.0
30 | livenessProbe:
31 | httpGet:
32 | path: /live
33 | port: probe
34 | initialDelaySeconds: 5
35 | periodSeconds: 5
36 | readinessProbe:
37 | httpGet:
38 | path: /ready
39 | port: probe
40 | periodSeconds: 5
41 |
--------------------------------------------------------------------------------
/deployment-strategies/canary/native/app-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | port: 80
12 | targetPort: http
13 | selector:
14 | app: my-app
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: my-app-v1
20 | labels:
21 | app: my-app
22 | spec:
23 | replicas: 5
24 | selector:
25 | matchLabels:
26 | app: my-app
27 | version: v1.0.0
28 | template:
29 | metadata:
30 | labels:
31 | app: my-app
32 | version: v1.0.0
33 | spec:
34 | containers:
35 | - name: my-app
36 | image: deekshithsn/k8s-deployment-strategies
37 | ports:
38 | - name: http
39 | containerPort: 8080
40 | - name: probe
41 | containerPort: 8086
42 | env:
43 | - name: VERSION
44 | value: v1.0.0
45 | livenessProbe:
46 | httpGet:
47 | path: /live
48 | port: probe
49 | initialDelaySeconds: 5
50 | periodSeconds: 5
51 | readinessProbe:
52 | httpGet:
53 | path: /ready
54 | port: probe
55 | periodSeconds: 5
56 |
--------------------------------------------------------------------------------
/deployment-strategies/canary/native/app-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app-v2
5 | labels:
6 | app: my-app
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | app: my-app
12 | version: v2.0.0
13 | template:
14 | metadata:
15 | labels:
16 | app: my-app
17 | version: v2.0.0
18 | spec:
19 | containers:
20 | - name: my-app
21 | image: deekshithsn/k8s-deployment-strategies
22 | ports:
23 | - name: http
24 | containerPort: 8080
25 | - name: probe
26 | containerPort: 8086
27 | env:
28 | - name: VERSION
29 | value: v2.0.0
30 | livenessProbe:
31 | httpGet:
32 | path: /live
33 | port: probe
34 | initialDelaySeconds: 5
35 | periodSeconds: 5
36 | readinessProbe:
37 | httpGet:
38 | path: /ready
39 | port: probe
40 | periodSeconds: 5
41 |
--------------------------------------------------------------------------------
/deployment-strategies/recreate/app-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | port: 80
12 | targetPort: http
13 | selector:
14 | app: my-app
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: my-app
20 | labels:
21 | app: my-app
22 | spec:
23 | replicas: 3
24 | selector:
25 | matchLabels:
26 | app: my-app
27 | strategy:
28 | type: Recreate
32 | template:
33 | metadata:
34 | labels:
35 | app: my-app
36 | version: v1.0.0
37 | spec:
38 | containers:
39 | - name: my-app
40 | image: deekshithsn/k8s-deployment-strategies
41 | ports:
42 | - name: http
43 | containerPort: 8080
44 | - name: probe
45 | containerPort: 8086
46 | env:
47 | - name: VERSION
48 | value: v1.0.0
49 | livenessProbe:
50 | httpGet:
51 | path: /live
52 | port: probe
53 | initialDelaySeconds: 5
54 | periodSeconds: 5
55 | readinessProbe:
56 | httpGet:
57 | path: /ready
58 | port: probe
59 | periodSeconds: 5
60 |
--------------------------------------------------------------------------------
/deployment-strategies/recreate/app-v2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | replicas: 3
9 | strategy:
10 | type: Recreate
11 |
12 | # The selector field tells the deployment which pods to update with
13 | # the new version. If the pod template carries labels that change per
14 | # release, in this case the "version" label, matchLabels must be
15 | # defined without that label so the selector keeps matching both the
16 | # old and the new pods.
17 | selector:
18 | matchLabels:
19 | app: my-app
20 | template:
21 | metadata:
22 | labels:
23 | app: my-app
24 | version: v2.0.0
25 | spec:
26 | containers:
27 | - name: my-app
28 | image: deekshithsn/k8s-deployment-strategies
29 | ports:
30 | - name: http
31 | containerPort: 8080
32 | - name: probe
33 | containerPort: 8086
34 | env:
35 | - name: VERSION
36 | value: v2.0.0
37 | livenessProbe:
38 | httpGet:
39 | path: /live
40 | port: probe
41 | initialDelaySeconds: 5
42 | periodSeconds: 5
43 | readinessProbe:
44 | httpGet:
45 | path: /ready
46 | port: probe
47 | periodSeconds: 5
48 |
--------------------------------------------------------------------------------
/deployment-strategies/rolling-update/app-v1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | type: NodePort
9 | ports:
10 | - name: http
11 | port: 80
12 | targetPort: http
13 | selector:
14 | app: my-app
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: my-app
20 | labels:
21 | app: my-app
22 | spec:
23 | replicas: 5
24 | selector:
25 | matchLabels:
26 | app: my-app
27 | template:
28 | metadata:
29 | labels:
30 | app: my-app
31 | version: v1.0.0
32 | spec:
33 | containers:
34 | - name: my-app
35 | image: deekshithsn/k8s-deployment-strategies
36 | ports:
37 | - name: http
38 | containerPort: 8080
39 | - name: probe
40 | containerPort: 8086
41 | env:
42 | - name: VERSION
43 | value: v1.0.0
44 | livenessProbe:
45 | httpGet:
46 | path: /live
47 | port: probe
48 | initialDelaySeconds: 5
49 | periodSeconds: 5
50 | readinessProbe:
51 | httpGet:
52 | path: /ready
53 | port: probe
54 | periodSeconds: 5
55 |
--------------------------------------------------------------------------------
/ingress/README.md:
--------------------------------------------------------------------------------
1 | Create nginx controller
2 | ------
3 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/cloud/deploy.yaml
4 |
5 | ``` For more info refer to https://kubernetes.github.io/ingress-nginx/deploy/ ```
6 |
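7 | Once the controller pods are Running (they typically land in the ingress-nginx namespace, so ``` kubectl get pods -n ingress-nginx ``` should show them), the sample resource in this directory can be applied:
8 | 
9 | ``` kubectl apply -f sample-example.yaml ```
10 | 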
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/Ambassador/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: ambassador-pod
5 | labels:
6 | app: ambassador-app
7 | spec:
8 | volumes:
9 | - name: shared
10 | emptyDir: {}
11 | containers:
12 | - name: app-container-poller
13 | image: yauritux/busybox-curl
14 | command: ["/bin/sh"]
15 | args: ["-c", "while true; do curl 127.0.0.1:81 > /usr/share/nginx/html/index.html; sleep 10; done"]
16 | volumeMounts:
17 | - name: shared
18 | mountPath: /usr/share/nginx/html
19 | - name: app-container-server
20 | image: nginx
21 | ports:
22 | - containerPort: 80
23 | volumeMounts:
24 | - name: shared
25 | mountPath: /usr/share/nginx/html
26 | - name: ambassador-container
27 | image: bharamicrosystems/nginx-forward-proxy
28 | ports:
29 | - containerPort: 81
30 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/Ambassador/service.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: ambassador-nodeport-service
6 | labels:
7 | tier: frontend
8 | spec:
9 | type: NodePort
10 | ports:
11 | - targetPort: 80
12 | port: 80
13 | nodePort: 30008
14 | selector:
15 | app: ambassador-app
16 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/adapter/README.md:
--------------------------------------------------------------------------------
1 | ## Execute the commands below to replicate the scenario
2 | 
3 | - kubectl exec -it webserver-1 -c webserver -- bash
4 | - apt update && apt install curl -y
5 | - curl localhost/nginx_status
6 | - curl localhost:9113/metrics
7 |
8 | for more details refer to https://www.magalix.com/blog/the-adapter-pattern
9 |
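10 | With the exporter sidecar answering on port 9113, a Prometheus scrape job could point at the service defined in service.yaml. A minimal sketch (the job name is illustrative and it assumes Prometheus runs in the same namespace):
11 | 
12 | ```yaml
13 | scrape_configs:
14 |   - job_name: nginx-exporter            # illustrative name
15 |     static_configs:
16 |       - targets: ['myapp-nodeport-service:9113']
17 | ```
18 | 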
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/adapter/adapter.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: webserver-1
5 | labels:
6 | app: webserver
7 | spec:
8 | volumes:
9 | - name: nginx-conf
10 | configMap:
11 | name: nginx-conf
12 | items:
13 | - key: default.conf
14 | path: default.conf
15 | containers:
16 | - name: webserver
17 | image: nginx
18 | ports:
19 | - containerPort: 80
20 | volumeMounts:
21 | - mountPath: /etc/nginx/conf.d
22 | name: nginx-conf
23 | readOnly: true
24 | - name: adapter
25 | image: nginx/nginx-prometheus-exporter:0.4.2
26 | args: ["-nginx.scrape-uri","http://localhost/nginx_status"]
27 | ports:
28 | - containerPort: 9113
29 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/adapter/configmap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: nginx-conf
5 | data:
6 | default.conf: |
7 | server {
8 | listen 80;
9 | server_name localhost;
10 | location / {
11 | root /usr/share/nginx/html;
12 | index index.html index.htm;
13 | }
14 | error_page 500 502 503 504 /50x.html;
15 | location = /50x.html {
16 | root /usr/share/nginx/html;
17 | }
18 | location /nginx_status {
19 | stub_status;
20 | allow 127.0.0.1; #only allow requests from localhost
21 | deny all; #deny all other hosts
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/adapter/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: myapp-nodeport-service
5 | labels:
6 | app: myapp-service-httpd
7 | tier: frontend
8 | spec:
9 | type: NodePort
10 | ports:
11 | - targetPort: 80
12 | port: 80
13 | nodePort: 30008
14 | name: nginx
15 | - targetPort: 9113
16 | port: 9113
17 | nodePort: 30009
18 | name: adaptor
19 | selector:
20 | app: webserver
21 |
22 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-postgres-db/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl run postgres-client --image=postgres:9.6.5 -i -t --rm --restart=Never
2 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-postgres-db/database.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: postgres
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: postgres
10 | template:
11 | metadata:
12 | labels:
13 | app: postgres
14 | spec:
15 | containers:
16 | - name: postgres
17 | image: postgres:9.6.5
18 | ports:
19 | - containerPort: 5432
20 | env:
21 | - name: POSTGRES_DB
22 | valueFrom:
23 | secretKeyRef:
24 | name: database-secret-config
25 | key: dbname
26 | - name: POSTGRES_USER
27 | valueFrom:
28 | secretKeyRef:
29 | name: database-secret-config
30 | key: username
31 | - name: POSTGRES_PASSWORD
32 | valueFrom:
33 | secretKeyRef:
34 | name: database-secret-config
35 | key: password
36 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-postgres-db/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: webapp
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: webapp
10 | template:
11 | metadata:
12 | labels:
13 | app: webapp
14 | spec:
15 | containers:
16 | - image: deekshithsn/go-url-shortener:1.0
17 | name: go-url-shortener
18 | ports:
19 | - containerPort: 8080
20 | env:
21 | - name: POSTGRES_HOST
22 | value: postgres
23 | - name: POSTGRES_PORT
24 | value: "5432"
25 | - name: POSTGRES_DATABASE
26 | valueFrom:
27 | secretKeyRef:
28 | name: database-secret-config
29 | key: dbname
30 | - name: POSTGRES_USER
31 | valueFrom:
32 | secretKeyRef:
33 | name: database-secret-config
34 | key: username
35 | - name: POSTGRES_PASSWORD
36 | valueFrom:
37 | secretKeyRef:
38 | name: database-secret-config
39 | key: password
40 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-postgres-db/init-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: webapp
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: webapp
10 | template:
11 | metadata:
12 | labels:
13 | app: webapp
14 | spec:
15 | initContainers:
16 | - name: check-db-ready
17 | image: postgres:9.6.5
18 | command: ['sh', '-c',
19 | 'until pg_isready -h postgres -p 5432;
20 | do echo waiting for database; sleep 2; done;']
21 | containers:
22 | - image: deekshithsn/go-url-shortener:1.0
23 | name: go-url-shortener
24 | ports:
25 | - containerPort: 8080
26 | env:
27 | - name: POSTGRES_HOST
28 | value: postgres
29 | - name: POSTGRES_PORT
30 | value: "5432"
31 | - name: POSTGRES_DATABASE
32 | valueFrom:
33 | secretKeyRef:
34 | name: database-secret-config
35 | key: dbname
36 | - name: POSTGRES_USER
37 | valueFrom:
38 | secretKeyRef:
39 | name: database-secret-config
40 | key: username
41 | - name: POSTGRES_PASSWORD
42 | valueFrom:
43 | secretKeyRef:
44 | name: database-secret-config
45 | key: password
46 |
47 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-postgres-db/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: database-secret-config
5 | type: Opaque
6 | data:
7 | dbname: dXJsX3Nob3J0ZW5lcl9kYg==
8 | username: dXNlcg==
9 | password: bXlzZWNyZXRwYXNzd29yZA==
10 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-postgres-db/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: postgres
5 | spec:
6 | ports:
7 | - port: 5432
8 | selector:
9 | app: postgres
10 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-sidecar/config_map_aws.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: aws-config
5 | data:
6 | config: |
7 | [default]
8 | region = us-east-1
9 | syncs3.sh: |
10 | export AWS_CONFIG_FILE="/root/.aws/config" # adjust the path for the container user
11 | export AWS_ACCESS_KEY_ID=***** # provide your access key ID
12 | export AWS_SECRET_ACCESS_KEY=***** # provide your secret access key
13 | aws s3 sync /var/log/nginx s3://your_bucket_name
14 |
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-sidecar/config_map_nginx.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: nginx-config
5 | data:
6 | default.conf: |
7 | server {
8 | listen 80 default_server;
9 | listen [::]:80 default_server ipv6only=on;
10 | listen 443 ssl;
11 |
12 | root /usr/share/nginx/html;
13 | index index.html;
14 |
15 | server_name localhost;
16 | ssl_certificate /etc/nginx/ssl/nginx.crt;
17 | ssl_certificate_key /etc/nginx/ssl/nginx.key;
18 | ssl_session_timeout 1d;
19 | ssl_session_cache shared:SSL:50m;
20 | ssl_session_tickets off;
21 | # modern configuration. tweak to your needs.
22 | ssl_protocols TLSv1.2;
23 | ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256';
24 | ssl_prefer_server_ciphers on;
25 | # HSTS (ngx_http_headers_module is required) (15768000 seconds = 6 months)
26 | add_header Strict-Transport-Security max-age=15768000;
27 | # OCSP Stapling ---
28 | # fetch OCSP records from URL in ssl_certificate and cache them
29 | ssl_stapling on;
30 | ssl_stapling_verify on;
31 | location / {
32 | try_files $uri $uri/ =404;
33 | }
34 | }
--------------------------------------------------------------------------------
/kubernetes-conatiner-patterns/init-sidecar/nginxservice.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: nginxsvc
5 | labels:
6 | app: nginx
7 | spec:
8 | type: NodePort
9 | ports:
10 | - port: 80
11 | protocol: TCP
12 | name: http
13 | - port: 443
14 | protocol: TCP
15 | name: https
16 | selector:
17 | app: myapp-dep
--------------------------------------------------------------------------------
/kubernetes-hpa/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM php:5-apache
2 | ADD index.php /var/www/html/index.php
3 | RUN chmod a+rx index.php
4 |
--------------------------------------------------------------------------------
/kubernetes-hpa/README.md:
--------------------------------------------------------------------------------
1 | ## First we need to set up the metrics server; follow the steps in the posts below
2 |
3 | https://stackoverflow.com/questions/54106725/docker-kubernetes-mac-autoscaler-unable-to-find-metrics
4 | https://forum.linuxfoundation.org/discussion/comment/32209
5 |
6 | ## Formula HPA uses
7 |
8 | desiredReplicas = ceil[currentReplicas * ( currentMetricValue / desiredMetricValue )]
9 | For example, with 2 replicas running at 100% CPU utilization against a 50% target: desiredReplicas = ceil[2 * (100 / 50)] = 4.
10 |
11 | ## Run the command below to create the HPA
12 |
13 | kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10
14 |
15 | ## Run the commands below to put load on the php-apache pod
16 |
17 | kubectl run -it --rm load-generator --image=busybox -- /bin/sh
18 |
19 | while true; do wget -q -O- http://php-apache.default.svc.cluster.local; done
20 |
21 |
22 | For Kubernetes 1.23, the metrics-server can be found at https://github.com/kubernetes-sigs/metrics-server
23 |
--------------------------------------------------------------------------------
/kubernetes-hpa/hpa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v1
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | annotations:
5 | name: node-example
6 | namespace: default
7 | spec:
8 | maxReplicas: 4
9 | minReplicas: 1
10 | scaleTargetRef:
11 | apiVersion: apps/v1
12 | kind: Deployment
13 | name: node-example
14 | targetCPUUtilizationPercentage: 1
15 |
--------------------------------------------------------------------------------
/kubernetes-hpa/hpa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling/v1
2 | kind: HorizontalPodAutoscaler
3 | metadata:
4 | annotations:
5 | name: node-example
6 | namespace: default
7 | spec:
8 | maxReplicas: 4
9 | minReplicas: 1
10 | scaleTargetRef:
11 | apiVersion: apps/v1
12 | kind: Deployment
13 | name: node-example
14 | targetCPUUtilizationPercentage: 1
15 |
--------------------------------------------------------------------------------
/kubernetes-hpa/index.jsp:
--------------------------------------------------------------------------------
1 |
8 |
--------------------------------------------------------------------------------
/kubernetes-hpa/php-apache.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: php-apache
5 | spec:
6 | selector:
7 | matchLabels:
8 | run: php-apache
9 | replicas: 1
10 | template:
11 | metadata:
12 | labels:
13 | run: php-apache
14 | spec:
15 | containers:
16 | - name: php-apache
17 | image: k8s.gcr.io/hpa-example
18 | ports:
19 | - containerPort: 80
20 | resources:
21 | limits:
22 | cpu: 500m
23 | requests:
24 | cpu: 200m
25 |
26 | ---
27 |
28 | apiVersion: v1
29 | kind: Service
30 | metadata:
31 | name: php-apache
32 | labels:
33 | run: php-apache
34 | spec:
35 | ports:
36 | - port: 80
37 | selector:
38 | run: php-apache
39 |
40 |
41 |
--------------------------------------------------------------------------------
/kubernetes-liveness-readiness/commands.txt:
--------------------------------------------------------------------------------
1 | wget -qO- http://node-ip:30001/health
2 |
3 | wget -qO- http://node-ip:30001/processing-time/30000
4 |
--------------------------------------------------------------------------------
/kubernetes-liveness-readiness/pod-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: probe-demo-deployment
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | run: probe-demo
10 | template:
11 | metadata:
12 | labels:
13 | run: probe-demo
14 | spec:
15 | containers:
16 | - image: deekshithsn/probe-demo
17 | name: probe-demo
18 | env:
19 | - name: START_DELAY
20 | value: "60"
21 | ports:
22 | - containerPort: 8080
--------------------------------------------------------------------------------
/kubernetes-liveness-readiness/pod-live-readiness-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: probe-liveness-readiness-deployment
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | run: probe-demo
10 | template:
11 | metadata:
12 | labels:
13 | run: probe-demo
14 | spec:
15 | containers:
16 | - image: deekshithsn/probe-demo
17 | name: probe-demo
18 | env:
19 | - name: START_DELAY
20 | value: "0"
21 | ports:
22 | - containerPort: 8080
23 | livenessProbe:
24 | httpGet:
25 | path: /health
26 | port: 8080
27 | initialDelaySeconds: 60
28 | periodSeconds: 5
29 | successThreshold: 1
30 | failureThreshold: 3
31 | timeoutSeconds: 1
32 | readinessProbe:
33 | httpGet:
34 | path: /health
35 | port: 8080
36 | initialDelaySeconds: 60
37 | periodSeconds: 5
38 | successThreshold: 1
39 | failureThreshold: 3
40 | timeoutSeconds: 1
--------------------------------------------------------------------------------
/kubernetes-liveness-readiness/pod-liveness-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: probe-liveness-deployment
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | run: probe-demo
10 | template:
11 | metadata:
12 | labels:
13 | run: probe-demo
14 | spec:
15 | containers:
16 | - image: deekshithsn/probe-demo
17 | name: probe-demo
18 | env:
19 | - name: START_DELAY
20 | value: "60"
21 | ports:
22 | - containerPort: 8080
23 | livenessProbe:
24 | httpGet:
25 | path: /health
26 | port: 8080
27 | initialDelaySeconds: 60
28 | periodSeconds: 5
29 | successThreshold: 1
30 | failureThreshold: 3
31 | timeoutSeconds: 1
32 |
--------------------------------------------------------------------------------
/kubernetes-liveness-readiness/pod-readiness-deplyment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: probe-readiness-deployment
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | run: probe-demo
10 | template:
11 | metadata:
12 | labels:
13 | run: probe-demo
14 | spec:
15 | containers:
16 | - image: deekshithsn/probe-demo
17 | name: probe-demo
18 | env:
19 | - name: START_DELAY
20 | value: "60"
21 | ports:
22 | - containerPort: 8080
23 | readinessProbe:
24 | httpGet:
25 | path: /health
26 | port: 8080
27 | initialDelaySeconds: 60
28 | periodSeconds: 5
29 | successThreshold: 1
30 | failureThreshold: 3
31 | timeoutSeconds: 1
--------------------------------------------------------------------------------
/kubernetes-liveness-readiness/probe-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: probe-service
5 | spec:
6 | selector:
7 | run: probe-demo
8 | type: NodePort
9 | ports:
10 | - name: 8080-8080
11 | port: 8080
12 | protocol: TCP
13 | targetPort: 8080
14 | nodePort: 30001
--------------------------------------------------------------------------------
/kubernetes-vpa/README.md:
--------------------------------------------------------------------------------
1 | gcloud container clusters update kclient --enable-vertical-pod-autoscaling --zone us-east1-b
2 |
--------------------------------------------------------------------------------
/kubernetes-vpa/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-auto-deployment
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: my-auto-deployment
10 | template:
11 | metadata:
12 | labels:
13 | app: my-auto-deployment
14 | spec:
15 | containers:
16 | - name: my-container
17 | image: k8s.gcr.io/ubuntu-slim:0.1
18 | resources:
19 | requests:
20 | cpu: 100m
21 | memory: 50Mi
22 | command: ["/bin/sh"]
23 | args: ["-c", "while true; do timeout 0.5s yes >/dev/null; sleep 0.5s; done"]
24 |
--------------------------------------------------------------------------------
/kubernetes-vpa/deployment_normal.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-rec-deployment
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: my-rec-deployment
10 | template:
11 | metadata:
12 | labels:
13 | app: my-rec-deployment
14 | spec:
15 | containers:
16 | - name: my-rec-container
17 | image: nginx
18 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa-updatemode-off.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling.k8s.io/v1
2 | kind: VerticalPodAutoscaler
3 | metadata:
4 | name: my-rec-vpa
5 | spec:
6 | targetRef:
7 | apiVersion: "apps/v1"
8 | kind: Deployment
9 | name: my-rec-deployment
10 | updatePolicy:
11 | updateMode: "Off"
12 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa-updatemode-on.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling.k8s.io/v1
2 | kind: VerticalPodAutoscaler
3 | metadata:
4 | name: my-vpa
5 | spec:
6 | targetRef:
7 | apiVersion: "apps/v1"
8 | kind: Deployment
9 | name: my-auto-deployment
10 | updatePolicy:
11 | updateMode: "Auto"
12 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.14-alpine as build
2 |
3 | RUN apk add --no-cache git curl
4 |
5 | WORKDIR /src
6 |
7 | COPY app.go /src
8 |
9 | RUN go build app.go
10 |
11 | FROM alpine as runtime
12 |
13 | COPY --from=build /src/app /app/app
14 |
15 | CMD [ "/app/app" ]
16 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa/README.md:
--------------------------------------------------------------------------------
1 | # Install wrk for load testing (a very lightweight load-testing utility)
2 |
3 | apk add --no-cache wrk
4 |
5 | # simulate some load
6 | wrk -c 5 -t 5 -d 99999 -H "Connection: Close" http://application-cpu
7 |
8 |
9 | # VPA components deployment
10 | ```
11 | git clone https://github.com/kubernetes/autoscaler.git
12 | cd autoscaler/vertical-pod-autoscaler/hack
13 | ./vpa-up.sh
14 | ```
15 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa/app.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "net/http"
6 | )
7 |
8 | func main(){
9 | http.HandleFunc("/", useCPU)
10 | http.ListenAndServe(":80", nil)
11 | }
12 |
13 | func useCPU(w http.ResponseWriter, r *http.Request) {
14 | count := 1
15 |
16 | for i := 1; i <= 1000000; i++ {
17 | count = i
18 | }
19 |
20 | fmt.Printf("count: %d", count)
21 | fmt.Fprintf(w, "count: %d", count) // string(count) would yield a single rune, not the digits
22 | }
23 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: application-cpu
5 | labels:
6 | app: application-cpu
7 | spec:
8 | type: ClusterIP
9 | selector:
10 | app: application-cpu
11 | ports:
12 | - protocol: TCP
13 | name: http
14 | port: 80
15 | targetPort: 80
16 | ---
17 | apiVersion: apps/v1
18 | kind: Deployment
19 | metadata:
20 | name: application-cpu
21 | labels:
22 | app: application-cpu
23 | spec:
24 | selector:
25 | matchLabels:
26 | app: application-cpu
27 | replicas: 1
28 | strategy:
29 | type: RollingUpdate
30 | rollingUpdate:
31 | maxSurge: 1
32 | maxUnavailable: 0
33 | template:
34 | metadata:
35 | labels:
36 | app: application-cpu
37 | spec:
38 | containers:
39 | - name: application-cpu
40 | image: aimvector/application-cpu:v1.0.2
41 | imagePullPolicy: Always
42 | ports:
43 | - containerPort: 80
44 | resources:
45 | requests:
46 | memory: "50Mi"
47 | cpu: "500m"
48 | limits:
49 | memory: "500Mi"
50 | cpu: "2000m"
51 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa/traffic-generator.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: traffic-generator
5 | spec:
6 | containers:
7 | - name: alpine
8 | image: alpine
9 | args:
10 | - sleep
11 | - "100000000"
12 |
--------------------------------------------------------------------------------
/kubernetes-vpa/vpa/vpa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: autoscaling.k8s.io/v1
2 | kind: VerticalPodAutoscaler
3 | metadata:
4 | name: application-cpu
5 | spec:
6 | targetRef:
7 | apiVersion: "apps/v1"
8 | kind: Deployment
9 | name: application-cpu
10 | updatePolicy:
11 | updateMode: "Off"
12 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-elk/curator-cronjob.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: CronJob
3 | metadata:
4 | name: elasticsearch-curator
5 | namespace: kube-system
6 | labels:
7 | k8s-app: elasticsearch-logging
8 | spec:
9 | schedule: "0 0 * * *"
10 | jobTemplate:
11 | spec:
12 | template:
13 | metadata:
14 | name: elasticsearch-curator
15 | labels:
16 | k8s-app: elasticsearch-logging
17 | spec:
18 | restartPolicy: "Never"
19 | containers:
20 | - name: ingestor
21 | image: python:3.6-alpine
22 | args: ["sh", "-c", "pip install elasticsearch-curator && curator_cli --host elasticsearch-logging delete_indices --filter_list '[{\"filtertype\":\"age\",\"source\":\"creation_date\",\"direction\":\"older\",\"unit\":\"days\",\"unit_count\":7},{\"filtertype\":\"pattern\",\"kind\":\"prefix\",\"value\":\"logstash\"}]' || true"]
23 | backoffLimit: 1
--------------------------------------------------------------------------------
/monitoring/kubernetes-grafana/README.md:
--------------------------------------------------------------------------------
1 | - Create the configmap using the following command
2 | ``` kubectl create -f grafana-datasource-config.yaml ```
3 |
4 | - Create the deployment
5 | ``` kubectl create -f deployment.yaml ```
6 |
7 | - Create the service
8 | ``` kubectl create -f service.yaml ```
9 |
10 | Grafana dashboard ID to import - ``` 8588 ```
11 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-grafana/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: grafana
11 | template:
12 | metadata:
13 | name: grafana
14 | labels:
15 | app: grafana
16 | spec:
17 | containers:
18 | - name: grafana
19 | image: grafana/grafana:latest
20 | ports:
21 | - name: grafana
22 | containerPort: 3000
23 | resources:
24 | limits:
25 | memory: "2Gi"
26 | cpu: "1000m"
27 | requests:
28 | memory: "1Gi"
29 | cpu: "500m"
30 | volumeMounts:
31 | - mountPath: /var/lib/grafana
32 | name: grafana-storage
33 | - mountPath: /etc/grafana/provisioning/datasources
34 | name: grafana-datasources
35 | readOnly: false
36 | volumes:
37 | - name: grafana-storage
38 | emptyDir: {}
39 | - name: grafana-datasources
40 | configMap:
41 | defaultMode: 420
42 | name: grafana-datasources
43 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-grafana/grafana-datasource-config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: grafana-datasources
5 | namespace: monitoring
6 | data:
7 | prometheus.yaml: |-
8 | {
9 | "apiVersion": 1,
10 | "datasources": [
11 | {
12 | "access":"proxy",
13 | "editable": true,
14 | "name": "prometheus",
15 | "orgId": 1,
16 | "type": "prometheus",
17 | "url": "http://prometheus-service.monitoring.svc:8080",
18 | "version": 1
19 | }
20 | ]
21 | }
22 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-grafana/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
6 | annotations:
7 | prometheus.io/scrape: 'true'
8 | prometheus.io/port: '3000'
9 | spec:
10 | selector:
11 | app: grafana
12 | type: NodePort
13 | ports:
14 | - port: 3000
15 | targetPort: 3000
16 | nodePort: 32000
17 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-prometheus/clusterRole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus
5 | rules:
6 | - apiGroups: [""]
7 | resources:
8 | - nodes
9 | - nodes/proxy
10 | - services
11 | - endpoints
12 | - pods
13 | verbs: ["get", "list", "watch"]
14 | - apiGroups:
15 | - extensions
16 | resources:
17 | - ingresses
18 | verbs: ["get", "list", "watch"]
19 | - nonResourceURLs: ["/metrics"]
20 | verbs: ["get"]
21 | ---
22 | apiVersion: rbac.authorization.k8s.io/v1
23 | kind: ClusterRoleBinding
24 | metadata:
25 | name: prometheus
26 | roleRef:
27 | apiGroup: rbac.authorization.k8s.io
28 | kind: ClusterRole
29 | name: prometheus
30 | subjects:
31 | - kind: ServiceAccount
32 | name: default
33 | namespace: monitoring
34 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-prometheus/prometheus-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: prometheus-deployment
5 | namespace: monitoring
6 | labels:
7 | app: prometheus-server
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: prometheus-server
13 | template:
14 | metadata:
15 | labels:
16 | app: prometheus-server
17 | spec:
18 | containers:
19 | - name: prometheus
20 | image: prom/prometheus
21 | args:
22 | - "--config.file=/etc/prometheus/prometheus.yml"
23 | - "--storage.tsdb.path=/prometheus/"
24 | ports:
25 | - containerPort: 9090
26 | volumeMounts:
27 | - name: prometheus-config-volume
28 | mountPath: /etc/prometheus/
29 | - name: prometheus-storage-volume
30 | mountPath: /prometheus/
31 | volumes:
32 | - name: prometheus-config-volume
33 | configMap:
34 | defaultMode: 420
35 | name: prometheus-server-conf
36 |
37 | - name: prometheus-storage-volume
38 | emptyDir: {}
39 |
--------------------------------------------------------------------------------
/monitoring/kubernetes-prometheus/prometheus-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: prometheus-service
5 | namespace: monitoring
6 | annotations:
7 | prometheus.io/scrape: 'true'
8 | prometheus.io/port: '9090'
9 |
10 | spec:
11 | selector:
12 | app: prometheus-server
13 | type: NodePort
14 | ports:
15 | - port: 8080
16 | targetPort: 9090
17 | nodePort: 30000
18 |
--------------------------------------------------------------------------------
/namespace/basic-resource-quota.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: object-counts-3
5 | spec:
6 | hard:
7 | configmaps: "10"
8 | persistentvolumeclaims: "4"
9 | pods: "4"
10 | replicationcontrollers: "20"
11 | secrets: "10"
12 | services: "10"
13 | services.loadbalancers: "2"
14 |
--------------------------------------------------------------------------------
/namespace/deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: voting-app-deploy
5 | labels:
6 | name: voting-app-deploy
7 | app: demo-voting-app
8 | spec:
9 | template:
10 | metadata:
11 | name: voting-app-pod
12 | labels:
13 | name: voting-app-pod
14 | app: demo-voting-app
15 | spec:
16 | containers:
17 | - name: voting-app
18 | image: kodekloud/examplevotingapp_vote:v1
19 | ports:
20 | - containerPort: 80
21 | selector:
22 | matchLabels:
23 | name: voting-app-pod
24 | app: demo-voting-app
25 | replicas: 2
26 |
--------------------------------------------------------------------------------
/namespace/limitrange/limitrange.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: LimitRange
3 | metadata:
4 | name: cpu-min-max-demo-lr
5 | spec:
6 | limits:
7 | - max:
8 | cpu: "500m"
9 | min:
10 | cpu: "200m"
11 | type: Container
12 |
13 |
--------------------------------------------------------------------------------
/namespace/limitrange/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: constraints-cpu-demo
5 | spec:
6 | containers:
7 | - name: constraints-cpu-demo-ctr
8 | image: nginx
9 | resources:
10 | limits:
11 | cpu: "500m"
12 | requests:
13 | cpu: "500m"
14 |
--------------------------------------------------------------------------------
/namespace/limitrange/pod_max.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: constraints-cpu-demo-2
5 | spec:
6 | containers:
7 | - name: constraints-cpu-demo-2-ctr
8 | image: nginx
9 | resources:
10 | limits:
11 | cpu: "1.5"
12 | requests:
13 | cpu: "500m"
14 |
15 |
--------------------------------------------------------------------------------
/namespace/limitrange/pod_min.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: constraints-cpu-demo-3
5 | spec:
6 | containers:
7 | - name: constraints-cpu-demo-3-ctr
8 | image: nginx
9 | resources:
10 | limits:
11 | cpu: "800m"
12 | requests:
13 | cpu: "100m"
14 |
15 |
--------------------------------------------------------------------------------
/namespace/limitrange/pod_no_rc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: constraints-cpu-demo-4
5 | spec:
6 | containers:
7 | - name: constraints-cpu-demo-4-ctr
8 | image: vish/stress
9 |
10 |
--------------------------------------------------------------------------------
/namespace/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: high-priority
5 | spec:
6 | containers:
7 | - name: high-priority
8 | image: ubuntu
9 | command: ["/bin/sh"]
10 | args: ["-c", "while true; do echo hello; sleep 10;done"]
11 | resources:
12 | requests:
13 | memory: "1Gi"
14 | cpu: "500m"
15 | limits:
16 | memory: "1Gi"
17 | cpu: "500m"
18 |
--------------------------------------------------------------------------------
/namespace/rc-with-resources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: compute-resources
5 | spec:
6 | hard:
7 | requests.cpu: "1"
8 | requests.memory: 1Gi
9 | limits.cpu: "2"
10 | limits.memory: 2Gi
11 | requests.nvidia.com/gpu: 4
12 |
--------------------------------------------------------------------------------
/namespace/resource-q1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: object-counts
5 | spec:
6 | hard:
7 | configmaps: "10"
8 | pods: "1"
9 |
--------------------------------------------------------------------------------
/namespace/resource-q2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ResourceQuota
3 | metadata:
4 | name: object-counts-1
5 | spec:
6 | hard:
7 | configmaps: "15"
8 | pods: "2"
9 |
--------------------------------------------------------------------------------
/network-policy/foo-allow-to-hello.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | name: foo-allow-to-hello
5 | spec:
6 | policyTypes:
7 | - Egress
8 | podSelector:
9 | matchLabels:
10 | app: foo
11 | egress:
12 | - to:
13 | - podSelector:
14 | matchLabels:
15 | app: hello
16 | - ports:
17 | - port: 53
18 | protocol: TCP
19 | - port: 53
20 | protocol: UDP
21 |
--------------------------------------------------------------------------------
/network-policy/hello-allow-from-foo.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | name: hello-allow-from-foo
5 | spec:
6 | policyTypes:
7 | - Ingress
8 | podSelector:
9 | matchLabels:
10 | app: hello
11 | ingress:
12 | - from:
13 | - podSelector:
14 | matchLabels:
15 | app: foo
16 |
17 |
--------------------------------------------------------------------------------
/network-policy/more-info/00-create-cluster.md:
--------------------------------------------------------------------------------
1 | ## Create a cluster
2 |
3 | Most of the Kubernetes installation methods out there do not give you a cluster
4 | with the [Network
5 | Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/)
6 | feature enabled. You need to manually install and configure a Network Policy
7 | provider such as Weave Net or Calico.
8 |
9 | **[Google Kubernetes Engine (GKE)][gke]** easily lets you get a Kubernetes
10 | cluster with Network Policies feature. You do not need to install a network
11 | policy provider yourself, as GKE configures Calico as the networking provider
12 | for you. (This feature is generally available as of GKE v1.10.)
13 |
14 | To create a GKE cluster named `np` with Network Policy feature enabled, run:
15 |
16 | gcloud beta container clusters create np \
17 | --enable-network-policy \
18 | --zone us-central1-b
19 |
20 | This will create a 3-node Kubernetes cluster on Kubernetes Engine and turn on
21 | the Network Policy feature.
22 |
23 | Once you complete this tutorial, you can delete the cluster by running:
24 |
25 | gcloud container clusters delete -q --zone us-central1-b np
26 |
27 |
28 | [gke]: https://cloud.google.com/kubernetes-engine/
29 |
--------------------------------------------------------------------------------
/network-policy/more-info/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | We'd love to accept your patches and contributions to this project. There are
4 | just a few small guidelines you need to follow.
5 |
6 | ## Contributor License Agreement
7 |
8 | Contributions to this project must be accompanied by a Contributor License
9 | Agreement. You (or your employer) retain the copyright to your contribution,
10 | this simply gives us permission to use and redistribute your contributions as
11 | part of the project. Head over to https://cla.developers.google.com/ to see
12 | your current agreements on file or to sign a new one.
13 |
14 | You generally only need to submit a CLA once, so if you've already submitted one
15 | (even if it was for a different project), you probably don't need to do it
16 | again.
17 |
18 | ## Code reviews
19 |
20 | All submissions, including submissions by project members, require review. We
21 | use GitHub pull requests for this purpose. Consult
22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
23 | information on using pull requests.
24 |
--------------------------------------------------------------------------------
/network-policy/more-info/img/1.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/1.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/2.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/2.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/3.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/3.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/4.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/4.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/5.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/5.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/6.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/6.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/8.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/8.gif
--------------------------------------------------------------------------------
/network-policy/more-info/img/9.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/network-policy/more-info/img/9.gif
--------------------------------------------------------------------------------
/network-policy/web-allow-all-ns-monitoring.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | name: web-allow-all-ns-monitoring
5 | namespace: default
6 | spec:
7 | podSelector:
8 | matchLabels:
9 | app: web
10 | ingress:
11 | - from:
12 | - namespaceSelector: # chooses all pods in namespaces labelled with team=operations
13 | matchLabels:
14 | team: operations
15 | podSelector: # chooses pods with type=monitoring
16 | matchLabels:
17 | type: monitoring
18 |
--------------------------------------------------------------------------------
/node-selector/node-affinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: nginx
9 | replicas: 3
10 | template:
11 | metadata:
12 | labels:
13 | app: nginx
14 | spec:
15 | affinity:
16 | nodeAffinity:
17 | requiredDuringSchedulingIgnoredDuringExecution:
18 | # preferredDuringSchedulingIgnoredDuringExecution:
19 | # preferredDuringSchedulingRequiredDuringExecution:
20 | nodeSelectorTerms:
21 | - matchExpressions:
22 | - key: app
23 | operator: In
24 | values:
25 | - blue
26 | containers:
27 | - name: nginx
28 | image: nginx:1.7.9
29 | ports:
30 | - containerPort: 80
31 |
--------------------------------------------------------------------------------
/node-selector/pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: my-python
5 | spec:
6 | containers:
7 | - name: my-python
8 | image: deekshithsn/python-web-app
9 | nodeSelector:
10 | size: medium
11 |
12 |
13 |
14 |
15 | # kubectl label nodes node_name size=medium
16 |
17 |
--------------------------------------------------------------------------------
/operators/dotnet-application/dockerfile:
--------------------------------------------------------------------------------
1 | FROM mcr.microsoft.com/dotnet/core/sdk:2.2-stretch as debug
2 |
3 | #install debugger for NET Core
4 | RUN apt-get update
5 | RUN apt-get install -y unzip
6 | RUN curl -sSL https://aka.ms/getvsdbgsh | /bin/sh /dev/stdin -v latest -l ~/vsdbg
7 |
8 | RUN mkdir /work/
9 | WORKDIR /work/
10 |
11 | COPY ./src/work.csproj /work/work.csproj
12 | RUN dotnet restore
13 |
14 | COPY ./src/ /work/
15 | RUN mkdir /out/
16 | RUN dotnet publish --no-restore --output /out/ --configuration Release
17 |
18 | ENTRYPOINT ["dotnet", "run"]
19 |
20 | ###########START NEW IMAGE###########################################
21 |
22 | FROM mcr.microsoft.com/dotnet/core/aspnet:2.2-stretch-slim as prod
23 |
24 | RUN mkdir /app/
25 | WORKDIR /app/
26 | COPY --from=debug /out/ /app/
27 | RUN chmod +x /app/
28 | CMD dotnet work.dll
29 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Error.cshtml:
--------------------------------------------------------------------------------
1 | @page
2 | @model ErrorModel
3 | @{
4 | ViewData["Title"] = "Error";
5 | }
6 |
7 |
Error.
8 | An error occurred while processing your request.
9 |
10 | @if (Model.ShowRequestId)
11 | {
12 |
13 | Request ID: @Model.RequestId
14 |
15 | }
16 |
17 | Development Mode
18 |
19 | Swapping to the Development environment displays detailed information about the error that occurred.
20 |
21 |
22 | The Development environment shouldn't be enabled for deployed applications.
23 | It can result in displaying sensitive information from exceptions to end users.
24 | For local debugging, enable the Development environment by setting the ASPNETCORE_ENVIRONMENT environment variable to Development
25 | and restarting the app.
26 |
27 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Error.cshtml.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Diagnostics;
4 | using System.Linq;
5 | using System.Threading.Tasks;
6 | using Microsoft.AspNetCore.Mvc;
7 | using Microsoft.AspNetCore.Mvc.RazorPages;
8 |
9 | namespace work.Pages
10 | {
11 | [ResponseCache(Duration = 0, Location = ResponseCacheLocation.None, NoStore = true)]
12 | public class ErrorModel : PageModel
13 | {
14 | public string RequestId { get; set; }
15 |
16 | public bool ShowRequestId => !string.IsNullOrEmpty(RequestId);
17 |
18 | public void OnGet()
19 | {
20 | RequestId = Activity.Current?.Id ?? HttpContext.TraceIdentifier;
21 | }
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Index.cshtml:
--------------------------------------------------------------------------------
1 | @page
2 | @model IndexModel
3 | @{
4 | ViewData["Title"] = "Home page";
5 | }
6 |
7 |
11 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Index.cshtml.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Diagnostics;
3 | using System.Collections.Generic;
4 | using System.Linq;
5 | using System.Threading.Tasks;
6 | using Microsoft.AspNetCore.Mvc;
7 | using Microsoft.AspNetCore.Mvc.RazorPages;
8 | using Prometheus;
9 | namespace work.Pages
10 | {
11 | public class IndexModel : PageModel
12 | {
13 | private static readonly Counter ProcessedJobCount = Metrics
14 | .CreateCounter("dotnet_request_operations_total", "The total number of processed requests");
15 | public void OnGet()
16 | {
17 | var sw = Stopwatch.StartNew();
18 |
19 | sw.Stop();
20 | ProcessedJobCount.Inc();
21 | var histogram =
22 | Metrics.CreateHistogram(
23 | "dotnet_request_duration_seconds",
24 | "Histogram for the duration in seconds.",
25 | new HistogramConfiguration
26 | {
27 | Buckets = Histogram.LinearBuckets(start: 1, width: 1, count: 5)
28 | });
29 |
30 | histogram.Observe(sw.Elapsed.TotalSeconds);
31 |
32 | }
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Privacy.cshtml:
--------------------------------------------------------------------------------
1 | @page
2 | @model PrivacyModel
3 | @{
4 | ViewData["Title"] = "Privacy Policy";
5 | }
6 | @ViewData["Title"]
7 |
8 | Use this page to detail your site's privacy policy.
9 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Privacy.cshtml.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.Linq;
4 | using System.Threading.Tasks;
5 | using Microsoft.AspNetCore.Mvc;
6 | using Microsoft.AspNetCore.Mvc.RazorPages;
7 |
8 | namespace work.Pages
9 | {
10 | public class PrivacyModel : PageModel
11 | {
12 | public void OnGet()
13 | {
14 | }
15 | }
16 | }
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Shared/_CookieConsentPartial.cshtml:
--------------------------------------------------------------------------------
1 | @using Microsoft.AspNetCore.Http.Features
2 |
3 | @{
4 | var consentFeature = Context.Features.Get<ITrackingConsentFeature>();
5 | var showBanner = !consentFeature?.CanTrack ?? false;
6 | var cookieString = consentFeature?.CreateConsentCookie();
7 | }
8 |
9 | @if (showBanner)
10 | {
11 |
12 | Use this space to summarize your privacy and cookie use policy.
Learn More .
13 |
14 | Accept
15 |
16 |
17 |
25 | }
26 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/Shared/_ValidationScriptsPartial.cshtml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
12 |
18 |
19 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/_ViewImports.cshtml:
--------------------------------------------------------------------------------
1 | @using work
2 | @namespace work.Pages
3 | @addTagHelper *, Microsoft.AspNetCore.Mvc.TagHelpers
4 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Pages/_ViewStart.cshtml:
--------------------------------------------------------------------------------
1 | @{
2 | Layout = "_Layout";
3 | }
4 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Program.cs:
--------------------------------------------------------------------------------
1 | using System;
2 | using System.Collections.Generic;
3 | using System.IO;
4 | using System.Linq;
5 | using System.Threading.Tasks;
6 | using Microsoft.AspNetCore;
7 | using Microsoft.AspNetCore.Hosting;
8 | using Microsoft.Extensions.Configuration;
9 | using Microsoft.Extensions.Logging;
10 |
11 | namespace work
12 | {
13 | public class Program
14 | {
15 | public static void Main(string[] args)
16 | {
17 | CreateWebHostBuilder(args).Build().Run();
18 | }
19 |
20 | public static IWebHostBuilder CreateWebHostBuilder(string[] args) =>
21 | WebHost.CreateDefaultBuilder(args)
22 | .UseUrls("http://*:5000")
23 | .UseStartup<Startup>();
24 | }
25 | }
26 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/Properties/launchSettings.json:
--------------------------------------------------------------------------------
1 | {
2 | "iisSettings": {
3 | "windowsAuthentication": false,
4 | "anonymousAuthentication": true,
5 | "iisExpress": {
6 | "applicationUrl": "http://localhost:63846",
7 | "sslPort": 44303
8 | }
9 | },
10 | "profiles": {
11 | "IIS Express": {
12 | "commandName": "IISExpress",
13 | "launchBrowser": true,
14 | "environmentVariables": {
15 | "ASPNETCORE_ENVIRONMENT": "Development"
16 | }
17 | },
18 | "work": {
19 | "commandName": "Project",
20 | "launchBrowser": true,
21 | "applicationUrl": "https://localhost:5001;http://localhost:5000",
22 | "environmentVariables": {
23 | "ASPNETCORE_ENVIRONMENT": "Development"
24 | }
25 | }
26 | }
27 | }
--------------------------------------------------------------------------------
/operators/dotnet-application/src/appsettings.Development.json:
--------------------------------------------------------------------------------
1 | {
2 | "Logging": {
3 | "LogLevel": {
4 | "Default": "Debug",
5 | "System": "Information",
6 | "Microsoft": "Information"
7 | }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/appsettings.json:
--------------------------------------------------------------------------------
1 | {
2 | "Logging": {
3 | "LogLevel": {
4 | "Default": "Warning"
5 | }
6 | },
7 | "AllowedHosts": "*"
8 | }
9 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/work.csproj:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | netcoreapp2.2
5 | InProcess
6 | linux-x64
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/wwwroot/css/site.css:
--------------------------------------------------------------------------------
1 | /* Please see documentation at https://docs.microsoft.com/aspnet/core/client-side/bundling-and-minification
2 | for details on configuring this project to bundle and minify static web assets. */
3 |
4 | a.navbar-brand {
5 | white-space: normal;
6 | text-align: center;
7 | word-break: break-all;
8 | }
9 |
10 | /* Sticky footer styles
11 | -------------------------------------------------- */
12 | html {
13 | font-size: 14px;
14 | }
15 | @media (min-width: 768px) {
16 | html {
17 | font-size: 16px;
18 | }
19 | }
20 |
21 | .border-top {
22 | border-top: 1px solid #e5e5e5;
23 | }
24 | .border-bottom {
25 | border-bottom: 1px solid #e5e5e5;
26 | }
27 |
28 | .box-shadow {
29 | box-shadow: 0 .25rem .75rem rgba(0, 0, 0, .05);
30 | }
31 |
32 | button.accept-policy {
33 | font-size: 1rem;
34 | line-height: inherit;
35 | }
36 |
37 | /* Sticky footer styles
38 | -------------------------------------------------- */
39 | html {
40 | position: relative;
41 | min-height: 100%;
42 | }
43 |
44 | body {
45 | /* Margin bottom by footer height */
46 | margin-bottom: 60px;
47 | }
48 | .footer {
49 | position: absolute;
50 | bottom: 0;
51 | width: 100%;
52 | white-space: nowrap;
53 | /* Set the fixed height of the footer here */
54 | height: 60px;
55 | line-height: 60px; /* Vertically center the text there */
56 | }
57 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/wwwroot/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/operators/dotnet-application/src/wwwroot/favicon.ico
--------------------------------------------------------------------------------
/operators/dotnet-application/src/wwwroot/js/site.js:
--------------------------------------------------------------------------------
1 | // Please see documentation at https://docs.microsoft.com/aspnet/core/client-side/bundling-and-minification
2 | // for details on configuring this project to bundle and minify static web assets.
3 |
4 | // Write your Javascript code.
5 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/wwwroot/lib/bootstrap/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2011-2018 Twitter, Inc.
4 | Copyright (c) 2011-2018 The Bootstrap Authors
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/wwwroot/lib/jquery-validation-unobtrusive/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Copyright (c) .NET Foundation. All rights reserved.
2 |
3 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use
4 | these files except in compliance with the License. You may obtain a copy of the
5 | License at
6 |
7 | http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | Unless required by applicable law or agreed to in writing, software distributed
10 | under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
11 | CONDITIONS OF ANY KIND, either express or implied. See the License for the
12 | specific language governing permissions and limitations under the License.
13 |
--------------------------------------------------------------------------------
/operators/dotnet-application/src/wwwroot/lib/jquery-validation/LICENSE.md:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | =====================
3 |
4 | Copyright Jörn Zaefferer
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in
14 | all copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 | THE SOFTWARE.
23 |
--------------------------------------------------------------------------------
/operators/go-application/dockerfile:
--------------------------------------------------------------------------------
1 | FROM golang:1.11.13-alpine3.10 as builder
2 |
3 | # installing git
4 | RUN apk update && apk upgrade && \
5 | apk add --no-cache bash git openssh
6 |
7 | # setting working directory
8 | WORKDIR /go/src/app
9 |
10 | # installing dependencies
11 | RUN go get github.com/leonelquinteros/gorand
12 | RUN go get github.com/sirupsen/logrus
13 | RUN go get github.com/prometheus/client_golang/prometheus
14 | RUN go get github.com/prometheus/client_golang/prometheus/promauto
15 | RUN go get github.com/prometheus/client_golang/prometheus/promhttp
16 |
17 | COPY / /go/src/app/
18 | RUN go build -o myapp
19 |
20 | FROM alpine:3.10
21 |
22 | RUN apk update && apk upgrade && \
23 | apk add --no-cache openssh curl ca-certificates
24 |
25 | WORKDIR /go/src/app
26 | COPY --from=builder /go/src/app/myapp /go/src/app/myapp
27 |
28 | EXPOSE 5000
29 |
30 | CMD ["./myapp"]
--------------------------------------------------------------------------------
/operators/go-application/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "time"
6 | "net/http"
7 | "github.com/prometheus/client_golang/prometheus"
8 | "github.com/prometheus/client_golang/prometheus/promauto"
9 | "github.com/prometheus/client_golang/prometheus/promhttp"
10 | )
11 |
12 |
13 | var requestsProcessed = promauto.NewCounter(prometheus.CounterOpts{
14 | Name: "go_request_operations_total",
15 | Help: "The total number of processed requests",
16 | })
17 |
18 |
19 | var requestDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
20 | Name: "go_request_duration_seconds",
21 | Help: "Histogram for the duration in seconds.",
22 | Buckets: []float64{1, 2, 5, 6, 10},
23 | },
24 | []string{"endpoint"},
25 | )
26 |
27 | func main() {
28 |
29 | fmt.Println("starting...")
30 |
31 | prometheus.MustRegister(requestDuration)
32 |
33 | http.HandleFunc("/", func (w http.ResponseWriter, r *http.Request) {
34 | //start a timer
35 | start := time.Now()
36 |
37 | 		time.Sleep(600 * time.Millisecond) // sleep to simulate work
38 | fmt.Fprint(w, "Welcome to my application!")
39 |
40 | //measure the duration and log to prometheus
41 | httpDuration := time.Since(start)
42 | requestDuration.WithLabelValues("GET /").Observe(httpDuration.Seconds())
43 |
44 | //increment a counter for number of requests processed
45 | requestsProcessed.Inc()
46 | })
47 |
48 | http.Handle("/metrics", promhttp.Handler())
49 | http.ListenAndServe(":5000", nil)
50 |
51 | }
52 |
--------------------------------------------------------------------------------
/operators/nodejs-application/dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:12.4.0-alpine as dev
2 |
3 | RUN mkdir /work/
4 | WORKDIR /work/
5 |
6 | COPY ./src/package.json /work/package.json
7 | RUN npm install
8 |
9 | COPY ./src/ /work/
10 |
11 | CMD node .
12 |
--------------------------------------------------------------------------------
/operators/nodejs-application/src/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "docker_web_app",
3 | "version": "1.0.0",
4 | "description": "Node.js on Docker",
5 | "author": "First Last ",
6 | "main": "server.js",
7 | "scripts": {
8 | "start": "node server.js"
9 | },
10 | "dependencies": {
11 | "express": "^4.16.1",
12 | "prom-client" : "11.5.3"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/operators/nodejs-application/src/server.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | const express = require('express');
4 |
5 | // Constants
6 | const PORT = 5000;
7 | const HOST = '0.0.0.0';
8 |
9 | // App
10 | const client = require('prom-client');
11 | const collectDefaultMetrics = client.collectDefaultMetrics;
12 | // Collect default metrics every 5 seconds.
13 | collectDefaultMetrics({ timeout: 5000 });
14 |
15 | const counter = new client.Counter({
16 | name: 'node_request_operations_total',
17 | help: 'The total number of processed requests'
18 | });
19 |
20 | const histogram = new client.Histogram({
21 | name: 'node_request_duration_seconds',
22 | help: 'Histogram for the duration in seconds.',
23 | buckets: [1, 2, 5, 6, 10]
24 | });
25 |
26 | const app = express();
27 | app.get('/', (req, res) => {
28 |
29 | //Simulate a sleep
30 | var start = new Date()
31 | var simulateTime = 1000
32 |
33 |   setTimeout(function () {
34 | // execution time simulated with setTimeout function
35 | var end = new Date() - start
36 | histogram.observe(end / 1000); //convert to seconds
37 | }, simulateTime)
38 |
39 | counter.inc();
40 |
41 | res.send('Hello world\n');
42 | });
43 |
44 |
45 | // Metrics endpoint
46 | app.get('/metrics', (req, res) => {
47 | res.set('Content-Type', client.register.contentType)
48 | res.end(client.register.metrics())
49 | })
50 |
51 | app.listen(PORT, HOST);
52 | console.log(`Running on http://${HOST}:${PORT}`);
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/alertmanager/alertmanager.secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data: {}
3 | kind: Secret
4 | metadata:
5 | name: alertmanager-main
6 | stringData:
7 | alertmanager.yaml: |-
8 | "global":
9 | "resolve_timeout": "5m"
10 | "inhibit_rules":
11 | - "equal":
12 | - "alertname"
13 | "source_match":
14 | "severity": "critical"
15 | "target_match_re":
16 | "severity": "warning|info"
17 | - "equal":
18 | - "alertname"
19 | "source_match":
20 | "severity": "warning"
21 | "target_match_re":
22 | "severity": "info"
23 | "receivers":
24 | - "name": "Default"
25 | - "name": "Watchdog"
26 | - "name": "Critical"
27 | "route":
28 | "group_by":
29 | - "namespace"
30 | "group_interval": "5m"
31 | "group_wait": "30s"
32 | "receiver": "Default"
33 | "repeat_interval": "12h"
34 | "routes":
35 | - "match":
36 | "alertname": "Watchdog"
37 | "receiver": "Watchdog"
38 | - "match":
39 | "severity": "critical"
40 | "receiver": "Critical"
41 | type: Opaque
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/alertmanager/alertmanager.sericeaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: alertmanager-main
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/alertmanager/alertmanager.service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: alertmanager-main
7 | spec:
8 | ports:
9 | - name: web
10 | port: 9093
11 | targetPort: web
12 | selector:
13 | alertmanager: main
14 | app: alertmanager
15 | sessionAffinity: ClientIP
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/alertmanager/alertmanager.servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: alertmanager
6 | name: alertmanager
7 | spec:
8 | endpoints:
9 | - interval: 30s
10 | port: web
11 | selector:
12 | matchLabels:
13 | alertmanager: main
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/alertmanager/alertmanager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Alertmanager
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: main
7 | spec:
8 | baseImage: quay.io/prometheus/alertmanager
9 | nodeSelector:
10 | kubernetes.io/os: linux
11 | replicas: 3
12 | securityContext:
13 | fsGroup: 2000
14 | runAsNonRoot: true
15 | runAsUser: 1000
16 | serviceAccountName: alertmanager-main
17 | version: v0.20.0
18 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/grafana/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: grafana
6 | name: grafana
7 | spec:
8 | ports:
9 | - name: http
10 | port: 3000
11 | targetPort: http
12 | selector:
13 | app: grafana
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/grafana/grafana.serviceaccount.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: grafana
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/kube-state-metrics/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: v1.8.0
7 | name: kube-state-metrics
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: kube-state-metrics
12 | subjects:
13 | - kind: ServiceAccount
14 | name: kube-state-metrics
15 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/kube-state-metrics/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | k8s-app: kube-state-metrics
6 | app.kubernetes.io/name: kube-state-metrics
7 | app.kubernetes.io/version: v1.8.0
8 | name: kube-state-metrics
9 | spec:
10 | replicas: 1
11 | selector:
12 | matchLabels:
13 | app.kubernetes.io/name: kube-state-metrics
14 | template:
15 | metadata:
16 | labels:
17 | app.kubernetes.io/name: kube-state-metrics
18 | app.kubernetes.io/version: v1.8.0
19 | spec:
20 | containers:
21 | - image: quay.io/coreos/kube-state-metrics:v1.8.0
22 | livenessProbe:
23 | httpGet:
24 | path: /healthz
25 | port: 8080
26 | initialDelaySeconds: 5
27 | timeoutSeconds: 5
28 | name: kube-state-metrics
29 | ports:
30 | - containerPort: 8080
31 | name: http-metrics
32 | - containerPort: 8081
33 | name: telemetry
34 | readinessProbe:
35 | httpGet:
36 | path: /
37 | port: 8081
38 | initialDelaySeconds: 5
39 | timeoutSeconds: 5
40 | nodeSelector:
41 | kubernetes.io/os: linux
42 | serviceAccountName: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/kube-state-metrics/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: v1.8.0
7 | name: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/kube-state-metrics/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: kube-state-metrics
5 | labels:
6 | prometheus: cluster-monitoring
7 | k8s-app: kube-state-metrics
8 | spec:
9 | targetLabels:
10 | - cluster
11 | jobLabel: kube-state-metrics
12 | selector:
13 | matchLabels:
14 | k8s-app: kube-state-metrics
15 | namespaceSelector:
16 | matchNames:
17 | - monitoring
18 | endpoints:
19 | - port: http-metrics
20 | honorLabels: true
21 | scheme: http
22 | interval: 60s
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/kube-state-metrics/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: v1.8.0
7 | k8s-app: kube-state-metrics
8 | cluster: docker-for-desktop
9 | name: kube-state-metrics
10 | spec:
11 | clusterIP: None
12 | ports:
13 | - name: http-metrics
14 | port: 8080
15 | targetPort: http-metrics
16 | - name: telemetry
17 | port: 8081
18 | targetPort: telemetry
19 | selector:
20 | app.kubernetes.io/name: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/node-exporter/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: node-exporter
5 | labels:
6 | prometheus: cluster-monitoring
7 | k8s-app: node-exporter
8 | spec:
9 | jobLabel: node-exporter
10 | selector:
11 | matchLabels:
12 | k8s-app: node-exporter
13 | namespaceSelector:
14 | matchNames:
15 | - monitoring
16 | endpoints:
17 | - port: http-metrics
18 | interval: 60s
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/apiserver.servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: kube-apiserver
5 | labels:
6 | prometheus: cluster-monitoring
7 | k8s-app: apiserver
8 | spec:
9 | jobLabel: component
10 | selector:
11 | matchLabels:
12 | component: apiserver
13 | provider: kubernetes
14 | namespaceSelector:
15 | matchNames:
16 | - default
17 | endpoints:
18 | - port: https
19 | interval: 60s
20 | scheme: https
21 | tlsConfig:
22 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
23 | serverName: kubernetes
24 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: prometheus
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: prometheus
9 | subjects:
10 | - kind: ServiceAccount
11 | name: prometheus
12 | namespace: monitoring
13 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus
5 | rules:
6 | - apiGroups: [""]
7 | resources:
8 | - nodes
9 | - services
10 | - endpoints
11 | - pods
12 | verbs: ["get", "list", "watch"]
13 | - apiGroups: [""]
14 | resources:
15 | - configmaps
16 | verbs: ["get"]
17 | - nonResourceURLs: ["/metrics"]
18 | verbs: ["get"]
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/prometheus-alerts.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: PrometheusRule
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | role: alert-rules
7 | name: example-rule
8 | spec:
9 | groups:
10 | - name: example-rule
11 | rules:
12 | - alert: example-alert
13 | annotations:
14 | description: Memory on node {{ $labels.instance }} currently at {{ $value }}%
15 | is under pressure
16 | summary: Memory usage is under pressure, system may become unstable.
17 | expr: |
18 | 100 - ((node_memory_MemAvailable_bytes{job="node-exporter"} * 100) / node_memory_MemTotal_bytes{job="node-exporter"}) > 70
19 | for: 2m
20 | labels:
21 | severity: warning
22 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/prometheus.service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: prometheus
6 | prometheus: prometheus
7 | name: prometheus-service
8 | spec:
9 | ports:
10 | - protocol: TCP
11 | port: 9090
12 | targetPort: 9090
13 | selector:
14 | app: prometheus
15 | prometheus: prometheus
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | name: prometheus
5 | labels:
6 | prometheus: k8s
7 | spec:
8 | externalLabels:
9 | cluster: docker-desktop
10 | replicas: 1
11 | version: v2.13.1
12 | serviceAccountName: prometheus
13 | serviceMonitorSelector:
14 | matchExpressions:
15 | - key: k8s-app
16 | operator: In
17 | values:
18 | - node-exporter
19 | - kube-state-metrics
20 | - apiserver
21 | - kubelet
22 | ruleSelector:
23 | matchLabels:
24 | role: alert-rules
25 | prometheus: k8s
26 | resources:
27 | requests:
28 | memory: 400Mi
29 | alerting:
30 | alertmanagers:
31 | - namespace: monitoring
32 | name: alertmanager-main
33 | port: web
34 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-cluster-monitoring/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus
5 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-operator/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.31.1
8 | name: prometheus-operator
9 | roleRef:
10 | apiGroup: rbac.authorization.k8s.io
11 | kind: ClusterRole
12 | name: prometheus-operator
13 | subjects:
14 | - kind: ServiceAccount
15 | name: prometheus-operator
16 | namespace: monitoring
17 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-operator/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.31.1
8 | name: prometheus-operator
9 | rules:
10 | - apiGroups:
11 | - apiextensions.k8s.io
12 | resources:
13 | - customresourcedefinitions
14 | verbs:
15 | - '*'
16 | - apiGroups:
17 | - monitoring.coreos.com
18 | resources:
19 | - alertmanagers
20 | - prometheuses
21 | - prometheuses/finalizers
22 | - alertmanagers/finalizers
23 | - servicemonitors
24 | - podmonitors
25 | - prometheusrules
26 | verbs:
27 | - '*'
28 | - apiGroups:
29 | - apps
30 | resources:
31 | - statefulsets
32 | verbs:
33 | - '*'
34 | - apiGroups:
35 | - ""
36 | resources:
37 | - configmaps
38 | - secrets
39 | verbs:
40 | - '*'
41 | - apiGroups:
42 | - ""
43 | resources:
44 | - pods
45 | verbs:
46 | - list
47 | - delete
48 | - apiGroups:
49 | - ""
50 | resources:
51 | - services
52 | - services/finalizers
53 | - endpoints
54 | verbs:
55 | - get
56 | - create
57 | - update
58 | - delete
59 | - apiGroups:
60 | - ""
61 | resources:
62 | - nodes
63 | verbs:
64 | - list
65 | - watch
66 | - apiGroups:
67 | - ""
68 | resources:
69 | - namespaces
70 | verbs:
71 | - get
72 | - list
73 | - watch
74 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-operator/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.31.1
8 | name: prometheus-operator
9 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-operator/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.31.1
8 | name: prometheus-operator
9 | spec:
10 | endpoints:
11 | - honorLabels: true
12 | port: http
13 | selector:
14 | matchLabels:
15 | app.kubernetes.io/component: controller
16 | app.kubernetes.io/name: prometheus-operator
17 | app.kubernetes.io/version: v0.31.1
18 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-operator/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.31.1
8 | name: prometheus-operator
9 | spec:
10 | clusterIP: None
11 | ports:
12 | - name: http
13 | port: 8080
14 | targetPort: http
15 | selector:
16 | app.kubernetes.io/component: controller
17 | app.kubernetes.io/name: prometheus-operator
18 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-standalone/apps.service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | name: apps
5 | labels:
6 | prometheus: prometheus-standalone
7 | k8s-app: apps
8 | spec:
9 | jobLabel: apps
10 | selector:
11 | matchLabels:
12 | app: example-app
13 | namespaceSelector:
14 | matchNames:
15 | - apps
16 | endpoints:
17 | - port: http
18 | interval: 30s
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-standalone/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: prometheus-standalone
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: prometheus-standalone
9 | subjects:
10 | - kind: ServiceAccount
11 | name: prometheus-standalone
12 | namespace: apps
13 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-standalone/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus-standalone
5 | rules:
6 | - apiGroups: [""]
7 | resources:
8 | - services
9 | - endpoints
10 | - pods
11 | verbs: ["get", "list", "watch"]
12 | - apiGroups: [""]
13 | resources:
14 | - configmaps
15 | verbs: ["get"]
16 | - nonResourceURLs: ["/metrics"]
17 | verbs: ["get"]
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-standalone/prometheus.service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | prometheus: prometheus-standalone
6 | name: prometheus-service
7 | spec:
8 | ports:
9 | - protocol: TCP
10 | port: 9090
11 | targetPort: 9090
12 | selector:
13 | prometheus: prometheus-standalone
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/prometheus-standalone/prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | name: prometheus-standalone
5 | labels:
6 | prometheus: k8s
7 | spec:
8 | externalLabels:
9 | cluster: docker-desktop
10 | replicas: 1
11 | version: v2.13.1
12 | serviceAccountName: prometheus-standalone
13 | serviceMonitorSelector:
14 | matchExpressions:
15 | - key: k8s-app
16 | operator: In
17 | values:
18 | - apps
19 | ---
20 | apiVersion: v1
21 | kind: ServiceAccount
22 | metadata:
23 | name: prometheus-standalone
24 |
--------------------------------------------------------------------------------
/operators/prometheous/1.14.8/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes 1.14.8 Monitoring Guide
2 |
3 | Create a cluster with [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
4 | ```
5 | kind create cluster --name prometheus --image kindest/node:v1.14.9
6 | ```
7 |
8 | ```
9 | kubectl create ns monitoring
10 |
11 | # Create the operator to instantiate all CRDs
12 | # Note: you will see the error: no matches for kind "ServiceMonitor" ...
13 | # Wait until the operator is running, then rerun the command
14 | kubectl -n monitoring apply -f ./operators/prometheous/1.14.8/prometheus-operator/
15 |
16 | # Deploy monitoring components
17 | kubectl -n monitoring apply -f ./operators/prometheous/1.14.8/node-exporter/
18 | kubectl -n monitoring apply -f ./operators/prometheous/1.14.8/kube-state-metrics/
19 | kubectl -n monitoring apply -f ./operators/prometheous/1.14.8/alertmanager/
20 |
21 | # Deploy prometheus instance and all the service monitors for targets
22 | kubectl -n monitoring apply -f ./operators/prometheous/1.14.8/prometheus-cluster-monitoring/
23 |
24 | # Dashboarding
25 | kubectl -n monitoring create -f ./operators/prometheous/1.14.8/grafana/
26 |
27 | ```
28 |
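# Accessing the UIs

Once the components above are running, a quick way to reach the web UIs without an Ingress is `kubectl port-forward`. This is a minimal sketch assuming the default service names and ports from the manifests in this folder (prometheus-service on 9090, grafana on 3000, alertmanager-main on 9093); run each command in its own terminal.
```
# Prometheus UI -> http://localhost:9090
kubectl -n monitoring port-forward svc/prometheus-service 9090:9090

# Grafana UI -> http://localhost:3000
kubectl -n monitoring port-forward svc/grafana 3000:3000

# Alertmanager UI -> http://localhost:9093
kubectl -n monitoring port-forward svc/alertmanager-main 9093:9093
```
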
29 | # Sources
30 |
31 | The source code for monitoring Kubernetes 1.14 comes from the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/tree/v0.3.0/manifests) v0.3.0 manifests.
32 |
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/alertmanager/alertmanager-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | alertmanager.yaml: Imdsb2JhbCI6CiAgInJlc29sdmVfdGltZW91dCI6ICI1bSIKInJlY2VpdmVycyI6Ci0gIm5hbWUiOiAibnVsbCIKInJvdXRlIjoKICAiZ3JvdXBfYnkiOgogIC0gImpvYiIKICAiZ3JvdXBfaW50ZXJ2YWwiOiAiNW0iCiAgImdyb3VwX3dhaXQiOiAiMzBzIgogICJyZWNlaXZlciI6ICJudWxsIgogICJyZXBlYXRfaW50ZXJ2YWwiOiAiMTJoIgogICJyb3V0ZXMiOgogIC0gIm1hdGNoIjoKICAgICAgImFsZXJ0bmFtZSI6ICJXYXRjaGRvZyIKICAgICJyZWNlaXZlciI6ICJudWxsIg==
4 | kind: Secret
5 | metadata:
6 | name: alertmanager-main
7 | namespace: monitoring
8 | type: Opaque
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/alertmanager/alertmanager-sericeaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: alertmanager-main
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/alertmanager/alertmanager-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: alertmanager-main
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: web
11 | port: 9093
12 | targetPort: web
13 | selector:
14 | alertmanager: main
15 | app: alertmanager
16 | sessionAffinity: ClientIP
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/alertmanager/alertmanager-servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: alertmanager
6 | name: alertmanager
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - interval: 30s
11 | port: web
12 | selector:
13 | matchLabels:
14 | alertmanager: main
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/alertmanager/alertmanager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Alertmanager
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: main
7 | namespace: monitoring
8 | spec:
9 | baseImage: quay.io/prometheus/alertmanager
10 | nodeSelector:
11 | kubernetes.io/os: linux
12 | replicas: 3
13 | securityContext:
14 | fsGroup: 2000
15 | runAsNonRoot: true
16 | runAsUser: 1000
17 | serviceAccountName: alertmanager-main
18 | version: v0.18.0
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/grafana/dashboard-nodeexporter-custom.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/operators/prometheous/1.15-1.17/grafana/dashboard-nodeexporter-custom.yaml
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/grafana/grafana-dashboardDatasources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0=
4 | kind: Secret
5 | metadata:
6 | name: grafana-datasources
7 | namespace: monitoring
8 | type: Opaque
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/grafana/grafana-dashboardSources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | dashboards.yaml: |-
4 | {
5 | "apiVersion": 1,
6 | "providers": [
7 | {
8 | "folder": "",
9 | "name": "0",
10 | "options": {
11 | "path": "/grafana-dashboard-definitions/0"
12 | },
13 | "orgId": 1,
14 | "type": "file"
15 | }
16 | ]
17 | }
18 | kind: ConfigMap
19 | metadata:
20 | name: grafana-dashboards
21 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/grafana/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: grafana
6 | name: grafana
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: http
11 | port: 3000
12 | targetPort: http
13 | selector:
14 | app: grafana
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/grafana/grafana-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/kube-state-metrics/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: kube-state-metrics
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: kube-state-metrics
9 | subjects:
10 | - kind: ServiceAccount
11 | name: kube-state-metrics
12 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/kube-state-metrics/role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | name: kube-state-metrics
5 | namespace: monitoring
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: Role
9 | name: kube-state-metrics
10 | subjects:
11 | - kind: ServiceAccount
12 | name: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/kube-state-metrics/role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | name: kube-state-metrics
5 | namespace: monitoring
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - pods
11 | verbs:
12 | - get
13 | - apiGroups:
14 | - extensions
15 | resourceNames:
16 | - kube-state-metrics
17 | resources:
18 | - deployments
19 | verbs:
20 | - get
21 | - update
22 | - apiGroups:
23 | - apps
24 | resourceNames:
25 | - kube-state-metrics
26 | resources:
27 | - deployments
28 | verbs:
29 | - get
30 | - update
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/kube-state-metrics/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: kube-state-metrics
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/kube-state-metrics/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kube-state-metrics
6 | name: kube-state-metrics
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | honorLabels: true
12 | interval: 30s
13 | port: https-main
14 | relabelings:
15 | - action: labeldrop
16 | regex: (pod|service|endpoint|namespace)
17 | scheme: https
18 | scrapeTimeout: 30s
19 | tlsConfig:
20 | insecureSkipVerify: true
21 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
22 | interval: 30s
23 | port: https-self
24 | scheme: https
25 | tlsConfig:
26 | insecureSkipVerify: true
27 | jobLabel: k8s-app
28 | selector:
29 | matchLabels:
30 | k8s-app: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/kube-state-metrics/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: kube-state-metrics
6 | name: kube-state-metrics
7 | namespace: monitoring
8 | spec:
9 | clusterIP: None
10 | ports:
11 | - name: https-main
12 | port: 8443
13 | targetPort: https-main
14 | - name: https-self
15 | port: 9443
16 | targetPort: https-self
17 | selector:
18 | app: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/node-exporter/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: node-exporter
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: node-exporter
9 | subjects:
10 | - kind: ServiceAccount
11 | name: node-exporter
12 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/node-exporter/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: node-exporter
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/node-exporter/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: node-exporter
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/node-exporter/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: node-exporter
6 | name: node-exporter
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | interval: 30s
12 | port: https
13 | relabelings:
14 | - action: replace
15 | regex: (.*)
16 | replacement: $1
17 | sourceLabels:
18 | - __meta_kubernetes_pod_node_name
19 | targetLabel: instance
20 | scheme: https
21 | tlsConfig:
22 | insecureSkipVerify: true
23 | jobLabel: k8s-app
24 | selector:
25 | matchLabels:
26 | k8s-app: node-exporter
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/node-exporter/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | k8s-app: node-exporter
6 | name: node-exporter
7 | namespace: monitoring
8 | spec:
9 | clusterIP: None
10 | ports:
11 | - name: https
12 | port: 9100
13 | targetPort: https
14 | selector:
15 | app: node-exporter
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: prometheus-k8s
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: prometheus-k8s
9 | subjects:
10 | - kind: ServiceAccount
11 | name: prometheus-k8s
12 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus-k8s
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - nodes/metrics
10 | verbs:
11 | - get
12 | - nonResourceURLs:
13 | - /metrics
14 | verbs:
15 | - get
16 | - apiGroups:
17 | - ""
18 | resources:
19 | - services
20 | - endpoints
21 | - pods
22 | verbs:
23 | - get
24 | - list
25 | - watch
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/prometheus.service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | name: prometheus-k8s
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: web
11 | port: 9090
12 | targetPort: web
13 | selector:
14 | app: prometheus
15 | prometheus: k8s
16 | sessionAffinity: ClientIP
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | name: k8s
7 | namespace: monitoring
8 | spec:
9 | alerting:
10 | alertmanagers:
11 | - name: alertmanager-main
12 | namespace: monitoring
13 | port: web
14 | baseImage: quay.io/prometheus/prometheus
15 | nodeSelector:
16 | kubernetes.io/os: linux
17 | podMonitorSelector: {}
18 | replicas: 2
19 | # resources:
20 | # requests:
21 | # memory: 400Mi
22 | ruleSelector:
23 | matchLabels:
24 | prometheus: k8s
25 | role: alert-rules
26 | securityContext:
27 | fsGroup: 2000
28 | runAsNonRoot: true
29 | runAsUser: 1000
30 | serviceAccountName: prometheus-k8s
31 | serviceMonitorSelector:
32 | matchExpressions:
33 | - key: k8s-app
34 | operator: In
35 | values:
36 | - node-exporter
37 | - kube-state-metrics
38 | - apiserver
39 | - kubelet
40 | version: v2.11.0
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus-k8s
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/servicemonitor-apiserver.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: apiserver
6 | name: kube-apiserver
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | interval: 30s
12 | metricRelabelings:
13 | - action: drop
14 | regex: etcd_(debugging|disk|request|server).*
15 | sourceLabels:
16 | - __name__
17 | - action: drop
18 | regex: apiserver_admission_controller_admission_latencies_seconds_.*
19 | sourceLabels:
20 | - __name__
21 | - action: drop
22 | regex: apiserver_admission_step_admission_latencies_seconds_.*
23 | sourceLabels:
24 | - __name__
25 | port: https
26 | scheme: https
27 | tlsConfig:
28 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
29 | serverName: kubernetes
30 | jobLabel: component
31 | namespaceSelector:
32 | matchNames:
33 | - default
34 | selector:
35 | matchLabels:
36 | component: apiserver
37 | provider: kubernetes
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-cluster-monitoring/servicemonitor-kubelet.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: kubelet
6 | name: kubelet
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
11 | honorLabels: true
12 | interval: 30s
13 | port: https-metrics
14 | relabelings:
15 | - sourceLabels:
16 | - __metrics_path__
17 | targetLabel: metrics_path
18 | scheme: https
19 | tlsConfig:
20 | insecureSkipVerify: true
21 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
22 | honorLabels: true
23 | interval: 30s
24 | metricRelabelings:
25 | - action: drop
26 | regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
27 | sourceLabels:
28 | - __name__
29 | path: /metrics/cadvisor
30 | port: https-metrics
31 | relabelings:
32 | - sourceLabels:
33 | - __metrics_path__
34 | targetLabel: metrics_path
35 | scheme: https
36 | tlsConfig:
37 | insecureSkipVerify: true
38 | jobLabel: k8s-app
39 | namespaceSelector:
40 | matchNames:
41 | - kube-system
42 | selector:
43 | matchLabels:
44 | k8s-app: kubelet
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-operator/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.34.0
8 | name: prometheus-operator
9 | roleRef:
10 | apiGroup: rbac.authorization.k8s.io
11 | kind: ClusterRole
12 | name: prometheus-operator
13 | subjects:
14 | - kind: ServiceAccount
15 | name: prometheus-operator
16 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-operator/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.34.0
8 | name: prometheus-operator
9 | rules:
10 | - apiGroups:
11 | - apiextensions.k8s.io
12 | resources:
13 | - customresourcedefinitions
14 | verbs:
15 | - '*'
16 | - apiGroups:
17 | - monitoring.coreos.com
18 | resources:
19 | - alertmanagers
20 | - prometheuses
21 | - prometheuses/finalizers
22 | - alertmanagers/finalizers
23 | - servicemonitors
24 | - podmonitors
25 | - prometheusrules
26 | verbs:
27 | - '*'
28 | - apiGroups:
29 | - apps
30 | resources:
31 | - statefulsets
32 | verbs:
33 | - '*'
34 | - apiGroups:
35 | - ""
36 | resources:
37 | - configmaps
38 | - secrets
39 | verbs:
40 | - '*'
41 | - apiGroups:
42 | - ""
43 | resources:
44 | - pods
45 | verbs:
46 | - list
47 | - delete
48 | - apiGroups:
49 | - ""
50 | resources:
51 | - services
52 | - services/finalizers
53 | - endpoints
54 | verbs:
55 | - get
56 | - create
57 | - update
58 | - delete
59 | - apiGroups:
60 | - ""
61 | resources:
62 | - nodes
63 | verbs:
64 | - list
65 | - watch
66 | - apiGroups:
67 | - ""
68 | resources:
69 | - namespaces
70 | verbs:
71 | - get
72 | - list
73 | - watch
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-operator/namepace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-operator/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.34.0
8 | name: prometheus-operator
9 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.15-1.17/prometheus-operator/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.34.0
8 | name: prometheus-operator
9 | namespace: monitoring
10 | spec:
11 | clusterIP: None
12 | ports:
13 | - name: http
14 | port: 8080
15 | targetPort: http
16 | selector:
17 | app.kubernetes.io/component: controller
18 | app.kubernetes.io/name: prometheus-operator
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/alertmanager/alertmanager-secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data: {}
3 | kind: Secret
4 | metadata:
5 | name: alertmanager-main
6 | namespace: monitoring
7 | stringData:
8 | alertmanager.yaml: |-
9 | "global":
10 | "resolve_timeout": "5m"
11 | "inhibit_rules":
12 | - "equal":
13 | - "namespace"
14 | - "alertname"
15 | "source_match":
16 | "severity": "critical"
17 | "target_match_re":
18 | "severity": "warning|info"
19 | - "equal":
20 | - "namespace"
21 | - "alertname"
22 | "source_match":
23 | "severity": "warning"
24 | "target_match_re":
25 | "severity": "info"
26 | "receivers":
27 | - "name": "Default"
28 | - "name": "Watchdog"
29 | - "name": "Critical"
30 | "route":
31 | "group_by":
32 | - "namespace"
33 | "group_interval": "5m"
34 | "group_wait": "30s"
35 | "receiver": "Default"
36 | "repeat_interval": "12h"
37 | "routes":
38 | - "match":
39 | "alertname": "Watchdog"
40 | "receiver": "Watchdog"
41 | - "match":
42 | "severity": "critical"
43 | "receiver": "Critical"
44 | type: Opaque
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/alertmanager/alertmanager-sericeaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: alertmanager-main
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/alertmanager/alertmanager-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: alertmanager-main
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: web
11 | port: 9093
12 | targetPort: web
13 | selector:
14 | alertmanager: main
15 | app: alertmanager
16 | sessionAffinity: ClientIP
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/alertmanager/alertmanager-servicemonitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | k8s-app: alertmanager
6 | name: alertmanager
7 | namespace: monitoring
8 | spec:
9 | endpoints:
10 | - interval: 30s
11 | port: web
12 | selector:
13 | matchLabels:
14 | alertmanager: main
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/alertmanager/alertmanager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Alertmanager
3 | metadata:
4 | labels:
5 | alertmanager: main
6 | name: main
7 | namespace: monitoring
8 | spec:
9 | image: quay.io/prometheus/alertmanager:v0.21.0
10 | nodeSelector:
11 | kubernetes.io/os: linux
12 | replicas: 3
13 | securityContext:
14 | fsGroup: 2000
15 | runAsNonRoot: true
16 | runAsUser: 1000
17 | serviceAccountName: alertmanager-main
18 | version: v0.21.0
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/grafana/dashboard-nodeexporter-custom.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/operators/prometheous/1.18.4/grafana/dashboard-nodeexporter-custom.yaml
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/grafana/grafana-dashboardDatasources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | datasources.yaml: ewogICAgImFwaVZlcnNpb24iOiAxLAogICAgImRhdGFzb3VyY2VzIjogWwogICAgICAgIHsKICAgICAgICAgICAgImFjY2VzcyI6ICJwcm94eSIsCiAgICAgICAgICAgICJlZGl0YWJsZSI6IGZhbHNlLAogICAgICAgICAgICAibmFtZSI6ICJwcm9tZXRoZXVzIiwKICAgICAgICAgICAgIm9yZ0lkIjogMSwKICAgICAgICAgICAgInR5cGUiOiAicHJvbWV0aGV1cyIsCiAgICAgICAgICAgICJ1cmwiOiAiaHR0cDovL3Byb21ldGhldXMtazhzLm1vbml0b3Jpbmcuc3ZjOjkwOTAiLAogICAgICAgICAgICAidmVyc2lvbiI6IDEKICAgICAgICB9CiAgICBdCn0=
4 | kind: Secret
5 | metadata:
6 | name: grafana-datasources
7 | namespace: monitoring
8 | type: Opaque
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/grafana/grafana-dashboardSources.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | dashboards.yaml: |-
4 | {
5 | "apiVersion": 1,
6 | "providers": [
7 | {
8 | "folder": "Default",
9 | "name": "0",
10 | "options": {
11 | "path": "/grafana-dashboard-definitions/0"
12 | },
13 | "orgId": 1,
14 | "type": "file"
15 | }
16 | ]
17 | }
18 | kind: ConfigMap
19 | metadata:
20 | name: grafana-dashboards
21 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/grafana/grafana-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: grafana
6 | name: grafana
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: http
11 | port: 3000
12 | targetPort: http
13 | selector:
14 | app: grafana
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/grafana/grafana-serviceAccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: grafana
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/kube-state-metrics/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.5
7 | name: kube-state-metrics
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: kube-state-metrics
12 | subjects:
13 | - kind: ServiceAccount
14 | name: kube-state-metrics
15 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/kube-state-metrics/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.5
7 | name: kube-state-metrics
8 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/kube-state-metrics/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.5
7 | k8s-app: kube-state-metrics
8 | name: kube-state-metrics
9 | namespace: monitoring
10 | spec:
11 | endpoints:
12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
13 | honorLabels: true
14 | interval: 30s
15 | port: https-main
16 | relabelings:
17 | - action: labeldrop
18 | regex: (pod|service|endpoint|namespace)
19 | scheme: https
20 | scrapeTimeout: 30s
21 | tlsConfig:
22 | insecureSkipVerify: true
23 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
24 | interval: 30s
25 | port: https-self
26 | scheme: https
27 | tlsConfig:
28 | insecureSkipVerify: true
29 | jobLabel: app.kubernetes.io/name
30 | selector:
31 | matchLabels:
32 | app.kubernetes.io/name: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/kube-state-metrics/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: kube-state-metrics
6 | app.kubernetes.io/version: 1.9.5
7 | name: kube-state-metrics
8 | namespace: monitoring
9 | spec:
10 | clusterIP: None
11 | ports:
12 | - name: https-main
13 | port: 8443
14 | targetPort: https-main
15 | - name: https-self
16 | port: 9443
17 | targetPort: https-self
18 | selector:
19 | app.kubernetes.io/name: kube-state-metrics
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/node-exporter/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: node-exporter
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: node-exporter
9 | subjects:
10 | - kind: ServiceAccount
11 | name: node-exporter
12 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/node-exporter/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: node-exporter
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/node-exporter/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: node-exporter
5 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/node-exporter/service-monitor.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: ServiceMonitor
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: node-exporter
6 | app.kubernetes.io/version: v0.18.1
7 | k8s-app: node-exporter
8 | name: node-exporter
9 | namespace: monitoring
10 | spec:
11 | endpoints:
12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
13 | interval: 15s
14 | port: https
15 | relabelings:
16 | - action: replace
17 | regex: (.*)
18 | replacement: $1
19 | sourceLabels:
20 | - __meta_kubernetes_pod_node_name
21 | targetLabel: instance
22 | scheme: https
23 | tlsConfig:
24 | insecureSkipVerify: true
25 | jobLabel: app.kubernetes.io/name
26 | selector:
27 | matchLabels:
28 | app.kubernetes.io/name: node-exporter
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/node-exporter/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: node-exporter
6 | app.kubernetes.io/version: v0.18.1
7 | name: node-exporter
8 | namespace: monitoring
9 | spec:
10 | clusterIP: None
11 | ports:
12 | - name: https
13 | port: 9100
14 | targetPort: https
15 | selector:
16 | app.kubernetes.io/name: node-exporter
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-cluster-monitoring/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: prometheus-k8s
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: prometheus-k8s
9 | subjects:
10 | - kind: ServiceAccount
11 | name: prometheus-k8s
12 | namespace: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-cluster-monitoring/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: prometheus-k8s
5 | rules:
6 | - apiGroups:
7 | - ""
8 | resources:
9 | - nodes/metrics
10 | verbs:
11 | - get
12 | - nonResourceURLs:
13 | - /metrics
14 | verbs:
15 | - get
16 | - apiGroups:
17 | - ""
18 | resources:
19 | - services
20 | - endpoints
21 | - pods
22 | verbs:
23 | - get
24 | - list
25 | - watch
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-cluster-monitoring/prometheus-alerts.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: PrometheusRule
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | role: alert-rules
7 | name: example-rule
8 | spec:
9 | groups:
10 | - name: example-rule
11 | rules:
12 | - alert: example-alert
13 | annotations:
14 | description: Memory on node {{ $labels.instance }} currently at {{ $value }}%
15 | is under pressure
16 | summary: Memory usage is under pressure, system may become unstable.
17 | expr: |
18 | 100 - ((node_memory_MemAvailable_bytes{job="node-exporter"} * 100) / node_memory_MemTotal_bytes{job="node-exporter"}) > 70
19 | for: 2m
20 | labels:
21 | severity: warning
22 |
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-cluster-monitoring/prometheus.service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | name: prometheus-k8s
7 | namespace: monitoring
8 | spec:
9 | ports:
10 | - name: web
11 | port: 9090
12 | targetPort: web
13 | selector:
14 | app: prometheus
15 | prometheus: k8s
16 | sessionAffinity: ClientIP
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-cluster-monitoring/prometheus.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: monitoring.coreos.com/v1
2 | kind: Prometheus
3 | metadata:
4 | labels:
5 | prometheus: k8s
6 | name: k8s
7 | namespace: monitoring
8 | spec:
9 | alerting:
10 | alertmanagers:
11 | - name: alertmanager-main
12 | namespace: monitoring
13 | port: web
14 | image: quay.io/prometheus/prometheus:v2.19.2
15 | nodeSelector:
16 | kubernetes.io/os: linux
17 | podMonitorNamespaceSelector: {}
18 | podMonitorSelector: {}
19 | replicas: 1
20 | resources:
21 | requests:
22 | memory: 400Mi
23 | externalLabels:
24 | cluster: docker-desktop
25 | serviceAccountName: prometheus-k8s
26 | version: v2.19.2
27 | ruleSelector:
28 | matchLabels:
29 | role: alert-rules
30 | prometheus: k8s
31 | securityContext:
32 | fsGroup: 2000
33 | runAsNonRoot: true
34 | runAsUser: 1000
35 | serviceMonitorSelector:
36 | matchExpressions:
37 | - key: k8s-app
38 | operator: In
39 | values:
40 | - node-exporter
41 | - kube-state-metrics
42 | - apiserver
43 | - kubelet
44 |
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-cluster-monitoring/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: prometheus-k8s
5 | namespace: monitoring
6 |
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-operator/cluster-role-binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.40.0
8 | name: prometheus-operator
9 | roleRef:
10 | apiGroup: rbac.authorization.k8s.io
11 | kind: ClusterRole
12 | name: prometheus-operator
13 | subjects:
14 | - kind: ServiceAccount
15 | name: prometheus-operator
16 | namespace: monitoring
17 |
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-operator/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.40.0
8 | name: prometheus-operator
9 | rules:
10 | - apiGroups:
11 | - monitoring.coreos.com
12 | resources:
13 | - alertmanagers
14 | - alertmanagers/finalizers
15 | - prometheuses
16 | - prometheuses/finalizers
17 | - thanosrulers
18 | - thanosrulers/finalizers
19 | - servicemonitors
20 | - podmonitors
21 | - prometheusrules
22 | verbs:
23 | - '*'
24 | - apiGroups:
25 | - apps
26 | resources:
27 | - statefulsets
28 | verbs:
29 | - '*'
30 | - apiGroups:
31 | - ""
32 | resources:
33 | - configmaps
34 | - secrets
35 | verbs:
36 | - '*'
37 | - apiGroups:
38 | - ""
39 | resources:
40 | - pods
41 | verbs:
42 | - list
43 | - delete
44 | - apiGroups:
45 | - ""
46 | resources:
47 | - services
48 | - services/finalizers
49 | - endpoints
50 | verbs:
51 | - get
52 | - create
53 | - update
54 | - delete
55 | - apiGroups:
56 | - ""
57 | resources:
58 | - nodes
59 | verbs:
60 | - list
61 | - watch
62 | - apiGroups:
63 | - ""
64 | resources:
65 | - namespaces
66 | verbs:
67 | - get
68 | - list
69 | - watch
70 | - apiGroups:
71 | - authentication.k8s.io
72 | resources:
73 | - tokenreviews
74 | verbs:
75 | - create
76 | - apiGroups:
77 | - authorization.k8s.io
78 | resources:
79 | - subjectaccessreviews
80 | verbs:
81 | - create
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-operator/namepace.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: monitoring
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-operator/service-account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.40.0
8 | name: prometheus-operator
9 |
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/prometheus-operator/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app.kubernetes.io/component: controller
6 | app.kubernetes.io/name: prometheus-operator
7 | app.kubernetes.io/version: v0.40.0
8 | name: prometheus-operator
9 | namespace: monitoring
10 | spec:
11 | clusterIP: None
12 | ports:
13 | - name: https
14 | port: 8443
15 | targetPort: https
16 | selector:
17 | app.kubernetes.io/component: controller
18 | app.kubernetes.io/name: prometheus-operator
--------------------------------------------------------------------------------
/operators/prometheous/1.18.4/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes 1.18.4 Monitoring Guide
2 |
3 | Create a cluster with [kind](https://kind.sigs.k8s.io/docs/user/quick-start/)
4 | ```
5 | kind create cluster --name prometheus --image kindest/node:v1.18.4
6 | ```
7 |
8 | ```
9 | kubectl create ns monitoring
10 |
11 | # Create the operator to instantiate all CRDs
12 | kubectl -n monitoring apply -f ./operators/prometheous/1.18.4/prometheus-operator/
13 |
14 | # Deploy monitoring components
15 | kubectl -n monitoring apply -f ./operators/prometheous/1.18.4/node-exporter/
16 | kubectl -n monitoring apply -f ./operators/prometheous/1.18.4/kube-state-metrics/
17 | kubectl -n monitoring apply -f ./operators/prometheous/1.18.4/alertmanager/
18 |
19 | # Deploy prometheus instance and all the service monitors for targets
20 | kubectl -n monitoring apply -f ./operators/prometheous/1.18.4/prometheus-cluster-monitoring/
21 |
22 | # Dashboarding
23 | kubectl -n monitoring create -f ./operators/prometheous/1.18.4/grafana/
24 |
25 | ```
26 |
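Once the manifests above are applied, a quick way to sanity-check the stack (using the prometheus-k8s Service defined in prometheus-cluster-monitoring/prometheus.service.yaml) is to port-forward it and inspect the targets page:

```
# wait for the pods to come up
kubectl -n monitoring get pods

# expose the Prometheus UI locally (service and port from prometheus.service.yaml)
kubectl -n monitoring port-forward svc/prometheus-k8s 9090:9090

# then open http://localhost:9090/targets and confirm the node-exporter,
# kube-state-metrics, apiserver and kubelet jobs are UP
```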
27 | # Sources
28 |
29 | The source code for monitoring Kubernetes 1.18.4 comes from the [kube-prometheus](https://github.com/prometheus-operator/kube-prometheus/tree/v0.6.0/manifests) v0.6.0 tree
--------------------------------------------------------------------------------
/operators/prometheous/readme.md:
--------------------------------------------------------------------------------
1 | # Kubernetes monitoring with Prometheus
2 |
3 | Kubernetes [1.14.8](./1.14.8/readme.md)
4 | Kubernetes [1.15-1.17](./1.15-1.17/readme.md)
5 | Kubernetes [1.18.4](./1.18.4/readme.md)
6 |
7 | ## Prometheus Overview
8 |
9 | Prometheus Overview and Architecture - Video : [here](https://youtu.be/5o37CGlNLr8)
10 | Source: [here](https://github.com/marcel-dempers/docker-development-youtube-series/releases/tag/prometheus-operator-1)
11 |
12 | ## Application Monitoring
13 |
14 | Monitoring Python code with Prometheus - Video : [here](https://youtu.be/HzEiRwJP6ag)
15 | Monitoring Nodejs code with Prometheus - Video : [here](https://youtu.be/m2zM3zOZl34)
16 | Monitoring C# code with Prometheus - Video : [here](https://youtu.be/o4tdSrFnkvw)
17 |
18 | Source: [here](https://github.com/marcel-dempers/docker-development-youtube-series/releases/tag/prometheus-operator-1)
19 |
20 | ## Prometheus Operator guide
21 |
22 | Video : [here](https://youtu.be/LQpmeb7idt8)
23 | Source Code tested on K8s version 1.14.8
24 | Source: [here](https://github.com/marcel-dempers/docker-development-youtube-series/releases/tag/prometheus-operator-1)
25 |
26 | ## Node Exporter guide
27 |
28 | Video : [here](https://youtu.be/1-tRiThpFrY)
29 | Source Code tested on K8s version 1.14.8
30 | Source: [here](https://github.com/marcel-dempers/docker-development-youtube-series/releases/tag/prometheus-node-exporter-1)
31 |
32 |
--------------------------------------------------------------------------------
/operators/python-application/dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7.3-alpine3.9 as prod
2 |
3 | RUN mkdir /app/
4 | WORKDIR /app/
5 |
6 | COPY ./src/requirements.txt /app/requirements.txt
7 | RUN pip install -r requirements.txt
8 |
9 | COPY ./src/ /app/
10 |
11 | ENV FLASK_APP=server.py
12 | CMD flask run -h 0.0.0.0 -p 5000
13 |
--------------------------------------------------------------------------------
/operators/python-application/src/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask == 1.0.3
2 | prometheus_client == 0.7.1
--------------------------------------------------------------------------------
/operators/python-application/src/server.py:
--------------------------------------------------------------------------------
1 | from flask import Response, Flask, request
2 | import prometheus_client
3 | from prometheus_client.core import CollectorRegistry
4 | from prometheus_client import Summary, Counter, Histogram, Gauge
5 | import time
6 |
7 | app = Flask(__name__)
8 |
9 | _INF = float("inf")
10 |
11 | graphs = {}
12 | graphs['c'] = Counter('python_request_operations_total', 'The total number of processed requests')
13 | graphs['h'] = Histogram('python_request_duration_seconds', 'Histogram for the duration in seconds.', buckets=(1, 2, 5, 6, 10, _INF))
14 |
15 | @app.route("/")
16 | def hello():
17 | start = time.time()
18 | graphs['c'].inc()
19 |
20 | time.sleep(0.600)
21 | end = time.time()
22 | graphs['h'].observe(end - start)
23 | return "Hello World!"
24 |
25 | @app.route("/metrics")
26 | def requests_count():
27 | res = []
28 | for k,v in graphs.items():
29 | res.append(prometheus_client.generate_latest(v))
30 | return Response(res, mimetype="text/plain")
31 |
32 |
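A minimal local smoke test for this app, assuming Docker is available and an example image tag of python-prometheus-demo:

```
# build from the dockerfile in this folder (note the lowercase file name)
docker build -f dockerfile -t python-prometheus-demo .

# run it and exercise the instrumented endpoint
docker run -d -p 5000:5000 python-prometheus-demo
curl http://localhost:5000/          # increments the counter and observes the histogram
curl http://localhost:5000/metrics   # python_request_* metrics in Prometheus text format
```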
--------------------------------------------------------------------------------
/pod-affinity/redis.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: redis-cache
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: store
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | app: store
14 | spec:
15 | affinity:
16 | podAntiAffinity:
17 | requiredDuringSchedulingIgnoredDuringExecution:
18 | - labelSelector:
19 | matchExpressions:
20 | - key: app
21 | operator: In
22 | values:
23 | - store
24 | topologyKey: "kubernetes.io/hostname"
25 | containers:
26 | - name: redis-server
27 | image: redis:3.2-alpine
28 |
--------------------------------------------------------------------------------
/pod-affinity/web-server.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: web-server
5 | spec:
6 | selector:
7 | matchLabels:
8 | app: web-store
9 | replicas: 2
10 | template:
11 | metadata:
12 | labels:
13 | app: web-store
14 | spec:
15 | affinity:
16 | podAntiAffinity:
17 | requiredDuringSchedulingIgnoredDuringExecution:
18 | - labelSelector:
19 | matchExpressions:
20 | - key: app
21 | operator: In
22 | values:
23 | - web-store
24 | topologyKey: "kubernetes.io/hostname"
25 | podAffinity:
26 | requiredDuringSchedulingIgnoredDuringExecution:
27 | - labelSelector:
28 | matchExpressions:
29 | - key: app
30 | operator: In
31 | values:
32 | - store
33 | topologyKey: "kubernetes.io/hostname"
34 | containers:
35 | - name: web-app
36 | image: nginx:1.16-alpine
37 |
--------------------------------------------------------------------------------
/pod-distruption-budget/nginx-deploy.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: nginx
6 | name: nginx-deploy
7 | spec:
8 | replicas: 4
9 | selector:
10 | matchLabels:
11 | run: nginx
12 | template:
13 | metadata:
14 | labels:
15 | run: nginx
16 | spec:
17 | containers:
18 | - image: nginx
19 | name: nginx
20 |
--------------------------------------------------------------------------------
/pod-distruption-budget/pdb.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodDisruptionBudget
3 | metadata:
4 | name: pdbdemo
5 | spec:
6 | minAvailable: 2
7 | selector:
8 | matchLabels:
9 | run: nginx
10 |
11 |
--------------------------------------------------------------------------------
/pod-security-policy/nginx-deployment-kubesystem.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | namespace: kube-system
6 | labels:
7 | app: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | containers:
19 | - name: nginx
20 | image: nginx:1.15.4
21 |
--------------------------------------------------------------------------------
/pod-security-policy/nginx-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-deployment
5 | namespace: default
6 | labels:
7 | app: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | containers:
19 | - name: nginx
20 | image: nginx:1.15.4
21 |
--------------------------------------------------------------------------------
/pod-security-policy/nginx-hostnetwork-deployment-kubesystem.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-hostnetwork-deployment
5 | namespace: kube-system
6 | labels:
7 | app: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | containers:
19 | - name: nginx
20 | image: nginx:1.15.4
21 | hostNetwork: true
22 |
--------------------------------------------------------------------------------
/pod-security-policy/nginx-hostnetwork-deployment-sa.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-hostnetwork-deployment
5 | namespace: default
6 | labels:
7 | app: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | containers:
19 | - name: nginx
20 | image: nginx:1.15.4
21 | hostNetwork: true
22 | serviceAccount: specialsa
23 |
--------------------------------------------------------------------------------
/pod-security-policy/nginx-hostnetwork-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: nginx-hostnetwork-deployment
5 | namespace: default
6 | labels:
7 | app: nginx
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | app: nginx
13 | template:
14 | metadata:
15 | labels:
16 | app: nginx
17 | spec:
18 | containers:
19 | - name: nginx
20 | image: nginx:1.15.4
21 | hostNetwork: true
22 |
--------------------------------------------------------------------------------
/pod-security-policy/psp-permissive-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: psp-permissive
5 | rules:
6 | - apiGroups:
7 | - extensions
8 | resources:
9 | - podsecuritypolicies
10 | resourceNames:
11 | - permissive
12 | verbs:
13 | - use
14 |
--------------------------------------------------------------------------------
/pod-security-policy/psp-permissive-rolebinding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: RoleBinding
3 | metadata:
4 | name: psp-permissive
5 | namespace: kube-system
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: psp-permissive
10 | subjects:
11 | - kind: ServiceAccount
12 | name: daemon-set-controller
13 | namespace: kube-system
14 | - kind: ServiceAccount
15 | name: replicaset-controller
16 | namespace: kube-system
17 | - kind: ServiceAccount
18 | name: job-controller
19 | namespace: kube-system
20 |
--------------------------------------------------------------------------------
/pod-security-policy/psp-permissive.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: permissive
5 | spec:
6 | privileged: true
7 | hostNetwork: true
8 | hostIPC: true
9 | hostPID: true
10 | seLinux:
11 | rule: RunAsAny
12 | supplementalGroups:
13 | rule: RunAsAny
14 | runAsUser:
15 | rule: RunAsAny
16 | fsGroup:
17 | rule: RunAsAny
18 | hostPorts:
19 | - min: 0
20 | max: 65535
21 | volumes:
22 | - '*'
23 |
--------------------------------------------------------------------------------
/pod-security-policy/psp-restrictive-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: psp-restrictive
5 | rules:
6 | - apiGroups:
7 | - extensions
8 | resources:
9 | - podsecuritypolicies
10 | resourceNames:
11 | - restrictive
12 | verbs:
13 | - use
14 |
--------------------------------------------------------------------------------
/pod-security-policy/psp-restrictive-rolebinding.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: psp-default
5 | subjects:
6 | - kind: Group
7 | name: system:serviceaccounts
8 | namespace: kube-system
9 | roleRef:
10 | kind: ClusterRole
11 | name: psp-restrictive
12 | apiGroup: rbac.authorization.k8s.io
13 |
--------------------------------------------------------------------------------
/pod-security-policy/psp-restrictive.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: restrictive
5 | spec:
6 | privileged: false
7 | hostNetwork: false
8 | allowPrivilegeEscalation: false
9 | defaultAllowPrivilegeEscalation: false
10 | hostPID: false
11 | hostIPC: false
12 | runAsUser:
13 | rule: RunAsAny
14 | fsGroup:
15 | rule: RunAsAny
16 | seLinux:
17 | rule: RunAsAny
18 | supplementalGroups:
19 | rule: RunAsAny
20 | volumes:
21 | - 'configMap'
22 | - 'downwardAPI'
23 | - 'emptyDir'
24 | - 'persistentVolumeClaim'
25 | - 'secret'
26 | - 'projected'
27 | allowedCapabilities:
28 | - '*'
29 |
--------------------------------------------------------------------------------
/pod-security-policy/psp/README.md:
--------------------------------------------------------------------------------
1 | kubectl -n kube-system create rolebinding permissive-rolebinding --clusterrole=permissive-cr --group=system:authenticated
2 |
3 | kubectl -n psp-test create rolebinding restrictive-rolebinding --clusterrole=restrictive-cr --group=system:authenticated
4 |
5 |
6 | Then update the kube-apiserver configuration in /etc/kubernetes/manifests/kube-apiserver.yaml:
7 |
8 | ```
9 | add PodSecurityPolicy to the admission plugins flag, i.e. - --enable-admission-plugins=NodeRestriction,PodSecurityPolicy
10 |
11 | ```
12 |
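Once the PodSecurityPolicy admission plugin is enabled, a quick way to see which policy was applied to a pod is the kubernetes.io/psp annotation set by the admission controller (the pod name below is a placeholder):

```
kubectl -n psp-test get pod <pod-name> \
  -o jsonpath='{.metadata.annotations.kubernetes\.io/psp}'
```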
--------------------------------------------------------------------------------
/pod-security-policy/psp/permissive-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: permissive-cr
5 | rules:
6 | - apiGroups: ['policy']
7 | resources: ['podsecuritypolicies']
8 | verbs: ['use']
9 | resourceNames:
10 | - permissive
11 |
--------------------------------------------------------------------------------
/pod-security-policy/psp/permissive-psp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: permissive
5 | spec:
6 | privileged: true
7 | seLinux:
8 | rule: RunAsAny
9 | supplementalGroups:
10 | rule: RunAsAny
11 | runAsUser:
12 | rule: RunAsAny
13 | fsGroup:
14 | rule: RunAsAny
15 | volumes:
16 | - '*'
17 |
--------------------------------------------------------------------------------
/pod-security-policy/psp/privileged-pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: traffic-generator
5 | spec:
6 | hostPID: true
7 | containers:
8 | - name: alpine
9 | image: alpine
10 | securityContext:
11 | privileged: true
12 | args:
13 | - sleep
14 | - "100000000"
15 |
--------------------------------------------------------------------------------
/pod-security-policy/psp/restrictive-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: restrictive-cr
5 | rules:
6 | - apiGroups: ['policy']
7 | resources: ['podsecuritypolicies']
8 | verbs: ['use']
9 | resourceNames:
10 | - restrictive
11 |
--------------------------------------------------------------------------------
/pod-security-policy/psp/restrictive-psp.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: policy/v1beta1
2 | kind: PodSecurityPolicy
3 | metadata:
4 | name: restrictive
5 | spec:
6 | privileged: false
7 | seLinux:
8 | rule: RunAsAny
9 | supplementalGroups:
10 | rule: RunAsAny
11 | runAsUser:
12 | rule: RunAsAny
13 | fsGroup:
14 | rule: RunAsAny
15 | volumes:
16 | - '*'
17 |
--------------------------------------------------------------------------------
/pod-security-policy/specialsa-psp-permissive.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1beta1
2 | kind: RoleBinding
3 | metadata:
4 | name: specialsa-psp-permissive
5 | namespace: default
6 | roleRef:
7 | apiGroup: rbac.authorization.k8s.io
8 | kind: ClusterRole
9 | name: psp-permissive
10 | subjects:
11 | - kind: ServiceAccount
12 | name: specialsa
13 | namespace: default
14 |
--------------------------------------------------------------------------------
/private-registry/hosts:
--------------------------------------------------------------------------------
1 | [kubernetes]
2 | 34.125.37.127
3 | 34.125.222.96
--------------------------------------------------------------------------------
/private-registry/private-registry.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: kubernetes
3 | become: true
4 | vars:
5 | - nexus_machine_ip: 34.125.138.204
6 | - nexus_docker_registry: 34.125.138.204:8085
7 | - nexus_user_name: admin
8 | - nexus_user_password: admin
9 | tasks:
10 | - name: update daemon json
11 | template:
12 | src: daemon.json
13 | dest: /etc/docker/daemon.json
14 |
15 | - name: restart docker
16 | service:
17 | name: docker
18 | state: restarted
19 |
20 | - name: docker login to private registry
21 | shell: docker login -u {{ nexus_user_name }} -p {{ nexus_user_password }} {{ nexus_machine_ip }}
22 |
23 | - name: Create a directory if it does not exist
24 | file:
25 | path: /etc/docker/certs.d/{{ nexus_machine_ip }}
26 | state: directory
27 |
28 | - name: copy app.crt of nginx
29 | template:
30 | src: app.crt
31 | dest: /etc/docker/certs.d/{{ nexus_machine_ip }}/
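A usage sketch for this playbook, assuming the inventory file in /private-registry/hosts and SSH access to the listed nodes (the SSH user is a placeholder):

```
# run against the [kubernetes] group defined in the hosts file
ansible-playbook -i hosts private-registry.yaml -u <ssh-user>
```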
--------------------------------------------------------------------------------
/private-registry/templates/daemon.json:
--------------------------------------------------------------------------------
1 | {
2 | "exec-opts": ["native.cgroupdriver=systemd"],
3 | "log-driver": "json-file",
4 | "log-opts": {
5 | "max-size": "100m"
6 | },
7 | "storage-driver": "overlay2",
8 | "insecure-registries":[" {{ nexus_machine_ip }}"]
9 | }
--------------------------------------------------------------------------------
/scalling-jenkins/master-Persistent/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM jenkins/jenkins:alpine
2 |
3 | # Distributed Builds plugins
4 | RUN /usr/local/bin/install-plugins.sh ssh-slaves
5 |
6 | #Install GIT
7 | RUN /usr/local/bin/install-plugins.sh git
8 |
9 | # Artifacts
10 | RUN /usr/local/bin/install-plugins.sh htmlpublisher
11 |
12 | # UI
13 | RUN /usr/local/bin/install-plugins.sh greenballs
14 | RUN /usr/local/bin/install-plugins.sh simple-theme-plugin
15 |
16 | # Scaling
17 | RUN /usr/local/bin/install-plugins.sh kubernetes
18 |
19 | VOLUME /var/jenkins_home
20 |
21 | USER jenkins
22 |
--------------------------------------------------------------------------------
/scalling-jenkins/master-Persistent/build.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | VERSION=$1
3 | if [ -z "$VERSION" ]
4 | then
5 | VERSION="latest"
6 | fi
7 | DOCKER_IMAGE=keaz/jenkins-master:${VERSION}
8 | PROJECT_PATH="$( cd "$(dirname "$0")" ; pwd -P )"
9 | tput setaf 7;
10 | tput bold setaf 1; echo "Create image using ${VERSION} tag"
11 | tput sgr0;
12 | docker build -t ${DOCKER_IMAGE} -f "${PROJECT_PATH}/Dockerfile" ${PROJECT_PATH}
13 |
14 | exit 0
15 |
--------------------------------------------------------------------------------
/scalling-jenkins/master-Persistent/cluster-role.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRole
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | namespace: default
5 | name: service-reader
6 | rules:
7 | - apiGroups: [""] # "" indicates the core API group
8 | resources: ["services"]
9 | verbs: ["get", "watch", "list"]
10 |
--------------------------------------------------------------------------------
/scalling-jenkins/master-Persistent/jenkins-pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: jenkins-volume
5 | labels:
6 | type: local
7 | spec:
8 | storageClassName: manual
9 | capacity:
10 | storage: 10Gi
11 | accessModes:
12 | - ReadWriteOnce
13 | hostPath:
14 | path: "/home/deekshithsn/jenkins"
15 |
--------------------------------------------------------------------------------
/scalling-jenkins/master-Persistent/jenkins-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: jenkins-volume-claim
5 | spec:
6 | storageClassName: manual
7 | accessModes:
8 | - ReadWriteOnce
9 | resources:
10 | requests:
11 | storage: 3Gi
12 |
--------------------------------------------------------------------------------
/scalling-jenkins/master/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM jenkins/jenkins:alpine
2 |
3 | # Distributed Builds plugins
4 | RUN /usr/local/bin/install-plugins.sh ssh-slaves
5 |
6 | #Install GIT
7 | RUN /usr/local/bin/install-plugins.sh git
8 |
9 | # Artifacts
10 | RUN /usr/local/bin/install-plugins.sh htmlpublisher
11 |
12 | # UI
13 | RUN /usr/local/bin/install-plugins.sh greenballs
14 | RUN /usr/local/bin/install-plugins.sh simple-theme-plugin
15 |
16 | # Scaling
17 | RUN /usr/local/bin/install-plugins.sh kubernetes
18 |
19 | VOLUME /var/jenkins_home
20 |
21 | USER jenkins
22 |
--------------------------------------------------------------------------------
/scalling-jenkins/master/jenkins-dep.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: "jenkins-master-deployment"
5 | labels:
6 | app: jenkins-master
7 | version: "latest"
8 | group: "jenkins"
9 | namespace: "jenkins"
10 | spec:
11 | replicas: 1
12 | selector:
13 | matchLabels:
14 | app: "jenkins-master"
15 | version: "latest"
16 | group: "jenkins"
17 | template:
18 | metadata:
19 | labels:
20 | app: jenkins-master
21 | version: "latest"
22 | group: "jenkins"
23 | spec:
24 | containers:
25 | - name: "jenkins-master"
26 | image: "keaz/jenkins-master:alpine"
27 | imagePullPolicy: "IfNotPresent"
28 |
--------------------------------------------------------------------------------
/scalling-jenkins/master/jenkins-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "v1"
2 | kind: "Service"
3 | metadata:
4 | labels:
5 | app: "jenkins-master"
6 | version: "latest"
7 | group: "jenkins"
8 | namespace: "jenkins"
9 | name: "jenkins-master-service"
10 | spec:
11 | ports:
12 | - name: "http"
13 | port: 80
14 | targetPort: 8080
15 | - name: "jnlp"
16 | port: 50000
17 | targetPort: 50000
18 | selector:
19 | app: "jenkins-master"
20 | version: "latest"
21 | group: "jenkins"
22 | type: "NodePort"
23 |
--------------------------------------------------------------------------------
/secrets/command.txt:
--------------------------------------------------------------------------------
1 | kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret
2 |
--------------------------------------------------------------------------------
/secrets/database.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: postgres
5 | spec:
6 | replicas: 1
7 | selector:
8 | matchLabels:
9 | app: postgres
10 | template:
11 | metadata:
12 | labels:
13 | app: postgres
14 | spec:
15 | containers:
16 | - name: postgres
17 | image: postgres:9.6.5
18 | ports:
19 | - containerPort: 5432
20 | env:
21 | - name: POSTGRES_DB
22 | valueFrom:
23 | secretKeyRef:
24 | name: database-secret-config
25 | key: dbname
26 | - name: POSTGRES_USER
27 | valueFrom:
28 | secretKeyRef:
29 | name: database-secret-config
30 | key: username
31 | - name: POSTGRES_PASSWORD
32 | valueFrom:
33 | secretKeyRef:
34 | name: database-secret-config
35 | key: password
36 |
--------------------------------------------------------------------------------
/secrets/secret.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: database-secret-config
5 | type: Opaque
6 | data:
7 | dbname: dXJsX3Nob3J0ZW5lcl9kYg==
8 | username: dXNlcg==
9 | password: bXlzZWNyZXRwYXNzd29yZA==
10 |
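For reference, the base64 values above decode to plain strings; a short sketch of how to produce and read them back:

```
# encode values for the manifest (printf avoids a trailing newline)
printf 'user' | base64               # dXNlcg==
printf 'mysecretpassword' | base64   # bXlzZWNyZXRwYXNzd29yZA==

# decode a value straight from the cluster
kubectl get secret database-secret-config \
  -o jsonpath='{.data.password}' | base64 -d
```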
--------------------------------------------------------------------------------
/services/cluster-ip/app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | replicas: 5
9 | selector:
10 | matchLabels:
11 | app: my-app
12 | template:
13 | metadata:
14 | labels:
15 | app: my-app
16 | version: v1.0.0
17 | spec:
18 | containers:
19 | - name: my-app
20 | image: deekshithsn/k8s-deployment-strategies
21 | ports:
22 | - name: http
23 | containerPort: 8080
24 | - name: probe
25 | containerPort: 8086
26 | env:
27 | - name: VERSION
28 | value: v1.0.0
29 | livenessProbe:
30 | httpGet:
31 | path: /live
32 | port: probe
33 | initialDelaySeconds: 5
34 | periodSeconds: 5
35 | readinessProbe:
36 | httpGet:
37 | path: /ready
38 | port: probe
39 | periodSeconds: 5
40 |
--------------------------------------------------------------------------------
/services/cluster-ip/command.txt:
--------------------------------------------------------------------------------
1 | kubectl run client --image=ubuntu -i -t --rm --restart=Never
2 |
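From inside that client pod, the ClusterIP service can be reached by its DNS name (a sketch; curl is not in the stock ubuntu image, so install it first):

```
apt-get update && apt-get install -y curl
curl http://my-app        # service name from service.yaml, port 80
```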
--------------------------------------------------------------------------------
/services/cluster-ip/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | ports:
9 | - name: http
10 | port: 80
11 | targetPort: http
12 | selector:
13 | app: my-app
14 |
--------------------------------------------------------------------------------
/services/loadbalancer/app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | replicas: 5
9 | selector:
10 | matchLabels:
11 | app: my-app
12 | template:
13 | metadata:
14 | labels:
15 | app: my-app
16 | version: v1.0.0
17 | spec:
18 | containers:
19 | - name: my-app
20 | image: deekshithsn/k8s-deployment-strategies
21 | ports:
22 | - name: http
23 | containerPort: 8080
24 | - name: probe
25 | containerPort: 8086
26 | env:
27 | - name: VERSION
28 | value: v1.0.0
29 | livenessProbe:
30 | httpGet:
31 | path: /live
32 | port: probe
33 | initialDelaySeconds: 5
34 | periodSeconds: 5
35 | readinessProbe:
36 | httpGet:
37 | path: /ready
38 | port: probe
39 | periodSeconds: 5
40 |
--------------------------------------------------------------------------------
/services/loadbalancer/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: my-app
5 | labels:
6 | app: my-app
7 | spec:
8 | type: LoadBalancer
9 | ports:
10 | - name: http
11 | port: 80
12 | targetPort: http
13 | selector:
14 | app: my-app
15 |
--------------------------------------------------------------------------------
/services/mysql-headless-service/commands.txt:
--------------------------------------------------------------------------------
1 | kubectl run mysql-client --image=mysql:5.7 -i --rm --restart=Never -- \
2 | mysql -h mysql-0.mysql < health.txt
3 |
--------------------------------------------------------------------------------
/special-usecase/out-of-rotation/app.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: "react-ui"
6 | name: "ui"
7 | spec:
8 | ports:
9 | - name: "http"
10 | port: 8080
11 | targetPort: 8080
12 | selector:
13 | app: "react-ui"
14 | type: "NodePort"
15 | ---
16 | apiVersion: apps/v1
17 | kind: Deployment
18 | metadata:
19 | name: "react-ui"
20 | labels:
21 | app: react-ui
22 | spec:
23 | replicas: 3
24 | selector:
25 | matchLabels:
26 | app: "react-ui"
27 | template:
28 | metadata:
29 | labels:
30 | app: react-ui
31 | spec:
32 | containers:
33 | - name: "react-ui"
34 | image: "deekshithsn/rotation"
35 | imagePullPolicy: "IfNotPresent"
36 | ports:
37 | - containerPort: 8080
38 | readinessProbe:
39 | exec:
40 | command:
41 | - cat
42 | - /app/health.txt
43 | initialDelaySeconds: 5
44 | periodSeconds: 5
45 | successThreshold: 1
46 | failureThreshold: 3
47 | timeoutSeconds: 1
48 |
--------------------------------------------------------------------------------
/special-usecase/prometheus-https-json/output.json:
--------------------------------------------------------------------------------
1 | {
2 | "cart": true,
3 | "ui": false,
4 | "login": true,
5 | "home": true,
6 | "offers": false
7 | }
8 |
--------------------------------------------------------------------------------
/special-usecase/prometheus-https-json/prometheus.txt:
--------------------------------------------------------------------------------
1 | docker run -d --name prometheus -p 9090:9090 prom/prometheus
2 |
--------------------------------------------------------------------------------
/taint-tolerations/README.md:
--------------------------------------------------------------------------------
1 | - kubectl taint nodes node1 dedicated=devs:NoSchedule
2 | - kubectl taint nodes node-2 dedicated=tests:NoSchedule
3 |
4 | - kubectl label nodes node1 dedicated=devs
5 | - kubectl label nodes node-2 dedicated=devs
6 |
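To confirm the taints and labels landed where expected (node names as used above):

```
kubectl describe node node1 | grep -i taint
kubectl get nodes --show-labels | grep dedicated
```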
--------------------------------------------------------------------------------
/taint-tolerations/pod-normal.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | labels:
6 | env: test
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx
11 | imagePullPolicy: IfNotPresent
12 |
--------------------------------------------------------------------------------
/taint-tolerations/pod.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: nginx
5 | labels:
6 | env: test
7 | spec:
8 | containers:
9 | - name: nginx
10 | image: nginx
11 | imagePullPolicy: IfNotPresent
12 | tolerations:
13 | - key: "node-role.kubernetes.io/master"
14 | operator: "Exists"
15 | effect: "NoSchedule"
16 | nodeSelector:
17 | kubernetes.io/hostname: kubernetes-master
18 |
--------------------------------------------------------------------------------
/taint-tolerations/taint_affinity.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: pod-test
5 | spec:
6 | affinity:
7 | nodeAffinity:
8 | requiredDuringSchedulingIgnoredDuringExecution:
9 | nodeSelectorTerms:
10 | - matchExpressions:
11 | - key: dedicated
12 | operator: In
13 | values:
14 | - devs
15 | tolerations:
16 | - key: "dedicated"
17 | operator: "Equal"
18 | value: "devs"
19 | effect: "NoSchedule"
20 | containers:
21 | - name: just-container
22 | image: supergiantkir/animals:bear
23 |
--------------------------------------------------------------------------------
/volume/emptyDir/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: ambassador-pod
5 | labels:
6 | app: ambassador-app
7 | spec:
8 | volumes:
9 | - name: shared
10 | emptyDir: {}
11 | containers:
12 | - name: app-container-poller
13 | image: yauritux/busybox-curl
14 | command: ["/bin/sh"]
15 | args: ["-c", "while true; do curl 127.0.0.1:81 > /usr/share/nginx/html/index.html; sleep 10; done"]
16 | volumeMounts:
17 | - name: shared
18 | mountPath: /usr/share/nginx/html
19 | - name: app-container-server
20 | image: nginx
21 | ports:
22 | - containerPort: 80
23 | volumeMounts:
24 | - name: shared
25 | mountPath: /usr/share/nginx/html
26 | - name: ambassador-container
27 | image: bharamicrosystems/nginx-forward-proxy
28 | ports:
29 | - containerPort: 81
30 |
--------------------------------------------------------------------------------
/volume/hostpath/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-pd
5 | spec:
6 | containers:
7 | - image: k8s.gcr.io/test-webserver
8 | name: test-container
9 | volumeMounts:
10 | - mountPath: /test-pd
11 | name: test-volume
12 | volumes:
13 | - name: test-volume
14 | hostPath:
15 | # directory location on host
16 | path: /data
17 | # this field is optional
18 | type: DirectoryOrCreate
19 |
--------------------------------------------------------------------------------
/volume/nfs-pv/README.md:
--------------------------------------------------------------------------------
1 | Useful Links:
2 |
3 | How to Install and Configure an NFS Server on Ubuntu 18.04 : https://www.tecmint.com/install-nfs-server-on-ubuntu/
4 |
5 | ## NFS Server :
6 |
7 | ```
8 | - sudo apt update
9 | - sudo apt install nfs-kernel-server
10 | - sudo mkdir -p /mnt/nfs_share
11 | - sudo chown -R nobody:nogroup /mnt/nfs_share/
12 | - sudo vim /etc/exports
13 | - insert this line into /etc/exports
14 | /mnt/nfs_share *(rw,sync,no_subtree_check)
15 | if you face any security issues, use the below line instead
16 | /mnt/nfs_share *(rw,sync,no_subtree_check,insecure)
17 | - sudo exportfs -a
18 | - to check exports
19 | sudo exportfs -v or showmount -e
20 | - sudo systemctl restart nfs-kernel-server
21 | ```
22 | ## NFS Client ( in worker nodes ) :
23 |
24 | ```
25 | - sudo apt install nfs-common
26 | - showmount -e nfs-server-ip
27 | - To verify the mount
28 | mount -t nfs ipaddress:/mnt/nfs_share/ /mnt
29 | mount | grep nfs_share
30 | umount /mnt
31 |
32 | ```
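A short sketch to confirm the export is reachable and that the static PV/PVC pair in static-provision-deployment/ binds (replace the server IP placeholder in nfs-pv.yaml first):

```
showmount -e <nfs-server-ip>

kubectl apply -f static-provision-deployment/
kubectl get pv,pvc        # the PVC should show STATUS Bound
```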
--------------------------------------------------------------------------------
/volume/nfs-pv/static-provision-deployment/nfs-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: nginx
6 | name: nginx-deploy
7 | spec:
8 | replicas: 1
9 | selector:
10 | matchLabels:
11 | run: nginx
12 | template:
13 | metadata:
14 | labels:
15 | run: nginx
16 | spec:
17 | volumes:
18 | - name: www
19 | persistentVolumeClaim:
20 | claimName: pvc-nfs-pv1
21 | containers:
22 | - image: nginx
23 | name: nginx
24 | volumeMounts:
25 | - name: www
26 | mountPath: /usr/share/nginx/html
27 |
--------------------------------------------------------------------------------
/volume/nfs-pv/static-provision-deployment/nfs-pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: pv-nfs-pv1
5 | labels:
6 | type: local
7 | spec:
8 | storageClassName: manual
9 | capacity:
10 | storage: 1Gi
11 | accessModes:
12 | - ReadWriteMany
13 | nfs:
14 | server: nfs_server_ip
15 | path: "/mnt/nfs_share"
16 |
--------------------------------------------------------------------------------
/volume/nfs-pv/static-provision-deployment/nfs-pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: pvc-nfs-pv1
5 | spec:
6 | storageClassName: manual
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 500Mi
12 |
13 |
--------------------------------------------------------------------------------
/volume/volume-aws/dynamic-provision/command.txt:
--------------------------------------------------------------------------------
1 | eksctl utils associate-iam-oidc-provider --region=eu-central-1 --cluster=my-k8s-cluster --approve
--------------------------------------------------------------------------------
/volume/volume-aws/dynamic-provision/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: ebs-app
5 | spec:
6 | containers:
7 | - name: app
8 | image: nginx
9 | volumeMounts:
10 | - mountPath: "/usr/share/nginx/html"
11 | name: ebs-volume
12 | volumes:
13 | - name: ebs-volume
14 | persistentVolumeClaim:
15 | claimName: ebs-claim
16 |
--------------------------------------------------------------------------------
/volume/volume-aws/dynamic-provision/pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: ebs-claim
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | storageClassName: ebs-sc
9 | resources:
10 | requests:
11 | storage: 10Gi
12 |
--------------------------------------------------------------------------------
/volume/volume-aws/dynamic-provision/storageclass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: ebs-sc
5 | provisioner: kubernetes.io/aws-ebs
6 | volumeBindingMode: WaitForFirstConsumer
7 | parameters:
8 | type: gp3
9 | fsType: ext4
--------------------------------------------------------------------------------
/volume/volume-aws/static-provision/README.md:
--------------------------------------------------------------------------------
1 | 1. Create a volume in AWS (important: the EBS volume and the node it attaches to must be in the same availability zone; a VolumeId lookup sketch follows this list)
2 |
3 | ```
4 | aws ec2 create-volume \
5 | --availability-zone us-east-1c \
6 | --size 10 \
7 | --volume-type gp2 \
8 | --tag-specifications 'ResourceType=volume,Tags=[{Key=Name,Value=eks-static-volume}]'
9 | ```
10 |
11 | 2. Add the EBS CSI driver addon to the EKS cluster
12 |
13 | 3. Add the below permissions to the EKS node IAM role
14 |
15 | ```
16 | "ec2:AttachVolume",
17 | "ec2:DetachVolume",
18 | "ec2:DescribeVolumes",
19 | "ec2:DescribeInstances"
20 | ```
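To fill in the volumeID placeholder in pv.yaml, the volume created in step 1 can be looked up by its Name tag (a sketch, assuming the eks-static-volume tag used above):

```
aws ec2 describe-volumes \
  --filters "Name=tag:Name,Values=eks-static-volume" \
  --query "Volumes[0].VolumeId" --output text
```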
--------------------------------------------------------------------------------
/volume/volume-aws/static-provision/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-ebs-pod
5 | spec:
6 | containers:
7 | - name: app
8 | image: busybox
9 | command: [ "sleep", "3600" ]
10 | volumeMounts:
11 | - mountPath: "/data"
12 | name: ebs-volume
13 | affinity:
14 | nodeAffinity:
15 | requiredDuringSchedulingIgnoredDuringExecution:
16 | nodeSelectorTerms:
17 | - matchExpressions:
18 | - key: topology.kubernetes.io/zone
19 | operator: In
20 | values:
21 | - us-east-1c
22 | volumes:
23 | - name: ebs-volume
24 | persistentVolumeClaim:
25 | claimName: static-ebs-pvc
26 |
--------------------------------------------------------------------------------
/volume/volume-aws/static-provision/pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: static-ebs-pv
5 | spec:
6 | capacity:
7 | storage: 10Gi
8 | volumeMode: Filesystem
9 | accessModes:
10 | - ReadWriteOnce
11 | persistentVolumeReclaimPolicy: Retain
12 | storageClassName: manual
13 | awsElasticBlockStore:
14 | volumeID: # e.g., vol-0123456789abcdef0
15 | fsType: ext4
16 |
--------------------------------------------------------------------------------
/volume/volume-aws/static-provision/pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: static-ebs-pvc
5 | spec:
6 | accessModes:
7 | - ReadWriteOnce
8 | resources:
9 | requests:
10 | storage: 10Gi
11 | storageClassName: manual
12 |
--------------------------------------------------------------------------------
/volume/volume-gcp/README.md:
--------------------------------------------------------------------------------
1 | # If you want to use a GCP disk as your volume type, follow the below steps before creating a PV or PVC
2 |
3 |
4 | **Assumptions**
5 | - kubernetes installation type is kubeadm
6 |
7 | ### Steps
8 |
9 | - The Kubernetes cloud-config file needs to be configured. It lives at /etc/kubernetes/cloud-config (create it if it does not exist) and must be present on all nodes; the following content is enough to get the cloud provider working:
10 |
11 | ```
12 | [Global]
13 | project-id = ""
14 | ```
15 | To get the project ID, open the "Select a project" dropdown in the GCP console; the project ID is shown next to the project name.
16 |
17 |
18 | - add "--cloud-provider=gce" to /etc/systemd/system/kubelet.service.d/10-kubeadm.conf on all nodes,
19 | so that the line reads:
20 | ```
21 | Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cloud-provider=gce"
22 | ```
23 | then restart the kubelet by ```systemctl restart kubelet```
24 |
25 | - edit the api-server and controller-manager manifests under /etc/kubernetes/manifests and add ```--cloud-provider=gce``` as an argument (a quick check is sketched below)
26 |
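A quick check that the flag is present everywhere it is needed (file locations as referenced above; the manifest file names are the kubeadm defaults):

```
grep -- "--cloud-provider=gce" \
  /etc/systemd/system/kubelet.service.d/10-kubeadm.conf \
  /etc/kubernetes/manifests/kube-apiserver.yaml \
  /etc/kubernetes/manifests/kube-controller-manager.yaml
```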
27 | ### If you don't follow these steps, you might end up with the below error
28 |
29 | 
30 |
31 |
32 |
--------------------------------------------------------------------------------
/volume/volume-gcp/dynamic-provision/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | labels:
5 | run: nginx
6 | name: nginx-deploy
7 | spec:
8 | replicas: 3
9 | selector:
10 | matchLabels:
11 | run: nginx
12 | template:
13 | metadata:
14 | labels:
15 | run: nginx
16 | spec:
17 | volumes:
18 | - name: www
19 | persistentVolumeClaim:
20 | claimName: mypvc
21 | containers:
22 | - image: nginx
23 | name: nginx
24 | volumeMounts:
25 | - name: www
26 | mountPath: /usr/share/nginx/html
27 |
--------------------------------------------------------------------------------
/volume/volume-gcp/dynamic-provision/pod.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/volume/volume-gcp/dynamic-provision/pod.yaml
--------------------------------------------------------------------------------
/volume/volume-gcp/dynamic-provision/pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: mypvc
5 | labels:
6 | app: mysql
7 | tier: database
8 | spec:
9 | storageClassName: "standard"
10 | accessModes:
11 | - ReadWriteOnce
12 | resources:
13 | requests:
14 | storage: 5Gi
15 | #volumeName: pv-vol1
16 |
--------------------------------------------------------------------------------
/volume/volume-gcp/dynamic-provision/storageclass.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: standard
5 | provisioner: kubernetes.io/gce-pd
6 | parameters:
7 | type: pd-standard
8 | fstype: ext4
9 | replication-type: none
10 | reclaimPolicy: Retain
11 | allowVolumeExpansion: true
12 | mountOptions:
13 | - debug
14 | volumeBindingMode: WaitForFirstConsumer
15 |
--------------------------------------------------------------------------------
/volume/volume-gcp/static-provision/deployment/README.md:
--------------------------------------------------------------------------------
1 | In case of static provisioning, the disk has to be created before creating the pod or deployment
2 |
3 | 
4 |
--------------------------------------------------------------------------------
/volume/volume-gcp/static-provision/deployment/deployment.yaml:
--------------------------------------------------------------------------------
1 |
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | run: nginx
7 | name: nginx-deploy
8 | spec:
9 | replicas: 1
10 | selector:
11 | matchLabels:
12 | run: nginx
13 | template:
14 | metadata:
15 | labels:
16 | run: nginx
17 | spec:
18 | volumes:
19 | - name: www
20 | persistentVolumeClaim:
21 | claimName: mypvc
22 | containers:
23 | - image: nginx
24 | name: nginx
25 | volumeMounts:
26 | - name: www
27 | mountPath: /usr/share/nginx/html
28 |
--------------------------------------------------------------------------------
/volume/volume-gcp/static-provision/deployment/pod.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: test-pd
5 | spec:
6 | containers:
7 | - image: k8s.gcr.io/test-webserver
8 | name: test-container
9 | volumeMounts:
10 | - mountPath: /test-pd
11 | name: test-volume
12 | volumes:
13 | - name: test-volume
14 | # This GCE PD must already exist.
15 | gcePersistentDisk:
16 | pdName: my-data-disk
17 | fsType: ext4
18 |
--------------------------------------------------------------------------------
/volume/volume-gcp/static-provision/deployment/pv.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolume
3 | metadata:
4 | name: gcp-pv
5 | spec:
6 | accessModes:
7 | - ReadWriteMany
8 | capacity:
9 | storage: 10Gi
10 | storageClassName: slow
11 | persistentVolumeReclaimPolicy: Retain
12 | gcePersistentDisk:
13 | pdName: gcp-pv
14 |
--------------------------------------------------------------------------------
/volume/volume-gcp/static-provision/deployment/pvc.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: PersistentVolumeClaim
3 | metadata:
4 | name: mypvc
5 | labels:
6 | app: nginx
7 | tier: webserver
8 | spec:
9 | storageClassName: slow
10 | accessModes:
11 | - ReadWriteMany
12 | resources:
13 | requests:
14 | storage: 10Gi
15 | #volumeName: pv-vol1
16 |
--------------------------------------------------------------------------------
/volume/volume-gcp/static-provision/statefulset/pod.yaml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/DeekshithSN/kubernetes/aad69738753ee476b74dcef11179871644e69631/volume/volume-gcp/static-provision/statefulset/pod.yaml
--------------------------------------------------------------------------------