├── .gitignore ├── README.md ├── _config.yml ├── gcp ├── task-001-k8s-dashboard │ ├── .README_images │ │ ├── Login_screen.png │ │ └── Post_login_screen.png │ ├── 00-namespace.yaml │ ├── 05-service-account.yaml │ ├── 10-service.yaml │ ├── 15-secret.yaml │ ├── 20-configmap.yaml │ ├── 25-role.yaml │ ├── 30-clusterRole.yaml │ ├── 35-roleBinding.yaml │ ├── 40-clusterRoleBinding.yaml │ ├── 45-deployment.yaml │ ├── 50-service-kubernetes-dashboard.yaml │ ├── 55-service-dashboard-metrics-scraper.yaml │ ├── 60-admin-service-account.yaml │ └── README.md ├── task-002-basic-namespace-wide-kubeconfig │ ├── README.md │ ├── admin-run.sh │ ├── client-run.sh │ ├── csr.cnf │ ├── csr.yaml │ ├── dev-ns.yaml │ ├── kubeconfig.tpl │ ├── reset.sh │ ├── role-binding.yaml │ ├── role.yaml │ └── www.yaml ├── task-003-elastic-search │ └── ReadMe.md ├── task-003-intermediate-namespace-wide-kubeconfig │ ├── README.md │ ├── admin-run.sh │ ├── client-run.sh │ ├── common-resources │ │ ├── csr.cnf-template │ │ ├── csr.yaml │ │ ├── kubeconfig.tpl │ │ ├── role-binding.yaml-template │ │ ├── role-readonly.yaml-template │ │ └── role-readwrite.yaml-template │ ├── dev │ │ ├── default │ │ │ └── groupQA │ │ │ │ ├── csr.yaml │ │ │ │ ├── dave.key │ │ │ │ ├── kubeconfig │ │ │ │ ├── role-binding.yaml │ │ │ │ └── role-readonly.yaml │ │ └── kube-system │ │ │ └── groupDEV │ │ │ ├── csr.yaml │ │ │ ├── dave.key │ │ │ ├── kubeconfig │ │ │ ├── role-binding.yaml │ │ │ └── role-readwrite.yaml │ ├── reset.sh │ ├── run-all.sh │ └── www.yaml ├── task-004-intermediate-cluster-wide-kubeconfig │ ├── README.md │ ├── admin-run.sh │ ├── client-run.sh │ ├── common-resources │ │ ├── clusterRole-binding.yaml-template │ │ ├── clusterRole-readonly.yaml-template │ │ ├── clusterRole-readwrite.yaml-template │ │ ├── csr.cnf-template │ │ ├── csr.yaml │ │ └── kubeconfig.tpl │ ├── dev │ │ ├── groupDEV │ │ │ ├── clusterRole-binding.yaml │ │ │ ├── clusterRole-readwrite.yaml │ │ │ ├── csr.yaml │ │ │ ├── dave.key │ │ │ └── kubeconfig │ │ └── 
groupQA │ │ │ ├── clusterRole-binding.yaml │ │ │ ├── clusterRole-readonly.yaml │ │ │ ├── csr.yaml │ │ │ ├── dave.key │ │ │ └── kubeconfig │ ├── reset.sh │ ├── run-all.sh │ └── www.yaml ├── task-005-traefik-whoami │ ├── .ReadMe_images │ │ ├── http-middlewares.png │ │ ├── http-routers.png │ │ ├── http-services.png │ │ ├── traefik-homepage-part1.png │ │ ├── traefik-homepage-part2.png │ │ └── whoami-service.png │ ├── 00-resource-crd-definition.yml │ ├── 05-traefik-rbac.yml │ ├── 10-service-account.yaml │ ├── 15-traefik-deployment.yaml │ ├── 16-traefik.toml │ ├── 20-traefik-service.yaml │ ├── 25-whoami-deployment.yaml │ ├── 30-whoami-service.yaml │ ├── 35-whoami-ingress-route.yaml │ └── ReadMe.md ├── task-006-traefik-whoami-tomlInConfigMap │ ├── .ReadMe_images │ │ ├── features-enabled-in-toml-inside-configMap.png │ │ └── whoami-service.png │ ├── 00-resource-crd-definition.yml │ ├── 05-traefik-rbac.yml │ ├── 10-service-account.yaml │ ├── 15-traefik-deployment.yaml │ ├── 20-traefik-service.yaml │ ├── 24-traefik-configMap.yaml │ ├── 25-whoami-deployment.yaml │ ├── 30-whoami-service.yaml │ ├── 35-whoami-ingress-route.yaml │ └── ReadMe.md ├── task-007-traefik-whoami-lets-encrypt │ ├── .ReadMe_images │ │ ├── ACME-certificate-tls.png │ │ ├── dashboard.png │ │ ├── finally-the-cert-needed.png │ │ └── whoami-service-notls.png │ ├── 00-resource-crd-definition.yml │ ├── 05-traefik-rbac.yml │ ├── 10-service-account.yaml │ ├── 15-traefik-deployment.yaml │ ├── 20-traefik-service.yaml │ ├── 25-whoami-deployment.yaml │ ├── 30-whoami-service.yaml │ ├── 35-ingress-route.yaml │ └── ReadMe.md ├── task-008-external-IP-to-access-Application-In-Cluster │ ├── ReadMe.md │ └── service │ │ └── load-balancer-example.yaml ├── task-009-configuring-dns-with-static-IPs-k8-using-Service │ ├── .ReadMe_images │ │ └── GKE_cluster_created.png │ ├── ReadMe.md │ ├── helloweb-deployment.yaml │ └── helloweb-service.yaml ├── task-010-configuring-dns-with-static-IPs-k8-using-Ingress │ ├── ReadMe.md │ ├── 
helloweb-deployment.yaml │ └── helloweb-ingress.yaml ├── task-011-configuring-datadog │ ├── .ReadMe_images │ │ ├── events-tab.png │ │ ├── k8s-dashboard-pic1.png │ │ ├── k8s-dashboard-pic2.png │ │ ├── k8s-dashboard-pic3.png │ │ ├── live-tail-logs.png │ │ └── logs-dasboard-page.png │ ├── 00-clusterrole.yaml │ ├── 01-cluster-role.yaml │ ├── 05-serviceaccount.yaml │ ├── 06-service-account.yaml │ ├── 10-clusterrolebinding.yaml │ ├── 11-cluster-role-binding.yaml │ ├── 20-datadog-cluster-agent.yaml │ ├── 25-datadog-agent.yaml │ ├── 30-service.yaml │ ├── 35-deployment.yaml │ └── ReadMe.md ├── task-012-PodSecurityPolicy │ ├── ReadMe.md │ └── example-psp.yaml ├── task-013-traefik-whoami-tls-custom-certs │ ├── 00-resource-crd-definition.yml │ ├── 05-traefik-rbac.yml │ ├── 10-service-account.yaml │ ├── 11-traefik-configmap.yaml │ ├── 12-secret.yaml │ ├── 15-traefik-deployment.yaml │ ├── 20-traefik-service.yaml │ ├── 25-whoami-deployment.yaml │ ├── 30-whoami-service.yaml │ ├── 35-whoami-ingress-route.yaml │ └── ReadMe.md ├── task-014-metricbeat │ ├── .ReadMe_images │ │ ├── Infra-Pods.png │ │ ├── Infra-vms.png │ │ ├── K8s-dashboard.png │ │ ├── Pod-metrics.png │ │ ├── containers-overview.png │ │ ├── host-overview-2.png │ │ ├── host-overview.png │ │ └── system-overview.png │ ├── 00-service-account.yaml │ ├── 04-cluster-role.yaml │ ├── 08-cluster-role-binding.yaml │ ├── 12-configmap-metricbeat-deployment.yaml │ ├── 16-configmap-metricbeat-daemonset.yaml │ ├── 20-daemonset.yaml │ ├── 24-deployment.yaml │ └── ReadMe.md ├── task-015-kube-state-metrics │ ├── ReadMe.md │ ├── cluster-role-binding.yaml │ ├── cluster-role.yaml │ ├── deployment.yaml │ ├── service-account.yaml │ └── service.yaml └── task-016-journalbeat │ ├── .ReadMe_images │ ├── creating-index-pattern.png │ └── logs-dashboard.png │ ├── 00-service-account.yaml │ ├── 02-cluster-role.yaml │ ├── 03-cluster-role-binding.yaml │ ├── 04-pod-security-policy.yaml │ ├── 08-configmap.yaml │ ├── 12-daemonset.yaml │ └── ReadMe.md ├── 
local-mac ├── application-life-cycle-management │ ├── task-032-rolling-updates-and-rollback │ │ └── ReadMe.md │ ├── task-033-commands-and-arguments │ │ ├── ReadMe.md │ │ └── pod-ubuntu-ls.yaml │ ├── task-034-configuring-environment-variables-in-application │ │ └── ReadMe.md │ ├── task-035-configuring-secrets-in-application │ │ └── ReadMe.md │ ├── task-036-multi-container-pods │ │ └── ReadMe.md │ └── task-037-init-containers │ │ └── ReadMe.md ├── cluster-creation │ └── task-055-create-k8s-using-kubeadm-vagrant │ │ ├── Readme.md │ │ ├── Vagrantfile │ │ ├── ubuntu-bionic-18.04-cloudimg-console.log │ │ └── ubuntu │ │ ├── allow-bridge-nf-traffic.sh │ │ ├── cert_verify.sh │ │ ├── install-docker-2.sh │ │ ├── install-docker.sh │ │ ├── update-dns.sh │ │ └── vagrant │ │ ├── install-guest-additions.sh │ │ └── setup-hosts.sh ├── cluster-maintenance │ ├── task-038-os-upgrades │ │ └── ReadMe.md │ ├── task-039-cluster-upgrades-kubeadm │ │ └── ReadMe.md │ ├── task-040-cluster-upgrade │ │ └── ReadMe.md │ └── task-041-backup-and-restore │ │ └── ReadMe.md ├── core-concepts │ ├── task-017-pods │ │ ├── ReadMe.md │ │ └── redis.yml │ ├── task-018-replicaset │ │ ├── ReadMe.md │ │ └── replicaset.yaml │ ├── task-019-deployment │ │ ├── ReadMe.md │ │ └── nginx-deployment.yaml │ ├── task-020-namespaces │ │ └── ReadMe.md │ └── task-021-services │ │ └── ReadMe.md ├── logging-and-monitoring │ ├── task-030-metrics-server │ │ └── ReadMe.md │ └── task-031-managing-logs │ │ └── ReadMe.md ├── networking │ ├── task-050-explore-env │ │ └── ReadMe.md │ ├── task-051-explore-cni-weave │ │ └── Readme.md │ ├── task-052-IP-address-management │ │ └── Readme.md │ ├── task-053-service-networking │ │ └── Readme.md │ ├── task-054-dns │ │ └── Readme.md │ └── task-054-ingress │ │ └── Readme.md ├── scheduling │ ├── task-022-labels-and-selectors │ │ └── ReadMe.md │ ├── task-023-taints-and-tolerations │ │ └── ReadMe.md │ ├── task-024-node-affinity │ │ └── ReadMe.md │ ├── task-025-resource-requirement-and-limits │ │ └── 
ReadMe.md │ ├── task-026-editing-pods-and-deployments │ │ └── ReadMe.md │ ├── task-027-daemonsets │ │ └── ReadMe.md │ ├── task-028-static-pods │ │ └── ReadMe.md │ └── task-029-multiple-schedulers │ │ └── ReadMe.md └── security │ ├── task-042-authentication │ └── ReadMe.md │ ├── task-043-tls-in-k8s-certificate-creation │ ├── ReadMe.md │ ├── admin.crt │ ├── admin.csr │ ├── admin.key │ ├── ca.crt │ ├── ca.csr │ ├── ca.key │ ├── jane.csr │ └── jane.key │ ├── task-044-kubeconfig-file │ └── ReadMe.md │ ├── task-045-api-groups │ └── ReadMe.md │ ├── task-046-RBAC │ └── ReadMe.md │ ├── task-047-image-security │ └── ReadMe.md │ ├── task-048-security-contexts │ └── ReadMe.md │ └── task-049-network-policy │ └── ReadMe.md └── task-000-commands ├── basics.md ├── commands-new.md ├── commands.md └── doclinks.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | .idea 3 | .vagrant -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-hacker -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/.README_images/Login_screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-001-k8s-dashboard/.README_images/Login_screen.png -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/.README_images/Post_login_screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-001-k8s-dashboard/.README_images/Post_login_screen.png 
-------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/00-namespace.yaml: -------------------------------------------------------------------------------- 1 | #Docs Referred : https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html 2 | 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: kubernetes-dashboard 7 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/05-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kubernetes-dashboard 8 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/10-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kubernetes-dashboard 8 | spec: 9 | type: NodePort 10 | ports: 11 | - port: 443 12 | targetPort: 8443 13 | selector: 14 | k8s-app: kubernetes-dashboard 15 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/15-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard-certs 7 | namespace: kubernetes-dashboard 8 | type: Opaque 9 | 10 | --- 11 | 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | labels: 16 | k8s-app: kubernetes-dashboard 17 | name: kubernetes-dashboard-csrf 18 | namespace: kubernetes-dashboard 19 | type: Opaque 20 | data: 21 | csrf: "" 22 | 23 | --- 24 | 25 | apiVersion: v1 26 | kind: Secret 27 | metadata: 28 | 
labels: 29 | k8s-app: kubernetes-dashboard 30 | name: kubernetes-dashboard-key-holder 31 | namespace: kubernetes-dashboard 32 | type: Opaque 33 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/20-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard-settings 7 | namespace: kubernetes-dashboard 8 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/25-role.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kubernetes-dashboard 8 | rules: 9 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 10 | - apiGroups: [""] 11 | resources: ["secrets"] 12 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] 13 | verbs: ["get", "update", "delete"] 14 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 15 | - apiGroups: [""] 16 | resources: ["configmaps"] 17 | resourceNames: ["kubernetes-dashboard-settings"] 18 | verbs: ["get", "update"] 19 | # Allow Dashboard to get metrics. 
20 | - apiGroups: [""] 21 | resources: ["services"] 22 | resourceNames: ["heapster", "dashboard-metrics-scraper"] 23 | verbs: ["proxy"] 24 | - apiGroups: [""] 25 | resources: ["services/proxy"] 26 | resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] 27 | verbs: ["get"] 28 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/30-clusterRole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | rules: 8 | # Allow Metrics Scraper to get metrics from the Metrics server 9 | - apiGroups: ["metrics.k8s.io"] 10 | resources: ["pods", "nodes"] 11 | verbs: ["get", "list", "watch"] 12 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/35-roleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kubernetes-dashboard 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: Role 11 | name: kubernetes-dashboard 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kubernetes-dashboard 15 | namespace: kubernetes-dashboard 16 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/40-clusterRoleBinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: kubernetes-dashboard 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: kubernetes-dashboard 9 | subjects: 10 | - 
kind: ServiceAccount 11 | name: kubernetes-dashboard 12 | namespace: kubernetes-dashboard 13 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/45-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | labels: 5 | k8s-app: kubernetes-dashboard 6 | name: kubernetes-dashboard 7 | namespace: kubernetes-dashboard 8 | spec: 9 | replicas: 1 10 | revisionHistoryLimit: 10 11 | selector: 12 | matchLabels: 13 | k8s-app: kubernetes-dashboard 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: kubernetes-dashboard 18 | spec: 19 | containers: 20 | - name: kubernetes-dashboard 21 | image: kubernetesui/dashboard:v2.0.0-beta8 22 | imagePullPolicy: Always 23 | ports: 24 | - containerPort: 8443 25 | protocol: TCP 26 | args: 27 | - --auto-generate-certificates 28 | - --namespace=kubernetes-dashboard 29 | # Uncomment the following line to manually specify Kubernetes API server Host 30 | # If not specified, Dashboard will attempt to auto discover the API server and connect 31 | # to it. Uncomment only if the default does not work. 
32 | # - --apiserver-host=http://my-address:port 33 | volumeMounts: 34 | - name: kubernetes-dashboard-certs 35 | mountPath: /certs 36 | # Create on-disk volume to store exec logs 37 | - mountPath: /tmp 38 | name: tmp-volume 39 | livenessProbe: 40 | httpGet: 41 | scheme: HTTPS 42 | path: / 43 | port: 8443 44 | initialDelaySeconds: 30 45 | timeoutSeconds: 30 46 | securityContext: 47 | allowPrivilegeEscalation: false 48 | readOnlyRootFilesystem: true 49 | runAsUser: 1001 50 | runAsGroup: 2001 51 | volumes: 52 | - name: kubernetes-dashboard-certs 53 | secret: 54 | secretName: kubernetes-dashboard-certs 55 | - name: tmp-volume 56 | emptyDir: {} 57 | serviceAccountName: kubernetes-dashboard 58 | nodeSelector: 59 | "beta.kubernetes.io/os": linux 60 | # Comment the following tolerations if Dashboard must not be deployed on master 61 | tolerations: 62 | - key: node-role.kubernetes.io/master 63 | effect: NoSchedule 64 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/50-service-kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | labels: 5 | k8s-app: dashboard-metrics-scraper 6 | name: dashboard-metrics-scraper 7 | namespace: kubernetes-dashboard 8 | spec: 9 | ports: 10 | - port: 8000 11 | targetPort: 8000 12 | selector: 13 | k8s-app: dashboard-metrics-scraper 14 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/55-service-dashboard-metrics-scraper.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | labels: 5 | k8s-app: dashboard-metrics-scraper 6 | name: dashboard-metrics-scraper 7 | namespace: kubernetes-dashboard 8 | spec: 9 | replicas: 1 10 | revisionHistoryLimit: 10 11 | selector: 12 | matchLabels: 13 | k8s-app: dashboard-metrics-scraper 14 | 
template: 15 | metadata: 16 | labels: 17 | k8s-app: dashboard-metrics-scraper 18 | annotations: 19 | seccomp.security.alpha.kubernetes.io/pod: 'runtime/default' 20 | spec: 21 | containers: 22 | - name: dashboard-metrics-scraper 23 | image: kubernetesui/metrics-scraper:v1.0.1 24 | ports: 25 | - containerPort: 8000 26 | protocol: TCP 27 | livenessProbe: 28 | httpGet: 29 | scheme: HTTP 30 | path: / 31 | port: 8000 32 | initialDelaySeconds: 30 33 | timeoutSeconds: 30 34 | volumeMounts: 35 | - mountPath: /tmp 36 | name: tmp-volume 37 | securityContext: 38 | allowPrivilegeEscalation: false 39 | readOnlyRootFilesystem: true 40 | runAsUser: 1001 41 | runAsGroup: 2001 42 | serviceAccountName: kubernetes-dashboard 43 | nodeSelector: 44 | "beta.kubernetes.io/os": linux 45 | # Comment the following tolerations if Dashboard must not be deployed on master 46 | tolerations: 47 | - key: node-role.kubernetes.io/master 48 | effect: NoSchedule 49 | volumes: 50 | - name: tmp-volume 51 | emptyDir: {} 52 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/60-admin-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: eks-admin 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: eks-admin 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: eks-admin 18 | namespace: kube-system 19 | -------------------------------------------------------------------------------- /gcp/task-001-k8s-dashboard/README.md: -------------------------------------------------------------------------------- 1 | ## Steps 2 | 3 | ```bash 4 | $ cd kubernetes-kitchen/gcp/task1-k8s-dashboard 5 | 6 | $ kubectl apply -f . 
7 | 8 | $ kubectl get service kubernetes-dashboard -n kubernetes-dashboard 9 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 10 | kubernetes-dashboard NodePort 10.48.8.193 443:30566/TCP 16m 11 | 12 | $ gcloud compute firewall-rules create node-port-30566 --allow tcp:30566 13 | Creating firewall...⠹Created [https://www.googleapis.com/compute/v1/projects/gcloud-262311/global/firewalls/node-port-30566]. 14 | Creating firewall...done. 15 | NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED 16 | node-port-30566 default INGRESS 1000 tcp:30566 False 17 | 18 | $ kubectl get nodes --output wide 19 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP 20 | gke-cluster-2-default-pool-43440158-7dk0 Ready 148m v1.14.10-gke.27 10.128.0.35 34.67.212.219 21 | 22 | $ kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}') 23 | Name: eks-admin-token-xcms9 24 | Namespace: kube-system 25 | Labels: 26 | Annotations: kubernetes.io/service-account.name: eks-admin 27 | kubernetes.io/service-account.uid: d96d7ddc-74cb-11ea-802a-42010a8001ab 28 | 29 | Type: kubernetes.io/service-account-token 30 | 31 | Data 32 | ==== 33 | ca.crt: 1115 bytes 34 | namespace: 11 bytes 35 | token: COPY_THIS_VALUE 36 | ``` 37 | 38 | - Now visit the following on firefox and give the token copied above 39 | 40 | > https://34.67.212.219:30566 41 | 42 | ![](.README_images/Login_screen.png) 43 | 44 | - Post login screen 45 | 46 | ![](.README_images/Post_login_screen.png) 47 | 48 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/README.md: -------------------------------------------------------------------------------- 1 | ## Objective : To create a user DAVE and give access to DAVE to create only specific resources in specific namespace 2 | 3 | Docs referred - https://medium.com/better-programming/k8s-tips-give-access-to-your-clusterwith-a-client-certificate-dfb3b71a76fe 4 | 5 | - 
Generating the dave.key and dave.csr 6 | ```bash 7 | $ ./client-run.sh 8 | Generating RSA private key, 4096 bit long modulus 9 | ..........................................................++++ 10 | ......................................................................................................................................................................................................................................++++ 11 | e is 65537 (0x010001) 12 | ``` 13 | 14 | - Generating the kubeconfig 15 | ```bash 16 | $ ./admin-run.sh 17 | certificatesigningrequest.certificates.k8s.io/mycsr created 18 | NAME AGE REQUESTOR CONDITION 19 | mycsr 0s user@gmail.com Pending 20 | certificatesigningrequest.certificates.k8s.io/mycsr approved 21 | NAME AGE REQUESTOR CONDITION 22 | mycsr 1s user@gmail.com Approved,Issued 23 | namespace/development created 24 | role.rbac.authorization.k8s.io/dev created 25 | rolebinding.rbac.authorization.k8s.io/dev created 26 | ``` 27 | 28 | - At the client workstation copy the dave.key and kubeconfig and execute the following 29 | 30 | ```bash 31 | $ ls kubeconfig dave.key 32 | dave.key kubeconfig 33 | 34 | $ export KUBECONFIG=$PWD/kubeconfig 35 | 36 | $ kubectl config set-credentials dave \ 37 | --client-key=$PWD/dave.key \ 38 | --embed-certs=true 39 | User "dave" set. 
40 | 41 | $ kubectl version 42 | Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.5", GitCommit:"20c265fef0741dd71a66480e35bd69f18351daea", GitTreeState:"clean", BuildDate:"2019-10-15T19:16:51Z", GoVersion:"go1.12.10", Compiler:"gc", Platform:"darwin/amd64"} 43 | Server Version: version.Info{Major:"1", Minor:"14+", GitVersion:"v1.14.10-gke.27", GitCommit:"145f9e21a4515947d6fb10819e5a336aff1b6959", GitTreeState:"clean", BuildDate:"2020-02-21T18:01:40Z", GoVersion:"go1.12.12b4", Compiler:"gc", Platform:"linux/amd64"} 44 | ``` 45 | 46 | - Try creating a resource in namespace development 47 | 48 | ```bash 49 | $ kubectl apply -f www.yaml 50 | deployment.apps/www created 51 | service/www created 52 | 53 | $ kubectl get pods 54 | Error from server (Forbidden): pods is forbidden: User "dave" cannot list resource "pods" in API group "" in the namespace "default" 55 | 56 | $ kubectl get pods -n development 57 | NAME READY STATUS RESTARTS AGE 58 | www-66fd899d46-8sr97 1/1 Running 0 15s 59 | www-66fd899d46-hzgr6 1/1 Running 0 15s 60 | www-66fd899d46-pb7fm 1/1 Running 0 15s 61 | ``` 62 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/admin-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | # Encoding the .csr file in base64 5 | export BASE64_CSR=$(cat ./dave.csr | base64 | tr -d '\n') 6 | # Substitution of the BASE64_CSR env variable and creation of the CertificateSigninRequest resource 7 | cat csr.yaml | envsubst | kubectl apply -f - 8 | 9 | kubectl get csr 10 | 11 | kubectl certificate approve mycsr 12 | 13 | kubectl get csr 14 | 15 | kubectl get csr mycsr -o jsonpath='{.status.certificate}' \ 16 | | base64 --decode > dave.crt 17 | 18 | 19 | kubectl apply -f dev-ns.yaml 20 | 21 | kubectl apply -f role.yaml 22 | 23 | kubectl apply -f role-binding.yaml 24 | 25 | 26 | # User identifier 27 | export 
USER="dave" 28 | # Cluster Name (get it from the current context) 29 | export CLUSTER_NAME=$(kubectl config view --minify -o jsonpath={.current-context}) 30 | # Client certificate 31 | export CLIENT_CERTIFICATE_DATA=$(kubectl get csr mycsr -o jsonpath='{.status.certificate}') 32 | # Cluster Certificate Authority 33 | export CLUSTER_CA=$(kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."certificate-authority-data"') 34 | #export CLUSTER_CA=$(kubectl config view --raw -o json | jq -r '.clusters[].cluster."certificate-authority-data"') 35 | 36 | # API Server endpoint 37 | export CLUSTER_ENDPOINT=$(kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."server"') 38 | #export CLUSTER_ENDPOINT=$(kubectl config view --raw -o json | jq -r '.clusters[].cluster."server"') 39 | 40 | cat kubeconfig.tpl | envsubst > kubeconfig 41 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/client-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | openssl genrsa -out dave.key 4096 5 | 6 | openssl req -config ./csr.cnf -new -key dave.key -nodes -out dave.csr 7 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/csr.cnf: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | prompt = no 4 | default_md = sha256 5 | distinguished_name = dn 6 | [ dn ] 7 | CN = dave 8 | O = dev 9 | [ v3_ext ] 10 | authorityKeyIdentifier=keyid,issuer:always 11 | basicConstraints=CA:FALSE 12 | keyUsage=keyEncipherment,dataEncipherment 13 | extendedKeyUsage=serverAuth,clientAuth 14 | -------------------------------------------------------------------------------- 
/gcp/task-002-basic-namespace-wide-kubeconfig/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: mycsr 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: ${BASE64_CSR} 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/dev-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: development 5 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/kubeconfig.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - cluster: 5 | certificate-authority-data: ${CLUSTER_CA} 6 | server: ${CLUSTER_ENDPOINT} 7 | name: ${CLUSTER_NAME} 8 | users: 9 | - name: ${USER} 10 | user: 11 | client-certificate-data: ${CLIENT_CERTIFICATE_DATA} 12 | contexts: 13 | - context: 14 | cluster: ${CLUSTER_NAME} 15 | user: dave 16 | name: ${USER}-${CLUSTER_NAME} 17 | current-context: ${USER}-${CLUSTER_NAME} 18 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/reset.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | 5 | kubectl delete csr mycsr 6 | 7 | 8 | kubectl delete -f role.yaml 9 | 10 | kubectl delete -f role-binding.yaml 11 | 12 | kubectl delete -f dev-ns.yaml 13 | 14 | 15 | rm -rf kubeconfig dave.crt dave.csr dave.key 16 | -------------------------------------------------------------------------------- 
/gcp/task-002-basic-namespace-wide-kubeconfig/role-binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: dev 5 | namespace: development 6 | subjects: 7 | - kind: Group 8 | name: dev 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: dev 13 | apiGroup: rbac.authorization.k8s.io 14 | 15 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/role.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: development 5 | name: dev 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "services"] 9 | verbs: ["create", "get", "update", "list", "delete"] 10 | - apiGroups: ["apps"] 11 | resources: ["deployments"] 12 | verbs: ["create", "get", "update", "list", "delete"] 13 | -------------------------------------------------------------------------------- /gcp/task-002-basic-namespace-wide-kubeconfig/www.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: www 5 | namespace: development 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: www 11 | template: 12 | metadata: 13 | labels: 14 | app: www 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx:1.14-alpine 19 | ports: 20 | - containerPort: 80 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: www 26 | namespace: development 27 | spec: 28 | selector: 29 | app: vote 30 | type: ClusterIP 31 | ports: 32 | - port: 80 33 | targetPort: 80 34 | -------------------------------------------------------------------------------- /gcp/task-003-elastic-search/ReadMe.md: 
-------------------------------------------------------------------------------- 1 | 2 | ## Deploy ECK in your kubernetes cluster 3 | [k8s-deploy-eck](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html#k8s-deploy-eck) 4 | 5 | - Install custom resource definitions and the operator with its RBAC rules: 6 | 7 | ```bash 8 | $ kubectl apply -f https://download.elastic.co/downloads/eck/1.0.1/all-in-one.yaml 9 | customresourcedefinition.apiextensions.k8s.io/apmservers.apm.k8s.elastic.co created 10 | customresourcedefinition.apiextensions.k8s.io/elasticsearches.elasticsearch.k8s.elastic.co created 11 | customresourcedefinition.apiextensions.k8s.io/kibanas.kibana.k8s.elastic.co created 12 | clusterrole.rbac.authorization.k8s.io/elastic-operator created 13 | clusterrolebinding.rbac.authorization.k8s.io/elastic-operator created 14 | namespace/elastic-system created 15 | statefulset.apps/elastic-operator created 16 | serviceaccount/elastic-operator created 17 | validatingwebhookconfiguration.admissionregistration.k8s.io/elastic-webhook.k8s.elastic.co created 18 | service/elastic-webhook-server created 19 | secret/elastic-webhook-server-cert created 20 | ``` 21 | 22 | ## Deploy an Elastic Search Cluster 23 | [k8s-deploy-elasticsearch](https://www.elastic.co/guide/en/cloud-on-k8s/current/k8s-quickstart.html#k8s-deploy-elasticsearch) 24 | 25 | ```bash 26 | $ cat < apiVersion: elasticsearch.k8s.elastic.co/v1 28 | > kind: Elasticsearch 29 | > metadata: 30 | > name: quickstart 31 | > spec: 32 | > version: 7.6.2 33 | > nodeSets: 34 | > - name: default 35 | > count: 1 36 | > config: 37 | > node.master: true 38 | > node.data: true 39 | > node.ingest: true 40 | > node.store.allow_mmap: false 41 | > EOF 42 | elasticsearch.elasticsearch.k8s.elastic.co/quickstart created 43 | ``` -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/admin-run.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ $# -lt 4 ] 4 | then 5 | echo "Usage: ./admin-run.sh " 6 | exit 0 7 | fi 8 | 9 | FOLDER_NAMESPACE=$1 10 | FOLDER_USER_GROUP=$2 11 | ACCESS_TYPE=$3 12 | KUBECONFIG_CLUSTER_FOLDER=$4 13 | 14 | # Encoding the .csr file in base64 15 | export BASE64_CSR=$(cat ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.csr | base64 | tr -d '\n') 16 | export NAME_OF_CSR="$FOLDER_NAMESPACE-csr" 17 | # Substitution of the BASE64_CSR env variable and creation of the CertificateSigninRequest resource 18 | cat ./common-resources/csr.yaml | envsubst > ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/csr.yaml 19 | cat ./common-resources/csr.yaml | envsubst | kubectl apply -f - 20 | 21 | kubectl get csr 22 | 23 | kubectl certificate approve $NAME_OF_CSR 24 | 25 | kubectl get csr 26 | 27 | kubectl get csr $NAME_OF_CSR -o jsonpath='{.status.certificate}' \ 28 | | base64 --decode > ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.crt 29 | 30 | 31 | if [ "$ACCESS_TYPE" == "R" ] 32 | then 33 | kubectl apply -f ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readonly.yaml 34 | fi 35 | 36 | if [ "$ACCESS_TYPE" == "RW" ] 37 | then 38 | kubectl apply -f ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readwrite.yaml 39 | fi 40 | 41 | 42 | kubectl apply -f ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-binding.yaml 43 | 44 | 45 | # User identifier 46 | export USER="dave" 47 | # Cluster Name (get it from the current context) 48 | export CLUSTER_NAME=$(kubectl config view --minify -o jsonpath={.current-context}) 49 | # Client certificate 50 | export CLIENT_CERTIFICATE_DATA=$(kubectl get csr $NAME_OF_CSR -o jsonpath='{.status.certificate}') 51 | # Cluster Certificate Authority 52 | export CLUSTER_CA=$(kubectl config view --raw -o json | jq -r '.clusters[] | 
select(.name == "'$(kubectl config current-context)'") | .cluster."certificate-authority-data"') 53 | #export CLUSTER_CA=$(kubectl config view --raw -o json | jq -r '.clusters[].cluster."certificate-authority-data"') 54 | 55 | # API Server endpoint 56 | export CLUSTER_ENDPOINT=$(kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."server"') 57 | #export CLUSTER_ENDPOINT=$(kubectl config view --raw -o json | jq -r '.clusters[].cluster."server"') 58 | 59 | cat ./common-resources/kubeconfig.tpl | envsubst > ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/kubeconfig 60 | 61 | 62 | rm -rf ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/csr.cnf \ 63 | ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.crt \ 64 | ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.csr 65 | 66 | 67 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/client-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [ $# -lt 4 ] 5 | then 6 | echo "Usage: ./client-run.sh " 7 | exit 0 8 | fi 9 | 10 | FOLDER_NAMESPACE=$1 11 | FOLDER_USER_GROUP=$2 12 | ACCESS_TYPE=$3 13 | KUBECONFIG_CLUSTER_FOLDER=$4 14 | 15 | 16 | 17 | 18 | mkdir -p ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/ 19 | 20 | cp -rfp ./common-resources/csr.cnf-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/csr.cnf 21 | cp -rfp ./common-resources/role-binding.yaml-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-binding.yaml 22 | 23 | if [ "$ACCESS_TYPE" == "R" ] 24 | then 25 | cp -rfp ./common-resources/role-readonly.yaml-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readonly.yaml 26 | fi 27 | 28 | if [ "$ACCESS_TYPE" == "RW" ] 
29 | then 30 | cp -rfp ./common-resources/role-readwrite.yaml-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readwrite.yaml 31 | fi 32 | 33 | 34 | egrep -rl "SUBSTITUTE_GROUPNAME" ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/ | xargs sed -i "s/SUBSTITUTE_GROUPNAME/$FOLDER_USER_GROUP/g" 35 | 36 | egrep -rl "SUBSTITUTE_NAMESPACE" ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/ | xargs sed -i "s/SUBSTITUTE_NAMESPACE/$FOLDER_NAMESPACE/g" 37 | 38 | mkdir -p ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/ 39 | 40 | openssl genrsa -out ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.key 4096 41 | 42 | openssl req -config ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/csr.cnf -new -key ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.key -nodes -out ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.csr 43 | 44 | 45 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/common-resources/csr.cnf-template: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | prompt = no 4 | default_md = sha256 5 | distinguished_name = dn 6 | [ dn ] 7 | CN = dave 8 | O = SUBSTITUTE_GROUPNAME 9 | [ v3_ext ] 10 | authorityKeyIdentifier=keyid,issuer:always 11 | basicConstraints=CA:FALSE 12 | keyUsage=keyEncipherment,dataEncipherment 13 | extendedKeyUsage=serverAuth,clientAuth 14 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/common-resources/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: ${NAME_OF_CSR} 5 | spec: 6 | groups: 7 | - 
system:authenticated 8 | request: ${BASE64_CSR} 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/common-resources/kubeconfig.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - cluster: 5 | certificate-authority-data: ${CLUSTER_CA} 6 | server: ${CLUSTER_ENDPOINT} 7 | name: ${CLUSTER_NAME} 8 | users: 9 | - name: ${USER} 10 | user: 11 | client-certificate-data: ${CLIENT_CERTIFICATE_DATA} 12 | contexts: 13 | - context: 14 | cluster: ${CLUSTER_NAME} 15 | user: ${USER} # fix: reference the templated user name (must equal users[].name) instead of hardcoding "dave" 16 | name: ${USER}-${CLUSTER_NAME} 17 | current-context: ${USER}-${CLUSTER_NAME} 18 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/common-resources/role-binding.yaml-template: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-monitoring-ns 5 | namespace: SUBSTITUTE_NAMESPACE 6 | subjects: 7 | - kind: Group 8 | name: SUBSTITUTE_GROUPNAME 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: role-SUBSTITUTE_NAMESPACE 13 | apiGroup: rbac.authorization.k8s.io 14 | 15 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/common-resources/role-readonly.yaml-template: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: SUBSTITUTE_NAMESPACE 5 | name: role-SUBSTITUTE_NAMESPACE 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "services"] 9 | verbs: ["get", "list"] 10 | - apiGroups: ["apps"] 11 | 
resources: ["deployments"] 12 | verbs: ["get", "list"] 13 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/common-resources/role-readwrite.yaml-template: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: SUBSTITUTE_NAMESPACE 5 | name: role-SUBSTITUTE_NAMESPACE 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "services"] 9 | verbs: ["create", "get", "update", "list", "delete"] 10 | - apiGroups: ["apps"] 11 | resources: ["deployments"] 12 | verbs: ["create", "get", "update", "list", "delete"] 13 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/default/groupQA/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: default-csr 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJRVpqQ0NBazRDQVFBd0lURU5NQXNHQTFVRUF3d0VaR0YyWlRFUU1BNEdBMVVFQ2d3SFozSnZkWEJSUVRDQwpBaUl3RFFZSktvWklodmNOQVFFQkJRQURnZ0lQQURDQ0Fnb0NnZ0lCQU11WG9SSncxM3g0V0l3UWk1cmcwbWF6ClFKckk4amlMNlhIa2E2UzB0cWxCS1AwZFI4Wnpncmt2T0k1QVV0NWllbVEwZ0FIbG1kMXBNcUZodTV3RW5renIKdGM0ZTVqK3Blb01MWkFVbEZmaVVmVU95RSttSU5YSE1JOE5GeTZ1VXJCSW1ZaU5CemxBbENLQk1SSHY1QjdWdgpSTkpNQnRsT2MzOENQKzJlWjQ2UmxsUk5pb2dndUEvVjVmS0FQN3BidXVRTktKNExGWHhvUVlPMmxKNlhqajgxCnpMWGZKRGFYdi9LZGRXc0xZZmkrOTFCVmNmSnIxTUdISDBQNW44eE5DWHg5YTR4MlhjNk9uTkpOZnRqemhNY3cKem51Y0JvZXZCcW4ydUtSMGZ2c1FDVWtQb0pYM2NyblVnc0YzMzRXNUJxWWczWHVRRVdZZ2JLSDZhMDhIa1JROApqeFFLeTA3eUx0ZjFja3NyOVBPQTQ2Yy9xVTYrM3ZUb1k4UWhsYTZFclk0akV2VStHSmZveExSelp0ZUd3QnduCjJkVys5SmUzeVIwTkJMSFJMR3hKaDdBV09LdHo1KzRCbysvc0VmVk9oNWF6RVgrQUdWS0VpQjFZR3c5TGNTT04KNlR5VXJPUkk5WStwQlk5TmdWNTBMbVdwSmV2M281TnNCeUl1TGZ1ZFpEUnQ2enprV3YwLytiK2lTR0tzM3UwZApQakJnSnN4RERJcVFBYnlwaVJoZE8wTzNVNHkzdm55Z1c2M3B3TWhXRFE5T1N4VHRzU0lCNXo5S1NJRWNsSUs2ClRrWjE1am9qcEpRdlppN0hkVnhBUDl3TEV1MjMyem9lRE9MZWtYUWE0SVVYSSs5V0ZPcXNNUlB6Rmg4aHdaZlMKOERMZy9vR1J3TXQ0MmREUVNPdXpBZ01CQUFHZ0FEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FnRUFZdkFKNERwSwpMc2JCVjhodmh3b3NKOWlJTlN6eFNDZW4rOVhDQXVpQVdRSTFRWVJab20rYjRnekRwd0FyTjRGdm9vWSs2cWdnClhWd0pONWRsY1ZOWTc2eno5N3ppb3FaL0RUMDVqQTlNU3poRXp4M3ZMT1NoZU9wZ1RTYUp5VGV0MWZXY21CV3MKWE1mWnF1c3dJVTJTTE1uSjRNa1JBZzFFMmYvd0JnVG91elV3L0tRM0FDdkhkYnlNdjR4clBuajY1YjB1cVRZagpaazFFRkdqNFhZWU9oM0JhRGlJOG9kTGpMNVc3MTljai9QUjhIQzdSb3ZnSFh6dTBya1ZoUERaUXJTbVR4VTZTCjh0OC9xRk5LS1pKOEh6RGFKQ0hqY1FrZFh4NGcyR2xhSy9Vd0dkbTJZaE9OdzRlL3lNWUM2MHpONEZQZVJUdW0KZXpGRGE0WDdVUGpUZ2VBanNyUHJoYlNKVDVhdWxoOUJpRHBNMXQvTVRYS0FoODAvc1JoTWhXTEdPUHUxc0xTWQpoQVRUUUVNWTMzUWVWeGo4dEtnYXpxQWlZYi9INkcrWTZpWVZQeXhtZjZ4M3pjMEJMQUFOemUzc3Q3SFQ3QjhmCm9vYzJxT1BaYmNQQnMvbEpPWVArYUQ5VGlNYlVRVWJmcWZJdUxWaEFxK01OZlNzaFJhcEhNWGZaNVhENk45Qm8Kb2w1aGxnNzkzRXhaVFpVSlJuUnhqa1lrcE52UnJSRG11STQvdzRxcUVzTEpwT0x1T0EzVlczbXVGTUFRY3pHQQp1TEgwZTR3cEV4M25IU3liRFhuUE9kbU9VV0xlWWR6WmtH
dEFKZHRwaUpoU1ZPb1B4NVFIRUpXbGVZVjlBMTJaCncvMVRvZW4yZjdjWHBGKzBXeGlTUmVPQWwvSCs5djZCOHlRPQotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/default/groupQA/dave.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAy5ehEnDXfHhYjBCLmuDSZrNAmsjyOIvpceRrpLS2qUEo/R1H 3 | xnOCuS84jkBS3mJ6ZDSAAeWZ3WkyoWG7nASeTOu1zh7mP6l6gwtkBSUV+JR9Q7IT 4 | 6Yg1ccwjw0XLq5SsEiZiI0HOUCUIoExEe/kHtW9E0kwG2U5zfwI/7Z5njpGWVE2K 5 | iCC4D9Xl8oA/ulu65A0ongsVfGhBg7aUnpeOPzXMtd8kNpe/8p11awth+L73UFVx 6 | 8mvUwYcfQ/mfzE0JfH1rjHZdzo6c0k1+2POExzDOe5wGh68Gqfa4pHR++xAJSQ+g 7 | lfdyudSCwXffhbkGpiDde5ARZiBsofprTweRFDyPFArLTvIu1/VySyv084Djpz+p 8 | Tr7e9OhjxCGVroStjiMS9T4Yl+jEtHNm14bAHCfZ1b70l7fJHQ0EsdEsbEmHsBY4 9 | q3Pn7gGj7+wR9U6HlrMRf4AZUoSIHVgbD0txI43pPJSs5Ej1j6kFj02BXnQuZakl 10 | 6/ejk2wHIi4t+51kNG3rPORa/T/5v6JIYqze7R0+MGAmzEMMipABvKmJGF07Q7dT 11 | jLe+fKBbrenAyFYND05LFO2xIgHnP0pIgRyUgrpORnXmOiOklC9mLsd1XEA/3AsS 12 | 7bfbOh4M4t6RdBrghRcj71YU6qwxE/MWHyHBl9LwMuD+gZHAy3jZ0NBI67MCAwEA 13 | AQKCAgBC67ZwP1M1gMFqWM3qvNesFmO3/TFRUPwNCc4wN46LF/YHLSa4nqNrh5AJ 14 | gi87Fhew9GjXNsdatgeEcIGgiQwXsKrz88gvSOASTx4zartMGFMMe258Jt+aZkWj 15 | HEAUSCw/UwqTGEBeq62oMymXroXQJ6DxlzfxGg5wwyaMTrMvc798Oni8wjR+UArj 16 | qTBJa3mFGKYvPOl1GTffuZYkgyCoumBtr8lRgJQBhaxg4T706qCUcWGhE7y9WFL4 17 | nwbdurzRhNBjPU0cZlg58GsvmOweJXbZvrGOzANevtB2jixnDiBfOZnJcaruVmIc 18 | gfChHe6HkOn8gaCWdzX+EDhRo+UVsPncX2f5G0N+AC9Zqs3v57dPetLYTkk0NcJ/ 19 | dtca8sAVGbZDdeI4T9OnswOVaBVutCr5n5kNSqimCskBMA9xrvoEuNX07y7Mm4e/ 20 | FGzNdMCh94nzDutPpTrkDd39SWH7LUTZtky1ZOcBCRvYCAdT4cPV3x7/uoty+E9h 21 | rcECO6UsX/WouVR5sWNTwctzZdZghYc2H+C322XQ3wxTtnPx2iL6zWSKqxyALioJ 22 | cfWiyKKSAiGP8Cm98QHsovTnt3G5yNha2WTu2jVgYw2//tGb+99o1XGlcsQ45Fg+ 23 | 
XQojoNFn0gmKuqsFOclgNWzop32EcORV/ix3i5bXdN3njA/JsQKCAQEA+/ArfE4O 24 | oY29iuixMczgOO9VqiWVSuhsH+uFucjBgWgLKmnpGusc+Z0HpEYNHQAPhAJ+2Ic3 25 | nbEIaMzQNPqBtan1j7Oa/r/VzRUALTPSM1DsNQJndfDLivbqCKLI9gE9wi20l/N7 26 | eLNrviwOSac9yujFMEweMCLL+cUkrl6Y2Ppj3KaIBz9/9Vk7okyGSnN2XAV1RL3r 27 | VGZ3/UB9l170Pw1whspZn93K3s0Y8IZIRM92ZNJnzxzID4yp62rDe96nJb7JazLN 28 | G3IfMI+9HSsa+nSZLKHSBb+TzUr1J6OvG0TOO6KBe78FsrTBzakkigdMJETCYN3o 29 | kieQPkjI8R71PQKCAQEAzt/rm4iVDiT5YhV/tlQn2BcHYVdKHMohdEbXHqMlIS6+ 30 | fNbPSnIF/1dPj6dPUzVnb8tKs1fCmRWr+d0vSyPlgpc2xZ1PVT1wti6lousjrphL 31 | X2xyFaI103w44wShzHMlqwcyAaOeRXOHqxg8lwEhkz4rDJRSPbuDsB7WlA1GzxF4 32 | eLdUQr8wHrddys0w6QSYAbTBAyIN+GW092t77ikvz3+lOAG5VhxoI+FwrjqoZcoU 33 | WiiIBYhGtbl3uNTqPDpY2iZwiwY6oZ0eW5OxPI9imdHD3m+l0+K07P3it/4K1wBE 34 | YDsjshmfCnjl/pOHYwJiI0OQmre9GcMS09O9w9bTrwKCAQEAsLbwuZtC1fzG07WN 35 | /KF5MVZnHxaHlmha1zTrsrnnSS7CjjiFwlTiGRAVORvNvOa6+1yIvrZxw1FW1bO7 36 | 4z1HWDZjwS047bF/dP9jBVHRAh+j3zbVfTuuecXhuLb+ANg0CozmnKKzm6E2VMkw 37 | KGd/h2EoV00864Ss0ICAzzIZQjC3dSD2TycySNUTaC8HDxfYEInfjJ0ugR/y0Be5 38 | QEDhT4zMvi8XGHSJBSiVcrit3l4q7GNNX1AP4CBJW5Cil7NASNB/FYsnpTWyPOzF 39 | uLzwinbYrh6/YzrSoHAQb4J3o8fqZJgC/NqrU4NIYUpgD9U4ikQUl79vj3zix11k 40 | XQTa/QKCAQEAxwieuLROnB4spRaEGhqbfwag/iEqV8qNJz7kx0A1f9hmQtu08i4D 41 | 4SfSjyZsZmTyEJfpVeTwf7JfFHWeg8+Uh/EOLFBuPFdxcoYsrDNI0DkMA1Hl7TR0 42 | TvWsEZaqSHbcXdD9ZqBRrQY7F/yohqv67pwYdPDaszQOvcrdrfGcF6r6WkIf2lvx 43 | YwwOPAjwrq/Rw4pxTJ9eC4dm9ci+omOT8TIg9mKKOvP3qdXDQI2E6PHkZN2sCI6V 44 | YU+AhR6sAagtRi1HiCpFDKqKw5a0JczaE53CseZ/B/2NDpoxn83WBgbmwiMD8908 45 | q6mntiegUTzAyKa/oGt4jymkmpKUd6/fVwKCAQBPAhcEn3m0UtsiuNeJ78LrIwqx 46 | ZrlhdwM7FWvkaYFeg1qi1qI6zSDMjPp73iF750sKnyHuIW4SAWZUjyKOiYzfoR2t 47 | 8yRRYhUji/rZ1Pl+GEFS57G0y5DKJE84Slulqn+JwYVAoayv+GbknDCCC1xpa1tq 48 | UofwzMIGnETZr1Wsx4LU4ztd1FZbM5vwQ7xqlh2vTAGpYEc17J4QqfmfsgUxipFP 49 | OGgKWjIkjwjt22tSR5CfwcvCEHkfchFQzjYWvlRwSkWhbTZ4LZBdP6dr4Xm1rX9i 50 | +9t/SO7fxgyrhNrvHQyQ/mf2ntV1Gcvp0K62NrTO+dLawijOqWkdqBi5+BL7 51 | -----END RSA PRIVATE KEY----- 52 | 
-------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/default/groupQA/role-binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-monitoring-ns 5 | namespace: default 6 | subjects: 7 | - kind: Group 8 | name: groupQA 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: role-default 13 | apiGroup: rbac.authorization.k8s.io 14 | 15 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/default/groupQA/role-readonly.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: default 5 | name: role-default 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "services"] 9 | verbs: ["get", "list"] 10 | - apiGroups: ["apps"] 11 | resources: ["deployments"] 12 | verbs: ["get", "list"] 13 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/kube-system/groupDEV/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: kube-system-csr 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJRVp6Q0NBazhDQVFBd0lqRU5NQXNHQTFVRUF3d0VaR0YyWlRFUk1BOEdBMVVFQ2d3SVozSnZkWEJFUlZZdwpnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDQVFET1lLQVhQdWpHRGVKWUpWWU4vSmNkClBJWHcrVUJvY3VBWHEyQzdMK2JhZ3lhNTdZTGNnajNMT1pxdWVQSFpKSGhKcWZZUUpDcjJqN0kxbG1NSXFMOVoKNVBZZ3RLN0lka2dVN3NDWUZ6SnI2V3phUFByTngveGV5Lyt3NFdxQjk4U0Y2QU5aSi9uY3A5ZUxUTzVYVVBySgpZelIxNTJ5ZVVDdEpuMGZMVFkvQ2VYK1lGKzltYVo4UU5Fd0c5bTBCMGE5T1RQS2VJNU1relhRZjhTWjZ4LzlsCkFTUjUwU2JyckZDbnl6TlAxZ2FPZ00vMFNqNGZsdFY2Z3l1aE82YjB3OVEwV3RnTkthaWt6VElPU0hVcXNyanIKQldIZUp1cFh4dWFQd01JK3pvd3BGcmlqRW1tbWhLaG4yQlJaWlJmZGtYdm5ndjZWSDZ5VThydG1hbld1UWwvdgp3ci9Nbm5OSU5wWlBnSXpFTU1yN2NGbHUzVlQ0akp1eVJxeVRYTDJFd3pwM0dkMWRJY2pLdG5jeTJhM1JseFB5CjRWd3d3WE9mbHFsb2xJS2Z4OE1YMkhoeVhYV0lLNHl6MVRhVzhQQTMwNTRuRVFmUS9hbXJtNlA1ZU42TkNjV3MKSHIzVy9HYUpEK1Joc2ZiMGs3YjdXTWZSbTMrYnJXcWI0OGZtK0M5dktPUkpaZGFPRmh5OFJmRTBKLzFyc0RwUQp3eGR1NHVlWnFrcHowVkdFVWVKZWZVSmN5L1R3c3M0WG1hMEZXQUo2WEJ3RXRoa3hlRDVRaHprdGQwcUI0UWtICnUwdDFUYit1WFpOY3NxTUVGdDUvbkxkTE5hKzVpaG1kS0VNUGxVSmUzRURlU0YvbkQ2MWtLaUExcTFHNG1LWC8KNWppc3ZRQUlhNHREb0gxLzUzblpud0lEQVFBQm9BQXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBQUlmUDhJQwpOMnhCVDZrbUVFQWV5ZGdiNEtxeHVrVFBZdDh2ZTJsZitwbHlVSm9XV0tOZTZXd2czV29zeFVBY3RrQ1F1MDJMClE3ejVXWk9qU2F3ZTd0b08wMDZJM2Zickwwb2wrQkt1VHpJdXlDN1hyZGN6TTRLWWIzZ1l0L0RUYzNobEd5OUMKcHo1MmxUZGQ3Y1JpQTRCRnVVN0N6SWZHTG5odEVwM24yYWk4STlPTmxOclZjWWlHbUllWkF4Z1ZVTFJPcGgrUAp0K0RNaDRya1FSNUNhcDlHdzVSbUswU0dna1dHNVFja2gzVkp3SmV0Qzg2M2YxajFCaFpkVjVjM05yTGJoaGx0CkNjSnVVeDlHcXVUVlFNWHZnYm1KMjlxaVFTWmxKN1NpYkwwOHp3MXN0SExkRlZxMDZicGcrVnIzem16UjRqamkKOGtqVzR3eFFOYnZNY2g0OUJubGs2WVVWdWxEbW9kYjh5UzVEZjJESFdWWEFkdlQ3OGNHNmtyaVZXNnBNKzROZQpsTHdGR25WV1Bpamx5K1lVQlNqbkpCQkNPZ0cwYmNnektJbG1qOXRTVnpFVWtnUWhHdERBRG5VY2N4NFRiMUlHCjRhK0F0azNvV0FIMmtzRnpzenpiNUwwS3RJVDZqdnR0ZGZRdWZXUDVERnl3RGMvSEZ4WlVjTGhXWldha2dMSXUKaWhEbUV6MjFDaTdZN2pZUWM5ZFA3N1B4V0YwNTBSSGFrOTgxYXBoVTZpUEphN1EzWmZ5b1Q1eWRvME82eWtZdQpSa095WERWTEQwN1VWZkhLMTVDTkVJMk5xT0pTcHZZSkFM
c1lReDNsQ2F2UFdqMlJ0K3BPY3lPQXdzZlp2Nk5nCmQ0QU8va3FkUDFhM043MnNZWU5RL0doRUJOTGs2OXJZYVR1agotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/kube-system/groupDEV/dave.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAzmCgFz7oxg3iWCVWDfyXHTyF8PlAaHLgF6tguy/m2oMmue2C 3 | 3II9yzmarnjx2SR4San2ECQq9o+yNZZjCKi/WeT2ILSuyHZIFO7AmBcya+ls2jz6 4 | zcf8Xsv/sOFqgffEhegDWSf53KfXi0zuV1D6yWM0dedsnlArSZ9Hy02Pwnl/mBfv 5 | ZmmfEDRMBvZtAdGvTkzyniOTJM10H/Emesf/ZQEkedEm66xQp8szT9YGjoDP9Eo+ 6 | H5bVeoMroTum9MPUNFrYDSmopM0yDkh1KrK46wVh3ibqV8bmj8DCPs6MKRa4oxJp 7 | poSoZ9gUWWUX3ZF754L+lR+slPK7Zmp1rkJf78K/zJ5zSDaWT4CMxDDK+3BZbt1U 8 | +Iybskask1y9hMM6dxndXSHIyrZ3Mtmt0ZcT8uFcMMFzn5apaJSCn8fDF9h4cl11 9 | iCuMs9U2lvDwN9OeJxEH0P2pq5uj+XjejQnFrB691vxmiQ/kYbH29JO2+1jH0Zt/ 10 | m61qm+PH5vgvbyjkSWXWjhYcvEXxNCf9a7A6UMMXbuLnmapKc9FRhFHiXn1CXMv0 11 | 8LLOF5mtBVgCelwcBLYZMXg+UIc5LXdKgeEJB7tLdU2/rl2TXLKjBBbef5y3SzWv 12 | uYoZnShDD5VCXtxA3khf5w+tZCogNatRuJil/+Y4rL0ACGuLQ6B9f+d52Z8CAwEA 13 | AQKCAgEAjYkzAf5BAeNvwegJNppuwLxLV9J5W6i9Cz6bnoThnCaqHoVHZBdmbcPZ 14 | 06JaqwMaA9suSC+lz2cJXzr+PGgOo4YlTpTb8ds84C5Fo4j1KZDy+FEt6IcpC5J4 15 | j0RpGS1leDiAY1rJP41Ug9KE+xkshYUl616F3aU5qso0ILfWyhctbI+tvdzeG+zh 16 | 3zyN0duOkPKwyce4GFtXMrjeHQKgoxK/g1Pvj12kCw+O50fvQzgQZoesR1nYmqbu 17 | +yq/+8l59VYAKjbR1ARuOeFziY78xyHv1r11pjFg2mHmVdCty1gr1JrCHlOIc397 18 | pTtxvKEUCtiJ5JfOcnBuGXUVtCcNR1MICtSB3rdCvyK9ftTeMnA2ppQEsmvUkNnU 19 | QdTlreaLo4P9MckN6W5vUaPRdY/c1qg1lPYxheRf9dOaeCabNN4EK30AN2ha5ZI+ 20 | XbRUPE6mTLLZ99vnb0CgG8+zRytmlnSg/Bz0KuE0wSsg1cX10RAwD+3vMf5/XMp+ 21 | F/z1XdqATbwfCRy6f640ZbfR/nuZNa4/eI22rBBUD5dQXJ/mk+9JhkSYd509HYCa 22 | SQP7l/OdOvNfs/fkS0p0EWE1n6ZP3vup0haPSvkNAf8cNx3stVyhifKslsL/XIcO 23 | 
ekhFUi4y7O+0fpR1zMKM1drQLnhPWhA2xAtE2in5bw28GtAAc1ECggEBAPuHLAJj 24 | 2wQRji5NZRwtXUPJ7CNTZk9KgMN0Nl2nlz+6oj5IU+KQOdBDhOp9Q5T+kpnKzXlV 25 | WQ9n0inDx0xWYqAUEkBI3P8DOO+aj+2IV6WwGPp8mLNavMhO+l7pPAQekQbihIJp 26 | 4b7/7tKLZkQ1PQFddibBMggC4xxVl0twYMu1JXmMAPP06AJ6TRxLri5Norb0aOlY 27 | cfKw9udHkQDQ8YZHf4gv9Uz34OgMpY2B0WygQ8HJsy7VZE662SQv2VcPG60y6M+H 28 | bea5WQkfsO2MIHeoociv+yVg4y1X8/rc7jWPRTtsXC8qmDZZ4TphSAluXy1j2cyV 29 | qfhjn9YxZcfzcjUCggEBANIL828GoBlqNfj98P3Tsm/mj0DcupLZlE7wQeDSRPnW 30 | GTs4gTKOLm345i3Hh3NVwfGhJZwU9PGBTH0s18UNHSYdQu8NXNTsdfU6PlZtTki7 31 | A3+0tBfe+4m8Sql2Jhhhtb95Qqd1wcsUQamPtQxhHNJ1n2rcawyMB8eUBAp9fmNk 32 | PvPW4CICfFQ1nlsJkQyMfp3TrWZLg5VhAnWv2C5FkLNUR0AEegyxzWeA5tue1hKq 33 | NG/47AuwguE0V8fFBiGn9vKsDt2x6kZNJzEAbKeY0ggBAvYaeKVeCrhozgjTw34T 34 | scz1Gft46dGRiiGNUL2GSJyTaa6USAPuWKsZ1BaB1wMCggEBAL818gnbGVtNcTXX 35 | 1j6GafknnqD4PLWYYKy1QsMNrVhgLAiG+WYp2BlvyrJsCZ1RrebGGk/bxvmEJmtj 36 | jrWEkjM5Luq5uW8PZxAXAsVDOcYWSdbaM0ddelgYWhMe4WYMSpneR3fQwDFv5IEg 37 | Qd0mT7si7rfldIBMmZQnqP1Lb/pMOjHdcbgDfQCtMsOI8N/4WvMUeFCz0OP7Daei 38 | ayQgswsufU/tjWA2rD4bpxsdDjSy3O+LwvCefkOvnXT5/xJExPh01DE3th+I1Zuv 39 | ANSFCwWMJ3kl9xliPzFwRGR34YrL4IidBrNAgTgIOcUkwRuSUT5zQtZZDD5slczB 40 | /qrzceUCggEAVVvn9MGjhFCGWIaSTgSe6hyvMLPC2nSzcZTvoLayszkPjeJ7lpjz 41 | Q1u35vjEEdnS9CgBybk2hYJ/HAuCMCERLHENx2oNxLXHhf3CYYsQGjxtwpMj8ON8 42 | I1Ayo0VRW+0xV/umW7leJPI5cPfmon5AoUKNo3apxjKN4sxDDCRNnYSalKtI8GlE 43 | L1CO5v4vRQM/XVItkHm1opoGzt7EIqHAmS/xuJFZLBmBmMNoje2xn2ot3ycwfdug 44 | hHaqCZQG1tyUXbAQZoWr6B/ogmBix4UyLXCDzJGbX9Ck2KY2vJ2aP30vb1RX17se 45 | gjz7/oCYRDzy+zZo5zp5Pae5vubDKtZfEwKCAQA6dhNMJDfGgEsa55j/ydhZAC2n 46 | 2QadqroR7D7EUDYaLtDCj0BmcdGam6emaP0ind6MMAob+Fooiug6w5d4CJSKxx4+ 47 | jghf3T8KaEhCbtiakeYxozmggn121sB9NxsljiY40+t3avKJAhj0G3se5Wphowi6 48 | YhnrzwlwDAxR6A00kA6AhXFjcYO9ERFxd/37m2yWB62TMwajGI3UQ/RCviCkLT2f 49 | XsjV8p7w/DAlXW5KADHpO/rBRWuZarATMt+rEchCtBQ4rKJ9RnZ1lVzC1GYZJiEo 50 | 3OsErlISZkDvkIpfZsiB2LQ9DigDX3Xk8PHRZMd3kXdGjqXflJS+31i987D4 51 | -----END RSA PRIVATE KEY----- 52 | 
-------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/kube-system/groupDEV/role-binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-monitoring-ns 5 | namespace: kube-system 6 | subjects: 7 | - kind: Group 8 | name: groupDEV 9 | apiGroup: rbac.authorization.k8s.io 10 | roleRef: 11 | kind: Role 12 | name: role-kube-system 13 | apiGroup: rbac.authorization.k8s.io 14 | 15 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/dev/kube-system/groupDEV/role-readwrite.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | namespace: kube-system 5 | name: role-kube-system 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods", "services"] 9 | verbs: ["create", "get", "update", "list", "delete"] 10 | - apiGroups: ["apps"] 11 | resources: ["deployments"] 12 | verbs: ["create", "get", "update", "list", "delete"] 13 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/reset.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [ $# -lt 3 ] 5 | then 6 | echo "Usage: ./reset.sh " 7 | exit 0 8 | fi 9 | 10 | FOLDER_NAMESPACE=$1 11 | FOLDER_USER_GROUP=$2 12 | KUBCONFIG_CLUSTER_FOLDER=$3 13 | 14 | 15 | export NAME_OF_CSR="$FOLDER_NAMESPACE-csr" 16 | 17 | kubectl delete csr "$NAME_OF_CSR" 18 | 19 | 20 | 21 | if test -f "./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readonly.yaml"; then 22 | kubectl delete -f "./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readonly.yaml" 23 
| fi 24 | 25 | if test -f "./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readwrite.yaml"; then 26 | kubectl delete -f "./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-readwrite.yaml" 27 | fi 28 | 29 | 30 | 31 | kubectl delete -f "./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/role-binding.yaml" 32 | 33 | 34 | rm -rf ./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/* 35 | 36 | 37 | 38 | 39 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/run-all.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | clusterNamespaceSet="default|kube-system" 4 | userSet="groupQA|groupDEV" 5 | accessTypeValueSet="R|RW" 6 | FOLDER_DEV_CLUSTER="dev" 7 | 8 | 9 | echoUsageDEVCluster() 10 | { 11 | echo "Values for : $clusterNamespaceSet" 12 | echo "Values for : $userSet" 13 | echo "Values for : $accessTypeValueSet" 14 | } 15 | 16 | assignVars() 17 | { 18 | FOLDER_NAMESPACE="$1" 19 | FOLDER_USER_GROUP="$2" 20 | ACCESS_TYPE="$3" 21 | } 22 | 23 | generate() 24 | { 25 | echo "-------------------------------" 26 | echo " Resetting previous changes " 27 | echo "-------------------------------" 28 | ./reset.sh $FOLDER_NAMESPACE $FOLDER_USER_GROUP $KUBCONFIG_CLUSTER_FOLDER 29 | 30 | echo "-------------------------------" 31 | echo " Client Cert Generation " 32 | echo "-------------------------------" 33 | ./client-run.sh $FOLDER_NAMESPACE $FOLDER_USER_GROUP $ACCESS_TYPE $KUBCONFIG_CLUSTER_FOLDER 34 | 35 | echo "-------------------------------" 36 | echo " kubeconfig & dave.key generation " 37 | echo "-------------------------------" 38 | ./admin-run.sh $FOLDER_NAMESPACE $FOLDER_USER_GROUP $ACCESS_TYPE $KUBCONFIG_CLUSTER_FOLDER 39 | echo "-------------------------------" 40 | echo " Share the following files with the $FOLDER_USER_GROUP 41 | 
./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/kubeconfig 42 | ./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_NAMESPACE/$FOLDER_USER_GROUP/dave.key 43 | 44 | Initialization Steps 45 | $ export KUBECONFIG=\$PWD/kubeconfig 46 | 47 | $ kubectl config set-credentials dave \\ 48 | --client-key=\$PWD/dave.key \\ 49 | --embed-certs=true 50 | " 51 | echo "-------------------------------" 52 | } 53 | 54 | 55 | if [ `kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."server"' | grep "https" | wc -l` == "1" ] 56 | then 57 | echo "Dev cluster" 58 | if [ $# -lt 3 ] 59 | then 60 | echo "Usage: ./run-all.sh " 61 | echoUsageDEVCluster 62 | exit 0 63 | fi 64 | assignVars "$1" "$2" "$3" 65 | 66 | if [ `echo "$FOLDER_NAMESPACE" | egrep "$clusterNamespaceSet" | wc -l` == "0" ] 67 | then 68 | echo " value not as per standards" 69 | echoUsageDEVCluster 70 | exit 0 71 | fi 72 | 73 | if [ `echo "$FOLDER_USER_GROUP" | egrep "$userSet" | wc -l` == "0" ] 74 | then 75 | echo " value not as per standards" 76 | echoUsageDEVCluster 77 | exit 0 78 | fi 79 | 80 | if [ `echo "$ACCESS_TYPE" | egrep "$accessTypeValueSet" | wc -l` == "0" ] 81 | then 82 | echo " value not as per standards" 83 | echoUsageDEVCluster 84 | exit 0 85 | fi 86 | 87 | KUBCONFIG_CLUSTER_FOLDER=$FOLDER_DEV_CLUSTER 88 | generate 89 | fi 90 | 91 | 92 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /gcp/task-003-intermediate-namespace-wide-kubeconfig/www.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: www 5 | namespace: kube-system 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: www 11 | template: 12 | metadata: 13 | labels: 14 | app: www 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx:1.14-alpine 19 | ports: 20 | - containerPort: 80 21 | --- 22 | apiVersion: v1 
23 | kind: Service 24 | metadata: 25 | name: www 26 | namespace: kube-system 27 | spec: 28 | selector: 29 | app: www # fix: must match the Deployment's pod label ("app: www"); "app: vote" selected no pods 30 | type: ClusterIP 31 | ports: 32 | - port: 80 33 | targetPort: 80 34 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/admin-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ $# -lt 3 ] 4 | then 5 | echo "Usage: ./admin-run.sh <folder-user-group> <access-type R|RW> <kubeconfig-cluster-folder>" 6 | exit 0 7 | fi 8 | 9 | 10 | FOLDER_USER_GROUP=$1 11 | ACCESS_TYPE=$2 12 | KUBECONFIG_CLUSTER_FOLDER=$3 13 | 14 | # Encoding the .csr file in base64 15 | export BASE64_CSR=$(cat ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.csr | base64 | tr -d '\n') 16 | export NAME_OF_CSR="$KUBECONFIG_CLUSTER_FOLDER-$FOLDER_USER_GROUP-$ACCESS_TYPE-csr" 17 | # Substitution of the BASE64_CSR env variable and creation of the CertificateSigninRequest resource 18 | cat ./common-resources/csr.yaml | envsubst > ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/csr.yaml 19 | cat ./common-resources/csr.yaml | envsubst | kubectl apply -f - 20 | 21 | kubectl get csr 22 | 23 | kubectl certificate approve $NAME_OF_CSR 24 | 25 | kubectl get csr 26 | 27 | kubectl get csr $NAME_OF_CSR -o jsonpath='{.status.certificate}' \ 28 | | base64 --decode > ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.crt 29 | 30 | 31 | if [ "$ACCESS_TYPE" == "R" ] 32 | then 33 | kubectl apply -f ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/clusterRole-readonly.yaml 34 | fi 35 | 36 | if [ "$ACCESS_TYPE" == "RW" ] 37 | then 38 | kubectl apply -f ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/clusterRole-readwrite.yaml 39 | fi 40 | 41 | 42 | kubectl apply -f ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/clusterRole-binding.yaml 43 | 44 | 45 | # User identifier 46 | export USER="dave" 47 | # Cluster Name (get it from the current context) 48 | export CLUSTER_NAME=$(kubectl config view --minify -o
jsonpath={.current-context}) 49 | # Client certificate 50 | export CLIENT_CERTIFICATE_DATA=$(kubectl get csr $NAME_OF_CSR -o jsonpath='{.status.certificate}') 51 | # Cluster Certificate Authority 52 | export CLUSTER_CA=$(kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."certificate-authority-data"') 53 | #export CLUSTER_CA=$(kubectl config view --raw -o json | jq -r '.clusters[].cluster."certificate-authority-data"') 54 | 55 | # API Server endpoint 56 | export CLUSTER_ENDPOINT=$(kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."server"') 57 | #export CLUSTER_ENDPOINT=$(kubectl config view --raw -o json | jq -r '.clusters[].cluster."server"') 58 | 59 | cat ./common-resources/kubeconfig.tpl | envsubst > ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/kubeconfig 60 | 61 | 62 | rm -rf ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/csr.cnf \ 63 | ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.crt \ 64 | ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.csr 65 | 66 | 67 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/client-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [ $# -lt 3 ] 5 | then 6 | echo "Usage: ./client-run.sh " 7 | exit 0 8 | fi 9 | 10 | 11 | FOLDER_USER_GROUP=$1 12 | ACCESS_TYPE=$2 13 | KUBECONFIG_CLUSTER_FOLDER=$3 14 | 15 | 16 | 17 | 18 | mkdir -p ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/ 19 | 20 | cp -rfp ./common-resources/csr.cnf-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/csr.cnf 21 | cp -rfp ./common-resources/clusterRole-binding.yaml-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/clusterRole-binding.yaml 22 | 23 | if [ "$ACCESS_TYPE" == "R" ] 24 | then 25 | cp -rfp 
./common-resources/clusterRole-readonly.yaml-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/clusterRole-readonly.yaml 26 | fi 27 | 28 | if [ "$ACCESS_TYPE" == "RW" ] 29 | then 30 | cp -rfp ./common-resources/clusterRole-readwrite.yaml-template ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/clusterRole-readwrite.yaml 31 | fi 32 | 33 | 34 | egrep -rl "SUBSTITUTE_GROUPNAME" ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/ | xargs sed -i "s/SUBSTITUTE_GROUPNAME/$FOLDER_USER_GROUP/g" 35 | 36 | egrep -rl "SUBSTITUTE_CLUSTER_NAME" ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/ | xargs sed -i "s/SUBSTITUTE_CLUSTER_NAME/$KUBECONFIG_CLUSTER_FOLDER/g" 37 | 38 | egrep -rl "SUBSTITUTE_ACCESS_TYPE" ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/ | xargs sed -i "s/SUBSTITUTE_ACCESS_TYPE/$ACCESS_TYPE/g" 39 | 40 | mkdir -p ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/ 41 | 42 | openssl genrsa -out ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.key 4096 43 | 44 | openssl req -config ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/csr.cnf -new -key ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.key -nodes -out ./$KUBECONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.csr 45 | 46 | 47 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/common-resources/clusterRole-binding.yaml-template: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-SUBSTITUTE_CLUSTER_NAME-SUBSTITUTE_GROUPNAME-SUBSTITUTE_ACCESS_TYPE 5 | subjects: 6 | - kind: Group 7 | name: SUBSTITUTE_GROUPNAME 8 | apiGroup: rbac.authorization.k8s.io 9 | roleRef: 10 | kind: ClusterRole 11 | name: role-SUBSTITUTE_CLUSTER_NAME-SUBSTITUTE_GROUPNAME-SUBSTITUTE_ACCESS_TYPE 12 | apiGroup: rbac.authorization.k8s.io 13 | 14 | 
-------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/common-resources/clusterRole-readonly.yaml-template: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: role-SUBSTITUTE_CLUSTER_NAME-SUBSTITUTE_GROUPNAME-SUBSTITUTE_ACCESS_TYPE 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods", "services"] 8 | verbs: ["get", "list"] 9 | - apiGroups: ["apps"] 10 | resources: ["deployments"] 11 | verbs: ["get", "list"] 12 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/common-resources/clusterRole-readwrite.yaml-template: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: role-SUBSTITUTE_CLUSTER_NAME-SUBSTITUTE_GROUPNAME-SUBSTITUTE_ACCESS_TYPE 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods", "services"] 8 | verbs: ["create", "get", "update", "list", "delete"] 9 | - apiGroups: ["apps"] 10 | resources: ["deployments"] 11 | verbs: ["create", "get", "update", "list", "delete"] 12 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/common-resources/csr.cnf-template: -------------------------------------------------------------------------------- 1 | [ req ] 2 | default_bits = 2048 3 | prompt = no 4 | default_md = sha256 5 | distinguished_name = dn 6 | [ dn ] 7 | CN = dave 8 | O = SUBSTITUTE_GROUPNAME 9 | [ v3_ext ] 10 | authorityKeyIdentifier=keyid,issuer:always 11 | basicConstraints=CA:FALSE 12 | keyUsage=keyEncipherment,dataEncipherment 13 | extendedKeyUsage=serverAuth,clientAuth 14 | -------------------------------------------------------------------------------- 
/gcp/task-004-intermediate-cluster-wide-kubeconfig/common-resources/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: ${NAME_OF_CSR} 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: ${BASE64_CSR} 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/common-resources/kubeconfig.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - cluster: 5 | certificate-authority-data: ${CLUSTER_CA} 6 | server: ${CLUSTER_ENDPOINT} 7 | name: ${CLUSTER_NAME} 8 | users: 9 | - name: ${USER} 10 | user: 11 | client-certificate-data: ${CLIENT_CERTIFICATE_DATA} 12 | contexts: 13 | - context: 14 | cluster: ${CLUSTER_NAME} 15 | user: dave 16 | name: ${USER}-${CLUSTER_NAME} 17 | current-context: ${USER}-${CLUSTER_NAME} 18 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupDEV/clusterRole-binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-dev-groupDEV-RW 5 | subjects: 6 | - kind: Group 7 | name: groupDEV 8 | apiGroup: rbac.authorization.k8s.io 9 | roleRef: 10 | kind: ClusterRole 11 | name: role-dev-groupDEV-RW 12 | apiGroup: rbac.authorization.k8s.io 13 | 14 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupDEV/clusterRole-readwrite.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: role-dev-groupDEV-RW 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods", "services"] 8 | verbs: ["create", "get", "update", "list", "delete"] 9 | - apiGroups: ["apps"] 10 | resources: ["deployments"] 11 | verbs: ["create", "get", "update", "list", "delete"] 12 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupDEV/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: dev-groupDEV-RW-csr 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJRVp6Q0NBazhDQVFBd0lqRU5NQXNHQTFVRUF3d0VaR0YyWlRFUk1BOEdBMVVFQ2d3SVozSnZkWEJFUlZZdwpnZ0lpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElDRHdBd2dnSUtBb0lDQVFET3kxL1A3NXJ0TWdKemRyeFJNTW4wClg5YmFHaFlxSzhmcEg4T2syaFlVdVhxTUYyb0FUdStUY1pVWjZOL2c2V0ZSTWNGV0F6bTV6SnoxMThnWGZaMHMKSTd1K2UvTXNyT05qM2VlN09pZVRSbVp6UVFmcmllL0tpRUZselAydkEweFlUUmZpRlNYVXRyYlJtSVBNMEdNcwpuWjNnczR6MlNTQnVaeThLZ2c5MmJGd04yYng3bUsxM25GYUNDamhSaEtxeGxaalFydE1WTis5ZG9WOFY2am14CnhuMGdsbExPYUFaMWxFUE80WFZvOElCOVpqUzd3d3lSdHkwNEtjeTN5bjBCMlNWelFDejRnell6cFZGN2dEZVMKVDczWXFzb29pY3NkaHNqUysvTE1jcHRUTTVkekZrckR1QzlIVXJRcHluNW5sbmRhYXU1TkdjQXdSUWtaVzA3VQpIbE4za3BvSGJISzZHT2hGZkJPMmFueXEwUDZlRTBJQlpzNTR2VE93QmFZcmh0YS9DdXAxbDN6UHVKcnlvUnJQCjJBekQ2RHB2SEw0Z1MvMGRXKzJzNTNoclJGclg3aGN5bWFsRmVQWnJIaTVNWjRmQUNwL0VSSG13NEttNyt4SSsKclovV0tsWktFL2czbU5GZ0lqajRqSFJDUXNlQTZWSXlzR1hTbjdvWTJ1enJCNDFKdVVpTFBIcTZ6U3UzVml6cgo1elh5TmpPOTdoMFJjY29VUlFTOEZKQWZZV0FBaEIzcXBqMVN1R3orYkFETTVGNjZJRlRXbkpGQ0I2bG03MUg2CmVEZmNkUlVacWhGSGhOTFFENk5ycHBIS2tTeTB4eW5Ebzg3L0NSU01wM0cwaDM4M25hNEFqR29PK25JanFCcUoKcjQyak1nblN6WXpEcEtYTDlnLzhwd0lEQVFBQm9BQXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnSUJBSGh5d1hmSQpkYm1SNzgvWndOT1dEdWJOY3dZVWREa1NhSVgwbXdSUlJVZFB2YjdiU2ZpUDdlKysvY
zlwU1AzeE0rMzlRTnJ2ClBsTzRjVkNSQmNYVHZKWHhvbDlNZElDYTVhNitLSURVYlM2YXhpYmtkQjR2aXByK1Q5eXUxYm1SVFBERDRoSkEKUnRlOURUWGZhbjZUY3pya0tpQnlFQUdWMDVZQWhFVkNCUG8vaTlCMEVRK2tkbUpxYW5leDNzK094dWt1TWJBSApleWFETDNUN0dHNTBCQjJmSlliQnlWaTdwMUh2Z2RKWmprby9PQzZPOWVISXUybHNjdzBsV09kU1FybTRJUEpQClVQTnVtUzBFVXdobUZJaW80Y0MrM05RbkN5SUZrV0NKZUVrdzlqSWFZRlBGZDAraHEzdlJtd0xkbCt3MXpCWnAKcFFZWEF3a2NHTm9USmJoT0tRLy9ya0oxTnVkVEw0TEc0S2VERlJhczJCOXpMYzlxaEI0RUlOM0NxRFJaU29RRwpyUERDQUxyTDRTUml0NHBoZlNsaUlIL2NnenQ4WHZCOVRRYkFCVjAzajlBZ0ZpRzlvbzhUY3V4Zk5weVJWbGJNCjdKcHFWaWhnS1Bqek1JWkc2eGxqMDJicVg1dCt4NXBJTmEzWjgrelZKUC9GRzN5bkxPTTNTb3hNVTl1cUJ4MVgKalNjRGZ3cjhlQ3hESGYvaExPUjlHL2Y1NGNha2hJeDFoUkJ1dWZEd0dqczZXWks5L24weXpuSkF4bnRJOHl1NQp0OVRFVVdzVHppMVI4WFNvZTQxdWNTM0doditDK3FRVHk1M2dobW1SNmF0SUhJbnZJSzRIM3M2M3BucXp0UG9jClQvbzluWjNERUs5NmxVMUhBdjBxbXM1VWN4VnFDalJ0bmpyVQotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupDEV/dave.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKgIBAAKCAgEAzstfz++a7TICc3a8UTDJ9F/W2hoWKivH6R/DpNoWFLl6jBdq 3 | AE7vk3GVGejf4OlhUTHBVgM5ucyc9dfIF32dLCO7vnvzLKzjY93nuzonk0Zmc0EH 4 | 64nvyohBZcz9rwNMWE0X4hUl1La20ZiDzNBjLJ2d4LOM9kkgbmcvCoIPdmxcDdm8 5 | e5itd5xWggo4UYSqsZWY0K7TFTfvXaFfFeo5scZ9IJZSzmgGdZRDzuF1aPCAfWY0 6 | u8MMkbctOCnMt8p9Adklc0As+IM2M6VRe4A3kk+92KrKKInLHYbI0vvyzHKbUzOX 7 | cxZKw7gvR1K0Kcp+Z5Z3WmruTRnAMEUJGVtO1B5Td5KaB2xyuhjoRXwTtmp8qtD+ 8 | nhNCAWbOeL0zsAWmK4bWvwrqdZd8z7ia8qEaz9gMw+g6bxy+IEv9HVvtrOd4a0Ra 9 | 1+4XMpmpRXj2ax4uTGeHwAqfxER5sOCpu/sSPq2f1ipWShP4N5jRYCI4+Ix0QkLH 10 | gOlSMrBl0p+6GNrs6weNSblIizx6us0rt1Ys6+c18jYzve4dEXHKFEUEvBSQH2Fg 11 | AIQd6qY9Urhs/mwAzOReuiBU1pyRQgepZu9R+ng33HUVGaoRR4TS0A+ja6aRypEs 12 | 
tMcpw6PO/wkUjKdxtId/N52uAIxqDvpyI6gaia+NozIJ0s2Mw6Sly/YP/KcCAwEA 13 | AQKCAgEAsWvD1H9aiM8zUyU4BOC7JLNlftKK2bueVOEv1ixWbYmFHtSn2YPgALye 14 | lBA8pJcXK7ZojgdOa56lvDdaMO1RACHYXmGyDU53+aLVwsjuU2k/VPsidG9wEHA+ 15 | y7zImnNV+RyLtSk3v/zTdZhI3Fyd+hVw8uyZsGSlMpPMxFM6OPxXLnr1P4WKruiv 16 | P/rMvwP3/0y+Q6z65WWMNOTS09aVkXUeveq7OOP/RrniQ4kkVtihJPWfoMiD7DEg 17 | 1uLmBXnxLrjDbNJOwxiwVDcuZatoAlvwIgjGUWmHntmZS4tmkQpzQTsuYUtq8ekA 18 | h0zpzkExmmZ6GFVOB1BkmAVi/SJdygsFgoApEff43TQ5H3oM2suZfw0xYB4Z40cb 19 | CCoItHpwMjXhzi06WyKULeAn7HYOyEj2Y0czje/WJGgyhx6EGUGFquCrHTCDOWcn 20 | fJaZpRA4mLQ3v/AGl2Xnj3IwwWY0YeXw5O2wuSCaWqUi8yQ5IVSEpiwQG1dgWkAj 21 | WXupB6Fn4omHCsWWa9Tfk6Wri2S51QlP7N7pVMUAmL5TsgjJnjG+3ZzDjmyvXlu0 22 | Ze1teQjS8NWx0UD6wgLrhqbtK+r8g/zePm9U7ffqNi1ArbMS58A2JjRR8110P+5p 23 | +2GTvb74Xe/Yl6clYoUV6skyuMbYv5lCLMn9b/guu8WsdN9ZQ8ECggEBAP+76NF8 24 | KrjiPIKz+HjOYhoM+O61hUNR3UA82jYowf5aXas37x7q19kHBHRzdtnpNQxt9Qg/ 25 | opjo38WBWtEkEzk99ZIqWciQYhr4fPskr/Xk2VeuwXQ/wdpoZrut6+t0HWAHUUHj 26 | 7iOxcDw7KfKtIoYGxi9qCSxPm8mjuLF8tyEbn/61fAYLUEnEHjJiHWrRhwG30LJE 27 | hMHxuwx47yZnqMkom5RCmkcUO1vUwaDJBO6kzAukBRw/iRwVfUh1aL9e9A8SI69d 28 | Up+TR9Rs3WHMA69X0KrSCrzV4dZrbSPIZmSOzCNP+bMX8Noou2F5eYXFUxeIn/i6 29 | hUxmorP5TsZ7Ii8CggEBAM8CbzROiC1CaYflyskeqY97wc6K0QZPBGdDlgSSMaI8 30 | r4IJn23KN909pUsoUYSrnIt8OSaADDjIZMF1FFNWOye4/e0YQ06E22k3Duf23bI7 31 | YsuWr3o1XE3qzWx2j4F34qUNn+wU3cfwsjO/yZNhfLCbEv3ddhoEmoFLVhaDJ0rD 32 | /7kpAe2VZ3qFCZheedj52E6bASihPiuLO01GlKUgNAq2SGWTppjVjKNlVfzMpeNc 33 | zAxiKtBYFtK4S/A6vIiskbRn1TrW4H81lH+KtCREvEoP7tDhy6S1eLWdODSq3vNd 34 | cwROrWP60X4YYzV5hUjz4Sd7c4iqaANeF9B02/pbhwkCggEBAN9kpZdBUQyaTGOc 35 | lQPqvVWmWVok5RmOjKiNv8ypw/TNuZ9UyCSI3y7ta13Sc1Md3cJS10ZyWDYcz0T3 36 | n36sdcJ+NmONbUq/nYOT55F3VeKGG7n1pXZIL637nyp5Xmoaz4JQQhI96ovxd2qW 37 | ACnGRi8wAbXveb3keOEmjLM2j75zJEta+9FH1sXpdLtsa+ggyj+c88iWBCZrZIsb 38 | pUU2LgIy390eZSiMKuIoM1Ch6RYSjU/p4rDva3VF7F/8eBkb+DSmEtggvIk7HfcZ 39 | djX1Ao8MNfLScqDMcXBd/UPRzmgvOXHrbeOdmT7pb+5RL+/EFLatGZksTj8UNTLk 40 | 
KIVHkO8CggEBAMu75Z5hO/zvQ9WP/0h5eP/SGSghzx6aXgcTqW3ORDMb7s/YkKQJ 41 | RXnk1e1mPPSJwYQ2Ojj+BQlNkHwlsl9ims25b5ANNRDuCNNAcNczS3XCpLUiFIn+ 42 | Ql4dHRgGh64wQ0uWYTKERtIkvf3YX5dBduqJdOONNlLc/QMLFisJ4X3f8KpXBaLC 43 | TBl1HcBsGxfErXKPc1B9gSZx3Wdsc5cgXpe8VvxBmNEKO2QEs+AduZZPGjOF2gU6 44 | 7Rtubq574FnTHYOkPWOvXztTjFcKN8kKLRXnIG9MCpSMVcTIeyy6jrqN2kj+9f9U 45 | NyIW9m0GsHrgK1QkV8zFiaDm1iE/IdEAi6ECggEAHCygJVA8Mygi3l1CC9ZPgrR0 46 | EQcoWWkmAiiDqCDkAKxwse3iHel0TbeBi/RgXgFA1bpcsrrhDWQFu28eqwepCSZf 47 | ll7ee9cAZEO29NyZns8Vd/wmfB/oYmUpP/MaW18CjiEA8N0HZvxhAAf7DIG5Ku/7 48 | 88/TbIGNCHapz1rxmoQv5hIkAZqXinPP9p2oChHro3qxlgD94lVSiqwI/RLPixxg 49 | vy80ZzpFgjM0GsSjnS5iqre2OdeL508r/0nnm0EoyLsYirWlyTeBLCr0i8cfzVLJ 50 | QCJ/yG0MW8cgufm1mMRuKArLCwTAXx//BKyMuhKsNvKcMl2e+9Go2kJpiWcRHw== 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupQA/clusterRole-binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: rolebinding-dev-groupQA-R 5 | subjects: 6 | - kind: Group 7 | name: groupQA 8 | apiGroup: rbac.authorization.k8s.io 9 | roleRef: 10 | kind: ClusterRole 11 | name: role-dev-groupQA-R 12 | apiGroup: rbac.authorization.k8s.io 13 | 14 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupQA/clusterRole-readonly.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: role-dev-groupQA-R 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["pods", "services"] 8 | verbs: ["get", "list"] 9 | - apiGroups: ["apps"] 10 | resources: ["deployments"] 11 | verbs: ["get", "list"] 12 | 
-------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupQA/csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: dev-groupQA-R-csr 5 | spec: 6 | groups: 7 | - system:authenticated 8 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJRVpqQ0NBazRDQVFBd0lURU5NQXNHQTFVRUF3d0VaR0YyWlRFUU1BNEdBMVVFQ2d3SFozSnZkWEJSUVRDQwpBaUl3RFFZSktvWklodmNOQVFFQkJRQURnZ0lQQURDQ0Fnb0NnZ0lCQU1PdjVQeFlERXNJbldlMWtBNTRad0VkCjlOc01iQWF2U3g3aVQrZVJob0VNbXBoclh0dHRhLzJBT3JIZURmeEQ1MmJVK0ZiK0FPZE1vaFdER1d5YzBoNjEKdk9SVUtOOWZvcys1TUp2bnJab1ZyUVJObUtLVkFiODM1ZW5hcUQvQWRFZ2hOVUVFVmJBbHU5NUdRUXd5aXZsSwpQY0phNFV5RnllTDMvVmd6WXd6WUpQVkNoV2VHSzFlaWdsMXMrM2k2aldRS1VLZmFzek1ScVRrRFIyUHhJMnFJClR2NjVacDVaTkRmMHhHYWIrS1ZYSGpDN2dtbTFubGRYYnFJbk9LaDlBems4cGU2d0JkbUIrblU5dlhvYWRPcmIKdDVPSG9VQWRuYVd1M01rZGVOMVEzSTNycmtkUnZrTHJIUWxvUVZpKzlxWGN2Y0kvcm5uWUZDUXUyM3JtRmpSOQo3WVJHSFNHZ09wUGVyd1U1WGxiS1lJR05RbzRsK1ZwQW5TN016V05EeDgwb2VPN1hMN2ZtTTJ1OTJGZ3locHJoCjAwOW5xODVSQ09xT2dXMzRlT1V5RnVFbkl4eHZnSXdTeU1hQnpTTmdUM01rcDdZb3I2LzRPTXN1UWUyTEhnTFQKUUpMMjJuZ2piQXg3U1J2eDJYellaWlM4NERHSEdMMmVNRGxaaXJrOVZCdm9uaTBIbUcxdFZRKzhMS0NBRVhuYgpXV0hhZXhucXBONm55R3VCbGV6VWdHR1czU0V4WTlCWmtSdlhZVm1UOGNudFFraG9WSmJET0JzdEtUV2UvVzEzCkVCeDBqSGthQnowR1NpWGVLMms4dFNmVmExZDNhMTZSMXJKVCtHdmNGWmJEMm5IVDlHdjJDN09YZUhUMEtHdDQKL2tjUmRqUlFIc3dkWExWNzJHeHpBZ01CQUFHZ0FEQU5CZ2txaGtpRzl3MEJBUXNGQUFPQ0FnRUFEM1k3eGRoVQo0aUtYa1NYOVkzVGQ2dU5xWHJoUFM4YVoyUjNwUFVBRGxMM2VBaFR5UnhGaWVKMFUxMVBjVFZPaWZGR1FqRG5hCmUrL25vcVJ1QldzeHg0QXE2MVlvYnNaOTB4SFFIaExiMGVpeENUck0yaFhDK3FBK2Ewa1FtYWYrNXFaS1BYVUYKSjM4eTgzVG1STUtiUFZRM3hPOEJpVWg4ckt4VWlMREU3WEhGOWloQTV2V3lKaDAzN0VFa0xhVDQzTVh4d3pIaApKR1gzaG1RNWlRZ2RESmpHSGdOL1lqeWZNZzgwWUJUNW0yZkZJc0RHbW9SdC90UWMyWmZOOVUrcDNPTXA2dkhRCldJOHRBcFk5L0w5NnRBSjdGSk1IUWdXNGd2T2pVRjlPNFVMMUxYTGw2Q3NqZ
ExMQlBXbkFDNEhpQ2RLSjRoMmMKQ0EzMWZKZUJROWtiWmFSM3A4VHhEVnFBWXQ2c05ZUEpDcUZSbmxiMUhxSmYwbitwN2M1SWxkUklob2N6bXlJUAppdFN1MUU5RXp0eTJ3V1RMbng3dnB4WEtNc295cWNJK0o1UDhQdjBKUVVoMTVWczdIRXdGYm5KTzk2WWVJaHhHCk5EV2pJcFJCOWxRTDVFSHJXZTZOWitwVWJ2MnlibURUbHBLajFQSk01MnJlMytvVWhxY3RydHFsazdnWnorK1QKTVRES29WUUtHSXRyMGNGWXorWnk2VFN5TUJSNEdRb2UrZHBVbHhEQ1M2Tm16K2NQc3ZaL3JNWDZsaWJ3WjdGVQpEc0JWemd4NTFPS0dWNmRqTU5DWXR4Qm0rdjlGMk1pUTYyV1lSK0FFU28rMHU4Tm5wczhsU0xtOVlVam9zakRSCm1lKzkwVWlpOFVjY3ROU0VDTGlDY3dpeU1uR3RaN1RnaGVnPQotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K 9 | usages: 10 | - digital signature 11 | - key encipherment 12 | - server auth 13 | - client auth 14 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/dev/groupQA/dave.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAw6/k/FgMSwidZ7WQDnhnAR302wxsBq9LHuJP55GGgQyamGte 3 | 221r/YA6sd4N/EPnZtT4Vv4A50yiFYMZbJzSHrW85FQo31+iz7kwm+etmhWtBE2Y 4 | opUBvzfl6dqoP8B0SCE1QQRVsCW73kZBDDKK+Uo9wlrhTIXJ4vf9WDNjDNgk9UKF 5 | Z4YrV6KCXWz7eLqNZApQp9qzMxGpOQNHY/EjaohO/rlmnlk0N/TEZpv4pVceMLuC 6 | abWeV1duoic4qH0DOTyl7rAF2YH6dT29ehp06tu3k4ehQB2dpa7cyR143VDcjeuu 7 | R1G+QusdCWhBWL72pdy9wj+uedgUJC7beuYWNH3thEYdIaA6k96vBTleVspggY1C 8 | jiX5WkCdLszNY0PHzSh47tcvt+Yza73YWDKGmuHTT2erzlEI6o6Bbfh45TIW4Scj 9 | HG+AjBLIxoHNI2BPcySntiivr/g4yy5B7YseAtNAkvbaeCNsDHtJG/HZfNhllLzg 10 | MYcYvZ4wOVmKuT1UG+ieLQeYbW1VD7wsoIARedtZYdp7Geqk3qfIa4GV7NSAYZbd 11 | ITFj0FmRG9dhWZPxye1CSGhUlsM4Gy0pNZ79bXcQHHSMeRoHPQZKJd4raTy1J9Vr 12 | V3drXpHWslP4a9wVlsPacdP0a/YLs5d4dPQoa3j+RxF2NFAezB1ctXvYbHMCAwEA 13 | AQKCAgEAhu+YP6VbPLZBDA13u3Ks3GlSuWVUbDud2jsGDFEd5YjSZX3MWBSY3aH5 14 | V0Mec7BDNXZvlFTsPJJsRdczxeBRR5ncGboqwPTKaOBzM15LKDAAMTJsqNeE791U 15 | ra/4SpWj2UUE2sNpbuBmLnxVSFrl18ao1YcjSkgLEXYdfR9aErZMrc5j/SHYa0KF 16 | EHUg5nwFGVG/ki9NWE66WLT5GmJiIIjDqAQiTdiHGHqwn2y3xRNQnxv9bnUtLV79 17 | 
y9Stzgj+Gw7GANPiBL0K/qNHL5ZkvropWRKyT2rclS4B0rB+8mVa4IX+k/E6+Bv1 18 | 1tHMFa0AmRigxPxyjFRCdzzUXWehMPXnGqmt/rHA5qkqKkskA6Wif68fA9lrmIG3 19 | +ooGXwZEZZwc1pPRhZaK7ZXLXgl381cl42lpLpvzD3Mvc+1wAjS8HWX0uSjkhR9j 20 | EpodErAZIesRul2pbVzgRMGj9tzyJ5k5M7SqyY3e0lPcRwbi/tWhZG6aU9VFB+WW 21 | ubkFsu4EFgxNPHbOnqz+ncUq6nijRAIbslSLoBEvXqV3y2QBe/10gRX4nI5VZKIC 22 | 6v0i0J40ptnvoqzo9nWFyKbgkqKErvVcLD/l/013A6BAcu0KJhkPJgKqan03sDWz 23 | AMtL/zgK7uINV1litO0j0N+kZEap6BF7R8Cz65YylmNCeVQi9QkCggEBAOy+rSoo 24 | RR/84uyIKGMOM8IsOy/oAfED/GG2DdQfiJNVd6y4WHBFGNKnGAFxr6QH3DOFdFh1 25 | EKRSqMtRyaQoD4LLer3rJMTkC9QCs75vn2pjkiDjrERHqHzpsrTX3NXO2vIPipCy 26 | 4uTKakve5uaEzmelC6Vp5vHIcEpjOUj2kUtIUV1anfWfj189z6c5UpQ7X9zTDWgY 27 | JL1dDVHzEt/Q4PDRYrzMrrIqS8QwHNNsnnbAA+4OIycx7jh0aYLSRRy8OHJSGLz1 28 | U73Dz2anA3aLV89w9y9y0figIwNLcI3sD+Cr+BgPKVi7w3fIgX2392URPqG7pEIu 29 | 5WtvSYAKHvhRgscCggEBANOaWDCIfC1mjKO5F6Rl+dyoX8Gz1NslPxG1RWKPrWae 30 | tofYHYh90H2NLRSjC++YpUYDVlA8uoHb+J3YEBHVYImGcxsDxs29qD7FykTh6lQ9 31 | +UsIEiqVaQAREsO+jEPB3aU4DUzEhUWQIjrAoXZ5vJrLl8q9ILSRQRUE+n3yj5o5 32 | UEKtuxAqIb8XyL3lFP34kvno2D5lFZBZvJAvOEOdcv7dyiv2bmm1uyxfSQ3+xTnF 33 | pH3d6t6UmVW5HeN0+6DdoLT8dGP4YcGg2jHKMUckgujTmdE1SB5n0V9tofYDggVz 34 | J3dAWbztcy6mdH1vR5tNis0rUuVT9jiHq/UbF3+znPUCggEBAJY9MztLrwJrw76D 35 | cc3a13HkC/OM8p52/xUrKX9yO4P0eyv7GPS2hNg5/fv8n5AjBNUVXg48zEDbzA6r 36 | aqFoYt+ILe6UaRMh/Xef911lDO/BHJ0OnuX966jJzgUB8D0nNr7iqfbF70NuSSK4 37 | 5CNqKGqRzOA+YrcRkmFCsrng77esvyXVOeUES0IPwdXHhOZY/67tqNZka/xYLvnQ 38 | niu3LOjmmBhkdIDQYqCwP7rUdYO6u0O5aQmYoCrnhYXl1sIzmEbEXrqbnXcR6m6k 39 | kogKScF1WQidfy6ewVl36IfIRRM5TwpFpOGQ1q0Pa03TD4bt+Kc+ozD0/da+GxDM 40 | W3cOk0UCggEAZnsN0xOG8IYF/xUZeilZRaDWH330zN3RGcZtADsNg/HB/TCRvRUR 41 | 8Hx7JM+CbFvFT482Mk33FgVu9yM5RSnOQ0y2WpMNqX79CMnKGGpBjuMk5U3XXoBW 42 | Bg4+xEZ4sCgoLGhlKJTU5sfTjBjXGgYDbgTEUxAd0tW2a3OpzW24Vr0DPgfd3QpK 43 | pb3jHvTfodDNffNjJ3TVaL/NLxdvbG/Izd6kl34WI2Q8blUHkJue8UfjToI3yey2 44 | BmpNK2fwpgwwVPcfd3tH/PrmMc725rmsHQ6+JDBl6TPmlw0Fr5wrUHBg4MRtsa5e 45 | 
Zm1/ZXCLgY+gdZVR5HcxfFdY1LB2RQa8MQKCAQAi7TCpLxoSbcBIZt9AhoEbNML4 46 | e9nJu0s4YV9a8F1rhg1pEnIHf5Qs7ccTbDsYjA2LzIstnQ0U8ZYp4KmglXij7svT 47 | +FxP7Zx7yF3+2fj8bMb/mnmlWQqPK3FhNi7sjhEXNsprM7/OiAl3bELi2fofcUTp 48 | kRa0y9TIAc7aZvefxo0L+z/7q7oMA5Ts1KTPjKvNdACEjscFZTVgK6d4kY1TBd8v 49 | OLSSqmS8oc/Y4qqP/NqlAZrFGRTOji+xmIEGgUHnSY3i2n03xlVtr44V3BSDm37R 50 | 0/4Ftu/mTojsAfd4Oc0LcOc5r/pvH8j8+na+0Rk9Fm6kAphZr9kki7X93t2F 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/reset.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | if [ $# -lt 3 ] 5 | then 6 | echo "Usage: ./reset.sh " 7 | exit 0 8 | fi 9 | 10 | 11 | FOLDER_USER_GROUP=$1 12 | KUBCONFIG_CLUSTER_FOLDER=$2 13 | ACCESS_TYPE=$3 14 | 15 | 16 | export NAME_OF_CSR="$KUBCONFIG_CLUSTER_FOLDER-$FOLDER_USER_GROUP-$ACCESS_TYPE-csr" 17 | 18 | kubectl delete csr "$NAME_OF_CSR" 19 | 20 | 21 | kubectl delete clusterrole "role-$KUBCONFIG_CLUSTER_FOLDER-$FOLDER_USER_GROUP-$ACCESS_TYPE" 22 | 23 | kubectl delete clusterrolebinding "rolebinding-$KUBCONFIG_CLUSTER_FOLDER-$FOLDER_USER_GROUP-$ACCESS_TYPE" 24 | 25 | 26 | rm -rf ./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/* 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/run-all.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | userSet="groupQA|groupDEV" 4 | accessTypeValueSet="R|RW" 5 | FOLDER_DEV_CLUSTER="dev" 6 | 7 | 8 | echoUsageDEVCluster() 9 | { 10 | echo "Values for : $userSet" 11 | echo "Values for : $accessTypeValueSet" 12 | } 13 | 14 | assignVars() 15 | { 16 | FOLDER_USER_GROUP="$1" 17 | ACCESS_TYPE="$2" 18 | } 19 | 20 | generate() 21 | { 22 | echo "-------------------------------" 23 | echo " Resetting previous changes 
" 24 | echo "-------------------------------" 25 | ./reset.sh $FOLDER_USER_GROUP $KUBCONFIG_CLUSTER_FOLDER $ACCESS_TYPE 26 | 27 | echo "-------------------------------" 28 | echo " Client Cert Generation " 29 | echo "-------------------------------" 30 | ./client-run.sh $FOLDER_USER_GROUP $ACCESS_TYPE $KUBCONFIG_CLUSTER_FOLDER 31 | 32 | echo "-------------------------------" 33 | echo " kubeconfig & dave.key generation " 34 | echo "-------------------------------" 35 | ./admin-run.sh $FOLDER_USER_GROUP $ACCESS_TYPE $KUBCONFIG_CLUSTER_FOLDER 36 | echo "-------------------------------" 37 | echo " Share the following files with the $FOLDER_USER_GROUP 38 | ./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/kubeconfig 39 | ./$KUBCONFIG_CLUSTER_FOLDER/$FOLDER_USER_GROUP/dave.key 40 | 41 | Initialization Steps 42 | $ export KUBECONFIG=\$PWD/kubeconfig 43 | 44 | $ kubectl config set-credentials dave \\ 45 | --client-key=\$PWD/dave.key \\ 46 | --embed-certs=true 47 | " 48 | echo "-------------------------------" 49 | } 50 | 51 | 52 | if [ `kubectl config view --raw -o json | jq -r '.clusters[] | select(.name == "'$(kubectl config current-context)'") | .cluster."server"' | grep "https" | wc -l` == "1" ] 53 | then 54 | echo "Dev cluster" 55 | if [ $# -lt 2 ] 56 | then 57 | echo "Usage: ./run-all.sh " 58 | echoUsageDEVCluster 59 | exit 0 60 | fi 61 | assignVars "$1" "$2" 62 | 63 | 64 | if [ `echo "$FOLDER_USER_GROUP" | egrep "$userSet" | wc -l` == "0" ] 65 | then 66 | echo " value not as per standards" 67 | echoUsageDEVCluster 68 | exit 0 69 | fi 70 | 71 | if [ `echo "$ACCESS_TYPE" | egrep "$accessTypeValueSet" | wc -l` == "0" ] 72 | then 73 | echo " value not as per standards" 74 | echoUsageDEVCluster 75 | exit 0 76 | fi 77 | 78 | KUBCONFIG_CLUSTER_FOLDER=$FOLDER_DEV_CLUSTER 79 | generate 80 | fi 81 | 82 | 83 | 84 | 85 | 86 | -------------------------------------------------------------------------------- /gcp/task-004-intermediate-cluster-wide-kubeconfig/www.yaml: 
--------------------------------------------------------------------------------
 1 | apiVersion: apps/v1
 2 | kind: Deployment
 3 | metadata:
 4 |   name: www
 5 |   namespace: kube-system
 6 | spec:
 7 |   replicas: 3
 8 |   selector:
 9 |     matchLabels:
10 |       app: www
11 |   template:
12 |     metadata:
13 |       labels:
14 |         app: www
15 |     spec:
16 |       containers:
17 |         - name: nginx
18 |           image: nginx:1.14-alpine
19 |           ports:
20 |             - containerPort: 80
21 | ---
22 | apiVersion: v1
23 | kind: Service
24 | metadata:
25 |   name: www
26 |   namespace: kube-system
27 | spec:
28 |   selector:
29 |     app: www  # was "app: vote": that label matches no pods here (Deployment pods carry "app: www"), leaving the Service with zero endpoints
30 |   type: ClusterIP
31 |   ports:
32 |     - port: 80
33 |       targetPort: 80
34 |
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/.ReadMe_images/http-middlewares.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-005-traefik-whoami/.ReadMe_images/http-middlewares.png
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/.ReadMe_images/http-routers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-005-traefik-whoami/.ReadMe_images/http-routers.png
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/.ReadMe_images/http-services.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-005-traefik-whoami/.ReadMe_images/http-services.png
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/.ReadMe_images/traefik-homepage-part1.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-005-traefik-whoami/.ReadMe_images/traefik-homepage-part1.png -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/.ReadMe_images/traefik-homepage-part2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-005-traefik-whoami/.ReadMe_images/traefik-homepage-part2.png -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/.ReadMe_images/whoami-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-005-traefik-whoami/.ReadMe_images/whoami-service.png -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/00-resource-crd-definition.yml: -------------------------------------------------------------------------------- 1 | # All resources definition must be declared 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ingressroutes.traefik.containo.us 6 | 7 | spec: 8 | group: traefik.containo.us 9 | version: v1alpha1 10 | names: 11 | kind: IngressRoute 12 | plural: ingressroutes 13 | singular: ingressroute 14 | scope: Namespaced 15 | 16 | --- 17 | apiVersion: apiextensions.k8s.io/v1beta1 18 | kind: CustomResourceDefinition 19 | metadata: 20 | name: middlewares.traefik.containo.us 21 | 22 | spec: 23 | group: traefik.containo.us 24 | version: v1alpha1 25 | names: 26 | kind: Middleware 27 | plural: middlewares 28 | singular: middleware 29 | scope: Namespaced 30 | 31 | --- 32 | 
apiVersion: apiextensions.k8s.io/v1beta1 33 | kind: CustomResourceDefinition 34 | metadata: 35 | name: ingressroutetcps.traefik.containo.us 36 | 37 | spec: 38 | group: traefik.containo.us 39 | version: v1alpha1 40 | names: 41 | kind: IngressRouteTCP 42 | plural: ingressroutetcps 43 | singular: ingressroutetcp 44 | scope: Namespaced 45 | 46 | --- 47 | apiVersion: apiextensions.k8s.io/v1beta1 48 | kind: CustomResourceDefinition 49 | metadata: 50 | name: tlsoptions.traefik.containo.us 51 | 52 | spec: 53 | group: traefik.containo.us 54 | version: v1alpha1 55 | names: 56 | kind: TLSOption 57 | plural: tlsoptions 58 | singular: tlsoption 59 | scope: Namespaced 60 | 61 | --- 62 | apiVersion: apiextensions.k8s.io/v1beta1 63 | kind: CustomResourceDefinition 64 | metadata: 65 | name: traefikservices.traefik.containo.us 66 | 67 | spec: 68 | group: traefik.containo.us 69 | version: v1alpha1 70 | names: 71 | kind: TraefikService 72 | plural: traefikservices 73 | singular: traefikservice 74 | scope: Namespaced -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/05-traefik-rbac.yml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: traefik-ingress-controller 5 | 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - secrets 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - extensions 19 | resources: 20 | - ingresses 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - extensions 27 | resources: 28 | - ingresses/status 29 | verbs: 30 | - update 31 | - apiGroups: 32 | - traefik.containo.us 33 | resources: 34 | - middlewares 35 | - ingressroutes 36 | - traefikservices 37 | - ingressroutetcps 38 | - ingressrouteudps 39 | - tlsoptions 40 | - tlsstores 41 | verbs: 42 | - get 43 | - list 44 | - watch 45 | 46 | --- 47 | kind: 
ClusterRoleBinding 48 | apiVersion: rbac.authorization.k8s.io/v1beta1 49 | metadata: 50 | name: traefik-ingress-controller 51 | 52 | roleRef: 53 | apiGroup: rbac.authorization.k8s.io 54 | kind: ClusterRole 55 | name: traefik-ingress-controller 56 | subjects: 57 | - kind: ServiceAccount 58 | name: traefik-ingress-controller 59 | namespace: default 60 | -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/10-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: traefik-ingress-controller 5 | -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/15-traefik-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | name: traefik 6 | labels: 7 | app: traefik 8 | 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: traefik 14 | template: 15 | metadata: 16 | labels: 17 | app: traefik 18 | spec: 19 | serviceAccountName: traefik-ingress-controller 20 | containers: 21 | - name: traefik 22 | image: traefik:v2.2 23 | args: 24 | - --accesslog=true 25 | - --api 26 | - --api.insecure 27 | - --entrypoints.web.address=:80 28 | - --entrypoints.websecure.address=:443 29 | - --providers.kubernetescrd 30 | - --configfile=/config/traefik.toml 31 | ports: 32 | - name: web 33 | containerPort: 80 34 | - name: admin 35 | containerPort: 8080 36 | - name: websecure 37 | containerPort: 443 38 | -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/16-traefik.toml: -------------------------------------------------------------------------------- 1 | # traefik.toml 2 | defaultEntryPoints = ["http","https"] 3 | [entryPoints] 4 | [entryPoints.http] 5 | address = ":80" 6 | 
[entryPoints.http.redirect]
 7 |       entryPoint = "https"
 8 |   [entryPoints.https]
 9 |     address = ":443"
10 |     [entryPoints.https.tls]
11 |       [[entryPoints.https.tls.certificates]]
12 |         CertFile = "/ssl/tls.crt"
13 |         KeyFile = "/ssl/tls.key"
14 | [api]
15 |   dashboard = true
16 |   insecure = true
17 |
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/20-traefik-service.yaml:
--------------------------------------------------------------------------------
 1 | ---
 2 | apiVersion: v1
 3 | kind: Service
 4 | metadata:
 5 |   name: traefik
 6 | spec:
 7 |   type: LoadBalancer
 8 |   selector:
 9 |     app: traefik
10 |   ports:
11 |     - protocol: TCP
12 |       port: 80
13 |       name: web
14 |       targetPort: 80
15 |     - protocol: TCP
16 |       port: 443
17 |       name: websecure
18 |       targetPort: 443  # was 80: HTTPS traffic hit the container's plain-HTTP "web" port; the traefik container binds the "websecure" entrypoint to :443 (see 15-traefik-deployment.yaml args)
19 |     - protocol: TCP
20 |       port: 8080
21 |       name: admin
22 |       targetPort: 8080
23 |
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/25-whoami-deployment.yaml:
--------------------------------------------------------------------------------
 1 | kind: Deployment
 2 | apiVersion: apps/v1
 3 | metadata:
 4 |   namespace: default
 5 |   name: whoami
 6 |   labels:
 7 |     app: whoami
 8 |
 9 | spec:
10 |   replicas: 1
11 |   selector:
12 |     matchLabels:
13 |       app: whoami
14 |   template:
15 |     metadata:
16 |       labels:
17 |         app: whoami
18 |     spec:
19 |       containers:
20 |         - name: whoami
21 |           image: containous/whoami
22 |           ports:
23 |             - name: web
24 |               containerPort: 80
25 |
26 |
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/30-whoami-service.yaml:
--------------------------------------------------------------------------------
 1 | apiVersion: v1
 2 | kind: Service
 3 | metadata:
 4 |   name: whoami
 5 |
 6 | spec:
 7 |   ports:
 8 |     - protocol: TCP
 9 |       name: web
10 |       port: 80
11 |   selector:
12 |     app: whoami
13 |
14 |
--------------------------------------------------------------------------------
/gcp/task-005-traefik-whoami/35-whoami-ingress-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRoute 3 | metadata: 4 | name: whoami-whoami 5 | namespace: default 6 | spec: 7 | entryPoints: 8 | - web 9 | - websecure 10 | routes: 11 | - match: PathPrefix(`/whoami-app-api`) 12 | kind: Rule 13 | services: 14 | - name: whoami 15 | port: 80 16 | 17 | -------------------------------------------------------------------------------- /gcp/task-005-traefik-whoami/ReadMe.md: -------------------------------------------------------------------------------- 1 | ## To deploy traefik on GKE with whoami service 2 | 3 | 4 | - Run the following command (you may run it again if you get error for the first time as some 5 | custom resources take some time to get created) 6 | 7 | ```bash 8 | kubectl apply -f . 9 | ``` 10 | 11 | - Get the public IP of Traefik-Service 12 | ```bash 13 | $ kubectl get service 14 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 15 | kubernetes ClusterIP 10.109.0.1 443/TCP 31m 16 | traefik LoadBalancer 10.109.10.37 35.188.102.17 80:30541/TCP,443:32703/TCP,8080:31141/TCP 84s 17 | whoami ClusterIP 10.109.10.217 80/TCP 84s 18 | ``` 19 | 20 | 21 | 22 | - Dashboard 23 | 24 | ![](.ReadMe_images/traefik-homepage-part1.png) 25 | 26 | ![](.ReadMe_images/traefik-homepage-part2.png) 27 | 28 | 29 | - HTTP Routers 30 | ![](.ReadMe_images/http-routers.png) 31 | 32 | - HTTP Services 33 | ![](.ReadMe_images/http-services.png) 34 | 35 | - HTTP Middlewares 36 | ![](.ReadMe_images/http-middlewares.png) 37 | 38 | 39 | - Accessing whoami service 40 | 41 | ![](.ReadMe_images/whoami-service.png) -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/.ReadMe_images/features-enabled-in-toml-inside-configMap.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-006-traefik-whoami-tomlInConfigMap/.ReadMe_images/features-enabled-in-toml-inside-configMap.png -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/.ReadMe_images/whoami-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-006-traefik-whoami-tomlInConfigMap/.ReadMe_images/whoami-service.png -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/00-resource-crd-definition.yml: -------------------------------------------------------------------------------- 1 | # All resources definition must be declared 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ingressroutes.traefik.containo.us 6 | 7 | spec: 8 | group: traefik.containo.us 9 | version: v1alpha1 10 | names: 11 | kind: IngressRoute 12 | plural: ingressroutes 13 | singular: ingressroute 14 | scope: Namespaced 15 | 16 | --- 17 | apiVersion: apiextensions.k8s.io/v1beta1 18 | kind: CustomResourceDefinition 19 | metadata: 20 | name: middlewares.traefik.containo.us 21 | 22 | spec: 23 | group: traefik.containo.us 24 | version: v1alpha1 25 | names: 26 | kind: Middleware 27 | plural: middlewares 28 | singular: middleware 29 | scope: Namespaced 30 | 31 | --- 32 | apiVersion: apiextensions.k8s.io/v1beta1 33 | kind: CustomResourceDefinition 34 | metadata: 35 | name: ingressroutetcps.traefik.containo.us 36 | 37 | spec: 38 | group: traefik.containo.us 39 | version: v1alpha1 40 | names: 41 | kind: IngressRouteTCP 42 | plural: ingressroutetcps 43 | singular: ingressroutetcp 44 | scope: Namespaced 45 | 46 | --- 47 | apiVersion: apiextensions.k8s.io/v1beta1 48 | kind: 
CustomResourceDefinition 49 | metadata: 50 | name: tlsoptions.traefik.containo.us 51 | 52 | spec: 53 | group: traefik.containo.us 54 | version: v1alpha1 55 | names: 56 | kind: TLSOption 57 | plural: tlsoptions 58 | singular: tlsoption 59 | scope: Namespaced 60 | 61 | --- 62 | apiVersion: apiextensions.k8s.io/v1beta1 63 | kind: CustomResourceDefinition 64 | metadata: 65 | name: traefikservices.traefik.containo.us 66 | 67 | spec: 68 | group: traefik.containo.us 69 | version: v1alpha1 70 | names: 71 | kind: TraefikService 72 | plural: traefikservices 73 | singular: traefikservice 74 | scope: Namespaced -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/05-traefik-rbac.yml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: traefik-ingress-controller 5 | 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - secrets 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - extensions 19 | resources: 20 | - ingresses 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - extensions 27 | resources: 28 | - ingresses/status 29 | verbs: 30 | - update 31 | - apiGroups: 32 | - traefik.containo.us 33 | resources: 34 | - middlewares 35 | - ingressroutes 36 | - traefikservices 37 | - ingressroutetcps 38 | - ingressrouteudps 39 | - tlsoptions 40 | - tlsstores 41 | verbs: 42 | - get 43 | - list 44 | - watch 45 | 46 | --- 47 | kind: ClusterRoleBinding 48 | apiVersion: rbac.authorization.k8s.io/v1beta1 49 | metadata: 50 | name: traefik-ingress-controller 51 | 52 | roleRef: 53 | apiGroup: rbac.authorization.k8s.io 54 | kind: ClusterRole 55 | name: traefik-ingress-controller 56 | subjects: 57 | - kind: ServiceAccount 58 | name: traefik-ingress-controller 59 | namespace: default 
-------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/10-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: traefik-ingress-controller 5 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/15-traefik-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: extensions/v1beta1 4 | metadata: 5 | name: traefik 6 | labels: 7 | app: traefik 8 | 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: traefik 14 | template: 15 | metadata: 16 | labels: 17 | app: traefik 18 | spec: 19 | serviceAccountName: traefik-ingress-controller 20 | volumes: 21 | - name: config 22 | configMap: 23 | name: traefik-config-map 24 | containers: 25 | - name: traefik 26 | image: traefik:v2.1 27 | args: 28 | - --accesslog=true 29 | - --api 30 | - --api.insecure 31 | - --entrypoints.web.address=:80 32 | - --entrypoints.websecure.address=:443 33 | - --providers.kubernetescrd 34 | - --configfile=/config/traefik.toml 35 | ports: 36 | - name: web 37 | containerPort: 80 38 | - name: admin 39 | containerPort: 8080 40 | - name: websecure 41 | containerPort: 443 42 | volumeMounts: 43 | - mountPath: /etc/traefik/traefik.toml 44 | name: config 45 | subPath: traefik.toml 46 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/20-traefik-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: traefik 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: traefik 10 | ports: 11 | - protocol: TCP 12 | port: 80 13 | name: web 14 | targetPort: 80 15 | - protocol: TCP 16 
| port: 443 17 | name: websecure 18 | targetPort: 443 # must match the deployment's websecure containerPort (443), not the plain-HTTP port 19 | - protocol: TCP 20 | port: 8080 21 | name: admin 22 | targetPort: 8080 23 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/24-traefik-configMap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: traefik-config-map 5 | data: 6 | traefik.toml: |- 7 | [global] 8 | checkNewVersion = false 9 | sendAnonymousUsage = false 10 | [retry] 11 | attempts = 3 12 | maxMem = 3 13 | [entryPoints] 14 | [entryPoints.web] 15 | address = ":80" 16 | [entryPoints.websecure] 17 | address = ":443" 18 | [log] 19 | level = "DEBUG" 20 | [accessLog] 21 | [api] 22 | insecure = true 23 | dashboard = true 24 | debug = true 25 | [providers] 26 | [providers.file] 27 | directory = "/var/tf" 28 | watch = true 29 | [providers.kubernetesCRD] 30 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/25-whoami-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | namespace: default 5 | name: whoami 6 | labels: 7 | app: whoami 8 | 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: whoami 14 | template: 15 | metadata: 16 | labels: 17 | app: whoami 18 | spec: 19 | containers: 20 | - name: whoami 21 | image: containous/whoami 22 | ports: 23 | - name: web 24 | containerPort: 80 25 | 26 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/30-whoami-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: whoami 5 | 6 | spec: 7 | ports: 8 | - protocol: TCP 9 | name: web 10 | port: 80 11 | selector: 12 | app:
whoami 13 | 14 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/35-whoami-ingress-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRoute 3 | metadata: 4 | name: whoami-whoami 5 | namespace: default 6 | spec: 7 | entryPoints: 8 | - web 9 | - websecure 10 | routes: 11 | - match: PathPrefix(`/whoami-app-api`) 12 | kind: Rule 13 | services: 14 | - name: whoami 15 | port: 80 16 | 17 | -------------------------------------------------------------------------------- /gcp/task-006-traefik-whoami-tomlInConfigMap/ReadMe.md: -------------------------------------------------------------------------------- 1 | ## To deploy traefik on GKE with whoami service when toml file is passed as configMap mounted to deployment of Traefik 2 | 3 | 4 | - Run the following command (you may run it again if you get error for the first time as some 5 | custom resources take some time to get created) 6 | 7 | ```bash 8 | kubectl apply -f . 
9 | ``` 10 | 11 | - Get the public IP of Traefik-Service 12 | ```bash 13 | $ kubectl get service 14 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 15 | kubernetes ClusterIP 10.109.0.1 443/TCP 179m 16 | traefik LoadBalancer 10.109.4.20 34.66.233.93 80:30521/TCP,443:32062/TCP,8080:30299/TCP 2m26s 17 | whoami ClusterIP 10.109.12.195 80/TCP 2m25s 18 | ``` 19 | 20 | 21 | 22 | - Features enabled 23 | 24 | ![](.ReadMe_images/features-enabled-in-toml-inside-configMap.png) 25 | 26 | 27 | - whoami 28 | 29 | ![](.ReadMe_images/whoami-service.png) -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/ACME-certificate-tls.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/ACME-certificate-tls.png -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/dashboard.png -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/finally-the-cert-needed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/finally-the-cert-needed.png -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/whoami-service-notls.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-007-traefik-whoami-lets-encrypt/.ReadMe_images/whoami-service-notls.png -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/00-resource-crd-definition.yml: -------------------------------------------------------------------------------- 1 | # All resources definition must be declared 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: ingressroutes.traefik.containo.us 6 | 7 | spec: 8 | group: traefik.containo.us 9 | version: v1alpha1 10 | names: 11 | kind: IngressRoute 12 | plural: ingressroutes 13 | singular: ingressroute 14 | scope: Namespaced 15 | 16 | --- 17 | apiVersion: apiextensions.k8s.io/v1beta1 18 | kind: CustomResourceDefinition 19 | metadata: 20 | name: middlewares.traefik.containo.us 21 | 22 | spec: 23 | group: traefik.containo.us 24 | version: v1alpha1 25 | names: 26 | kind: Middleware 27 | plural: middlewares 28 | singular: middleware 29 | scope: Namespaced 30 | 31 | --- 32 | apiVersion: apiextensions.k8s.io/v1beta1 33 | kind: CustomResourceDefinition 34 | metadata: 35 | name: ingressroutetcps.traefik.containo.us 36 | 37 | spec: 38 | group: traefik.containo.us 39 | version: v1alpha1 40 | names: 41 | kind: IngressRouteTCP 42 | plural: ingressroutetcps 43 | singular: ingressroutetcp 44 | scope: Namespaced 45 | 46 | --- 47 | apiVersion: apiextensions.k8s.io/v1beta1 48 | kind: CustomResourceDefinition 49 | metadata: 50 | name: tlsoptions.traefik.containo.us 51 | 52 | spec: 53 | group: traefik.containo.us 54 | version: v1alpha1 55 | names: 56 | kind: TLSOption 57 | plural: tlsoptions 58 | singular: tlsoption 59 | scope: Namespaced 60 | 61 | --- 62 | apiVersion: apiextensions.k8s.io/v1beta1 63 | kind: CustomResourceDefinition 64 | metadata: 65 
| name: traefikservices.traefik.containo.us 66 | 67 | spec: 68 | group: traefik.containo.us 69 | version: v1alpha1 70 | names: 71 | kind: TraefikService 72 | plural: traefikservices 73 | singular: traefikservice 74 | scope: Namespaced -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/05-traefik-rbac.yml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: traefik-ingress-controller 5 | 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - secrets 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - extensions 19 | resources: 20 | - ingresses 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - apiGroups: 26 | - extensions 27 | resources: 28 | - ingresses/status 29 | verbs: 30 | - update 31 | - apiGroups: 32 | - traefik.containo.us 33 | resources: 34 | - middlewares 35 | - ingressroutes 36 | - traefikservices 37 | - ingressroutetcps 38 | - ingressrouteudps 39 | - tlsoptions 40 | - tlsstores 41 | verbs: 42 | - get 43 | - list 44 | - watch 45 | 46 | --- 47 | kind: ClusterRoleBinding 48 | apiVersion: rbac.authorization.k8s.io/v1beta1 49 | metadata: 50 | name: traefik-ingress-controller 51 | 52 | roleRef: 53 | apiGroup: rbac.authorization.k8s.io 54 | kind: ClusterRole 55 | name: traefik-ingress-controller 56 | subjects: 57 | - kind: ServiceAccount 58 | name: traefik-ingress-controller 59 | namespace: default -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/10-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: traefik-ingress-controller 5 | -------------------------------------------------------------------------------- 
/gcp/task-007-traefik-whoami-lets-encrypt/15-traefik-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: apps/v1 4 | metadata: 5 | namespace: default 6 | name: traefik 7 | labels: 8 | app: traefik 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: traefik 14 | template: 15 | metadata: 16 | labels: 17 | app: traefik 18 | spec: 19 | serviceAccountName: traefik-ingress-controller 20 | containers: 21 | - name: traefik 22 | image: traefik:v2.1 23 | args: 24 | - --api 25 | - --log.level=DEBUG 26 | - --api.insecure 27 | - --accesslog 28 | - --entrypoints.web.address=:80 29 | - --entrypoints.websecure.address=:443 30 | - --providers.kubernetescrd 31 | - --certificatesresolvers.default.acme.tlschallenge 32 | - --certificatesresolvers.default.acme.email=emailexample@gmail.com 33 | - --certificatesresolvers.default.acme.storage=acme.json 34 | # Please note that this is the staging Let's Encrypt server. 35 | # Once you get things working, you should remove that whole line altogether. 
36 | # - --certificatesresolvers.default.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory 37 | ports: 38 | - name: web 39 | containerPort: 80 40 | - name: admin 41 | containerPort: 8080 42 | - name: websecure 43 | containerPort: 443 44 | 45 | 46 | -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/20-traefik-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: traefik 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: traefik 10 | ports: 11 | - protocol: TCP 12 | port: 80 13 | name: web 14 | targetPort: 80 15 | - protocol: TCP 16 | port: 443 17 | name: websecure 18 | targetPort: 443 # must reach the websecure containerPort (443); the ACME TLS-ALPN challenge cannot complete if 443 is routed to the plain-HTTP port 19 | - protocol: TCP 20 | port: 8080 21 | name: admin 22 | targetPort: 8080 23 | -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/25-whoami-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | namespace: default 5 | name: whoami 6 | labels: 7 | app: whoami 8 | 9 | spec: 10 | replicas: 2 11 | selector: 12 | matchLabels: 13 | app: whoami 14 | template: 15 | metadata: 16 | labels: 17 | app: whoami 18 | spec: 19 | containers: 20 | - name: whoami 21 | image: containous/whoami 22 | ports: 23 | - name: web 24 | containerPort: 80 25 | 26 | -------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/30-whoami-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: whoami 5 | 6 | spec: 7 | ports: 8 | - protocol: TCP 9 | name: web 10 | port: 80 11 | selector: 12 | app: whoami 13 | 14 |
-------------------------------------------------------------------------------- /gcp/task-007-traefik-whoami-lets-encrypt/35-ingress-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRoute 3 | metadata: 4 | name: simpleingressroute 5 | namespace: default 6 | spec: 7 | entryPoints: 8 | - web 9 | routes: 10 | - match: Host(`k8straefiktlstest.gotdns.ch`) && PathPrefix(`/notls`) 11 | kind: Rule 12 | services: 13 | - name: whoami 14 | port: 80 15 | 16 | --- 17 | apiVersion: traefik.containo.us/v1alpha1 18 | kind: IngressRoute 19 | metadata: 20 | name: ingressroutetls 21 | namespace: default 22 | spec: 23 | entryPoints: 24 | - websecure 25 | - web 26 | routes: 27 | - match: Host(`k8straefiktlstest.gotdns.ch`) && PathPrefix(`/tls`) 28 | kind: Rule 29 | services: 30 | - name: whoami 31 | port: 80 32 | tls: 33 | certResolver: default -------------------------------------------------------------------------------- /gcp/task-008-external-IP-to-access-Application-In-Cluster/service/load-balancer-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: load-balancer-example 6 | name: hello-world 7 | spec: 8 | replicas: 5 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/name: load-balancer-example 12 | template: 13 | metadata: 14 | labels: 15 | app.kubernetes.io/name: load-balancer-example 16 | spec: 17 | containers: 18 | - image: gcr.io/google-samples/node-hello:1.0 19 | name: hello-world 20 | ports: 21 | - containerPort: 8080 22 | 23 | -------------------------------------------------------------------------------- /gcp/task-009-configuring-dns-with-static-IPs-k8-using-Service/.ReadMe_images/GKE_cluster_created.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-009-configuring-dns-with-static-IPs-k8-using-Service/.ReadMe_images/GKE_cluster_created.png -------------------------------------------------------------------------------- /gcp/task-009-configuring-dns-with-static-IPs-k8-using-Service/helloweb-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: helloweb 5 | labels: 6 | app: hello 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: hello 11 | tier: web 12 | template: 13 | metadata: 14 | labels: 15 | app: hello 16 | tier: web 17 | spec: 18 | containers: 19 | - name: hello-app 20 | image: gcr.io/google-samples/hello-app:1.0 21 | ports: 22 | - containerPort: 8080 -------------------------------------------------------------------------------- /gcp/task-009-configuring-dns-with-static-IPs-k8-using-Service/helloweb-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: helloweb 5 | labels: 6 | app: hello 7 | spec: 8 | selector: 9 | app: hello 10 | tier: web 11 | ports: 12 | - port: 80 13 | targetPort: 8080 14 | type: LoadBalancer 15 | loadBalancerIP: "34.67.51.160" -------------------------------------------------------------------------------- /gcp/task-010-configuring-dns-with-static-IPs-k8-using-Ingress/helloweb-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: helloweb 5 | labels: 6 | app: hello 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: hello 11 | tier: web 12 | template: 13 | metadata: 14 | labels: 15 | app: hello 16 | tier: web 17 | spec: 18 | containers: 19 | - name: hello-app 20 | image: gcr.io/google-samples/hello-app:1.0 21 | ports: 22 | - containerPort: 8080 
-------------------------------------------------------------------------------- /gcp/task-010-configuring-dns-with-static-IPs-k8-using-Ingress/helloweb-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: helloweb 5 | annotations: 6 | kubernetes.io/ingress.global-static-ip-name: helloweb-ip 7 | labels: 8 | app: hello 9 | spec: 10 | backend: 11 | serviceName: helloweb-backend 12 | servicePort: 8080 13 | --- 14 | apiVersion: v1 15 | kind: Service 16 | metadata: 17 | name: helloweb-backend 18 | labels: 19 | app: hello 20 | spec: 21 | type: NodePort 22 | selector: 23 | app: hello 24 | tier: web 25 | ports: 26 | - port: 8080 27 | targetPort: 8080 -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/.ReadMe_images/events-tab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-011-configuring-datadog/.ReadMe_images/events-tab.png -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/.ReadMe_images/k8s-dashboard-pic1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-011-configuring-datadog/.ReadMe_images/k8s-dashboard-pic1.png -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/.ReadMe_images/k8s-dashboard-pic2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-011-configuring-datadog/.ReadMe_images/k8s-dashboard-pic2.png 
-------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/.ReadMe_images/k8s-dashboard-pic3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-011-configuring-datadog/.ReadMe_images/k8s-dashboard-pic3.png -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/.ReadMe_images/live-tail-logs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-011-configuring-datadog/.ReadMe_images/live-tail-logs.png -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/.ReadMe_images/logs-dasboard-page.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-011-configuring-datadog/.ReadMe_images/logs-dasboard-page.png -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/00-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: datadog-agent 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - services 10 | - events 11 | - endpoints 12 | - pods 13 | - nodes 14 | - componentstatuses 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiGroups: ["quota.openshift.io"] 20 | resources: 21 | - clusterresourcequotas 22 | verbs: 23 | - get 24 | - list 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - configmaps 29 | resourceNames: 30 | - datadogtoken # Kubernetes event collection 
state 31 | - datadog-leader-election # Leader election token 32 | verbs: 33 | - get 34 | - update 35 | - apiGroups: # To create the leader election token 36 | - "" 37 | resources: 38 | - configmaps 39 | verbs: 40 | - create 41 | - nonResourceURLs: 42 | - "/version" 43 | - "/healthz" 44 | - "/metrics" 45 | verbs: 46 | - get 47 | - apiGroups: # Kubelet connectivity 48 | - "" 49 | resources: 50 | - nodes/metrics 51 | - nodes/spec 52 | - nodes/proxy 53 | - nodes/stats 54 | verbs: 55 | - get 56 | 57 | --- 58 | 59 | kind: ClusterRole 60 | apiVersion: rbac.authorization.k8s.io/v1 61 | metadata: 62 | name: dca 63 | rules: 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - services 68 | - events 69 | - endpoints 70 | - pods 71 | - nodes 72 | - componentstatuses 73 | verbs: 74 | - get 75 | - list 76 | - watch 77 | - apiGroups: 78 | - "autoscaling" 79 | resources: 80 | - horizontalpodautoscalers 81 | verbs: 82 | - list 83 | - watch 84 | - apiGroups: 85 | - "" 86 | resources: 87 | - configmaps 88 | resourceNames: 89 | - datadogtoken # Kubernetes event collection state 90 | - datadog-leader-election # Leader election token 91 | verbs: 92 | - get 93 | - update 94 | - apiGroups: # To create the leader election token 95 | - "" 96 | resources: 97 | - configmaps 98 | verbs: 99 | - create 100 | - get 101 | - update 102 | - nonResourceURLs: 103 | - "/version" 104 | - "/healthz" 105 | verbs: 106 | - get 107 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/01-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | - secrets 14 | - nodes 15 | - pods 16 | - services 17 | - resourcequotas 18 | - 
replicationcontrollers 19 | - limitranges 20 | - persistentvolumeclaims 21 | - persistentvolumes 22 | - namespaces 23 | - endpoints 24 | verbs: 25 | - list 26 | - watch 27 | - apiGroups: 28 | - extensions 29 | resources: 30 | - daemonsets 31 | - deployments 32 | - replicasets 33 | - ingresses 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - statefulsets 41 | - daemonsets 42 | - deployments 43 | - replicasets 44 | verbs: 45 | - list 46 | - watch 47 | - apiGroups: 48 | - batch 49 | resources: 50 | - cronjobs 51 | - jobs 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - autoscaling 57 | resources: 58 | - horizontalpodautoscalers 59 | verbs: 60 | - list 61 | - watch 62 | - apiGroups: 63 | - authentication.k8s.io 64 | resources: 65 | - tokenreviews 66 | verbs: 67 | - create 68 | - apiGroups: 69 | - authorization.k8s.io 70 | resources: 71 | - subjectaccessreviews 72 | verbs: 73 | - create 74 | - apiGroups: 75 | - policy 76 | resources: 77 | - poddisruptionbudgets 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - certificates.k8s.io 83 | resources: 84 | - certificatesigningrequests 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - storage.k8s.io 90 | resources: 91 | - storageclasses 92 | - volumeattachments 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - admissionregistration.k8s.io 98 | resources: 99 | - mutatingwebhookconfigurations 100 | - validatingwebhookconfigurations 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - networking.k8s.io 106 | resources: 107 | - networkpolicies 108 | verbs: 109 | - list 110 | - watch 111 | - apiGroups: 112 | - coordination.k8s.io 113 | resources: 114 | - leases 115 | verbs: 116 | - list 117 | - watch 118 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/05-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | 
kind: ServiceAccount 2 | apiVersion: v1 3 | metadata: 4 | name: datadog-agent 5 | namespace: default 6 | 7 | --- 8 | 9 | kind: ServiceAccount 10 | apiVersion: v1 11 | metadata: 12 | name: dca 13 | namespace: default 14 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/06-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: kube-system 9 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/10-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: datadog-agent 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: datadog-agent 9 | subjects: 10 | - kind: ServiceAccount 11 | name: datadog-agent 12 | namespace: default 13 | 14 | --- 15 | 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: ClusterRoleBinding 18 | metadata: 19 | name: dca 20 | roleRef: 21 | apiGroup: rbac.authorization.k8s.io 22 | kind: ClusterRole 23 | name: dca 24 | subjects: 25 | - kind: ServiceAccount 26 | name: dca 27 | namespace: default 28 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/11-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: 
ClusterRole 11 | name: kube-state-metrics 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kube-state-metrics 15 | namespace: kube-system 16 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/20-datadog-cluster-agent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: datadog-cluster-agent 5 | labels: 6 | app: datadog-cluster-agent 7 | spec: 8 | ports: 9 | - port: 5005 # Has to be the same as the one exposed in the DCA. Default is 5005. 10 | protocol: TCP 11 | selector: 12 | app: datadog-cluster-agent 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: datadog-cluster-agent 18 | namespace: default 19 | spec: 20 | selector: 21 | matchLabels: 22 | app: datadog-cluster-agent 23 | template: 24 | metadata: 25 | labels: 26 | app: datadog-cluster-agent 27 | name: datadog-agent 28 | annotations: 29 | ad.datadoghq.com/datadog-cluster-agent.check_names: '["prometheus"]' 30 | ad.datadoghq.com/datadog-cluster-agent.init_configs: '[{}]' 31 | ad.datadoghq.com/datadog-cluster-agent.instances: '[{"prometheus_url": "http://%%host%%:5000/metrics","namespace": "datadog.cluster_agent","metrics": ["go_goroutines","go_memstats_*","process_*","api_requests","datadog_requests","external_metrics", "cluster_checks_*"]}]' 32 | spec: 33 | serviceAccountName: dca 34 | containers: 35 | - image: datadog/cluster-agent:latest 36 | imagePullPolicy: Always 37 | name: datadog-cluster-agent 38 | env: 39 | - name: DD_API_KEY 40 | valueFrom: 41 | secretKeyRef: 42 | name: datadog-secret 43 | key: api-key 44 | # Optionally reference an APP KEY for the External Metrics Provider. 
45 | # - name: DD_APP_KEY 46 | # value: '' 47 | - name: DD_CLUSTER_AGENT_AUTH_TOKEN 48 | valueFrom: 49 | secretKeyRef: 50 | name: datadog-auth-token 51 | key: token 52 | - name: DD_COLLECT_KUBERNETES_EVENTS 53 | value: "true" 54 | - name: DD_LEADER_ELECTION 55 | value: "true" 56 | - name: DD_EXTERNAL_METRICS_PROVIDER_ENABLED 57 | value: "true" 58 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/30-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: kube-system 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: http-metrics 13 | port: 8080 14 | targetPort: http-metrics 15 | - name: telemetry 16 | port: 8081 17 | targetPort: telemetry 18 | selector: 19 | app.kubernetes.io/name: kube-state-metrics 20 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/35-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.5 7 | name: kube-state-metrics 8 | namespace: kube-system 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: kube-state-metrics 14 | template: 15 | metadata: 16 | labels: 17 | app.kubernetes.io/name: kube-state-metrics 18 | app.kubernetes.io/version: 1.9.5 19 | spec: 20 | containers: 21 | - image: quay.io/coreos/kube-state-metrics:v1.9.5 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | initialDelaySeconds: 5 27 | timeoutSeconds: 5 28 | name: kube-state-metrics 29 | ports: 30 | - containerPort: 8080 31 | name: http-metrics 32 | - containerPort: 
8081 33 | name: telemetry 34 | readinessProbe: 35 | httpGet: 36 | path: / 37 | port: 8081 38 | initialDelaySeconds: 5 39 | timeoutSeconds: 5 40 | securityContext: 41 | runAsUser: 65534 42 | nodeSelector: 43 | kubernetes.io/os: linux 44 | serviceAccountName: kube-state-metrics 45 | -------------------------------------------------------------------------------- /gcp/task-011-configuring-datadog/ReadMe.md: -------------------------------------------------------------------------------- 1 | Referenced Docs 2 | 1) https://www.datadoghq.com/blog/monitoring-kubernetes-with-datadog/ 3 | 4 | 5 | 6 | - Secret resource Created 7 | ```bash 8 | kubectl create secret generic datadog-secret --from-literal api-key="2dd8*******************74d48f" 9 | ``` 10 | 11 | - Run the following commands 12 | ```bash 13 | kubectl apply -f . 14 | ``` 15 | 16 | - Verification of agent and ensure that 17 | - all status's are OK 18 | - API key is valid 19 | - logs are getting shipped 20 | ```bash 21 | $ kubectl exec -it datadog-agent-rp2bs agent status | egrep "OK|API Key valid|FAIL|API Key invalid" 22 | Instance ID: cpu [OK] 23 | Instance ID: disk:e5dffb8bef24336f [OK] 24 | Instance ID: docker [OK] 25 | Instance ID: file_handle [OK] 26 | Instance ID: io [OK] 27 | Instance ID: kube_dns:cd40e8b0b9591c53 [OK] 28 | Instance ID: kubelet:d884b5186b651429 [OK] 29 | Instance ID: kubernetes_apiserver [OK] 30 | Instance ID: kubernetes_state:786c62219a8c6f42 [OK] 31 | Instance ID: load [OK] 32 | Instance ID: memory [OK] 33 | Instance ID: network:e0204ad63d43c949 [OK] 34 | Instance ID: ntp:d884b5186b651429 [OK] 35 | Instance ID: prometheus:datadog.cluster_agent:c45da342a409d029 [OK] 36 | Instance ID: uptime [OK] 37 | API key ending with 4d48f: API Key valid 38 | Status: OK 39 | Status: OK 40 | ``` 41 | 42 | 43 | - Deploy the cluster agent 44 | ```bash 45 | kubectl get pods -l app=datadog-cluster-agent 46 | ``` 47 | 48 | - Check the status of cluster-agent 49 | ```bash 50 | $ kubectl exec -it 
datadog-cluster-agent-67588d6f7b-znkxm agent status | egrep "OK|API Key valid" 51 | Instance ID: kubernetes_apiserver [OK] 52 | API key ending with 4d48f: API Key valid 53 | ``` 54 | 55 | 56 | - Now login to datadog and let's see what we have achieved so far 57 | 58 | ### Logs 59 | 60 | - Live Tail 61 | 62 | ![](.ReadMe_images/live-tail-logs.png) 63 | 64 | 65 | - Logs dashboard 66 | 67 | ![](.ReadMe_images/logs-dasboard-page.png) 68 | 69 | 70 | ### Dashboard 71 | 72 | - k8s dashboard 73 | 74 | ![](.ReadMe_images/k8s-dashboard-pic1.png) 75 | 76 | 77 | ![](.ReadMe_images/k8s-dashboard-pic2.png) 78 | 79 | 80 | ![](.ReadMe_images/k8s-dashboard-pic3.png) 81 | 82 | ### Events 83 | 84 | - Events tab 85 | 86 | ![](.ReadMe_images/events-tab.png) 87 | -------------------------------------------------------------------------------- /gcp/task-012-PodSecurityPolicy/example-psp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | name: example 5 | spec: 6 | privileged: false # Don't allow privileged pods! 7 | # The rest fills in some required fields. 
8 | seLinux: 9 | rule: RunAsAny 10 | supplementalGroups: 11 | rule: RunAsAny 12 | runAsUser: 13 | rule: RunAsAny 14 | fsGroup: 15 | rule: RunAsAny 16 | volumes: 17 | - '*' -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/00-resource-crd-definition.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: ingressroutes.traefik.containo.us 5 | 6 | spec: 7 | group: traefik.containo.us 8 | version: v1alpha1 9 | names: 10 | kind: IngressRoute 11 | plural: ingressroutes 12 | singular: ingressroute 13 | scope: Namespaced 14 | 15 | --- 16 | apiVersion: apiextensions.k8s.io/v1beta1 17 | kind: CustomResourceDefinition 18 | metadata: 19 | name: middlewares.traefik.containo.us 20 | 21 | spec: 22 | group: traefik.containo.us 23 | version: v1alpha1 24 | names: 25 | kind: Middleware 26 | plural: middlewares 27 | singular: middleware 28 | scope: Namespaced 29 | 30 | --- 31 | apiVersion: apiextensions.k8s.io/v1beta1 32 | kind: CustomResourceDefinition 33 | metadata: 34 | name: ingressroutetcps.traefik.containo.us 35 | 36 | spec: 37 | group: traefik.containo.us 38 | version: v1alpha1 39 | names: 40 | kind: IngressRouteTCP 41 | plural: ingressroutetcps 42 | singular: ingressroutetcp 43 | scope: Namespaced 44 | 45 | --- 46 | apiVersion: apiextensions.k8s.io/v1beta1 47 | kind: CustomResourceDefinition 48 | metadata: 49 | name: ingressrouteudps.traefik.containo.us 50 | 51 | spec: 52 | group: traefik.containo.us 53 | version: v1alpha1 54 | names: 55 | kind: IngressRouteUDP 56 | plural: ingressrouteudps 57 | singular: ingressrouteudp 58 | scope: Namespaced 59 | 60 | --- 61 | apiVersion: apiextensions.k8s.io/v1beta1 62 | kind: CustomResourceDefinition 63 | metadata: 64 | name: tlsoptions.traefik.containo.us 65 | 66 | spec: 67 | group: traefik.containo.us 68 | version: v1alpha1 69 | 
names: 70 | kind: TLSOption 71 | plural: tlsoptions 72 | singular: tlsoption 73 | scope: Namespaced 74 | 75 | --- 76 | apiVersion: apiextensions.k8s.io/v1beta1 77 | kind: CustomResourceDefinition 78 | metadata: 79 | name: tlsstores.traefik.containo.us 80 | 81 | spec: 82 | group: traefik.containo.us 83 | version: v1alpha1 84 | names: 85 | kind: TLSStore 86 | plural: tlsstores 87 | singular: tlsstore 88 | scope: Namespaced 89 | 90 | --- 91 | apiVersion: apiextensions.k8s.io/v1beta1 92 | kind: CustomResourceDefinition 93 | metadata: 94 | name: traefikservices.traefik.containo.us 95 | 96 | spec: 97 | group: traefik.containo.us 98 | version: v1alpha1 99 | names: 100 | kind: TraefikService 101 | plural: traefikservices 102 | singular: traefikservice 103 | scope: Namespaced -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/05-traefik-rbac.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | metadata: 5 | name: traefik-ingress-controller 6 | 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - services 12 | - endpoints 13 | - secrets 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | - apiGroups: 19 | - extensions 20 | resources: 21 | - ingresses 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | - apiGroups: 27 | - extensions 28 | resources: 29 | - ingresses/status 30 | verbs: 31 | - update 32 | - apiGroups: 33 | - traefik.containo.us 34 | resources: 35 | - middlewares 36 | - ingressroutes 37 | - traefikservices 38 | - ingressroutetcps 39 | - ingressrouteudps 40 | - tlsoptions 41 | - tlsstores 42 | verbs: 43 | - get 44 | - list 45 | - watch 46 | 47 | --- 48 | kind: ClusterRoleBinding 49 | apiVersion: rbac.authorization.k8s.io/v1beta1 50 | metadata: 51 | name: traefik-ingress-controller 52 | 53 | roleRef: 54 | apiGroup: rbac.authorization.k8s.io 55 | kind: ClusterRole 56 
| name: traefik-ingress-controller 57 | subjects: 58 | - kind: ServiceAccount 59 | name: traefik-ingress-controller 60 | namespace: default 61 | --- -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/10-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: traefik-ingress-controller 5 | -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/11-traefik-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: traefik-config-map 5 | data: 6 | traefik.toml: |- 7 | [global] 8 | checkNewVersion = false 9 | sendAnonymousUsage = false 10 | [retry] 11 | attempts = 3 12 | maxMem = 3 13 | [entryPoints] 14 | [entryPoints.web] 15 | address = ":80" 16 | [entryPoints.web.http] 17 | [entryPoints.web.http.redirections] 18 | [entryPoints.web.http.redirections.entryPoint] 19 | to = "websecure" 20 | scheme = "https" 21 | permanent = true 22 | [entryPoints.websecure] 23 | address = ":443" 24 | [log] 25 | level = "DEBUG" 26 | [accessLog] 27 | [api] 28 | insecure = true 29 | dashboard = true 30 | debug = true 31 | [providers] 32 | [providers.file] 33 | directory = "/etc/traefik" 34 | watch = true 35 | [providers.kubernetesCRD] 36 | 37 | 38 | [[tls.certificates]] 39 | keyFile = "/var/ssl/start-domain-com-ssl/star_domain.com.key" 40 | certFile = "/var/ssl/start-domain-com-ssl/star_domain_com.chained.crt" 41 | 42 | [tls.options] 43 | [tls.options.default] 44 | minVersion = "VersionTLS12" 45 | preferServerCipherSuites = true 46 | cipherSuites = [ 47 | "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", 48 | "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", 49 | "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", 50 | "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", 51 | 
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", 52 | "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", 53 | "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", 54 | "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", 55 | "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", 56 | ] 57 | 58 | [tls.options.mintls13] 59 | minVersion = "VersionTLS13" 60 | 61 | [tls.stores] 62 | [tls.stores.default] 63 | [tls.stores.default.defaultCertificate] 64 | keyFile = "/var/ssl/start-domain-com-ssl/star_domain.com.key" 65 | certFile = "/var/ssl/start-domain-com-ssl/star_domain_com.chained.crt" -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/12-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | star_domain.com.key: 4 | star_domain_com.chained.crt: 5 | kind: Secret 6 | metadata: 7 | name: start-domain-com-ssl 8 | namespace: default 9 | type: Opaque -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/15-traefik-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: apps/v1 4 | metadata: 5 | name: traefik 6 | labels: 7 | app: traefik 8 | 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: traefik 14 | template: 15 | metadata: 16 | labels: 17 | app: traefik 18 | spec: 19 | serviceAccountName: traefik-ingress-controller 20 | volumes: 21 | - name: config 22 | configMap: 23 | name: traefik-config-map 24 | - name: start-domain-com-ssl 25 | secret: 26 | secretName: start-domain-com-ssl 27 | containers: 28 | - name: traefik 29 | image: traefik:v2.2.1 30 | ports: 31 | - name: web 32 | containerPort: 80 33 | - name: admin 34 | containerPort: 8080 35 | - name: websecure 36 | containerPort: 443 37 | volumeMounts: 38 | - mountPath: /etc/traefik/traefik.toml 39 | name: config 40 | subPath: traefik.toml 41 | - 
mountPath: "/var/ssl/start-domain-com-ssl" 42 | name: start-domain-com-ssl 43 | readOnly: true 44 | -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/20-traefik-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: traefik 6 | spec: 7 | type: LoadBalancer 8 | selector: 9 | app: traefik 10 | ports: 11 | - protocol: TCP 12 | port: 80 13 | name: web 14 | targetPort: 80 15 | - protocol: TCP 16 | port: 443 17 | name: websecure 18 | targetPort: 80 19 | - protocol: TCP 20 | port: 8080 21 | name: admin 22 | targetPort: 8080 23 | -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/25-whoami-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | namespace: default 5 | name: whoami 6 | labels: 7 | app: whoami 8 | 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: whoami 14 | template: 15 | metadata: 16 | labels: 17 | app: whoami 18 | spec: 19 | containers: 20 | - name: whoami 21 | image: containous/whoami 22 | ports: 23 | - name: web 24 | containerPort: 80 25 | 26 | -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/30-whoami-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: whoami 5 | 6 | spec: 7 | ports: 8 | - protocol: TCP 9 | name: web 10 | port: 80 11 | selector: 12 | app: whoami 13 | 14 | -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/35-whoami-ingress-route.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRoute 3 | metadata: 4 | name: whoami-whoami 5 | namespace: default 6 | spec: 7 | tls: {} 8 | entryPoints: 9 | - web 10 | - websecure 11 | routes: 12 | - match: PathPrefix(`/whoami-app-api`) 13 | kind: Rule 14 | services: 15 | - name: whoami 16 | port: 80 17 | 18 | -------------------------------------------------------------------------------- /gcp/task-013-traefik-whoami-tls-custom-certs/ReadMe.md: -------------------------------------------------------------------------------- 1 | ## To deploy traefik on GKE with whoami service and with custom TLS certificates 2 | 3 | 4 | - Assuming that you have the certificates already, please visit visit [here](https://www.base64encode.org/) and 5 | - convert your `star_domain.com.key` to base64 and paste the value in `12-secret.yaml` 6 | - convert your `star_domain_com.chained.crt` to base64 and paste the value in `12-secret.yaml` 7 | 8 | 9 | - Create the secret using `kubectl apply -f 12-secret.yaml` 10 | 11 | - Now create the rest of the resources using `kubectl apply -f .` 12 | 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/Infra-Pods.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/Infra-Pods.png -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/Infra-vms.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/Infra-vms.png 
-------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/K8s-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/K8s-dashboard.png -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/Pod-metrics.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/Pod-metrics.png -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/containers-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/containers-overview.png -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/host-overview-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/host-overview-2.png -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/host-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/host-overview.png 
-------------------------------------------------------------------------------- /gcp/task-014-metricbeat/.ReadMe_images/system-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-014-metricbeat/.ReadMe_images/system-overview.png -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/00-service-account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: metricbeat/templates/01-serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: metricbeat 7 | namespace: kube-system 8 | labels: 9 | k8s-app: metricbeat 10 | --- -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/04-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metricbeat 5 | labels: 6 | k8s-app: metricbeat 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | - nodes 11 | - namespaces 12 | - events 13 | - pods 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: ["extensions"] 16 | resources: 17 | - replicasets 18 | verbs: ["get", "list", "watch"] 19 | - apiGroups: ["apps"] 20 | resources: 21 | - statefulsets 22 | - deployments 23 | verbs: ["get", "list", "watch"] 24 | - apiGroups: 25 | - "" 26 | resources: 27 | - nodes/stats 28 | verbs: 29 | - get 30 | --- 31 | -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/08-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: metricbeat 5 | subjects: 6 | - kind: ServiceAccount 7 | 
name: metricbeat 8 | namespace: kube-system 9 | roleRef: 10 | kind: ClusterRole 11 | name: metricbeat 12 | apiGroup: rbac.authorization.k8s.io 13 | --- -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/12-configmap-metricbeat-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: metricbeat-deployment-modules 5 | namespace: kube-system 6 | labels: 7 | k8s-app: metricbeat 8 | data: 9 | # This module requires `kube-state-metrics` up and running under `kube-system` namespace 10 | kubernetes.yml: |- 11 | - module: kubernetes 12 | metricsets: 13 | - state_node 14 | - state_deployment 15 | - state_replicaset 16 | - state_pod 17 | - state_container 18 | - state_cronjob 19 | - state_resourcequota 20 | # Uncomment this to get k8s events: 21 | #- event 22 | period: 10s 23 | host: ${NODE_NAME} 24 | hosts: ["kube-state-metrics:8080"] 25 | --- 26 | 27 | apiVersion: v1 28 | kind: ConfigMap 29 | metadata: 30 | name: metricbeat-deployment-config 31 | namespace: kube-system 32 | labels: 33 | k8s-app: metricbeat 34 | data: 35 | metricbeat.yml: |- 36 | metricbeat.config.modules: 37 | # Mounted `metricbeat-daemonset-modules` configmap: 38 | path: ${path.config}/modules.d/*.yml 39 | # Reload module configs as they change: 40 | reload.enabled: false 41 | 42 | processors: 43 | - add_cloud_metadata: 44 | 45 | cloud.id: ${ELASTIC_CLOUD_ID} 46 | cloud.auth: ${ELASTIC_CLOUD_AUTH} 47 | 48 | output.elasticsearch: 49 | hosts: ['${ELASTICSEARCH_HOST:elasticsearch}:${ELASTICSEARCH_PORT:9200}'] 50 | username: ${ELASTICSEARCH_USERNAME} 51 | password: ${ELASTICSEARCH_PASSWORD} -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/20-daemonset.yaml: -------------------------------------------------------------------------------- 1 | # Deploy a Metricbeat instance per node for node metrics 
retrieval 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: metricbeat 6 | namespace: kube-system 7 | labels: 8 | k8s-app: metricbeat 9 | spec: 10 | selector: 11 | matchLabels: 12 | k8s-app: metricbeat 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: metricbeat 17 | spec: 18 | serviceAccountName: metricbeat 19 | terminationGracePeriodSeconds: 30 20 | hostNetwork: true 21 | dnsPolicy: ClusterFirstWithHostNet 22 | containers: 23 | - name: metricbeat 24 | image: docker.elastic.co/beats/metricbeat:7.8.1 25 | args: [ 26 | "-c", "/etc/metricbeat.yml", 27 | "-e", 28 | "-system.hostfs=/hostfs", 29 | ] 30 | env: 31 | - name: ELASTICSEARCH_HOST 32 | value: 34.68.27.112 33 | - name: ELASTICSEARCH_PORT 34 | value: "9200" 35 | - name: ELASTICSEARCH_USERNAME 36 | value: elastic 37 | - name: ELASTICSEARCH_PASSWORD 38 | value: changeme 39 | - name: ELASTIC_CLOUD_ID 40 | value: 41 | - name: ELASTIC_CLOUD_AUTH 42 | value: 43 | - name: NODE_NAME 44 | valueFrom: 45 | fieldRef: 46 | fieldPath: spec.nodeName 47 | securityContext: 48 | runAsUser: 0 49 | # If using Red Hat OpenShift uncomment this: 50 | #privileged: true 51 | resources: 52 | limits: 53 | memory: 200Mi 54 | requests: 55 | cpu: 100m 56 | memory: 100Mi 57 | volumeMounts: 58 | - name: config 59 | mountPath: /etc/metricbeat.yml 60 | readOnly: true 61 | subPath: metricbeat.yml 62 | - name: data 63 | mountPath: /usr/share/metricbeat/data 64 | - name: modules 65 | mountPath: /usr/share/metricbeat/modules.d 66 | readOnly: true 67 | - name: dockersock 68 | mountPath: /var/run/docker.sock 69 | - name: proc 70 | mountPath: /hostfs/proc 71 | readOnly: true 72 | - name: cgroup 73 | mountPath: /hostfs/sys/fs/cgroup 74 | readOnly: true 75 | volumes: 76 | - name: proc 77 | hostPath: 78 | path: /proc 79 | - name: cgroup 80 | hostPath: 81 | path: /sys/fs/cgroup 82 | - name: dockersock 83 | hostPath: 84 | path: /var/run/docker.sock 85 | - name: config 86 | configMap: 87 | defaultMode: 0600 88 | name: 
metricbeat-daemonset-config 89 | - name: modules 90 | configMap: 91 | defaultMode: 0600 92 | name: metricbeat-daemonset-modules 93 | - name: data 94 | hostPath: 95 | path: /var/lib/metricbeat-data 96 | type: DirectoryOrCreate 97 | 98 | -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/24-deployment.yaml: -------------------------------------------------------------------------------- 1 | # Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: metricbeat 6 | namespace: kube-system 7 | labels: 8 | k8s-app: metricbeat 9 | spec: 10 | selector: 11 | matchLabels: 12 | k8s-app: metricbeat 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: metricbeat 17 | spec: 18 | serviceAccountName: metricbeat 19 | hostNetwork: true 20 | dnsPolicy: ClusterFirstWithHostNet 21 | containers: 22 | - name: metricbeat 23 | image: docker.elastic.co/beats/metricbeat:7.8.1 24 | args: [ 25 | "-c", "/etc/metricbeat.yml", 26 | "-e", 27 | ] 28 | env: 29 | - name: ELASTICSEARCH_HOST 30 | value: 34.68.27.112 31 | - name: ELASTICSEARCH_PORT 32 | value: "9200" 33 | - name: ELASTICSEARCH_USERNAME 34 | value: elastic 35 | - name: ELASTICSEARCH_PASSWORD 36 | value: changeme 37 | - name: ELASTIC_CLOUD_ID 38 | value: 39 | - name: ELASTIC_CLOUD_AUTH 40 | value: 41 | - name: NODE_NAME 42 | valueFrom: 43 | fieldRef: 44 | fieldPath: spec.nodeName 45 | securityContext: 46 | runAsUser: 0 47 | resources: 48 | limits: 49 | memory: 200Mi 50 | requests: 51 | cpu: 100m 52 | memory: 100Mi 53 | volumeMounts: 54 | - name: config 55 | mountPath: /etc/metricbeat.yml 56 | readOnly: true 57 | subPath: metricbeat.yml 58 | - name: modules 59 | mountPath: /usr/share/metricbeat/modules.d 60 | readOnly: true 61 | volumes: 62 | - name: config 63 | configMap: 64 | defaultMode: 0600 65 | name: metricbeat-deployment-config 66 | - name: modules 67 | configMap: 
68 | defaultMode: 0600 69 | name: metricbeat-deployment-modules 70 | 71 | -------------------------------------------------------------------------------- /gcp/task-014-metricbeat/ReadMe.md: -------------------------------------------------------------------------------- 1 | ### To start metricbeat in kubernetes cluster and ship the kubernetes metrics to elastic search which can be viewed by kibana 2 | 3 | 4 | #### Pre-requisites 5 | 6 | - elastic-search-docker: 7 | - You can install elastic-search docker by visiting [elastic-search-docker](https://github.com/codeaprendiz/ansible-kitchen/tree/master/playbooks/roles/elastic-search-cluster-docker) and 8 | - kibana-docker: 9 | - You can install kibana docker by using this link [kibana-docker](https://github.com/codeaprendiz/ansible-kitchen/tree/master/playbooks/roles/kibana-docker) 10 | - kube-state-metrics: 11 | - You need to install kube-state-metrics as this will be used by metric beat to featch additional metrics. You can 12 | do so by using this task-link [task-015-kube-state-metrics](../task-015-kube-state-metrics) 13 | 14 | - Docs referred 15 | 16 | - [k8s resources](https://raw.githubusercontent.com/elastic/beats/7.8/deploy/kubernetes/metricbeat-kubernetes.yaml) 17 | 18 | - [metricbeat](https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-reference-yml.html) 19 | 20 | - Change the IPs of elastic-search `20-daemonset.yaml` and `24-deployment.yaml` with the public IP which you get. 21 | 22 | - Change the IP of kibana in `16-configmap-metricbeat-daemonset.yaml` with the public IP of kibana which you get. 23 | 24 | - Apply the k8s resources 25 | ```bash 26 | $ kubectl apply -f . 
27 | 28 | ``` 29 | 30 | - Metricbeat logs after successful connection to elastic search 31 | 32 | ```bash 33 | 2020-07-31T10:18:29.404Z INFO [publisher_pipeline_output] pipeline/output.go:144 Connecting to backoff(elasticsearch(http://35.226.68.74:9200)) 34 | 2020-07-31T10:18:34.475Z INFO [publisher_pipeline_output] pipeline/output.go:152 Connection to backoff(elasticsearch(http://35.226.68.74:9200)) established 35 | ``` 36 | 37 | - Now you can check you infrastructure in kibana as showing in the following screenshot (Observability - metrics) 38 | 39 | - Infra VMs 40 | 41 | ![](.ReadMe_images/Infra-vms.png) 42 | 43 | - Infra Pods 44 | 45 | ![](.ReadMe_images/Infra-Pods.png) 46 | 47 | - Pod Metrics 48 | 49 | ![](.ReadMe_images/Pod-metrics.png) 50 | 51 | - Pre Built Imported Dashboard 52 | 53 | ![](.ReadMe_images/K8s-dashboard.png) 54 | 55 | - Pre Built System - Overview Dashboard 56 | 57 | ![](.ReadMe_images/system-overview.png) 58 | 59 | - Pre Built Host Overview Dashboard 60 | 61 | ![](.ReadMe_images/host-overview.png) 62 | 63 | ![](.ReadMe_images/host-overview-2.png) 64 | 65 | - Pre Built Containers Dashboard 66 | 67 | ![](.ReadMe_images/containers-overview.png) 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /gcp/task-015-kube-state-metrics/ReadMe.md: -------------------------------------------------------------------------------- 1 | ## To deploy kube-state-metrics on kubenetes cluster 2 | 3 | [Docs Referred](https://github.com/kubernetes/kube-state-metrics#kubernetes-deployment) 4 | 5 | - To deploy the k8s resources you can use 6 | ```bash 7 | $ kubectl apply -f . 
8 | ``` -------------------------------------------------------------------------------- /gcp/task-015-kube-state-metrics/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: kube-state-metrics 12 | subjects: 13 | - kind: ServiceAccount 14 | name: kube-state-metrics 15 | namespace: kube-system -------------------------------------------------------------------------------- /gcp/task-015-kube-state-metrics/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - configmaps 13 | - secrets 14 | - nodes 15 | - pods 16 | - services 17 | - resourcequotas 18 | - replicationcontrollers 19 | - limitranges 20 | - persistentvolumeclaims 21 | - persistentvolumes 22 | - namespaces 23 | - endpoints 24 | verbs: 25 | - list 26 | - watch 27 | - apiGroups: 28 | - extensions 29 | resources: 30 | - daemonsets 31 | - deployments 32 | - replicasets 33 | - ingresses 34 | verbs: 35 | - list 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - statefulsets 41 | - daemonsets 42 | - deployments 43 | - replicasets 44 | verbs: 45 | - list 46 | - watch 47 | - apiGroups: 48 | - batch 49 | resources: 50 | - cronjobs 51 | - jobs 52 | verbs: 53 | - list 54 | - watch 55 | - apiGroups: 56 | - autoscaling 57 | resources: 58 | - horizontalpodautoscalers 59 | verbs: 60 | - list 61 | - watch 62 | - apiGroups: 63 | - authentication.k8s.io 
64 | resources: 65 | - tokenreviews 66 | verbs: 67 | - create 68 | - apiGroups: 69 | - authorization.k8s.io 70 | resources: 71 | - subjectaccessreviews 72 | verbs: 73 | - create 74 | - apiGroups: 75 | - policy 76 | resources: 77 | - poddisruptionbudgets 78 | verbs: 79 | - list 80 | - watch 81 | - apiGroups: 82 | - certificates.k8s.io 83 | resources: 84 | - certificatesigningrequests 85 | verbs: 86 | - list 87 | - watch 88 | - apiGroups: 89 | - storage.k8s.io 90 | resources: 91 | - storageclasses 92 | - volumeattachments 93 | verbs: 94 | - list 95 | - watch 96 | - apiGroups: 97 | - admissionregistration.k8s.io 98 | resources: 99 | - mutatingwebhookconfigurations 100 | - validatingwebhookconfigurations 101 | verbs: 102 | - list 103 | - watch 104 | - apiGroups: 105 | - networking.k8s.io 106 | resources: 107 | - networkpolicies 108 | verbs: 109 | - list 110 | - watch 111 | - apiGroups: 112 | - coordination.k8s.io 113 | resources: 114 | - leases 115 | verbs: 116 | - list 117 | - watch -------------------------------------------------------------------------------- /gcp/task-015-kube-state-metrics/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: kube-system 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: kube-state-metrics 14 | template: 15 | metadata: 16 | labels: 17 | app.kubernetes.io/name: kube-state-metrics 18 | app.kubernetes.io/version: 1.9.7 19 | spec: 20 | containers: 21 | - image: quay.io/coreos/kube-state-metrics:v1.9.7 22 | livenessProbe: 23 | httpGet: 24 | path: /healthz 25 | port: 8080 26 | initialDelaySeconds: 5 27 | timeoutSeconds: 5 28 | name: kube-state-metrics 29 | ports: 30 | - containerPort: 8080 31 | name: http-metrics 32 | - containerPort: 8081 33 | name: telemetry 
34 | readinessProbe: 35 | httpGet: 36 | path: / 37 | port: 8081 38 | initialDelaySeconds: 5 39 | timeoutSeconds: 5 40 | securityContext: 41 | runAsUser: 65534 42 | nodeSelector: 43 | kubernetes.io/os: linux 44 | serviceAccountName: kube-state-metrics -------------------------------------------------------------------------------- /gcp/task-015-kube-state-metrics/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: kube-system -------------------------------------------------------------------------------- /gcp/task-015-kube-state-metrics/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: kube-state-metrics 6 | app.kubernetes.io/version: 1.9.7 7 | name: kube-state-metrics 8 | namespace: kube-system 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: http-metrics 13 | port: 8080 14 | targetPort: http-metrics 15 | - name: telemetry 16 | port: 8081 17 | targetPort: telemetry 18 | selector: 19 | app.kubernetes.io/name: kube-state-metrics -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/.ReadMe_images/creating-index-pattern.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-016-journalbeat/.ReadMe_images/creating-index-pattern.png -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/.ReadMe_images/logs-dashboard.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/gcp/task-016-journalbeat/.ReadMe_images/logs-dashboard.png -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/00-service-account.yaml: -------------------------------------------------------------------------------- 1 | # Source: metricbeat/templates/01-serviceaccount.yaml 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: journalbeat 6 | namespace: kube-system 7 | --- -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/02-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: journalbeat 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - namespaces 10 | - pods 11 | verbs: 12 | - get 13 | - watch 14 | - list 15 | - apiGroups: 16 | - extensions 17 | resourceNames: 18 | - journalbeat 19 | resources: 20 | - podsecuritypolicies 21 | verbs: 22 | - use 23 | --- -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/03-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: journalbeat 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: journalbeat 9 | subjects: 10 | - kind: ServiceAccount 11 | name: journalbeat 12 | namespace: kube-system -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/04-pod-security-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | annotations: 5 | 
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default 6 | seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default 7 | name: journalbeat 8 | namespace: kube-system 9 | spec: 10 | allowedCapabilities: 11 | - KILL 12 | - CHOWN 13 | - FSETID 14 | - FOWNER 15 | - SETGID 16 | - SETUID 17 | - SETFCAP 18 | - SETPCAP 19 | - AUDIT_WRITE 20 | - NET_BIND_SERVICE 21 | fsGroup: 22 | rule: RunAsAny 23 | hostIPC: false 24 | hostNetwork: false 25 | hostPID: false 26 | privileged: false 27 | requiredDropCapabilities: 28 | - MKNOD 29 | - DAC_OVERRIDE 30 | - NET_RAW 31 | - SYS_CHROOT 32 | runAsUser: 33 | rule: RunAsAny 34 | seLinux: 35 | rule: RunAsAny 36 | supplementalGroups: 37 | rule: RunAsAny 38 | volumes: 39 | - secret 40 | - configMap 41 | - hostPath 42 | --- -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/08-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: journalbeat-config 5 | namespace: kube-system 6 | labels: 7 | k8s-app: journalbeat-logging 8 | version: v1 9 | data: 10 | journalbeat.yml: | 11 | name: "${NODENAME}" 12 | journalbeat.inputs: 13 | - paths: [] 14 | seek: cursor 15 | cursor_seek_fallback: tail 16 | 17 | processors: 18 | - add_kubernetes_metadata: 19 | host: "${NODENAME}" 20 | in_cluster: true 21 | default_indexers.enabled: false 22 | default_matchers.enabled: false 23 | indexers: 24 | - container: 25 | matchers: 26 | - fields: 27 | lookup_fields: ["container.id"] 28 | - decode_json_fields: 29 | fields: ["message"] 30 | process_array: false 31 | max_depth: 1 32 | target: "" 33 | overwrite_keys: true 34 | - drop_event.when: 35 | or: 36 | - regexp.kubernetes.pod.name: "filebeat-.*" 37 | - regexp.kubernetes.pod.name: "journalbeat-.*" 38 | - regexp.kubernetes.pod.name: "metrics-server-.*" 39 | - equals.syslog.identifier: "audit" 40 | - equals.message: 
"NetworkManager-dispatcher.service: Succeeded." 41 | - equals.message: "Started Network Manager Script Dispatcher Service." 42 | - regexp.message: '^.*?\bHTTP\/1.1" 202 \b.*?\bkube-probe\b.*?$' 43 | 44 | # Updated to ignore conflict of indices between environments 45 | setup.ilm.enabled: false 46 | setup.template.enabled: false 47 | 48 | # output.logstash: 49 | # hosts: '${LOGSTASH_HOSTS}' 50 | # compression_level: 7 51 | 52 | output.elasticsearch: 53 | hosts: ["http://34.68.27.112:9200"] 54 | 55 | 56 | -------------------------------------------------------------------------------- /gcp/task-016-journalbeat/12-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: journalbeat 5 | namespace: kube-system 6 | labels: 7 | k8s-app: journalbeat-logging 8 | version: v1 9 | spec: 10 | selector: 11 | matchLabels: 12 | k8s-app: journalbeat-logging 13 | version: v1 14 | template: 15 | metadata: 16 | labels: 17 | k8s-app: journalbeat-logging 18 | version: v1 19 | app: journalbeat 20 | name: journalbeat 21 | spec: 22 | containers: 23 | - args: 24 | - -e 25 | - -c 26 | - /etc/journalbeat.yml 27 | command: 28 | - journalbeat 29 | env: 30 | - name: NODENAME 31 | valueFrom: 32 | fieldRef: 33 | fieldPath: spec.nodeName 34 | - name: PODNAME 35 | valueFrom: 36 | fieldRef: 37 | fieldPath: metadata.name 38 | - name: LOGSTASH_HOSTS 39 | value: logstash-host.domain.com 40 | - name: LOGSTASH_PORT 41 | value: "5044" 42 | - name: ELASTICSEARCH_USERNAME 43 | value: elastic 44 | - name: ELASTICSEARCH_PASSWORD 45 | value: changeme 46 | - name: ENVIRONMENT 47 | value: dev 48 | 49 | image: docker.elastic.co/beats/journalbeat:7.8.0 50 | imagePullPolicy: Always 51 | name: journalbeat 52 | resources: 53 | limits: 54 | cpu: 600m 55 | memory: 800Mi 56 | requests: 57 | cpu: 200m 58 | memory: 400Mi 59 | volumeMounts: 60 | - mountPath: /usr/share/journalbeat/data 61 | name: data 62 | - 
mountPath: /var/log/journal 63 | name: var-journal 64 | - mountPath: /run/log/journal 65 | name: run-journal 66 | - mountPath: /etc/journalbeat.yml 67 | name: config 68 | subPath: journalbeat.yml 69 | - mountPath: /etc/machine-id 70 | name: machine-id 71 | hostNetwork: true 72 | nodeSelector: {} 73 | 74 | ### required for SE Linux protected ones 75 | securityContext: 76 | seLinuxOptions: 77 | user: system_u 78 | role: system_r 79 | type: spc_t 80 | level: s0 81 | 82 | fsGroup: 0 83 | runAsUser: 0 84 | serviceAccountName: journalbeat 85 | terminationGracePeriodSeconds: 60 86 | tolerations: 87 | - effect: NoSchedule 88 | key: node-role.kubernetes.io/master 89 | volumes: 90 | - hostPath: 91 | path: /var/log/journal/journalbeat-data 92 | name: data 93 | - hostPath: 94 | path: /var/log/journal 95 | name: var-journal 96 | - hostPath: 97 | path: /run/log/journal 98 | name: run-journal 99 | - hostPath: 100 | path: /etc/machine-id 101 | name: machine-id 102 | - configMap: 103 | items: 104 | - key: journalbeat.yml 105 | path: journalbeat.yml 106 | name: journalbeat-config 107 | name: config 108 | -------------------------------------------------------------------------------- /local-mac/application-life-cycle-management/task-033-commands-and-arguments/pod-ubuntu-ls.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: ubuntu 5 | spec: 6 | containers: 7 | - name: ubuntu-container 8 | image: ubuntu 9 | command: ["ls"] 10 | args: ["-ltrh"] -------------------------------------------------------------------------------- /local-mac/application-life-cycle-management/task-036-multi-container-pods/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | ### Identify the number of containers running in the 'red' pod. 
3 | 4 | ```bash 5 | controlplane $ kubectl describe pod red | grep -i "image id" 6 | Image ID: docker-pullable://busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f 7 | Image ID: docker-pullable://busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f 8 | Image ID: docker-pullable://busybox@sha256:c5439d7db88ab5423999530349d327b04279ad3161d7596d2126dfb5b02bfd1f 9 | ``` 10 | 11 | 12 | ### Identify the name of the containers running in the 'blue' pod. 13 | ```bash 14 | controlplane $ kubectl get pod blue -o yaml | grep -i name: | egrep -v "f:|default|node" 15 | name: blue 16 | name: teal 17 | name: navy 18 | name: navy 19 | name: teal 20 | ``` 21 | 22 | ### Create a multi-container pod with 2 containers. 23 | - Name: yellow 24 | - Container 1 Name: lemon 25 | - Container 1 Image: busybox 26 | - Container 2 Name: gold 27 | - Container 2 Image: redis 28 | 29 | ```bash 30 | controlplane $ kubectl run yellow --image=busybox --dry-run=client -o yaml > pod.yaml 31 | controlplane $ cat pod.yaml 32 | apiVersion: v1 33 | kind: Pod 34 | metadata: 35 | creationTimestamp: null 36 | labels: 37 | run: yellow 38 | name: yellow 39 | spec: 40 | containers: 41 | - image: busybox 42 | name: lemon 43 | resources: {} 44 | - image: redis 45 | name: gold 46 | dnsPolicy: ClusterFirst 47 | restartPolicy: Always 48 | status: {} 49 | 50 | controlplane $ kubectl apply -f pod.yaml 51 | pod/yellow created 52 | ``` 53 | 54 | ### Edit the pod to add a sidecar container to send logs to ElasticSearch. Mount the log volume to the sidecar container.. 
55 | 56 | - Name: app 57 | - Container Name: sidecar 58 | - Container Image: kodekloud/filebeat-configured 59 | - Volume Mount: log-volume 60 | - Mount Path: /var/log/event-simulator/ 61 | - Existing Container Name: app 62 | - Existing Container Image: kodekloud/event-simulator 63 | 64 | ```bash 65 | controlplane $ kubectl exec -it app -n elastic-stack "ls" "log/app.log" 66 | log/app.log 67 | 68 | controlplane $ kubectl get pod app -n elastic-stack -o yaml > pod.yaml 69 | 70 | controlplane $ kubectl delete -f pod.yaml 71 | pod "app" deleted 72 | 73 | controlplane $ kubectl apply -f pod.yaml 74 | pod/app created 75 | 76 | controlplane $ cat pod.yaml | grep -i containers: -A 17 77 | containers: 78 | - image: kodekloud/event-simulator 79 | imagePullPolicy: Always 80 | name: app 81 | resources: {} 82 | terminationMessagePath: /dev/termination-log 83 | terminationMessagePolicy: File 84 | volumeMounts: 85 | - mountPath: /log 86 | name: log-volume 87 | - mountPath: /var/run/secrets/kubernetes.io/serviceaccount 88 | name: default-token-fzdbx 89 | readOnly: true 90 | - image: kodekloud/filebeat-configured 91 | name: sidecar 92 | volumeMounts: 93 | - mountPath: /var/log/event-simulator/ 94 | name: log-volume 95 | ``` -------------------------------------------------------------------------------- /local-mac/cluster-creation/task-055-create-k8s-using-kubeadm-vagrant/ubuntu/allow-bridge-nf-traffic.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sysctl net.bridge.bridge-nf-call-iptables=1 3 | -------------------------------------------------------------------------------- /local-mac/cluster-creation/task-055-create-k8s-using-kubeadm-vagrant/ubuntu/install-docker-2.sh: -------------------------------------------------------------------------------- 1 | cd /tmp 2 | curl -fsSL https://get.docker.com -o get-docker.sh 3 | sh /tmp/get-docker.sh 4 | -------------------------------------------------------------------------------- 
/local-mac/cluster-creation/task-055-create-k8s-using-kubeadm-vagrant/ubuntu/install-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | export DEBIAN_FRONTEND=noninteractive 3 | apt-get update \ 4 | && apt-get install -y \ 5 | apt-transport-https \ 6 | ca-certificates \ 7 | curl \ 8 | software-properties-common \ 9 | && curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \ 10 | && add-apt-repository \ 11 | "deb https://download.docker.com/linux/$(. /etc/os-release; echo "$ID") \ 12 | $(lsb_release -cs) \ 13 | stable" \ 14 | && apt-get update \ 15 | && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 18.06 | head -1 | awk '{print $3}') 16 | -------------------------------------------------------------------------------- /local-mac/cluster-creation/task-055-create-k8s-using-kubeadm-vagrant/ubuntu/update-dns.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sed -i -e 's/#DNS=/DNS=8.8.8.8/' /etc/systemd/resolved.conf 4 | 5 | service systemd-resolved restart -------------------------------------------------------------------------------- /local-mac/cluster-creation/task-055-create-k8s-using-kubeadm-vagrant/ubuntu/vagrant/install-guest-additions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | GUEST_ADDITION_VERSION=5.2.4 3 | GUEST_ADDITION_ISO=VBoxGuestAdditions_${GUEST_ADDITION_VERSION}.iso 4 | GUEST_ADDITION_MOUNT=/media/VBoxGuestAdditions 5 | 6 | apt-get install linux-headers-$(uname -r) build-essential dkms 7 | 8 | wget http://download.virtualbox.org/virtualbox/${GUEST_ADDITION_VERSION}/${GUEST_ADDITION_ISO} 9 | mkdir -p ${GUEST_ADDITION_MOUNT} 10 | mount -o loop,ro ${GUEST_ADDITION_ISO} ${GUEST_ADDITION_MOUNT} 11 | sh ${GUEST_ADDITION_MOUNT}/VBoxLinuxAdditions.run 12 | rm ${GUEST_ADDITION_ISO} 13 | umount ${GUEST_ADDITION_MOUNT} 14 | rmdir 
${GUEST_ADDITION_MOUNT} 15 | -------------------------------------------------------------------------------- /local-mac/cluster-creation/task-055-create-k8s-using-kubeadm-vagrant/ubuntu/vagrant/setup-hosts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | IFNAME=$1 4 | ADDRESS="$(ip -4 addr show $IFNAME | grep "inet" | head -1 |awk '{print $2}' | cut -d/ -f1)" 5 | sed -e "s/^.*${HOSTNAME}.*/${ADDRESS} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts 6 | 7 | # remove ubuntu-bionic entry 8 | sed -e '/^.*ubuntu-bionic.*/d' -i /etc/hosts 9 | 10 | # Update /etc/hosts about other hosts 11 | cat >> /etc/hosts < 25 | ``` 26 | 27 | - How many containers are running 28 | > 1/1 Running containers/Total Containers 29 | 30 | - What is the state of the container running 31 | ```bash 32 | $ kubectl describe pod nginx | grep -i state 33 | State: Running 34 | ``` 35 | 36 | - Can you get the event section output of the pod 37 | ```bash 38 | $ kubectl describe pod nginx | tail -n 8 39 | Events: 40 | Type Reason Age From Message 41 | ---- ------ ---- ---- ------- 42 | Normal Scheduled 5m37s default-scheduler Successfully assigned default/nginx to docker-desktop 43 | Normal Pulling 5m36s kubelet Pulling image "nginx" 44 | Normal Pulled 5m21s kubelet Successfully pulled image "nginx" 45 | Normal Created 5m21s kubelet Created container nginx 46 | Normal Started 5m21s kubelet Started container nginx 47 | ``` 48 | 49 | - Can you delete the pod `nginx` 50 | ```bash 51 | $ kubectl delete pod nginx 52 | pod "nginx" deleted 53 | ``` 54 | 55 | - Create a redis pod using `redis` image by using command line, also generate the corresponding yaml file. Do a dry run first to create 56 | the yaml and then use the yaml to create the pod. 
57 | ```bash 58 | $ kubectl run redis --image=redis --dry-run=client -o yaml > redis.yml 59 | $ kubectl create -f redis.yml 60 | pod/redis created 61 | ``` -------------------------------------------------------------------------------- /local-mac/core-concepts/task-017-pods/redis.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: redis 7 | name: redis 8 | spec: 9 | containers: 10 | - image: redis 11 | name: redis 12 | resources: {} 13 | dnsPolicy: ClusterFirst 14 | restartPolicy: Always 15 | status: {} 16 | -------------------------------------------------------------------------------- /local-mac/core-concepts/task-018-replicaset/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | - How many replicasets exist 3 | ```bash 4 | $ kubectl get replicaset 5 | No resources found in default namespace. 6 | ``` 7 | 8 | - Which image has been used to create pods using the 9 | replica set 10 | ```bash 11 | $ kubectl describe replicaset replicaset-1 | grep Image 12 | Image: nginx 13 | ``` 14 | 15 | - How many pods are in ready state 16 | ```bash 17 | $ kubectl get pods 18 | NAME READY STATUS RESTARTS AGE 19 | replicaset-1-66pmk 1/1 Running 0 60s 20 | replicaset-1-tmlkq 1/1 Running 0 60s 21 | ``` 22 | 23 | - Create a replicaset using nginx image 24 | ```bash 25 | kubectl apply -f replicaset.yaml 26 | ``` 27 | 28 | > NOTE: The selector:matchLabels:A:B and template:metadata:labels:A:B must always match. 
It makes sense as the replica set is aimed at maintaining pods which have labels mentioned (for example) A:B 29 | 30 | 31 | - Can you edit the existing replicaset to have more pods 32 | ```bash 33 | $ kubectl edit replicaset replicaset-1 34 | replicaset.apps/replicaset-1 edited 35 | 36 | OR 37 | 38 | $ kubectl scale replicaset --replicas=5 replicaset-1 39 | replicaset.apps/replicaset-1 scaled 40 | ``` 41 | -------------------------------------------------------------------------------- /local-mac/core-concepts/task-018-replicaset/replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: replicaset-1 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | tier: frontend 10 | template: 11 | metadata: 12 | labels: 13 | tier: frontend 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx -------------------------------------------------------------------------------- /local-mac/core-concepts/task-019-deployment/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | - Create a deployment nginx without using yaml file 3 | 4 | ```bash 5 | kubectl create deployment httpd-frontend --image=nginx 6 | ``` 7 | 8 | - Scale the deployment to 3 replicas without using yaml 9 | 10 | ```bash 11 | $ kubectl scale deployment httpd-frontend --replicas=3 12 | deployment.apps/httpd-frontend scaled 13 | ``` 14 | 15 | - Create a yaml deployment file of `nginx` 16 | ```bash 17 | $ kubectl create deployment httpd-frontend --image=nginx --dry-run=client -o yaml > nginx-deployment.yaml 18 | ``` -------------------------------------------------------------------------------- /local-mac/core-concepts/task-019-deployment/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | 
app: httpd-frontend 7 | name: httpd-frontend 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: httpd-frontend 13 | strategy: {} 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | labels: 18 | app: httpd-frontend 19 | spec: 20 | containers: 21 | - image: nginx 22 | name: nginx 23 | resources: {} 24 | status: {} 25 | -------------------------------------------------------------------------------- /local-mac/core-concepts/task-020-namespaces/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | - To get how many Namespaces exist on the system? 4 | ```bash 5 | $ kubectl get ns 6 | NAME STATUS AGE 7 | default Active 78d 8 | kube-node-lease Active 78d 9 | kube-public Active 78d 10 | kube-system Active 78d 11 | ``` 12 | 13 | - How many pods exist in kube-system namespace 14 | ```bash 15 | $ kubectl get pods -n kube-system 16 | NAME READY STATUS RESTARTS AGE 17 | coredns-864fccfb95-gwtl4 1/1 Running 14 78d 18 | coredns-864fccfb95-qqlmg 1/1 Running 14 78d 19 | etcd-docker-desktop 1/1 Running 14 78d 20 | kube-apiserver-docker-desktop 1/1 Running 15 78d 21 | kube-controller-manager-docker-desktop 1/1 Running 14 78d 22 | kube-proxy-nsmlj 1/1 Running 14 78d 23 | kube-scheduler-docker-desktop 1/1 Running 19 78d 24 | storage-provisioner 1/1 Running 27 78d 25 | vpnkit-controller 1/1 Running 16 78d 26 | 27 | ``` 28 | 29 | - To create a Pod in name `redis` from image `redis` in namespace `kube-system` 30 | ```bash 31 | $ kubectl run redis --image=redis --dry-run=client -n kube-system -o yaml > pod.yaml 32 | ``` 33 | ```bash 34 | kubectl -n get -o yaml. 
35 | ``` -------------------------------------------------------------------------------- /local-mac/core-concepts/task-021-services/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | - To get the number of services 4 | ```bash 5 | $ kubectl get services 6 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 7 | kubernetes ClusterIP 10.96.0.1 443/TCP 80d 8 | ``` 9 | 10 | - Try getting the details like Target Port and Labels 11 | ```bash 12 | $ kubectl describe service kubernetes 13 | Name: kubernetes 14 | Namespace: default 15 | Labels: component=apiserver 16 | provider=kubernetes 17 | Annotations: 18 | Selector: 19 | Type: ClusterIP 20 | IP: 10.96.0.1 21 | Port: https 443/TCP 22 | TargetPort: 6443/TCP 23 | Endpoints: 192.168.65.3:6443 24 | Session Affinity: None 25 | Events: 26 | ``` 27 | 28 | - Create nginx deployment. Then create a service of name: webapp-service, type: NodePort, targetPort: 80, nodePort:30008, port:8080, selector: simple-app to access the nginx deployment 29 | ```yaml 30 | $ kubectl create deployment my-dep --image=nginx 31 | deployment.apps/my-dep created 32 | 33 | $ kubectl expose deployment my-dep --name=webapp-service --target-port=80 --type=NodePort --port=8080 --dry-run=client -o yaml 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | creationTimestamp: null 38 | labels: 39 | app: my-dep 40 | name: webapp-service 41 | spec: 42 | ports: 43 | - port: 8080 44 | protocol: TCP 45 | targetPort: 80 46 | selector: 47 | app: my-dep 48 | type: NodePort 49 | status: 50 | loadBalancer: {} 51 | ``` 52 | Now you can edit the yaml file to add `NodePort` as well. 
Then create the service using `kubectl create -f filename.yaml` -------------------------------------------------------------------------------- /local-mac/logging-and-monitoring/task-030-metrics-server/ReadMe.md: -------------------------------------------------------------------------------- 1 | ## Metrics-Server 2 | Metrics server is an in-memory monitoring solution 3 | 4 | The kubelet (agent which runs on each node in kubernetes-cluster) also contains a component called cAdvisor (or container advisor). It is responsible for retrieving 5 | performance metrics from pods and exposing them through the kubelet api to make the metrics available for the metrics-server 6 | 7 | 8 | ### Deployment 9 | 10 | [metrics-server](https://github.com/kubernetes-sigs/metrics-server) 11 | 12 | Download the deployment files from here and deploy using 13 | 14 | ```bash 15 | kubectl apply -f . 16 | ``` 17 | 18 | 19 | - Once deployed you can view the cluster performance by running following command. 20 | This gives CPU and Memory consumption of each of the nodes. 21 | 22 | ```bash 23 | $ kubectl top node 24 | NAME CPU(cores) CPU% MEMORY(bytes) MEMORY% 25 | controlplane 146m 7% 1048Mi 55% 26 | node01 1998m 99% 585Mi 15% 27 | ``` 28 | 29 | - To view the performance metrics of pods. This gives CPU and Memory consumption of each of the pods. 
30 | ```bash 31 | $ kubectl top pods 32 | NAME CPU(cores) MEMORY(bytes) 33 | elephant 12m 50Mi 34 | lion 899m 1Mi 35 | rabbit 972m 1Mi 36 | ``` -------------------------------------------------------------------------------- /local-mac/logging-and-monitoring/task-031-managing-logs/ReadMe.md: -------------------------------------------------------------------------------- 1 | ## Managing Logs 2 | 3 | - Check the logs -f running pods having only one container 4 | ```bash 5 | $ kubectl get pods 6 | NAME READY STATUS RESTARTS AGE 7 | webapp-1 1/1 Running 0 25s 8 | $ kubectl logs -f webapp-1 9 | [2021-01-13 19:51:10,730] INFO in event-simulator: USER1 logged out 10 | [2021-01-13 19:51:11,731] INFO in event-simulator: USER3 logged out 11 | [2021-01-13 19:51:12,733] INFO in event-simulator: USER3 logged in 12 | [2021-01-13 19:51:13,735] INFO in event-simulator: USER3 is viewing page1 13 | [2021-01-13 19:51:14,736] INFO in event-simulator: USER3 is viewing page1 14 | [2021-01-13 19:51:15,738] WARNING in event-simulator: USER5 Failed to Login as the account is locked due to MANY FAILED ATTEMPTS. 15 | [2021-01-13 19:51:15,738] INFO in event-simulator: USER4 is viewing page1 16 | [2021-01-13 19:51:16,740] INFO in event-simulator: USER2 logged in 17 | [2021-01-13 19:51:17,740] INFO in event-simulator: USER4 logged out 18 | [2021-01-13 19:51:18,742] WARNING in event-simulator: USER7 Order failed as the item is OUT OF STOCK. 19 | ``` 20 | 21 | - What if a pod has two containers, how do we check the logs then 22 | ```bash 23 | controlplane $ kubectl get pods 24 | NAME READY STATUS RESTARTS AGE 25 | webapp-1 1/1 Running 0 2m13s 26 | webapp-2 2/2 Running 0 9s 27 | controlplane $ kubectl logs -f webapp-2 28 | error: a container name must be specified for pod webapp-2, choose one of: [simple-webapp db] 29 | ``` 30 | 31 | - You need to specify the container name i.e. 
`simple-webapp` or `db` in this case 32 | ```bash 33 | controlplane $ kubectl logs -f webapp-2 simple-webapp 34 | [2021-01-13 19:53:10,116] INFO in event-simulator: USER1 logged in 35 | [2021-01-13 19:53:11,118] INFO in event-simulator: USER3 logged out 36 | [2021-01-13 19:53:12,119] INFO in event-simulator: USER2 is viewing page1 37 | [2021-01-13 19:53:13,121] INFO in event-simulator: USER1 is viewing page2 38 | [2021-01-13 19:53:14,122] INFO in event-simulator: USER1 is viewing page1 39 | [2021-01-13 19:53:15,124] WARNING in event-simulator: USER5 Failed to Login as the account is locked due to MANY FAILED ATTEMPTS. 40 | [2021-01-13 19:53:15,124] INFO in event-simulator: USER1 is viewing page3 41 | [2021-01-13 19:53:16,126] INFO in event-simulator: USER3 logged in 42 | [2021-01-13 19:53:17,127] INFO in event-simulator: USER4 logged out 43 | [2021-01-13 19:53:18,128] WARNING in event-simulator: USER30 Order failed as the item is OUT OF STOCK. 44 | ``` -------------------------------------------------------------------------------- /local-mac/scheduling/task-022-labels-and-selectors/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | - How many pods have the following labels `bu=finance` in default namespace 3 | ```bash 4 | $ kubectl get pods -l bu=finance 5 | ``` 6 | 7 | - How many objects have label `env=prod` including PODs, ReplicaSets and any other objects? 8 | ```bash 9 | $ kubectl get all -l env=prod --no-headers | wc -l 10 | ``` 11 | 12 | - Identify the POD which is part of the prod environment, the finance BU and of frontend tier? 
13 | ```bash 14 | $ kubectl get pod -l env=prod,bu=finance,tier=frontend 15 | ``` 16 | 17 | - Note: That the labels and selectors cannot have different values 18 | ```bash 19 | $ cat replicaset-definition-1.yaml | grep -B 2 tier 20 | selector: 21 | matchLabels: 22 | tier: frontend 23 | -- 24 | metadata: 25 | labels: 26 | tier: nginx 27 | $ kubectl apply -f replicaset-definition-1.yaml 28 | The ReplicaSet "replicaset-1" is invalid: spec.template.metadata.labels: Invalid value: map[string]string{"tier":"nginx"}: `selector` does not match template `labels` 29 | 30 | $ cat replicaset-definition-1.yaml | grep -B 2 tier 31 | selector: 32 | matchLabels: 33 | tier: frontend 34 | -- 35 | metadata: 36 | labels: 37 | tier: frontend 38 | 39 | $ kubectl apply -f replicaset-definition-1.yaml 40 | replicaset.apps/replicaset-1 created 41 | ``` -------------------------------------------------------------------------------- /local-mac/scheduling/task-023-taints-and-tolerations/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | - Do any taints exist on any of the nodes? 
3 | ```bash 4 | $ kubectl describe node | egrep "Taints|Name:" 5 | Name: controlplane 6 | Taints: node-role.kubernetes.io/master:NoSchedule 7 | Name: node01 8 | Taints: 9 | ``` 10 | 11 | 12 | - Create a taint on node01 with key of 'spray', value of 'mortein' and effect of 'NoSchedule' 13 | ```bash 14 | $ kubectl taint nodes node01 spray=mortein:NoSchedule 15 | node/node01 tainted 16 | ``` 17 | 18 | - Suppose we have a pod created using the following yaml 19 | ```yaml 20 | apiVersion: v1 21 | kind: Pod 22 | metadata: 23 | creationTimestamp: null 24 | labels: 25 | run: nginx 26 | name: mosquito 27 | spec: 28 | containers: 29 | - image: nginx 30 | name: nginx 31 | resources: {} 32 | dnsPolicy: ClusterFirst 33 | restartPolicy: Always 34 | status: {} 35 | ``` 36 | and `kubectl apply -f .` we get the following error 37 | ```bash 38 | FailedScheduling 62s (x3 over 2m10s) default-scheduler 0/2 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate, 1 node(s) had taint {spray: mortein}, that the pod didn't tolerate. 39 | ``` 40 | 41 | How can we fix it? 42 | ```bash 43 | $ cat pod1.yaml | grep -A 4 -B 1 tolerations: 44 | restartPolicy: Always 45 | tolerations: 46 | - key: "spray" 47 | operator: "Equal" 48 | value: "mortein" 49 | effect: "NoSchedule" 50 | ``` 51 | 52 | - Remove the taint on master, which currently has the taint effect of NoSchedule 53 | ```bash 54 | $ kubectl describe nodes master | grep -i taints 55 | Taints: node-role.kubernetes.io/master:NoSchedule # copy this and put a `-` at the end to remove it 56 | $ kubectl taint nodes master node-role.kubernetes.io/master:NoSchedule- 57 | node/master untainted 58 | ``` -------------------------------------------------------------------------------- /local-mac/scheduling/task-025-resource-requirement-and-limits/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | #### A pod named 'rabbit' is deployed. 
Identify the CPU requirements set on the Pod 3 | 4 | ```bash 5 | $ kubectl get pod rabbit -o yaml | grep -A 4 resource 6 | resources: 7 | limits: 8 | cpu: "2" 9 | requests: 10 | cpu: "1" 11 | ``` 12 | 13 | > The status 'OOMKilled' indicates that the pod ran out of memory. Identify the memory limit set on the POD. 14 | 15 | #### The elephant runs a process that consume 15Mi of memory. Increase the limit of the elephant pod to 20Mi. 16 | 17 | - Generate the yaml 18 | ````bash 19 | $ kubectl get pod elephant -o yaml > pod.yaml 20 | ```` 21 | 22 | - Edit the file 23 | ```bash 24 | $ cat pod.yaml | grep -A 4 resources 25 | resources: 26 | limits: 27 | memory: 20Mi 28 | requests: 29 | memory: 5Mi 30 | ``` 31 | 32 | - Delete the pod 33 | ```bash 34 | $ kubectl delete pod elephant 35 | pod "elephant" deleted 36 | ``` 37 | 38 | - Create the pod again 39 | ```bash 40 | $ kubectl apply -f pod.yaml 41 | pod/elephant created 42 | ``` -------------------------------------------------------------------------------- /local-mac/scheduling/task-026-editing-pods-and-deployments/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | ## Edit a POD 3 | 4 | Remember, you **CANNOT** edit specifications of an existing POD other than the below. 5 | - spec.containers[*].image 6 | - spec.initContainers[*].image 7 | - spec.activeDeadlineSeconds 8 | - spec.tolerations 9 | 10 | For example you cannot edit the environment variables, service accounts, resource limits of a running pod. 11 | **But if you really want to, you have 2 options**: 12 | 13 | ### First option 14 | - Run the command 15 | ```bash 16 | kubectl edit pod command 17 | ``` 18 | - This will open the pod specification in an editor (vi editor). Then edit the required properties. When you try to save it, you will be denied. 
19 | This is because you are attempting to edit a field on the pod that is not editable 20 | - A copy of the file with your changes is saved in a temporary location as shown above. 21 | You can then delete the existing pod by running the command: 22 | ```bash 23 | kubectl delete pod webapp 24 | ``` 25 | - Then create a new pod with your changes using the temporary file 26 | ```bash 27 | kubectl create -f /tmp/kubectl-edit-ccvrq.yaml 28 | ``` 29 | 30 | ### Second Option 31 | 32 | - The second option is to extract the pod definition in YAML format to a file using the command 33 | ```bash 34 | kubectl get pod webapp -o yaml > my-new-pod.yaml 35 | ``` 36 | 37 | - Then make the changes to the exported file using an editor (vi editor). Save the changes 38 | ```bash 39 | vi my-new-pod.yaml 40 | ``` 41 | 42 | - Then delete the existing pod 43 | ```bash 44 | kubectl delete pod webapp 45 | ``` 46 | - Then create a new pod with the edited file 47 | ```bash 48 | kubectl create -f my-new-pod.yaml 49 | ``` 50 | 51 | ## Edit Deployments 52 | 53 | With Deployments, you can easily edit any field/property of the POD template. 54 | Since the pod template is a child of the deployment specification, with every change the deployment 55 | will automatically delete and create a new pod with the new changes. 56 | So if you are asked to edit a property of a POD part of a deployment you may do that 57 | simply by running the command 58 | ```bash 59 | kubectl edit deployment my-deployment 60 | ``` 61 | -------------------------------------------------------------------------------- /local-mac/scheduling/task-027-daemonsets/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | ### How many DaemonSets are created in the cluster in all namespaces? 
3 | 4 | ```bash 5 | $ kubectl get daemonset --all-namespaces 6 | NAMESPACE NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE 7 | kube-system kube-flannel-ds-amd64 2 2 2 2 2 14m 8 | kube-system kube-flannel-ds-arm 0 0 0 0 0 14m 9 | kube-system kube-flannel-ds-arm64 0 0 0 0 0 14m 10 | kube-system kube-flannel-ds-ppc64le 0 0 0 0 0 14m 11 | kube-system kube-flannel-ds-s390x 0 0 0 0 0 14m 12 | kube-system kube-proxy 2 2 2 2 2 kubernetes.io/os=linux 14m 13 | ``` 14 | 15 | 16 | ### On how many nodes are the pods scheduled by the DaemonSet kube-proxy 17 | 18 | ```bash 19 | $ kubectl get nodes --no-headers=true| wc -l 20 | 2 21 | ``` 22 | 23 | ### What is the image used by the POD deployed by the kube-flannel-ds-amd64 DaemonSet? 24 | 25 | ```bash 26 | $ kubectl describe pod kube-flannel-ds-amd64 -n kube-system | grep -i "pulling image" 27 | Normal Pulling 20m kubelet, controlplane Pulling image "quay.io/coreos/flannel:v0.12.0-amd64" 28 | ``` 29 | 30 | ### Deploy a DaemonSet for FluentD Logging 31 | 32 | - Name: elasticsearch 33 | - Namespace: kube-system 34 | - Image: k8s.gcr.io/fluentd-elasticsearch:1.20 35 | 36 | Go the official [doc](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) 37 | 38 | Copy and make corresponding changes 39 | 40 | daemonset.yaml 41 | ```yaml 42 | apiVersion: apps/v1 43 | kind: DaemonSet 44 | metadata: 45 | name: elasticsearch 46 | namespace: kube-system 47 | labels: 48 | k8s-app: fluentd-logging 49 | spec: 50 | selector: 51 | matchLabels: 52 | name: fluentd-elasticsearch 53 | template: 54 | metadata: 55 | labels: 56 | name: fluentd-elasticsearch 57 | spec: 58 | tolerations: 59 | # this toleration is to have the daemonset runnable on master nodes 60 | # remove it if your masters can't run pods 61 | - key: node-role.kubernetes.io/master 62 | effect: NoSchedule 63 | containers: 64 | - name: fluentd-elasticsearch 65 | image: k8s.gcr.io/fluentd-elasticsearch:1.20 66 | resources: 67 | limits: 68 | memory: 200Mi 69 | 
requests: 70 | cpu: 100m 71 | memory: 200Mi 72 | volumeMounts: 73 | - name: varlog 74 | mountPath: /var/log 75 | - name: varlibdockercontainers 76 | mountPath: /var/lib/docker/containers 77 | readOnly: true 78 | terminationGracePeriodSeconds: 30 79 | volumes: 80 | - name: varlog 81 | hostPath: 82 | path: /var/log 83 | - name: varlibdockercontainers 84 | hostPath: 85 | path: /var/lib/docker/containers 86 | ``` 87 | 88 | and then run the `kubectl apply -f .` command -------------------------------------------------------------------------------- /local-mac/scheduling/task-029-multiple-schedulers/ReadMe.md: -------------------------------------------------------------------------------- 1 | ### What is the name of the POD that deploys the default kubernetes scheduler in this environment? 2 | ```bash 3 | $ kubectl get pods -n kube-system | grep scheduler 4 | kube-scheduler-controlplane 1/1 Running 0 6m49s 5 | ``` 6 | 7 | ### What is the image used to deploy the kubernetes scheduler? 8 | 9 | ```bash 10 | $ kubectl describe pod kube-scheduler-controlplane -n kube-system | grep -i image 11 | Image: k8s.gcr.io/kube-scheduler:v1.19.0 12 | ``` 13 | 14 | ### Deploy an additional scheduler to the cluster following the given specification. 
15 | Namespace: kube-system 16 | 17 | Name: my-scheduler 18 | 19 | Status: Running 20 | 21 | Custom Scheduler Name 22 | 23 | [configure-multiple-schedulers](https://kubernetes.io/docs/tasks/extend-kubernetes/configure-multiple-schedulers/) 24 | 25 | ```bash 26 | controlplane $ cd /etc/kubernetes/manifests/ 27 | 28 | controlplane $ ls 29 | etcd.yaml kube-apiserver.yaml kube-controller-manager.yaml kube-scheduler.yaml 30 | 31 | controlplane $ cp -rfp kube-scheduler.yaml ~/my-scheduler.yaml 32 | 33 | controlplane $ vi my-scheduler.yaml 34 | 35 | ## Set the leader elect to false and set the scheduler name 36 | controlplane $ cat my-scheduler.yaml | grep -A 8 -i command 37 | - command: 38 | - kube-scheduler 39 | - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf 40 | - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf 41 | - --bind-address=127.0.0.1 42 | - --kubeconfig=/etc/kubernetes/scheduler.conf 43 | - --leader-elect=false 44 | - --scheduler-name=my-scheduler 45 | - --port=0 46 | 47 | $ kubectl apply -f my-scheduler.yaml 48 | pod/my-scheduler created 49 | 50 | controlplane $ kubectl get pods -n kube-system | grep my-scheduler 51 | my-scheduler 1/1 Running 0 2m1s 52 | ``` 53 | 54 | ### A POD definition file is given. Use it to create a POD with the new custom scheduler. 
55 | Name: nginx 56 | 57 | Uses custom scheduler 58 | 59 | Status: Running 60 | 61 | - pod.yaml 62 | ```yaml 63 | apiVersion: v1 64 | kind: Pod 65 | metadata: 66 | name: annotation-default-scheduler 67 | labels: 68 | name: multischeduler-example 69 | spec: 70 | schedulerName: my-scheduler 71 | containers: 72 | - name: pod-with-default-annotation-container 73 | image: k8s.gcr.io/pause:2.0 74 | ``` 75 | 76 | - and then run 77 | ```bash 78 | kubectl apply -f pod.yaml 79 | 80 | ## To test the scheduler 81 | kubectl get events 82 | 83 | ## You an also describe the pod and validate 84 | kubectl describe pod nginx | grep my-scheduler 85 | ``` -------------------------------------------------------------------------------- /local-mac/security/task-042-authentication/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | ### Setup basic authentication on kubernetes 3 | 4 | > Note: This is not recommended in a production environment. This is only for learning purposes. 5 | 6 | - Follow the below instructions to configure basic authentication in a kubeadm setup. 7 | 8 | - Create a file with user details locally at `/tmp/users/user-details.csv` 9 | 10 | ```bash 11 | # User File Contents 12 | password123,user1,u0001 13 | password123,user2,u0002 14 | password123,user3,u0003 15 | password123,user4,u0004 16 | password123,user5,u0005 17 | ``` 18 | 19 | - Edit the kube-apiserver static pod configured by kubeadm to pass in the user details. 
The file is located at 20 | `/etc/kubernetes/manifests/kube-apiserver.yaml` 21 | 22 | ```yaml 23 | apiVersion: v1 24 | kind: Pod 25 | metadata: 26 | name: kube-apiserver 27 | namespace: kube-system 28 | spec: 29 | containers: 30 | - command: 31 | - kube-apiserver 32 | 33 | image: k8s.gcr.io/kube-apiserver-amd64:v1.11.3 34 | name: kube-apiserver 35 | volumeMounts: 36 | - mountPath: /tmp/users 37 | name: usr-details 38 | readOnly: true 39 | volumes: 40 | - hostPath: 41 | path: /tmp/users 42 | type: DirectoryOrCreate 43 | name: usr-details 44 | ``` 45 | 46 | - Modify the kube-apiserver startup options to include the basic-auth file 47 | ```yaml 48 | apiVersion: v1 49 | kind: Pod 50 | metadata: 51 | creationTimestamp: null 52 | name: kube-apiserver 53 | namespace: kube-system 54 | spec: 55 | containers: 56 | - command: 57 | - kube-apiserver 58 | - --authorization-mode=Node,RBAC 59 | 60 | - --basic-auth-file=/tmp/users/user-details.csv 61 | ``` 62 | 63 | - Create the necessary roles and role bindings for these users: 64 | ```yaml 65 | --- 66 | kind: Role 67 | apiVersion: rbac.authorization.k8s.io/v1 68 | metadata: 69 | namespace: default 70 | name: pod-reader 71 | rules: 72 | - apiGroups: [""] # "" indicates the core API group 73 | resources: ["pods"] 74 | verbs: ["get", "watch", "list"] 75 | 76 | --- 77 | # This role binding allows "jane" to read pods in the "default" namespace. 
78 | kind: RoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | metadata: 81 | name: read-pods 82 | namespace: default 83 | subjects: 84 | - kind: User 85 | name: user1 # Name is case sensitive 86 | apiGroup: rbac.authorization.k8s.io 87 | roleRef: 88 | kind: Role #this must be Role or ClusterRole 89 | name: pod-reader # this must match the name of the Role or ClusterRole you wish to bind to 90 | apiGroup: rbac.authorization.k8s.io 91 | ``` 92 | 93 | - Once created, you may authenticate into the kube-api server using the users credentials 94 | ```bash 95 | curl -v -k https://localhost:6443/api/v1/pods -u "user1:password123" 96 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/admin.crt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/codeaprendiz/learn_kubernetes/55e8a46fdea0eff5113dc13354e482d584e4c661/local-mac/security/task-043-tls-in-k8s-certificate-creation/admin.crt -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/admin.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIICczCCAVsCAQAwLjETMBEGA1UEAwwKa3ViZS1hZG1pbjEXMBUGA1UECgwOc3lz 3 | dGVtOm1hc3RlcnMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9Lkhr 4 | Gpzn4ESScLAyypoNiIgFqoSJkFYkcu9AFKSao4gQLlcBJvfIiq5vHRvAHNlQ2tJw 5 | KQrT6mTkQ/07I50BkPLZolfTP0w9/DROheDg3fWKQbIzrrYxKXe9d07zbRukZnzj 6 | d3IxCZnS95fAAvq00afV8pDkOOMWzKqx41+31d6oFDZjLMl4Lduayc1wLXcxxLaQ 7 | AQnkZWN4c2UQ5nZpGFHCU6cnukssF8c5irtK4IqHskfK48Dd4jM5cTI7OTxNwJxS 8 | IZaGBtgJ4LU59iNytC+F+ovimMvkPUAgkhOjji0LEeTjEIRLzJrhbX4sjtN21kRz 9 | FRIMRyjCndiI+ORFAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAKx4cUD2y/b6E 10 | f6xnvJAGP3dS3gIq+O4SN5t+O98eRMGiEcxJcCguI/6rGO6u4j7a5U1RFlEZu0Cu 11 | 
qYMt0QTS7/5Wc5ifRnlS7uRbpXi/DqQG74MIrWIgIGc8w6XAJb8Ybq9GORKQVSVE 12 | NTEZCRgerQyFTWZ3Tcol2J1Ag7haaTY0ZsaC2upNtarVId4MCE0mlv9d2nCEbHMK 13 | 8EreXxFonb1nADyTU5fLjFAYh78+mQrhzRb549VdFk2PFzJcYVKzyaleG5r4YpXK 14 | +2YPs0P2c8RWSwhrMplNeuTx2M+iBfMDEGqU+RBaj3y8wU9atEJ2cHqywWdNBhZS 15 | fF/nsUUKXQ== 16 | -----END CERTIFICATE REQUEST----- 17 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/admin.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAvS5Iaxqc5+BEknCwMsqaDYiIBaqEiZBWJHLvQBSkmqOIEC5X 3 | ASb3yIqubx0bwBzZUNrScCkK0+pk5EP9OyOdAZDy2aJX0z9MPfw0ToXg4N31ikGy 4 | M662MSl3vXdO820bpGZ843dyMQmZ0veXwAL6tNGn1fKQ5DjjFsyqseNft9XeqBQ2 5 | YyzJeC3bmsnNcC13McS2kAEJ5GVjeHNlEOZ2aRhRwlOnJ7pLLBfHOYq7SuCKh7JH 6 | yuPA3eIzOXEyOzk8TcCcUiGWhgbYCeC1OfYjcrQvhfqL4pjL5D1AIJITo44tCxHk 7 | 4xCES8ya4W1+LI7TdtZEcxUSDEcowp3YiPjkRQIDAQABAoIBABXWWtuqJcSKWcAB 8 | rAUg5+l6AiGNTmFGvPrWHZHrEBkdmV4syzRB2EZY48vFs2dYrSktKS9zPWG648yJ 9 | oJiMAtqkbbP7jmG/I8Df9PW/dHbgiDBeZwCaQUOpgr3nIMDurksq3W2wYwkQD6aP 10 | l/+9GE15CgOjTWBaxLPkCiwgj/L7VTnh14uaueghe9Q2lEPDR3z0HoFxxxwgWCR+ 11 | vLcJ2u9HmonQqg62a3+Hb9r3uyEo+NIgwILipLk8L1b0svhPyfSaMk4iMHxjegxY 12 | WPaKjRzSA4Ki7tt7HsuLOm0RaflNWTikTIECuPoFOIZgJGgHQPUouaEziRqsdQnU 13 | Xe2NuX0CgYEA3uHTk+WD3KtQUB83re+kO7LlKfbS0o8x0QyXBcyNpzcGYimzJc3r 14 | RAuQYIOrisVml2Fa5ok6C2EC1pMakbKd39jBRpwET/5HDwmSWsw/5CHri+PB1bjA 15 | H9oX53Kb82FQKdKowwuqOhuKm27V/QBCzpq7Yk5QR6vDiC/Qw0Oyv6cCgYEA2Up/ 16 | Qhlqm7DD0+khjgvE/EZFqbcv4672t+vqaJ1LCZCrK3qanxVeMq5SKGWvVh8Sq60b 17 | XCAFSxlIyb7NrcxuNaEV+hhef4f8wAoj36B0X1l58yttjZNCRwnXLUlcQP3f2hI4 18 | s7/OLd+CPdzCYyaw5Ys0RUN9aVm6Jz9FJ+KkWjMCgYAU1yjhe2SJDbvEwiaCrRW+ 19 | P0wOUHYGGj2ePn8gm6/jItIF+8A0mLyW8D3fyHAR2cNuBDxrsgkrZzbMuZSSaxE4 20 | DRyFbbE5iZ07WrnEo7SVsFZGDlXhlhTg5ZO2oJDUgRTt1DTTiBOyu0CKqCJ6NbwJ 21 | dY5v8sKu8PCnm1D58SqVoQKBgB8X2/H3dRvmRX9zYsvSv1NiMuHARugLx142Mgra 
22 | RzyNKRFFsA6Hnj0eWL9THa3zMFbEpJ+gLWG7soC7/u6ZzEN8U4rAJfWV3gMU8GE1 23 | srFaOmsjkb+qjNdhfxsJ3lumeHvaE6cVEUnbK+XySxapGLMgIlCvAakrn8mx8hFt 24 | CxulAoGAI7E/6qyVGpS+zVL3qkcVs+Ta15jDgr+TgnNY3v1EHMtlOnhtEFFDoAsg 25 | 6iW7YUkVZRTbVM6SMOFQUdu3QpFg8FcBn+I4I1Rdoflv1U+SY37etpf79IOKUl99 26 | Y7U/G6tBxj8Pgr6Ahjs8vq6BrwVuokiJn7otw/I+5rBOqznKNH0= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIICrDCCAZQCCQDV4/l7B1QkuDANBgkqhkiG9w0BAQUFADAYMRYwFAYDVQQDDA1L 3 | VUJFUk5FVEVTLUNBMB4XDTIxMDEyNjE4NTIzOFoXDTIxMDIyNTE4NTIzOFowGDEW 4 | MBQGA1UEAwwNS1VCRVJORVRFUy1DQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC 5 | AQoCggEBANMY3SlDreMMyy2kIpTYNozNlBf0sWBgy9yBbkJ3E0mF6dQ01rlORsJg 6 | vc2kuLMZcOGCZRji+vsXEFLAf+MYnF8nVnSB1mTwbemlcgwddmaHTYIE3oUQq0Xo 7 | Kn5e3L5lKjlLtU1WbIQceOMlj8/ttCQf9qkydOPxt/B7o7avZhWZWG48pWfoiRG6 8 | b4wqVNZyTreffIZTHmcOdnD/2p9ZoYwseO5hQEyQrY/fGrLEZYJdivM3MaloYCi4 9 | kau7Ja1YvlyqY5LHWaS1g3m/5J3k+FRSY7BJQuUNhOY3G2CIWVRsiqxhLrVcNVh9 10 | l/nfUCvptbFZSYwmB53dBsMkzsPYZQMCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEA 11 | BEeTFBQilnAa/IFcJgptRfwb42o3ncsMY+uihpPr6nN9EiTxXZKXa5gK3zOiXemg 12 | Aik4bWljRLTdBuUXGFz0JyVx9h5GDhLob9Vz10+SVQoKNHW9njlvIsjVZu63Cwf3 13 | 0d6TvFnpEPDhs1eUR/Yp59e2lPy7ZEIkSSEE+Yryem/ZONzUfp0gENUAsnb+yyQW 14 | qMBIIDqgIQUXuVNG/kYPIP8lk4/wrbPMjqzcE0G6cR6ribLjn+fhHycCsLRoTiQJ 15 | HMaXIlU6ZV9bd3qrUM9MaJlTpt7eTO48q6FUwgKWFSfabSllWi2w/moR3SN0//AS 16 | IvnDsBrYZ0pEKbXd9xYrGw== 17 | -----END CERTIFICATE----- 18 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/ca.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | 
MIICXTCCAUUCAQAwGDEWMBQGA1UEAwwNS1VCRVJORVRFUy1DQTCCASIwDQYJKoZI 3 | hvcNAQEBBQADggEPADCCAQoCggEBANMY3SlDreMMyy2kIpTYNozNlBf0sWBgy9yB 4 | bkJ3E0mF6dQ01rlORsJgvc2kuLMZcOGCZRji+vsXEFLAf+MYnF8nVnSB1mTwbeml 5 | cgwddmaHTYIE3oUQq0XoKn5e3L5lKjlLtU1WbIQceOMlj8/ttCQf9qkydOPxt/B7 6 | o7avZhWZWG48pWfoiRG6b4wqVNZyTreffIZTHmcOdnD/2p9ZoYwseO5hQEyQrY/f 7 | GrLEZYJdivM3MaloYCi4kau7Ja1YvlyqY5LHWaS1g3m/5J3k+FRSY7BJQuUNhOY3 8 | G2CIWVRsiqxhLrVcNVh9l/nfUCvptbFZSYwmB53dBsMkzsPYZQMCAwEAAaAAMA0G 9 | CSqGSIb3DQEBCwUAA4IBAQCRYRRB5O0MLJWH3MNDbmdP5JJBVKPJnh1oK9nyiCH4 10 | P/d+xk7qtBgDZCW5H9jEg3vvr1xurTDoiwFku8edsACdde+qOpuYsee1BCvIrRmB 11 | csXu3Fatv5NK0jdbBVTF6bYaFXfpgnJ+NcY2JBjLXjbeL80SHLMkwev9Sn+Qse6V 12 | mdmGc8jRYmRa+Ap8Ic/9QeOtx4gTeMbNf0eEH/AUc+8eGlX/OK7k1wzjNQBplOOt 13 | sLN4VJft6s8Fi9ty8WLYQotWonQL60G79APFBJ0bcHWMg9sQyEg7YAfL4B7fzxoE 14 | B5pY9QiYneg09PF1+Gk1K2lk7qKB8KEA+9XXCD9AeGSM 15 | -----END CERTIFICATE REQUEST----- 16 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEA0xjdKUOt4wzLLaQilNg2jM2UF/SxYGDL3IFuQncTSYXp1DTW 3 | uU5GwmC9zaS4sxlw4YJlGOL6+xcQUsB/4xicXydWdIHWZPBt6aVyDB12ZodNggTe 4 | hRCrRegqfl7cvmUqOUu1TVZshBx44yWPz+20JB/2qTJ04/G38Hujtq9mFZlYbjyl 5 | Z+iJEbpvjCpU1nJOt598hlMeZw52cP/an1mhjCx47mFATJCtj98assRlgl2K8zcx 6 | qWhgKLiRq7slrVi+XKpjksdZpLWDeb/kneT4VFJjsElC5Q2E5jcbYIhZVGyKrGEu 7 | tVw1WH2X+d9QK+m1sVlJjCYHnd0GwyTOw9hlAwIDAQABAoIBAEOjeOHK7MGOjN+f 8 | 8vCsYrY0/Nm8LsWJ3mhWFIU5cDWjseurzo1rlmKb6tUkjYXdhfzL8ibKO88Occ2C 9 | HrgeX6Rhj2MfYxnOq/ksaOGW4yQxAFXDx6TtVWO94zfAn4xmD+Rx7LLOGVEvalCl 10 | 7XGbYZgdocRPBYMwJVbp1ugnRVdvyLExszHd75/TenDS1Rw6rWlY/nmcX5yigXZS 11 | Y3TE/NbH/tVaGhTNr8Z9SBaxNIxvUCNjP9emux7nPmri7PxKX02xP2byXzTPWKWU 12 | 3L/JiMYV5TpmxcfWOgtqljzqNSE7NIAAnJevXF6JoqBywpFNFNIlm36x3mNEDtlw 13 | 
mKd1FqECgYEA+n+DT0fdTY2LHWGVYl0h/9CpwjBOipoKSMQtyXDFDLRyViFlvbK5 14 | Kdby/g1P3id4+JW3evZPMisTsCZZvstug7NvdvzyEED9EJc9Rv66vRWG4tjqUnE5 15 | Hv2XIpS7L93XGEUAxXUFowbGX+uVu2fkMK3MR9lnlmP9R8SV4LLe67ECgYEA17vP 16 | MLQzP+6yZV0S5+2InGNjmIj3VcwKI39mBT63SC0KyNOUH+18zld3YB/G097ZvTVP 17 | +53qXmfr11cDggeCACSobE8BgHROSJIOI2kheK4/5lDnS0S2DlUeGtCyYK7wXv49 18 | VIj8ORT3tSjPea0q6Hc5/WpcdjXoDxiB9R/QbPMCgYBLNspYnb1nMd7ZfZl7JXe1 19 | 9hNiyVpr54C7BJyy5XdlLzE9h12jLezHEVF40spua++4xEuJUjfJWoHY3EsKLM8M 20 | IG6RjbWpB7p0ptSxYn7zk2ki5/rDsldyZLZincHux290TBPFNjPxFXGvD2+wC6vK 21 | qgSydibSU0eTqwVNqKeioQKBgCndanG7Jo7rlXcSPs7/S5l9uCArxEPEGtuUp4mx 22 | CARbqVIbrQ0dGzE3zBS9Vepbvsl3BRSBKrAzXpMlcarUJig89Oo/0rzI1UBEB7Zb 23 | eoFe30uku53hsarKXdM6WMNmb4Mft2smQjwZk5B2YPzt2/vM11YUQ+k0wNndGasm 24 | B159AoGAcgzV6P2RvIyZjp0nPzUZeqeV7yTM/5q5oAuD5t36MbCDc1XUybdTByaw 25 | mMwneNL5lvaJSs7tSeZjcs7P7CHM3qfu2weWQXhPVvAawaSccL3pyllBcpNt3/X8 26 | haca5luii5M6GzusfoCgdWuRnghs/hJljCwnc5gaVyZssD46Ho8= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/jane.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIICVDCCATwCAQAwDzENMAsGA1UEAwwEamFuZTCCASIwDQYJKoZIhvcNAQEBBQAD 3 | ggEPADCCAQoCggEBAMvQgumfnKJzBaRh3Yz85VZ6RY7yGwbAe01I8O7nDHpHnovP 4 | S4Ys5KsqlYDqSqVdk3F788uvRaXjxOw3CvzwOamL/w+z8w/3xiDedorRGRfE47zt 5 | 9/fqxT5cpEtNZDsZ3QTkWrUvSVmaqYs2LPyxHrB+4olGCRHionwj4grAVtJ66FJt 6 | kMCCnX4GjqONUItvvMHjMHwsOxXSshG/XmQvQeG7yYaPlhcU9XuWignJ4e9Y2L55 7 | 0WIH1cbQDWU8tLO4iVTlnFtXgfmBTv8TMY25AxnRpKanuX1coQsrVvOKp9HueUQj 8 | VqWWXfNTMyoN/RQ7/QbpHuhDQEhZ6m4oX30nPCkCAwEAAaAAMA0GCSqGSIb3DQEB 9 | CwUAA4IBAQAQ8u8flxmkLMhSo5Aln8cFUmAdjgeS3andFeYrf8tlCIiaAE02k7cb 10 | qyMGM2Fz2eigmflJ42kLNHST0zTo12TpG15LUuW9c1FbNHagEag5CPVjw94RYgU7 11 | pho2molU34bwJJ90EZQP4oD5SJM7UdfhtuuWPDu8H2drfUuHaKj+RxsVLW/KkZsX 12 | 
7eYONvNqMU1PkcIBEh0N2HuFmieXDJDWx+vhVbT5xsp5R7IIhvxPLovCVNbWjZYK 13 | t6POzx7V/EzBm6I9fP8G9OJncPa9ZevauJfFywqxIPHYovD2C19xo4KSJLuTWewe 14 | QsTIjWZGifuzT5T7SYk9B1V3oyTp95sI 15 | -----END CERTIFICATE REQUEST----- 16 | -------------------------------------------------------------------------------- /local-mac/security/task-043-tls-in-k8s-certificate-creation/jane.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEAy9CC6Z+conMFpGHdjPzlVnpFjvIbBsB7TUjw7ucMekeei89L 3 | hizkqyqVgOpKpV2TcXvzy69FpePE7DcK/PA5qYv/D7PzD/fGIN52itEZF8TjvO33 4 | 9+rFPlykS01kOxndBORatS9JWZqpizYs/LEesH7iiUYJEeKifCPiCsBW0nroUm2Q 5 | wIKdfgaOo41Qi2+8weMwfCw7FdKyEb9eZC9B4bvJho+WFxT1e5aKCcnh71jYvnnR 6 | YgfVxtANZTy0s7iJVOWcW1eB+YFO/xMxjbkDGdGkpqe5fVyhCytW84qn0e55RCNW 7 | pZZd81MzKg39FDv9Buke6ENASFnqbihffSc8KQIDAQABAoIBAQCc1gwXONKwkzBR 8 | SVnSRz03aYmjMRyo+m8njqyQOk14vyn6zf6A9oTUTkWTWTctvy9ZkZeOMoa22+nK 9 | F6Ul9Ll6hZc1fzTYIcNdwbKYqcTUbJcO6FP7e7ZigamPpLMvY3yXah1+9S9Ne11i 10 | GcNOhAFosYsjNTh/gdWlD3u+6DknZzq4A8M0+9KDL2SAyF/IVF3S/XUn0hGo96qX 11 | 0OqCZz5HowUL1IccgSJOOPXR0wcIN6Tn9VimGOfWptqdfZqYNZm7WSBLEv3XTOQl 12 | ogVaNVcfOh62cgAQK0txn3qZ8db+22JxBAjYDhWpBXwWcSkpNsUfn5sUJ3z1712C 13 | 8FT/Yck9AoGBAPJ+kCLZIy9a/ZhQ3ZIytYcsWWTSJ5yLfV6hL2PIXvQytLzC9Zhr 14 | YTLeI6ur6KBml96M5nderDo8N/PaKwsIl7VZGUaZIxnNjgsSb+6oaNjpXj/3I6Pb 15 | OlKcxkigGLHcsconxucJ8dj0FtqFzzu2ANnBQla29PTxEZZ1VS03x2wjAoGBANcq 16 | dUiziLZ5KLXqLB+0kjKVD0ZOA9xJn/Gg0dYiXFqAd3rncSSGLBqJ8HmW03MICYQl 17 | zCQ7r6PZLlcI++0FYLUySYwjrq8TPY8XOLNuCHyQS0f4tOkJpZ2R/WCs+knlKepn 18 | gy8DKGLzOswlk45VsCNtbNvHKKIRABNZkFEztcVDAoGBAIlK8O5aA9q+6I+BUUGB 19 | 5lcG3AICHh06uJuIJN2Yx3H4921b8FMAkMKNFKMQrJ+MFnYRTjbNajN6fq9YmUAg 20 | +Evbizz2XPIx+Db6lFcKEHqm/g63LP2LlKTLUFQ0GGczLZHoHWwXrE8KO5ozYlZL 21 | QIYJLjcMVPoNNsj522TDjOwDAoGAMM9l6ccohVR44sOrnMmmvKjJ3UYHZCqjiPnV 22 | gEEtOC6abBwis4loaeDO15Zag2bYWc0FQVb04jN7aq0UEVG9/XzZ6FuT3g18UoCP 23 | 
EU0tyawmF76Ys6DHnDYpB4xV/vtohc9TmA+cUs4gkX0Yuxo/KdpB7p2AXzuCC+Gu 24 | dm1Hl+cCgYAd/1FLZq4p+ojTxCtfzTY31GOwHeeYd3jgu2Odr1BF35C+zfmbczIE 25 | XQV8dgRxSI7dq5AYwhEyLuIAbdwQv3K3Y7PJnMf7xjqftgoRupW8CLDqzfpeM6Cx 26 | 3ZhCDdk5MKGpmJx+JKR2srHY29fkr/r+UHZQFW+vX2HzOn8kMCRQyg== 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /local-mac/security/task-045-api-groups/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | ### To get the version of the cluster 3 | 4 | All resources in kubernetes are grouped into different API groups. 5 | Each of those resources have associated set of actions on them called VERBs 6 | 7 | > Note: you will need to pass authentication in all the API calls 8 | 9 | ```bash 10 | curl https://kube-master:6443/version 11 | ``` 12 | 13 | 14 | ### To get the pods 15 | 16 | ```bash 17 | curl https://kube-master:6443/api/v2/pods 18 | ``` 19 | 20 | Similarly other API's which are used are 21 | 22 | `/metrics` api is used to monitor the health of the cluster 23 | 24 | `/logs` is used to integrating with the third party logging applications 25 | 26 | `/api` core group where all the core functionality exists 27 | 28 | 29 | ### To get all the api-groups 30 | 31 | ```bash 32 | curl http://localhost:6443 -k 33 | ``` 34 | 35 | alternatively, you can run 36 | 37 | ```bash 38 | kubectl proxy 39 | Starting to server on 127.0.0.1:8001 40 | 41 | curl http://localhost:8001 -k 42 | ``` 43 | 44 | 45 | ### Authorization 46 | 47 | When we share our cluster between different entities (dev team, qa team, admins) by logically partioning it 48 | using namespaces, we want to restrict access to their namespaces alone. 
That is what authorization helps you achieve within a cluster. 49 | 50 | 51 | - You can set the authorization mode using the flag below 52 | - When you specify multiple modes, it will authorize in the order in which they are specified 53 | 54 | ```bash 55 | ExecStart=/usr/local/bin/kube-apiserver \\ 56 | . 57 | --authorization-mode=Node,RBAC,Webhook \\ 58 | ``` -------------------------------------------------------------------------------- /local-mac/security/task-047-image-security/ReadMe.md: -------------------------------------------------------------------------------- 1 | ### Images 2 | 3 | ```yaml 4 | image: docker.io/nginx/nginx 5 | ``` 6 | Here 7 | - docker.io - Registry by default 8 | - nginx - user account 9 | - nginx - image repository 10 | 11 | Examples of other registries 12 | - gcr.io/ 13 | 14 | 15 | ### How the images are downloaded from a private registry 16 | 17 | - Create a secret 18 | 19 | ```bash 20 | $ kubectl create secret docker-registry regcred \ 21 | --docker-server=private-registry.io \ 22 | --docker-username=registry-user \ 23 | --docker-password=registry-password \ 24 | --docker-email=registry-user@org.com 25 | ``` 26 | 27 | 28 | 29 | ```yaml 30 | apiVersion: v1 31 | kind: Pod 32 | metadata: 33 | name: nginx-pod 34 | spec: 35 | containers: 36 | - name: nginx 37 | image: private-registry.io/apps/internal-app 38 | imagePullSecrets: 39 | - name: regcred 40 | ``` 41 | 42 | 43 | 44 | ### We have an application running on our cluster. Let us explore it first. What image is the application using? 45 | 46 | ```bash 47 | controlplane $ kubectl describe deployment web | egrep -i image 48 | Image: nginx:alpine 49 | ``` 50 | 51 | 52 | ### We decided to use a modified version of the application from an internal private registry. Update the image of the deployment to use a new image from myprivateregistry.com:5000 53 | 54 | - The registry is located at myprivateregistry.com:5000. Don't worry about the credentials for now. We will configure them in the upcoming steps. 
55 | 56 | ```bash 57 | controlplane $ cat dep.yaml | egrep -i image | egrep -v "f:|If" 58 | - image: myprivateregistry.com:5000/nginx:alpine 59 | ``` 60 | 61 | ### Are the new PODs created with the new images successfully running? 62 | 63 | ```bash 64 | controlplane $ kubectl get pods 65 | NAME READY STATUS RESTARTS AGE 66 | web-85fcf65896-9xjkh 0/1 ImagePullBackOff 0 86s 67 | ``` 68 | 69 | 70 | 71 | ### Create a secret object with the credentials required to access the registry 72 | 73 | Name: private-reg-cred 74 | 75 | Username: dock_user 76 | 77 | Password: dock_password 78 | 79 | Server: myprivateregistry.com:5000 80 | 81 | Email: dock_user@myprivateregistry.com 82 | 83 | ```bash 84 | $ kubectl create secret docker-registry private-reg-cred --docker-username=dock_user --docker-password=dock_password --docker-server=myprivateregistry.com:5000 --docker-email=dock_user@myprivateregistry.com 85 | ``` 86 | 87 | 88 | ### Configure the deployment to use credentials from the new secret to pull images from the private registry 89 | 90 | ```bash 91 | controlplane $ cat dep.yaml | egrep -i imagePullSecrets: -A 2 -B 6 92 | - image: myprivateregistry.com:5000/nginx:alpine 93 | imagePullPolicy: IfNotPresent 94 | name: nginx 95 | resources: {} 96 | terminationMessagePath: /dev/termination-log 97 | terminationMessagePolicy: File 98 | imagePullSecrets: 99 | - name: private-reg-cred 100 | dnsPolicy: ClusterFirst 101 | ``` -------------------------------------------------------------------------------- /local-mac/security/task-048-security-contexts/ReadMe.md: -------------------------------------------------------------------------------- 1 | 2 | ### What is the user used to execute the sleep process within the 'ubuntu-sleeper' pod? 3 | 4 | - in the current(default) namespace 5 | 6 | ```bash 7 | controlplane $ kubectl exec ubuntu-sleeper -- whoami 8 | root 9 | ``` 10 | 11 | 12 | ### Edit the pod 'ubuntu-sleeper' to run the sleep process with user ID 1010. 
13 | 14 | [link](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) 15 | 16 | Note: Only make the necessary changes. Do not modify the name or image of the pod. 17 | 18 | Ensure that the security context field is not empty 19 | 20 | ```bash 21 | controlplane $ kubectl get pod ubuntu-sleeper -o yaml > pod.yaml 22 | 23 | ### the security context field will be by default empty 24 | 25 | controlplane $ cat pod.yaml | grep security -A 2 26 | securityContext: 27 | runAsUser: 1010 28 | ``` 29 | 30 | 31 | ### A Pod definition file named 'multi-pod.yaml' is given. With what user are the processes in the 'web' container started? 32 | 33 | The pod is created with multiple containers and security contexts defined at the POD and Container level 34 | 35 | 36 | ```bash 37 | controlplane $ cat multi-pod.yaml | egrep -i security -A 4 -B 4 38 | kind: Pod 39 | metadata: 40 | name: multi-pod 41 | spec: 42 | securityContext: 43 | runAsUser: 1001 44 | containers: 45 | - image: ubuntu 46 | name: web 47 | command: ["sleep", "5000"] 48 | securityContext: 49 | runAsUser: 1002 50 | 51 | - image: ubuntu 52 | name: sidecar 53 | ``` 54 | 55 | 56 | ### With what user are the processes in the 'sidecar' container started? 57 | 58 | The pod is created with multiple containers and security contexts defined at the POD and Container level 59 | 60 | ```bash 61 | ### 1001 as it is done at the spec level 62 | ``` 63 | 64 | 65 | ### Try to run the below command in the pod 'ubuntu-sleeper' to set the date. Are you allowed to set date on the POD? 66 | 67 | ```bash 68 | controlplane $ kubectl exec -it ubuntu-sleeper -- date -s '19 APR 2012 11:14:00' 69 | date: cannot set date: Operation not permitted 70 | Thu Apr 19 11:14:00 UTC 2012 71 | 72 | 73 | 74 | command terminated with exit code 1 75 | ``` 76 | 77 | ### Update pod 'ubuntu-sleeper' to run as Root user and with the 'SYS_TIME' capability. 78 | 79 | Note: Only make the necessary changes. Do not modify the name of the pod. 
80 | 81 | Pod Name: ubuntu-sleeper 82 | 83 | Image Name: ubuntu 84 | 85 | SecurityContext: Capability SYS_TIME 86 | 87 | ```bash 88 | controlplane $ kubectl get pod ubuntu-sleeper -o yaml > pod.yaml 89 | 90 | controlplane $ cat pod.yaml | egrep -i "securityContext" -A 4 -B 4 91 | -- 92 | - command: 93 | - sleep 94 | - "4800" 95 | image: ubuntu 96 | securityContext: 97 | capabilities: 98 | add: ["NET_ADMIN", "SYS_TIME"] 99 | imagePullPolicy: Always 100 | name: ubuntu 101 | ``` -------------------------------------------------------------------------------- /task-000-commands/doclinks.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | - [DNS-specification](https://github.com/kubernetes/dns/blob/master/docs/specification.md) 4 | - [DNS for Services and Pods](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/) --------------------------------------------------------------------------------