├── The Kubernetes Learning Resources List.xlsx ├── storage ├── pvc.yaml ├── pv.yaml ├── vk-pod-with-volume-pvc.yaml └── vk-pod-with-volume-hostpath.yaml ├── base ├── vk-pod.yaml ├── vk-svc.yaml └── vk-deploy.yaml ├── appmanage ├── limitrange.yaml ├── job.yaml ├── paralleljob.yaml ├── resources.yaml ├── cronjob.yaml ├── liveness.yaml ├── svc-ep.yaml ├── vk-pod-with-configmap.yaml └── vk-pod-with-secret.yaml ├── networking ├── ingress-service.yaml ├── ingress.yaml ├── ingress-controller.yaml └── ingress-controller-mandatory.yaml ├── scheduling ├── daemonset.yaml ├── nodeAffinity.yaml ├── podAffinity.yaml ├── kube-scheduler.yaml └── my-scheduler.yaml ├── security ├── developer-role.yaml ├── node-admin-role.yaml ├── network-policy.yaml ├── storage-admin-role.yaml └── jane-csr.yaml ├── .bashrc ├── mock ├── 00-link.md ├── 03-CKA-Exam-Prep.txt ├── 01-kodekloud-cka-mock.md ├── 02-cka-exam.md ├── 11-kodekloud-ckad-mock.md └── 04-cka-collection.md ├── voidking ├── dev-install-k8s-the-hard-way.md ├── dev-kubeadm-upgrade.md ├── dev-k8s-etcd-backup-restore.md ├── dev-curl-k8s-api-server.md ├── dev-k8s-rbac-auth.md ├── dev-kubeadm-install-k8s.md ├── dev-ssl-tls.md ├── dev-jsonpath.md └── dev-kubectl.md ├── arch.md └── README.md /The Kubernetes Learning Resources List.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/voidking/k8s-tool/HEAD/The Kubernetes Learning Resources List.xlsx -------------------------------------------------------------------------------- /storage/pvc.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: claim-log-1 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | resources: 9 | requests: 10 | storage: 50Mi -------------------------------------------------------------------------------- /storage/pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: pv-log 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | capacity: 9 | storage: 100Mi 10 | hostPath: 11 | path: /pv/log -------------------------------------------------------------------------------- /base/vk-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: vk-pod 5 | labels: 6 | app: vk-pod 7 | spec: 8 | containers: 9 | - name: vk-busybox 10 | image: busybox 11 | command: ['sh', '-c', 'echo Hello Kubernetes! 
&& sleep 3600'] -------------------------------------------------------------------------------- /appmanage/limitrange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: LimitRange 3 | metadata: 4 | name: mem-limit-range 5 | namespace: voidking 6 | spec: 7 | limits: 8 | - default: 9 | memory: 512Mi 10 | defaultRequest: 11 | memory: 256Mi 12 | type: Container -------------------------------------------------------------------------------- /appmanage/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: print-date 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: busybox 10 | image: busybox:1.28 11 | command: ["sh","-c","date"] 12 | restartPolicy: Never -------------------------------------------------------------------------------- /appmanage/paralleljob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: multi-print-date 5 | spec: 6 | completions: 20 7 | parallelism: 5 8 | template: 9 | spec: 10 | containers: 11 | - name: busybox 12 | image: busybox:1.28 13 | command: ["sh","-c","sleep 10 && date"] 14 | restartPolicy: Never -------------------------------------------------------------------------------- /appmanage/resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: vk-pod 5 | namespace: voidking 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx:1.16 10 | resources: 11 | requests: 12 | cpu: "250m" 13 | memory: "64Mi" 14 | limits: 15 | cpu: "500m" 16 | memory: "128Mi" -------------------------------------------------------------------------------- /base/vk-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: vk-svc 7 | name: vk-svc 8 | spec: 9 | ports: 10 | - name: 5678-80 11 | port: 5678 12 | protocol: TCP 13 | targetPort: 80 14 | selector: 15 | app: vk-deploy 16 | type: ClusterIP 17 | status: 18 | loadBalancer: {} -------------------------------------------------------------------------------- /appmanage/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: print-date 5 | spec: 6 | schedule: "*/3 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: busybox 13 | image: busybox:1.28 14 | command: ["sh","-c","date"] 15 | restartPolicy: OnFailure -------------------------------------------------------------------------------- /networking/ingress-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: ingress 6 | namespace: ingress-space 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 80 11 | targetPort: 80 12 | protocol: TCP 13 | nodePort: 30080 14 | name: http 15 | - port: 443 16 | targetPort: 443 17 | protocol: TCP 18 | name: https 19 | selector: 20 | name: nginx-ingress -------------------------------------------------------------------------------- /storage/vk-pod-with-volume-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | 
name: webapp 5 | spec: 6 | containers: 7 | - name: event-simulator 8 | image: kodekloud/event-simulator 9 | env: 10 | - name: LOG_HANDLERS 11 | value: file 12 | volumeMounts: 13 | - mountPath: /log 14 | name: log-volume 15 | 16 | volumes: 17 | - name: log-volume 18 | persistentVolumeClaim: 19 | claimName: claim-log-1 -------------------------------------------------------------------------------- /scheduling/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd-logging 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: fluentd 12 | template: 13 | metadata: 14 | labels: 15 | name: fluentd 16 | spec: 17 | containers: 18 | - name: fluentd 19 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 -------------------------------------------------------------------------------- /appmanage/liveness.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: liveness 7 | name: liveness 8 | spec: 9 | containers: 10 | - image: busybox:1.28 11 | name: liveness 12 | resources: {} 13 | command: ["sh","-c","touch /tmp/start.log && sleep 3600"] 14 | livenessProbe: 15 | exec: 16 | command: ["cat","/tmp/start.log"] 17 | dnsPolicy: ClusterFirst 18 | restartPolicy: Always 19 | status: {} -------------------------------------------------------------------------------- /appmanage/svc-ep.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: nginx 7 | name: nginx 8 | spec: 9 | ports: 10 | - port: 80 11 | protocol: TCP 12 | targetPort: 80 13 | # 不要name,不要name,不要name! 
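# (leave the port un-named on purpose: the hand-written Endpoints object below
#  also defines an un-named port, and for a selector-less Service like this one
#  the port names on the Service must match the names in its Endpoints)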
14 | type: ClusterIP 15 | status: 16 | loadBalancer: {} 17 | --- 18 | apiVersion: v1 19 | kind: Endpoints 20 | metadata: 21 | name: nginx 22 | subsets: 23 | - addresses: 24 | - ip: 10.28.0.181 25 | ports: 26 | - port: 80 -------------------------------------------------------------------------------- /base/vk-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | app: vk-deploy 7 | name: vk-deploy 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: vk-deploy 13 | strategy: {} 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | labels: 18 | app: vk-deploy 19 | spec: 20 | containers: 21 | - image: nginx 22 | name: nginx 23 | resources: {} 24 | status: {} -------------------------------------------------------------------------------- /storage/vk-pod-with-volume-hostpath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: webapp 5 | spec: 6 | containers: 7 | - name: event-simulator 8 | image: kodekloud/event-simulator 9 | env: 10 | - name: LOG_HANDLERS 11 | value: file 12 | volumeMounts: 13 | - mountPath: /log 14 | name: log-volume 15 | 16 | volumes: 17 | - name: log-volume 18 | hostPath: 19 | # directory location on host 20 | path: /var/log/webapp 21 | # this field is optional 22 | type: Directory -------------------------------------------------------------------------------- /security/developer-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | namespace: default 6 | name: developer 7 | rules: 8 | - apiGroups: [""] 9 | resources: ["pods"] 10 | verbs: ["list", "create"] 11 | 12 | --- 13 | kind: RoleBinding 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | metadata: 16 | name: dev-user-binding 17 | subjects: 18 | - kind: User 19 | name: jane 20 | apiGroup: rbac.authorization.k8s.io 21 | roleRef: 22 | kind: Role 23 | name: developer 24 | apiGroup: rbac.authorization.k8s.iomaster -------------------------------------------------------------------------------- /networking/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: ingress-wear-watch 6 | namespace: app-space 7 | annotations: 8 | nginx.ingress.kubernetes.io/rewrite-target: / 9 | nginx.ingress.kubernetes.io/ssl-redirect: "false" 10 | spec: 11 | rules: 12 | - http: 13 | paths: 14 | - path: /wear 15 | backend: 16 | serviceName: wear-service 17 | servicePort: 8080 18 | - path: /watch 19 | backend: 20 | serviceName: video-service 21 | servicePort: 8080 -------------------------------------------------------------------------------- /scheduling/nodeAffinity.yaml: -------------------------------------------------------------------------------- 1 | spec: 2 | affinity: 3 | nodeAffinity: 4 | requiredDuringSchedulingIgnoredDuringExecution: 5 | nodeSelectorTerms: 6 | - matchExpressions: 7 | - key: key1 8 | operator: In 9 | values: 10 | - value1 11 | - valuex 12 | preferredDuringSchedulingIgnoredDuringExecution: 13 | - weight: 1 14 | preference: 15 | matchExpressions: 16 | - key: key2 17 | operator: In 18 | values: 19 | - value2 20 | - valuey -------------------------------------------------------------------------------- 
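nodeAffinity.yaml above is only a bare `spec` fragment. A minimal sketch of how it could sit inside a complete Pod manifest — the pod name, image and the key1/value1 pairs are just illustrative placeholders carried over from the fragment:

```
apiVersion: v1
kind: Pod
metadata:
  name: with-node-affinity      # illustrative name
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: key1            # placeholder key/values from the fragment
            operator: In
            values: ["value1", "valuex"]
  containers:
  - name: busybox
    image: busybox:1.28
    command: ["sh", "-c", "sleep 3600"]
```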
/security/node-admin-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: node-admin 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["nodes"] 9 | verbs: ["get", "watch", "list", "create", "delete"] 10 | 11 | --- 12 | kind: ClusterRoleBinding 13 | apiVersion: rbac.authorization.k8s.io/v1 14 | metadata: 15 | name: michelle-binding 16 | subjects: 17 | - kind: User 18 | name: michelle 19 | apiGroup: rbac.authorization.k8s.io 20 | roleRef: 21 | kind: ClusterRole 22 | name: node-admin 23 | apiGroup: rbac.authorization.k8s.iomaster -------------------------------------------------------------------------------- /security/network-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: internal-policy 5 | namespace: default 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | name: internal 10 | policyTypes: 11 | - Egress 12 | - Ingress 13 | ingress: 14 | - {} 15 | egress: 16 | - to: 17 | - podSelector: 18 | matchLabels: 19 | name: mysql 20 | ports: 21 | - protocol: TCP 22 | port: 3306 23 | 24 | - to: 25 | - podSelector: 26 | matchLabels: 27 | name: payroll 28 | ports: 29 | - protocol: TCP 30 | port: 8080 -------------------------------------------------------------------------------- /.bashrc: -------------------------------------------------------------------------------- 1 | # .bashrc 2 | 3 | # Source global definitions 4 | if [ -f /etc/bashrc ]; then 5 | . /etc/bashrc 6 | fi 7 | 8 | # Uncomment the following line if you don't like systemctl's auto-paging feature: 9 | # export SYSTEMD_PAGER= 10 | 11 | # User specific aliases and functions 12 | export KUBECONFIG=/home/haojin/.kube/config 13 | # kubectl config set-context $(kubectl config current-context) --namespace=voidking 14 | kubectl config set-context --current --namespace=voidking 15 | alias k="kubectl" 16 | alias kg="kubectl get" 17 | alias kd="kubectl describe" 18 | 19 | # yum install -y bash-completion 20 | source /usr/share/bash-completion/bash_completion 21 | source <(kubectl completion bash) 22 | complete -F __start_kubectl k 23 | -------------------------------------------------------------------------------- /appmanage/vk-pod-with-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: vk-pod-configmap 5 | namespace: default 6 | data: 7 | color: red 8 | textmode: "true" 9 | --- 10 | apiVersion: v1 11 | kind: Pod 12 | metadata: 13 | name: vk-pod 14 | labels: 15 | app: vk-pod 16 | spec: 17 | containers: 18 | - name: vk-busybox 19 | image: busybox 20 | command: [ "/bin/sh", "-c", "env" ] 21 | env: 22 | # Define the environment variable 23 | - name: COLOR 24 | valueFrom: 25 | configMapKeyRef: 26 | # name of configmap 27 | name: vk-pod-configmap 28 | # Specify the key associated with the value 29 | key: color -------------------------------------------------------------------------------- /security/storage-admin-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: storage-admin 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["persistentvolumes"] 9 | verbs: ["get", "watch", "list", "create", "delete"] 10 | - apiGroups: ["storage.k8s.io"] 11 | 
resources: ["storageclasses"] 12 | verbs: ["get", "watch", "list", "create", "delete"] 13 | 14 | --- 15 | kind: ClusterRoleBinding 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: michelle-storage-admin 19 | subjects: 20 | - kind: User 21 | name: michelle 22 | apiGroup: rbac.authorization.k8s.io 23 | roleRef: 24 | kind: ClusterRole 25 | name: storage-admin 26 | apiGroup: rbac.authorization.k8s.iomaster -------------------------------------------------------------------------------- /appmanage/vk-pod-with-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | password: dmtwYXNzd29yZA== 4 | username: dm9pZGtpbmc= 5 | kind: Secret 6 | metadata: 7 | creationTimestamp: null 8 | name: vk-pod-secret 9 | --- 10 | apiVersion: v1 11 | kind: Pod 12 | metadata: 13 | name: vk-pod 14 | labels: 15 | app: vk-pod 16 | spec: 17 | containers: 18 | - name: vk-busybox 19 | image: busybox 20 | command: [ "/bin/sh", "-c", "env" ] 21 | env: 22 | # Define the environment variable 23 | - name: USERNAME 24 | valueFrom: 25 | secretKeyRef: 26 | # name of secret 27 | name: vk-pod-secret 28 | # Specify the key associated with the value 29 | key: username 30 | volumeMounts: 31 | - name: mysecret 32 | mountPath: /secret 33 | volumes: 34 | - name: mysecret 35 | secret: 36 | secretName: vk-pod-secret -------------------------------------------------------------------------------- /scheduling/podAffinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | run: pod1 7 | name: pod1 8 | spec: 9 | containers: 10 | - image: busybox:1.28 11 | name: pod1 12 | resources: {} 13 | dnsPolicy: ClusterFirst 14 | restartPolicy: Always 15 | status: {} 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | creationTimestamp: null 21 | labels: 22 | run: pod2 23 | name: pod2 24 | spec: 25 | affinity: 26 | podAffinity: 27 | requiredDuringSchedulingIgnoredDuringExecution: 28 | - labelSelector: 29 | matchExpressions: 30 | - key: run 31 | operator: In 32 | values: ["pod1"] 33 | topologyKey: "kubernetes.io/hostname" 34 | containers: 35 | - image: busybox:1.28 36 | name: pod2 37 | resources: {} 38 | dnsPolicy: ClusterFirst 39 | restartPolicy: Always 40 | status: {} 41 | -------------------------------------------------------------------------------- /networking/ingress-controller.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: ingress-controller 6 | namespace: ingress-space 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | name: nginx-ingress 12 | template: 13 | metadata: 14 | labels: 15 | name: nginx-ingress 16 | spec: 17 | serviceAccountName: ingress-serviceaccount 18 | containers: 19 | - name: nginx-ingress-controller 20 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0 21 | args: 22 | - /nginx-ingress-controller 23 | - --configmap=$(POD_NAMESPACE)/nginx-configuration 24 | - --default-backend-service=app-space/default-http-backend 25 | env: 26 | - name: POD_NAME 27 | valueFrom: 28 | fieldRef: 29 | fieldPath: metadata.name 30 | - name: POD_NAMESPACE 31 | valueFrom: 32 | fieldRef: 33 | fieldPath: metadata.namespace 34 | ports: 35 | - name: http 36 | containerPort: 80 37 | - name: https 38 | containerPort: 443 
-------------------------------------------------------------------------------- /scheduling/kube-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | component: kube-scheduler 7 | tier: control-plane 8 | name: kube-scheduler 9 | namespace: kube-system 10 | spec: 11 | containers: 12 | - command: 13 | - kube-scheduler 14 | - --authentication-kubeconfig=/etc/kubernetes/scheduler.conf 15 | - --authorization-kubeconfig=/etc/kubernetes/scheduler.conf 16 | - --bind-address=127.0.0.1 17 | - --kubeconfig=/etc/kubernetes/scheduler.conf 18 | - --leader-elect=true 19 | image: k8s.gcr.io/kube-scheduler:v1.16.0 20 | imagePullPolicy: IfNotPresent 21 | livenessProbe: 22 | failureThreshold: 8 23 | httpGet: 24 | host: 127.0.0.1 25 | path: /healthz 26 | port: 10251 27 | scheme: HTTP 28 | initialDelaySeconds: 15 29 | timeoutSeconds: 15 30 | name: kube-scheduler 31 | resources: 32 | requests: 33 | cpu: 100m 34 | volumeMounts: 35 | - mountPath: /etc/kubernetes/scheduler.conf 36 | name: kubeconfig 37 | readOnly: true 38 | hostNetwork: true 39 | priorityClassName: system-cluster-critical 40 | volumes: 41 | - hostPath: 42 | path: /etc/kubernetes/scheduler.conf 43 | type: FileOrCreate 44 | name: kubeconfig 45 | status: {} -------------------------------------------------------------------------------- /scheduling/my-scheduler.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | creationTimestamp: null 7 | labels: 8 | component: my-scheduler 9 | tier: control-plane 10 | name: my-scheduler 11 | namespace: kube-system 12 | spec: 13 | containers: 14 | - command: 15 | - kube-scheduler 16 | - --address=127.0.0.1 17 | - --kubeconfig=/etc/kubernetes/scheduler.conf 18 | - --leader-elect=false 19 | - --port=10282 20 | - --scheduler-name=my-scheduler 21 | - --secure-port=0 22 | image: k8s.gcr.io/kube-scheduler-amd64:v1.16.0 23 | imagePullPolicy: IfNotPresent 24 | livenessProbe: 25 | failureThreshold: 8 26 | httpGet: 27 | host: 127.0.0.1 28 | path: /healthz 29 | port: 10282 30 | scheme: HTTP 31 | initialDelaySeconds: 15 32 | timeoutSeconds: 15 33 | name: kube-scheduler 34 | resources: 35 | requests: 36 | cpu: 100m 37 | volumeMounts: 38 | - mountPath: /etc/kubernetes/scheduler.conf 39 | name: kubeconfig 40 | readOnly: true 41 | hostNetwork: true 42 | priorityClassName: system-cluster-critical 43 | volumes: 44 | - hostPath: 45 | path: /etc/kubernetes/scheduler.conf 46 | type: FileOrCreate 47 | name: kubeconfig 48 | status: {} -------------------------------------------------------------------------------- /mock/00-link.md: -------------------------------------------------------------------------------- 1 | [kubernetes学习:CKA考试题](https://www.cnblogs.com/haoprogrammer/p/11149661.html) 2 | 3 | [CKA认证考试真题解析](https://blog.csdn.net/u013352037/article/details/102611830) 4 | 5 | [CKA考试习题:安全管理--Network Policy、serviceaccount、clusterrole](https://blog.csdn.net/fly910905/article/details/103075863) 6 | 7 | [CKA考试知识总结-1](http://ljchen.net/2018/11/07/CKA%E8%80%83%E8%AF%95%E7%9F%A5%E8%AF%86%E6%80%BB%E7%BB%93/) 8 | 9 | [CKA考试知识总结-2](http://ljchen.net/2018/11/07/CKA%E8%80%83%E8%AF%95%E7%9F%A5%E8%AF%86%E6%80%BB%E7%BB%93-2/) 10 | 11 | [CKA考试知识总结-3](http://ljchen.net/2018/11/07/CKA%E8%80%83%E8%AF%95%E7%9F%A5%E8%AF%86%E6%80%BB%E7%BB%93-3/) 12 | 13 | [Introduction to 
Kubernetes](https://www.edx.org/course/introduction-to-kubernetes) 14 | 15 | [walidshaari/Kubernetes-Certified-Administrator](https://github.com/walidshaari/Kubernetes-Certified-Administrator) 16 | 17 | [dgkanatsios/CKAD-exercises](https://github.com/dgkanatsios/CKAD-exercises) 18 | 19 | [kelseyhightower/kubernetes-the-hard-way](https://github.com/kelseyhightower/kubernetes-the-hard-way) 20 | 21 | [cka-lab-practice](https://github.com/stretchcloud/cka-lab-practice) 22 | 23 | [cka-practice-environment](https://github.com/arush-sal/cka-practice-environment) 24 | 25 | [dgkanatsios/CKAD-exercises](https://github.com/dgkanatsios/CKAD-exercises) 26 | 27 | [bmuschko/ckad-prep](https://github.com/bmuschko/ckad-prep) -------------------------------------------------------------------------------- /security/jane-csr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1beta1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: jane 5 | spec: 6 | groups: 7 | - system:authenticated 8 | usages: 9 | - digital signature 10 | - key encipherment 11 | - server auth 12 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZV3R6YUdGNU1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTBSWHVUZnhzRDFVaHlVTUlGa1Y0aFVKZUV0QUQ2bzVFVnkzV1laU2JqejhyCmNBamRKVUlNZDV1TFdHb25FMzdoWWpwRnl4ckNzVTJKd2JyMlRTa3hJd3c1T3N6V0ZlS3AxN0RYZ3RUSjNBbXgKTkExQWx5dmMwVE03VlMxSDRpcEQxbXlBWFE3YXNKWVNCeFRKd0V5VHNnMlB6TXY0bHlicThqeVNWYXRlUFJRawpsZWlZWXFnSXdUSk1HakxRVGhOdW9BWlpiZ1lyLytydUZMQUdBWUg0UXRvRjVmNHFUUTFmTTN1ejVDS2xVb2t3CnpFcWdRUzV5eCs3M0xncSthT2xzYVAzWFhZQkdCV1dCeWFYRjhpMitSZkc0VEhCVm51L2R2Y0hiTmZ6cTY3OGEKZ0k3T1VjTnRDaE0rSHRBZkVkME95dGZnTzc1bFlvVEtRUTJMZjdaUlZRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBSk8vM2FkV2cvR0w5Z3lBaHdCZDluSzBzUFVBc0JhU3JIUHNud2pPNjlFbTVWYWRCekh2CjM2NXQzeUMwbWdiRmtZUFNFWWRsYkwzVk8vSGxOTmZGd1NQOGF4azh4M25ZZHNpdjN1U3BzbUY2NEkyYkphMnEKUDZkNDNmVGcrTjY2UGorQmRvQmRaUXhmY3Z4TUlYa3NwZGhwS1c3Umh3UTNuV1ZFTXNCZmFodmczdHRLNks1UApUeWNRSVp5SXgwTURxTTR3R2ppR2cyTVpGUzdQTTRWTHZjbGVoQTFIS1JkZVBCWnh3b092YWdOcEdYSmNEZDgzCmJmNXRjNitGeGJZQ29DaWt4VXBiVXBucU80NTJqVDZ4YUNkZFlkdkpJVS9HL3B6TjhnYU5lN092OWY3NUlWTFUKVFhJZzhGSnZzdjFZSE4zUUE1M0VBNmw5NVFxcjdXdndZRU09Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo= -------------------------------------------------------------------------------- /voidking/dev-install-k8s-the-hard-way.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "安装部署K8S集群的艰难之路" 3 | toc: true 4 | date: 2020-04-01 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,docker] 10 | - [专业,运维,k8s] 11 | --- 12 | 13 | # 前言 14 | [《使用kubeadm安装部署K8S集群》](https://www.voidking.com/dev-kubeadm-install-k8s/)一文中,使用kubeadm安装部署了k8s集群。但是,kubeadm的安装方式太简单了,而cka的要求不止这么简单。因此,我们还需要学习从零开始,一个一个组件安装配置k8s集群的方法,所谓k8s the hard way。 15 | 16 | 本文的目标是在virtualbox中,搭建一个k8s集群,一个master节点,一个node01节点。 17 | 18 | 主要参考[kelseyhightower/kubernetes-the-hard-way](https://github.com/kelseyhightower/kubernetes-the-hard-way)和[mmumshad/kubernetes-the-hard-way](https://github.com/mmumshad/kubernetes-the-hard-way)。 19 | 20 | 21 | 22 | # 准备 23 | 24 | - 创建两台centos7虚拟机,master节点1C2G,node01节点1C1G 25 | - 配置网络,master节点IP为192.168.56.150,node01节点的IP为192.168.56.151 26 | - 配置hostname,并且把两个节点的hostname添加到/etc/hosts 27 | - 安装Docker,参考[《Docker入门》](https://www.voidking.com/dev-docker-start/) 28 | 29 | # 安装流程 30 | 1、安装kubectl 31 | 32 | 2、创建CA,给每个组件生成TLS证书 33 | TLS证书包括: 34 | ETCD 
Server Certificate 35 | Kubernetes API Server Certificate 36 | Controller Manager Client Certificate 37 | Scheduler Client Certificate 38 | 39 | Service Account Key Pair 40 | Kube Proxy Client Certificate 41 | Kubelet Client Certificates 42 | Admin Client Certificate 43 | 44 | 3、给每个组件生成k8s配置文件,用于访问apiserver 45 | 46 | 4、生成数据加密配置和密钥,使集群支持静态加密 47 | 48 | 5、指定CA和TLS,在master节点启动etcd 49 | 50 | 6、指定CA和TLS,在master节点启动kube-apiserver、kube-controller-manager、kube-scheduler 51 | 52 | 7、指定CA和TLS,在node01节点启动kubelet和kube-proxy 53 | 54 | 8、指定CA和TLS,生成admin用户的配置文件,使用kubectl可以访问集群 55 | 56 | 9、部署weave,使pod可以获取到IP 57 | 58 | 10、部署coredns,使svc服务名可以使用 59 | 60 | 11、Smoke Test和End-to-End Tests 61 | 62 | # 实践篇 63 | 操作过程太长,具体还是参考前言中的两个 kubernetes-the-hard-way 文档吧。。。 64 | 65 | 66 | -------------------------------------------------------------------------------- /arch.md: -------------------------------------------------------------------------------- 1 | # CKA 2 | 3 | ## Introduction 4 | 5 | ## Core Concepts 6 | Cluster Architecture 7 | API Primitives 8 | Services & Other Network Primitives 9 | 10 | ## Scheduling 11 | Namespace 12 | Taints And Tolerations 13 | Node Selectors 14 | Node Affinity 15 | Node Affinity vs Taints and Tolerations 16 | 17 | Manual Scheduling 18 | Labels & Selectors 19 | Resource Limits 20 | daemon Sets 21 | Multiple Schedulers 22 | Scheduler Events 23 | Configure Kubernetes Scheduler 24 | 25 | ## Logging and Monitoring 26 | 27 | ## Application Lifecycle Management 28 | Rolling Updates & Rollbacks 29 | COMMANDS & ARGUMENTS 30 | ENVIRONMENT VARIABLES 31 | 32 | ConfigMaps 33 | Secrets 34 | SecurityContexts 35 | ServiceAccounts 36 | Resource Requirements 37 | 38 | ## Cluster Maintenance 39 | Operating System Upgrades 40 | Cluster Upgrade Process 41 | Kubernetes Releases/Versions 42 | Backup and Restore Methodologies 43 | 44 | ## Security 45 | Kubernetes Security Primitives 46 | Secure Persistent Key Value Store 47 | Authentication 48 | Authorization 49 | Security Contexts 50 | TLS Certificates for Cluster Components 51 | Images Securely 52 | Network Policies 53 | 54 | ## Storage 55 | Volumes 56 | Persistent Volumes 57 | Persistent Volume Claims 58 | 59 | ## Networking 60 | Pre-Requisites – Network, Switching, Routing, Tools 61 | Pre-Requisites – Network Namespaces 62 | Pre-Requisites – DNS, IPAM, Firewalls, LBs 63 | Pre-Requisites – Networking in Docker 64 | Networking Configuration on Cluster Nodes 65 | Service Networking 66 | POD Networking Concepts 67 | Network Loadbalancer 68 | Ingress 69 | Cluster DNS 70 | CNI 71 | 72 | ## Install K8S the Hard Way 73 | 74 | ## Install K8S the kubeadm Way 75 | 76 | ## End to End Tests on a Kubernetes Cluster 77 | 78 | ## Troubleshooting 79 | Application Failure 80 | Worker Node Failure 81 | Control Plane Failure 82 | Networking 83 | 84 | ## Other Topics 85 | JSONPath 86 | 87 | ## Lightning Labs 88 | 89 | ## Mock Exams 90 | 91 | ## Course Conclusion 92 | 93 | -------------------------------------------------------------------------------- /voidking/dev-kubeadm-upgrade.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "使用kubeadm升级K8S集群" 3 | toc: true 4 | date: 2020-03-08 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,docker] 10 | --- 11 | 12 | # K8S组件版本说明 13 | k8s集群中的常见组件包括: 14 | A类:kube-apiserver 15 | B类:controller-manager、kube-scheduler 16 | C类:kubelet、kube-proxy 17 | D类:etcd cluster、CoreDNS 18 | E类:kubectl 19 | 20 | 组件的版本号一般表示为 
major.minor.patch,比如v1.10.3。其中,A类组件是主要组件,以它为版本基准。比如,A类组件版本的minor号为x,那么B类组件版本必须为x或者x-1,C类组件版本必须为x、x-1或者x-2,E类组件版本必须为x、x-1或者x+1。而D类组件,和A类组件不是同一版本体系,版本兼容情况需要查看文档。整理成表格如下: 21 | 22 | | 组件类别 | minor版本 | 组件 | 23 | | ----- | ----- | ----- | 24 | | A类 | x | kube-apiserver | 25 | | B类 | x、x-1 | controller-manager、kube-scheduler | 26 | | C类 | x、x-1、x-2 | kubelet、kube-proxy | 27 | | E类 | x、x-1、x+1 | kubectl | 28 | | D类 | 查看文档 | etcd cluster、CoreDNS | 29 | 30 | 本文学习使用kubeadm进行k8s集群的升级。 31 | 32 | 33 | 34 | # 升级顺序 35 | 推荐的升级方法,是根据minor版本号逐级进行升级。比如v1.10.0想要升级到v1.13.0,不应该直接升级到v1.13.0,而是应该v1.10.0->v1.11.0->v1.12.0->v1.13.0。 36 | 37 | 升级顺序一般为: 38 | 1、升级kubeadm 39 | 2、升级master node 40 | 3、升级worker node 41 | 4、升级kubelet 42 | 43 | # 升级操作 44 | 以v1.11.0升级v1.12.0为例。 45 | 46 | ## master节点 47 | 1、查看升级帮助 48 | `kubeadm upgrade plan` 49 | 50 | 2、升级kubeadm 51 | ``` 52 | apt-get upgrade -y kubeadm=1.12.0-00 53 | # or 54 | apt install kubeadm=1.12.0-00 55 | ``` 56 | 57 | 3、升级k8s的AB类组件 58 | ``` 59 | kubeadm upgrade apply v1.12.0 60 | ``` 61 | 此时使用kubectl get nodes,看到的version依然是v1.11.0,因为这里显示的是kubelet的版本,而不是kube-apiserver的版本。 62 | 63 | 4、升级master节点的kubelet 64 | ``` 65 | apt install kubelet=1.12.0-00 66 | systemctl restart kubelet 67 | ``` 68 | 69 | ## worker节点 70 | 1、驱逐worker节点的pods,封锁节点 71 | ``` 72 | kubectl drain node-1 73 | kubectl cordon node-1 74 | ``` 75 | 76 | 2、升级kubeadm和kubectl 77 | ``` 78 | apt-get install kubeadm=1.12.0-00 79 | apt-get install kubelet=1.12.0-00 80 | kubeadm upgrade node config --kubelet-version v1.12.0 81 | systemctl restart kubelet 82 | ``` 83 | 84 | 3、解除节点封锁 85 | `kubectl uncordon node-1` -------------------------------------------------------------------------------- /voidking/dev-k8s-etcd-backup-restore.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "K8S集群中etcd备份和恢复" 3 | toc: true 4 | date: 2020-03-08 22:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,docker] 10 | --- 11 | 12 | # 前言 13 | 就像备份数据库一样,很多时候,我们也想对k8s资源配置进行备份。 14 | ``` 15 | kubectl get all --all-namespaces -o yaml > all-deploy-services.yaml 16 | ``` 17 | 上面的方法,可以实现对k8s资源配置的备份。但是更好的办法,是对etcd进行备份。本文就学习一下k8s中etcd的备份和恢复方法。 18 | 19 | 20 | 21 | # etcd集群状态 22 | 23 | ``` 24 | HOST_1=10.240.0.17 25 | HOST_2=10.240.0.18 26 | HOST_3=10.240.0.19 27 | ENDPOINTS=$HOST_1:2379,$HOST_2:2379,$HOST_3:2379 28 | etcdctl --endpoints=$ENDPOINTS member list 29 | etcdctl --write-out=table --endpoints=$ENDPOINTS endpoint status 30 | etcdctl --endpoints=$ENDPOINTS endpoint health 31 | ``` 32 | 33 | # 备份 34 | 1、查看配置 35 | ``` 36 | kubectl describe pod etcd-master -n kube-system | grep Command -i -A 20 37 | ``` 38 | 看到Command字段为: 39 | ``` 40 | Command: 41 | etcd 42 | --advertise-client-urls=https://172.17.0.10:2379 43 | --cert-file=/etc/kubernetes/pki/etcd/server.crt 44 | --client-cert-auth=true 45 | --data-dir=/var/lib/etcd 46 | --initial-advertise-peer-urls=https://172.17.0.10:2380 47 | --initial-cluster=master=https://172.17.0.10:2380 48 | --key-file=/etc/kubernetes/pki/etcd/server.key 49 | --listen-client-urls=https://127.0.0.1:2379,https://172.17.0.10:2379 50 | --listen-metrics-urls=http://127.0.0.1:2381 51 | --listen-peer-urls=https://172.17.0.10:2380 52 | --name=master 53 | --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt 54 | --peer-client-cert-auth=true 55 | --peer-key-file=/etc/kubernetes/pki/etcd/peer.key 56 | --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt 57 | --snapshot-count=10000 58 | --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt 59 | ``` 60 | 61 | 
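The point of dumping the Command above is to pick up the TLS material that etcdctl will need; roughly, the server flags map onto the etcdctl options used for the backup in the next step like this:

```
# etcd server flag         ->  etcdctl option
# --listen-client-urls     ->  --endpoints=https://127.0.0.1:2379
# --trusted-ca-file        ->  --cacert=/etc/kubernetes/pki/etcd/ca.crt
# --cert-file              ->  --cert=/etc/kubernetes/pki/etcd/server.crt
# --key-file               ->  --key=/etc/kubernetes/pki/etcd/server.key
```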
或者查看/etc/kubernetes/manifests/etcd.yaml。 62 | 63 | 2、执行备份 64 | ``` 65 | ETCDCTL_API=3 etcdctl \ 66 | --endpoints=https://[127.0.0.1]:2379 \ 67 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 68 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 69 | --key=/etc/kubernetes/pki/etcd/server.key \ 70 | snapshot save /tmp/snapshot-pre-boot.db 71 | ``` 72 | 73 | 3、查看备份 74 | ``` 75 | ETCDCTL_API=3 etcdctl \ 76 | --endpoints=https://[127.0.0.1]:2379 \ 77 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 78 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 79 | --key=/etc/kubernetes/pki/etcd/server.key \ 80 | snapshot status /tmp/snapshot-pre-boot.db -w table 81 | ``` 82 | 83 | # 恢复 84 | 1、恢复etcd数据 85 | ``` 86 | ETCDCTL_API=3 etcdctl \ 87 | --endpoints=https://[127.0.0.1]:2379 \ 88 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 89 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 90 | --key=/etc/kubernetes/pki/etcd/server.key \ 91 | --initial-cluster=master=https://127.0.0.1:2380 \ 92 | --initial-cluster-token etcd-cluster-1 \ 93 | --initial-advertise-peer-urls=https://127.0.0.1:2380 \ 94 | --name=master \ 95 | --data-dir /var/lib/etcd-from-backup \ 96 | snapshot restore /tmp/snapshot-pre-boot.db 97 | ``` 98 | 99 | 2、修改etcd.yaml 100 | `vim /etc/kubernetes/manifests/etcd.yaml`,如下修改: 101 | ``` 102 | # Update --data-dir to use new target location 103 | --data-dir=/var/lib/etcd-from-backup 104 | 105 | # Update new initial-cluster-token to specify new cluster 106 | --initial-cluster-token=etcd-cluster-1 107 | 108 | # Update volumes and volume mounts to point to new path 109 | volumeMounts: 110 | - mountPath: /var/lib/etcd-from-backup 111 | name: etcd-data 112 | - mountPath: /etc/kubernetes/pki/etcd 113 | name: etcd-certs 114 | hostNetwork: true 115 | priorityClassName: system-cluster-critical 116 | volumes: 117 | - hostPath: 118 | path: /var/lib/etcd-from-backup 119 | type: DirectoryOrCreate 120 | name: etcd-data 121 | - hostPath: 122 | path: /etc/kubernetes/pki/etcd 123 | type: DirectoryOrCreate 124 | name: etcd-certs 125 | ``` -------------------------------------------------------------------------------- /mock/03-CKA-Exam-Prep.txt: -------------------------------------------------------------------------------- 1 | My Exam Prep: 2 | 3 | 4 | * CNCF Kubernetes Class + labs 5 | * K8S The Hard Way run through 6 | * Run through all the tasks in the k8s docs 7 | * Practice with systemd, journald, openssl, cfssl, and etcd 8 | * Work through the sections in Walid’s github list 9 | 10 | 11 | Try the following exercises interactively: 12 | 13 | 14 | Note - there are no answers here on purpose. You should be able to do these yourself using the minimal docs that you are allowed to use during the test. At a minimum this should train you on where to look for this info during the test, without notes. 15 | 16 | 17 | 1. Create a node that has a SSD and label it as such. 18 | 1. Create a pod that is only scheduled on SSD nodes. 19 | 2. Create 2 pod definitions: the second pod should be scheduled to run anywhere the first pod is running - 2nd pod runs alongside the first pod. 20 | 3. Create a deployment running nginx version 1.12.2 that will run in 2 pods 21 | a. Scale this to 4 pods. 22 | b. Scale it back to 2 pods. 23 | c. Upgrade this to 1.13.8 24 | d. Check the status of the upgrade 25 | e. How do you do this in a way that you can see history of what happened? 26 | f. Undo the upgrade 27 | 4. Create a service that uses a scratch disk. 28 | a. Change the service to mount a disk from the host. 29 | b. 
Change the service to mount a persistent volume. 30 | 5. Create a pod that has a liveness check 31 | 6. Create a service that manually requires endpoint creation - and create that too 32 | 7. Create a daemon set 33 | a. Change the update strategy to do a rolling update but delaying 30 seconds between pod updates 34 | 8. Create a static pod 35 | 9. Create a busybox container without a manifest. Then edit the manifest. 36 | 10. Create a pod that uses secrets 37 | a. Pull secrets from environment variables 38 | b. Pull secrets from a volume 39 | c. Dump the secrets out via kubectl to show it worked 40 | 11. Create a job that runs every 3 minutes and prints out the current time. 41 | 12. Create a job that runs 20 times, 5 containers at a time, and prints "Hello parallel world" 42 | 13. Create a service that uses an external load balancer and points to a 3 pod cluster running nginx. 43 | 14. Create a horizontal autoscaling group that starts with 2 pods and scales when CPU usage is over 50%. 44 | 15. Create a custom resource definition 45 | a. Display it in the API with curl 46 | 16. Create a networking policy such that only pods with the label access=granted can talk to it. 47 | a. Create an nginx pod and attach this policy to it. 48 | b. Create a busybox pod and attempt to talk to nginx - should be blocked 49 | c. Attach the label to busybox and try again - should be allowed 50 | 17. Create a service that references an externalname. 51 | a. Test that this works from another pod 52 | 18. Create a pod that runs all processes as user 1000. 53 | 19. Create a namespace 54 | a. Run a pod in the new namespace 55 | b. Put memory limits on the namespace 56 | c. Limit pods to 2 persistent volumes in this namespace 57 | 20. Write an ingress rule that redirects calls to /foo to one service and to /bar to another 58 | 21. Write a service that exposes nginx on a nodeport 59 | a. Change it to use a cluster port 60 | b. Scale the service 61 | c. Change it to use an external IP 62 | d. Change it to use a load balancer 63 | 22. Deploy nginx with 3 replicas and then expose a port 64 | a. Use port forwarding to talk to a specific port 65 | 23. Make an API call using CURL and proper certs 66 | 24. Upgrade a cluster with kubeadm 67 | 25. Get logs for a pod 68 | 26. Deploy a pod with the wrong image name (like --image=nginy) and find the error message. 69 | 27. Get logs for kubectl 70 | 28. Get logs for the scheduler 71 | 29. Restart kubelet 72 | 73 | Non-K8S 74 | 30. Convert a CRT to a PEM 75 | a. Convert it back 76 | 31. Backup an etcd cluster 77 | 32. List the members of an etcd cluster 78 | 33. 
Find the health of etcd -------------------------------------------------------------------------------- /voidking/dev-curl-k8s-api-server.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "使用curl访问k8s的apiserver" 3 | toc: true 4 | date: 2020-04-15 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,docker] 10 | - [专业,运维,k8s] 11 | --- 12 | 13 | # k8s管理工具 14 | 普通人管理k8s集群,最常用的工具是kubectl。开发界大佬管理k8s集群,go-client也是一件顺手的工具。 15 | 而除了kubectl和go-client,其实还可以使用curl命令。 16 | 本文,我们就学习一下怎样使用curl访问k8s的apiserver,实现k8s集群的管理。主要参考[如何使用curl访问k8s的apiserver](https://www.codercto.com/a/89468.html)。 17 | 18 | 19 | 20 | # 查看pod 21 | 需求:使用curl命令,实现 kubectl get pod 同样的效果。 22 | 23 | ## 获取token 24 | 想要使用curl命令访问apiserver,首先要获得一个具有权限的token。 25 | 26 | ``` 27 | kubectl get secrets --all-namespaces | grep admin 28 | kubectl describe secrets admin-token-vmv2c -n kube-system 29 | ``` 30 | 输出结果为: 31 | ``` 32 | Name: admin-token-vmv2c 33 | Namespace: kube-system 34 | Labels: 35 | Annotations: kubernetes.io/service-account.name: admin 36 | kubernetes.io/service-account.uid: a75b4cdc-e120-11e9-8695-00163e300424 37 | 38 | Type: kubernetes.io/service-account-token 39 | 40 | Data 41 | ==== 42 | ca.crt: 1419 bytes 43 | namespace: 11 bytes 44 | token: xxxthisisatokenxxx 45 | ``` 46 | 最后一个字段就是token,那么这个token有哪些权限呢? 47 | 48 | ## 查看token权限 49 | 根据annotations中的key value,可以看到这个secrets绑定了一个service-account(sa),name为admin。等同于这个token绑定了一个sa,name为admin。 50 | 51 | 查看admin这个service-account的信息。 52 | ``` 53 | kubectl get sa --all-namespaces | grep admin 54 | kubectl describe sa admin -n kube-system 55 | ``` 56 | 57 | 输出结果为: 58 | ``` 59 | Name: admin 60 | Namespace: kube-system 61 | Labels: 62 | Annotations: kubectl.kubernetes.io/last-applied-configuration: 63 | {"apiVersion":"v1","kind":"ServiceAccount","metadata":{"annotations":{},"name":"admin","namespace":"kube-system"}} 64 | Image pull secrets: 65 | Mountable secrets: admin-token-vmv2c 66 | Tokens: admin-token-vmv2c 67 | Events: 68 | ``` 69 | 没有关于admin的权限信息,那么我们再看一下admin绑定了哪些role和clusterrole。 70 | 71 | ``` 72 | kubectl get rolebindings --all-namespaces -oyaml | grep "name: admin" -A10 -B10 73 | kubectl get clusterrolebindings --all-namespaces -oyaml | grep "name: admin" -A10 -B10 74 | ``` 75 | 找到有用信息为: 76 | ``` 77 | - apiVersion: rbac.authorization.k8s.io/v1 78 | kind: ClusterRoleBinding 79 | metadata: 80 | annotations: 81 | kubectl.kubernetes.io/last-applied-configuration: | 82 | {"apiVersion":"rbac.authorization.k8s.io/v1beta1","kind":"ClusterRoleBinding","metadata":{"annotations":{},"name":"admin"},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"cluster-admin"},"subjects":[{"kind":"ServiceAccount","name":"admin","namespace":"kube-system"}]} 83 | creationTimestamp: "2019-09-27T12:16:37Z" 84 | name: admin 85 | resourceVersion: "1317" 86 | selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/admin 87 | uid: a75e1ef9-e120-11e9-8695-00163e300424 88 | roleRef: 89 | apiGroup: rbac.authorization.k8s.io 90 | kind: ClusterRole 91 | name: cluster-admin 92 | subjects: 93 | - kind: ServiceAccount 94 | name: admin 95 | namespace: kube-system 96 | ``` 97 | 98 | 可知admin绑定了一个名为cluster-admin的clusterrole,接着查看cluster-admin的权限。 99 | 100 | ``` 101 | kubectl describe clusterrole cluster-admin -n kube-system 102 | ``` 103 | 104 | 结果为: 105 | ``` 106 | Name: cluster-admin 107 | Labels: kubernetes.io/bootstrapping=rbac-defaults 108 | Annotations: rbac.authorization.kubernetes.io/autoupdate: true 109 | 
PolicyRule: 110 | Resources Non-Resource URLs Resource Names Verbs 111 | --------- ----------------- -------------- ----- 112 | *.* [] [] [*] 113 | [*] [] [*] 114 | ``` 115 | 116 | cluster-admin这个角色拥有集群的所有权限,因此admin这个sa拥有集群的所有权限。 117 | 118 | ## 使用token 119 | 1、设置token和apiserver作为变量 120 | ``` 121 | TOKEN=$(kubectl describe secrets $(kubectl get secrets -n kube-system |grep admin |cut -f1 -d ' ') -n kube-system |grep -E '^token' |cut -f2 -d':'|tr -d '\t'|tr -d ' ') 122 | APISERVER=$(kubectl config view |grep server|cut -f 2- -d ":" | tr -d " ") 123 | ``` 124 | 125 | 2、使用token调用apiserver 126 | ``` 127 | curl -H "Authorization: Bearer $TOKEN" $APISERVER/ --insecure 128 | curl -H "Authorization: Bearer $TOKEN" $APISERVER/api --insecure 129 | curl -H "Authorization: Bearer $TOKEN" $APISERVER/api/v1/namespaces/default/pods/ --insecure 130 | ``` 131 | 132 | 以上,查看到了default空间下的pod信息,和 kubectl get pod 基本等同。 -------------------------------------------------------------------------------- /voidking/dev-k8s-rbac-auth.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "K8S中的RBAC鉴权" 3 | toc: true 4 | date: 2020-03-20 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,docker] 10 | --- 11 | 12 | # RBAC Authorization 13 | > Role-based access control (RBAC) is a method of regulating access to computer or network resources based on the roles of individual users within your organization. 14 | 15 | 更多内容,参考[Using RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/)。 16 | 17 | [《SSL和TLS》](https://www.voidking.com/dev-ssl-tls/)文中,通过API签名一节,创建了新用户jane,并且给该用户的证书进行签名。现在我们有了jane.crt和jane.key,本文中会配置jane拥有一些k8s集群的管理权限。 18 | 19 | 20 | 21 | # 角色和绑定 22 | 首先,给用户jane配置权限,使其能够创建和查看default空间下的pods。 23 | 24 | ## 命令实现 25 | 1、创建角色 26 | ``` 27 | kubectl create role --help 28 | kubectl create role developer --resource=pods --verb=list,create 29 | ``` 30 | 31 | 2、角色绑定 32 | ``` 33 | kubectl create rolebinding dev-user-binding --role=developer --user=jane 34 | ``` 35 | 36 | 3、验证权限 37 | ``` 38 | kubectl auth can-i list pods --as jane 39 | kubectl get pods --as jane 40 | ``` 41 | 至此,用户jane的权限配置完成。 42 | 43 | ## manifest实现 44 | ``` 45 | --- 46 | kind: Role 47 | apiVersion: rbac.authorization.k8s.io/v1 48 | metadata: 49 | namespace: default 50 | name: developer 51 | rules: 52 | - apiGroups: [""] 53 | resources: ["pods"] 54 | verbs: ["list", "create"] 55 | 56 | --- 57 | kind: RoleBindingapiVersion: rbac.authorization.k8s.io/v1 58 | metadata: 59 | name: dev-user-binding 60 | subjects: 61 | - kind: User 62 | name: jane 63 | apiGroup: rbac.authorization.k8s.io 64 | roleRef: 65 | kind: Role 66 | name: developer 67 | apiGroup: rbac.authorization.k8s.iomaster 68 | ``` 69 | 70 | # 集群角色和绑定 71 | 以上,给jane授权,是在namespace范围内的。当我们想给jane授权cluster范围的权限时,就需要clusterroles。 72 | 73 | 比如,我们想给jane授权node相关的权限,可以如下实现。 74 | 75 | ## 命令实现 76 | 1、创建集群角色 77 | ``` 78 | kubectl create clusterrole node-reader --verb=get,list,watch --resource=nodes 79 | ``` 80 | 81 | 2、绑定集群角色 82 | ``` 83 | kubectl create clusterrolebinding node-reader-binding --user=jane --clusterrole=node-reader 84 | ``` 85 | 86 | ## manifest实现 87 | ``` 88 | --- 89 | apiVersion: rbac.authorization.k8s.io/v1 90 | kind: ClusterRole 91 | metadata: 92 | creationTimestamp: null 93 | name: node-reader 94 | rules: 95 | - apiGroups: 96 | - "" 97 | resources: 98 | - nodes 99 | verbs: 100 | - get 101 | - list 102 | - watch 103 | 104 | --- 105 | apiVersion: rbac.authorization.k8s.io/v1beta1 106 | kind: 
ClusterRoleBinding 107 | metadata: 108 | creationTimestamp: null 109 | name: node-reader-binding 110 | roleRef: 111 | apiGroup: rbac.authorization.k8s.io 112 | kind: ClusterRole 113 | name: node-reader 114 | subjects: 115 | - apiGroup: rbac.authorization.k8s.io 116 | kind: User 117 | name: jane 118 | ``` 119 | 120 | ## 其他 121 | 如果使用clusterrole指定的资源是pods这种namespace级别的资源,该集群角色绑定给jane后会有什么效果? 122 | 答:jane对所有namespace中的pods资源拥有clusterrole中定义的操作权限。 123 | 124 | # 配置文件 125 | 用户jane已经拥有了需要的权限,该怎样访问k8s集群呢?答案是通过kubeconfig文件。 126 | 127 | ## kubeconfig 128 | 1、查看配置 129 | `kubectl config view` 130 | 记录当前的server地址。 131 | 132 | 2、保存ca.crt 133 | ``` 134 | cat .kube/config | grep certificate-authority-data | awk '{print $2}' | base64 --decode > ca.crt 135 | ``` 136 | 137 | ## jane.kubeconfig 138 | 1、设置集群参数 139 | ``` 140 | kubectl config set-cluster kubernetes \ 141 | --server="https://172.17.0.69:6443" \ 142 | --certificate-authority=/root/ca.crt \ 143 | --embed-certs=true \ 144 | --kubeconfig=jane.kubeconfig 145 | ``` 146 | 当前目录生成jane.kubeconfig文件。 147 | 148 | 2、设置客户端认证参数 149 | ``` 150 | kubectl config set-credentials jane \ 151 | --client-certificate=/root/jane.crt \ 152 | --client-key=/root/jane.key \ 153 | --embed-certs=true \ 154 | --kubeconfig=jane.kubeconfig 155 | ``` 156 | 157 | 3、设置上下文参数 158 | ``` 159 | kubectl config set-context jane@kubernetes \ 160 | --cluster=kubernetes \ 161 | --user=jane \ 162 | --namespace=default \ 163 | --kubeconfig=jane.kubeconfig 164 | ``` 165 | 166 | 4、查看配置 167 | ``` 168 | cat jane.kubeconfig 169 | kubectl config view --kubeconfig jane.kubeconfig 170 | ``` 171 | 172 | 5、设置默认上下文 173 | ``` 174 | export KUBECONFIG=/root/jane.kubeconfig 175 | kubectl config use-context jane@kubernetes --kubeconfig=jane.kubeconfig 176 | ``` 177 | 178 | 6、权限测试 179 | `kubectl get pods` 180 | 如果没有配置权限,会输出: 181 | ``` 182 | Error from server (Forbidden): pods is forbidden: User "jane" cannot list resource "pods" in API group "" in the namespace "default" 183 | ``` 184 | 如果配置好了权限,会输出pod相关信息。 185 | 186 | 但是,以上权限测试只是在minikube或者[katacoda](https://www.katacoda.com/courses/kubernetes/playground)平台生效。 187 | 如果使用[kodekloud](https://kodekloud.com/courses/enrolled/675080)或者阿里云k8s集群,会报错: 188 | ``` 189 | error: You must be logged in to the server (Unauthorized) 190 | ``` 191 | 研究了四个多小时,才发现是平台的问题,服气了。。。 -------------------------------------------------------------------------------- /voidking/dev-kubeadm-install-k8s.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "使用kubeadm安装部署K8S集群" 3 | toc: true 4 | date: 2020-03-16 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,docker] 10 | --- 11 | 12 | # kubeadm简介 13 | [《使用kubeadm升级K8S集群》](https://www.voidking.com/dev-kubeadm-upgrade/)一文中,了解了k8s集群中常见组件,并且使用kubeadm对k8s集群进行了升级。本文中,会学习使用kubeadm安装部署k8s集群。 14 | 15 | > Kubeadm is a tool built to provide kubeadm init and kubeadm join as best-practice “fast paths” for creating Kubernetes clusters. 
16 | 17 | 更多内容,参考[Overview of kubeadm](https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm/)和[Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)。 18 | 19 | 20 | 21 | # 安装流程 22 | 目标:搭建一个k8s集群,包括master和node01两个节点,节点系统为ubuntu16.04.2。 23 | 24 | 1、环境准备。 25 | 26 | 2、在两个节点上安装kubeadm。 27 | 28 | 3、使用kubeadm初始化节点。 29 | 30 | 4、安装网络插件。 31 | 32 | 5、验证安装。 33 | 34 | # 环境准备 35 | 36 | 1、配置主机名 37 | 38 | 2、配置IP地址 39 | 40 | 3、参考[Letting iptables see bridged traffic](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/#letting-iptables-see-bridged-traffic),配置iptables 41 | 42 | ``` 43 | # ensure legacy binaries are installed 44 | sudo apt-get install -y iptables arptables ebtables 45 | 46 | # switch to legacy versions 47 | sudo update-alternatives --set iptables /usr/sbin/iptables-legacy 48 | sudo update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy 49 | sudo update-alternatives --set arptables /usr/sbin/arptables-legacy 50 | sudo update-alternatives --set ebtables /usr/sbin/ebtables-legacy 51 | 52 | # Letting iptables see bridged traffic 53 | cat < /etc/sysctl.d/k8s.conf 54 | net.bridge.bridge-nf-call-ip6tables = 1 55 | net.bridge.bridge-nf-call-iptables = 1 56 | EOF 57 | sysctl --system 58 | ``` 59 | 60 | 4、参考[Docker入门](https://www.voidking.com/dev-docker-start/),安装Docker 61 | 62 | # 安装kubeadm 63 | 参考[Installing kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/)。 64 | 65 | 1、确认系统版本 66 | `cat /etc/os-release` 67 | 68 | 2、执行安装kubeadm、kubelet和kubectl(两个节点都要执行) 69 | ``` 70 | sudo apt-get update && sudo apt-get install -y apt-transport-https curl 71 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 72 | cat < testout.txt 170 | kubetest --test --provider=skeleton --test_args="ginkgo.focus=Secrets" > testout.txt 171 | cat testout.txt 172 | ``` 173 | 174 | ## Smoke Test 175 | 按照[Smoke Test](https://github.com/mmumshad/kubernetes-the-hard-way/blob/master/docs/15-smoke-test.md)文档操作一遍。 176 | 177 | ## sonobuoy 178 | 官网地址:[sonobuoy](https://sonobuoy.io/) 179 | 源码地址:[vmware-tanzu/sonobuoy](https://github.com/vmware-tanzu/sonobuoy) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k8s-tool 2 | k8s tool for cka,including shell scripts and yaml files. 
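The generate-yaml commands in the next section were written against an older kubectl. A minimal sketch of the same skeleton generation on newer kubectl releases, where `--generator` has been removed and `--dry-run` takes an explicit value:

```
kubectl run vk-pod --image=nginx --dry-run=client -o yaml
kubectl create deployment vk-deploy --image=nginx --dry-run=client -o yaml
kubectl create service clusterip vk-svc --tcp="5678:8080" --dry-run=client -o yaml
```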
3 | 4 | # generate yaml 5 | ``` 6 | # pod 7 | kubectl run vk-pod --image=nginx --generator=run-pod/v1 --dry-run -o yaml 8 | 9 | # deployment 10 | kubectl create deployment vk-deploy --image=nginx --dry-run -o yaml 11 | 12 | # service 13 | kubectl create service clusterip vk-svc --tcp="5678:8080" --dry-run -o yaml 14 | 15 | # configmap 16 | kubectl create configmap special-config --from-literal=special.how=very --from-literal=special.type=charm 17 | 18 | # secret 19 | kubectl create secret generic db-user-pass --from-literal=username=voidking --from-literal=password='vkpassword' 20 | ``` 21 | 22 | # expose service 23 | ``` 24 | kubectl expose deployment deployment-name --port=6789 --target-port=80 25 | ``` 26 | 27 | # 创建用户并授权 28 | 1、生成证书 29 | ``` 30 | openssl genrsa -out jane.key 2048 31 | openssl req -new -key jane.key -subj "/CN=jane" -out jane.csr 32 | cat jane.csr | base64 33 | ``` 34 | 35 | 2、签名 36 | ``` 37 | # edit jane-csr.yaml 38 | kubectl apply -f jane-csr.yaml 39 | kubectl get csr 40 | kubectl certificate approve jane 41 | kubectl get csr jane -o yaml 42 | ``` 43 | 44 | 3、创建角色,绑定角色 45 | ``` 46 | kubectl create role --help 47 | kubectl create role developer --resource=pods --verb=list,create 48 | kubectl create rolebinding dev-user-binding --role=developer --user=jane 49 | 50 | # or edit developer-role.yaml 51 | kubectl apply -f developer-role.yaml 52 | ``` 53 | 54 | 4、权限验证 55 | ``` 56 | kubectl auth can-i list pods --as jane 57 | kubectl get pods --as jane 58 | ``` 59 | 60 | 5、生成kubeconfig 61 | ``` 62 | kubectl config view 63 | cat .kube/config | grep certificate-authority-data | awk '{print $2}' | base64 --decode > ca.crt 64 | 65 | kubectl config set-cluster kubernetes \ 66 | --server="https://172.17.0.69:6443" \ 67 | --certificate-authority=/root/ca.crt \ 68 | --embed-certs=true \ 69 | --kubeconfig=jane.kubeconfig 70 | 71 | kubectl config set-credentials jane \ 72 | --client-certificate=/root/jane.crt \ 73 | --client-key=/root/jane.key \ 74 | --embed-certs=true \ 75 | --kubeconfig=jane.kubeconfig 76 | 77 | kubectl config set-context jane@kubernetes \ 78 | --cluster=kubernetes \ 79 | --user=jane \ 80 | --namespace=default \ 81 | --kubeconfig=jane.kubeconfig 82 | ``` 83 | 84 | 6、使用新的kubeconfig 85 | ``` 86 | cat jane.kubeconfig 87 | kubectl config view --kubeconfig jane.kubeconfig 88 | export KUBECONFIG=/root/jane.kubeconfig 89 | kubectl config use-context jane@kubernetes --kubeconfig=jane.kubeconfig 90 | kubectl get pods 91 | ``` 92 | 93 | # secret for registry 94 | 1、创建registry secret 95 | ``` 96 | kubectl create secret docker-registry private-reg-cred --docker-username=dock_user --docker-password=dock_password --docker-server=myprivateregistry.com:5000 --docker-email=dock_user@myprivateregistry.com 97 | ``` 98 | 99 | 2、在pod yaml中使用secret 100 | ``` 101 | apiVersion: v1 102 | kind: Pod 103 | metadata: 104 | name: private-reg 105 | spec: 106 | containers: 107 | - name: private-reg-container 108 | image: 109 | imagePullSecrets: 110 | - name: private-reg-cred 111 | ``` 112 | 113 | # install weave 114 | ``` 115 | kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')" 116 | ``` 117 | 118 | # upgrade 119 | ## master 120 | ``` 121 | kubeadm upgrade plan 122 | apt install kubeadm=1.12.0-00 123 | kubeadm upgrade apply v1.12.0 124 | apt install kubelet=1.12.0-00 125 | systemctl restart kubelet 126 | ``` 127 | 128 | ## node01 129 | ``` 130 | kubectl drain node01 131 | kubectl cordon node01 132 | 133 | apt-get install kubeadm=1.12.0-00 134 | 
apt-get install kubelet=1.12.0-00 135 | kubeadm upgrade node config --kubelet-version v1.12.0 136 | systemctl restart kubelet 137 | 138 | kubectl uncordon node01 139 | ``` 140 | 141 | # etcd 142 | ## backup 143 | ``` 144 | kubectl describe pod etcd-master -n kube-system 145 | 146 | ETCDCTL_API=3 etcdctl \ 147 | --endpoints=https://[127.0.0.1]:2379 \ 148 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 149 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 150 | --key=/etc/kubernetes/pki/etcd/server.key \ 151 | snapshot save /tmp/snapshot-pre-boot.db 152 | ``` 153 | 154 | ## restore 155 | 1、execute command 156 | ``` 157 | ETCDCTL_API=3 etcdctl \ 158 | --endpoints=https://[127.0.0.1]:2379 \ 159 | --cacert=/etc/kubernetes/pki/etcd/ca.crt \ 160 | --cert=/etc/kubernetes/pki/etcd/server.crt \ 161 | --key=/etc/kubernetes/pki/etcd/server.key \ 162 | --initial-cluster=master=https://127.0.0.1:2380 \ 163 | --initial-cluster-token etcd-cluster-1 \ 164 | --initial-advertise-peer-urls=https://127.0.0.1:2380 \ 165 | --name=master \ 166 | --data-dir /var/lib/etcd-from-backup \ 167 | snapshot restore /tmp/snapshot-pre-boot.db 168 | ``` 169 | 170 | 2、vim /etc/kubernetes/manifests/etcd.yaml 171 | ``` 172 | # Update --data-dir to use new target location 173 | --data-dir=/var/lib/etcd-from-backup 174 | 175 | # Update new initial-cluster-token to specify new cluster 176 | --initial-cluster-token=etcd-cluster-1 177 | 178 | # Update volumes and volume mounts to point to new path 179 | volumeMounts: 180 | - mountPath: /var/lib/etcd-from-backup 181 | name: etcd-data 182 | - mountPath: /etc/kubernetes/pki/etcd 183 | name: etcd-certs 184 | hostNetwork: true 185 | priorityClassName: system-cluster-critical 186 | volumes: 187 | - hostPath: 188 | path: /var/lib/etcd-from-backup 189 | type: DirectoryOrCreate 190 | name: etcd-data 191 | - hostPath: 192 | path: /etc/kubernetes/pki/etcd 193 | type: DirectoryOrCreate 194 | name: etcd-certs 195 | ``` 196 | 197 | ## status 198 | ``` 199 | HOST_1=10.240.0.17 200 | HOST_2=10.240.0.18 201 | HOST_3=10.240.0.19 202 | ENDPOINTS=$HOST_1:2379,$HOST_2:2379,$HOST_3:2379 203 | etcdctl --endpoints=$ENDPOINTS member list 204 | etcdctl --write-out=table --endpoints=$ENDPOINTS endpoint status 205 | etcdctl --endpoints=$ENDPOINTS endpoint health 206 | ``` 207 | 208 | # curl apiserver 209 | ``` 210 | curl -H "Authorization: Bearer $TOKEN" $APISERVER/api/v1/namespaces/default/pods/ --insecure 211 | ``` 212 | 213 | # cert transport 214 | 215 | ``` 216 | # 查看pem证书 217 | openssl x509 -in cert.pem -text -noout 218 | 219 | # 查看der证书 220 | openssl x509 -in cert.der -inform der -text -noout 221 | 222 | # pem to der 223 | openssl x509 -in cert.crt -outform der -out cert.der 224 | 225 | # der to pem 226 | openssl x509 -in cert.crt -inform der -outform pem -out cert.pem 227 | ``` 228 | 229 | # install k8s 230 | https://github.com/kelseyhightower/kubernetes-the-hard-way 231 | 232 | https://github.com/mmumshad/kubernetes-the-hard-way -------------------------------------------------------------------------------- /voidking/dev-ssl-tls.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "SSL和TLS" 3 | toc: true 4 | date: 2020-03-09 20:00:00 5 | tags: 6 | - tls 7 | - docker 8 | - k8s 9 | categories: 10 | - [专业,运维,k8s] 11 | --- 12 | 13 | # SSL和TLS简介 14 | [《Hexo启用https加密连接》](https://www.voidking.com/dev-hexo-https/)和[《CentOS7安装配置GitLab》](https://www.voidking.com/dev-centos7-install-gitlab/)中都涉及到了SSL/TLS,SSL和TLS是啥? 
15 | 16 | > 传输层安全性协议(英语:Transport Layer Security,缩写:TLS)及其前身安全套接层(英语:Secure Sockets Layer,缩写:SSL)是一种安全协议,目的是为互联网通信提供安全及数据完整性保障。 17 | SSL包含记录层(Record Layer)和传输层,记录层协议确定传输层数据的封装格式。传输层安全协议使用X.509认证,之后利用非对称加密演算来对通信方做身份认证,之后交换对称密钥作为会谈密钥(Session key)。这个会谈密钥是用来将通信两方交换的数据做加密,保证两个应用间通信的保密性和可靠性,使客户与服务器应用之间的通信不被攻击者窃听。 18 | 19 | 更多内容参考[维基百科-传输层安全性协议](https://zh.wikipedia.org/wiki/%E5%82%B3%E8%BC%B8%E5%B1%A4%E5%AE%89%E5%85%A8%E6%80%A7%E5%8D%94%E5%AE%9A) 20 | 21 | 22 | 23 | 24 | # 相关概念 25 | ## PKI 26 | > 公开密钥基础建设(英语:Public Key Infrastructure,缩写:PKI),又称公开密钥基础架构、公钥基础建设、公钥基础设施、公开密码匙基础建设或公钥基础架构,是一组由硬件、软件、参与者、管理政策与流程组成的基础架构,其目的在于创造、管理、分配、使用、存储以及撤销数字证书。 27 | 密码学上,公开密钥基础建设借着数字证书认证机构(CA)将用户的个人身份跟公开密钥链接在一起。对每个证书中心用户的身份必须是唯一的。链接关系通过注册和发布过程创建,取决于担保级别,链接关系可能由CA的各种软件或在人为监督下完成。PKI的确定链接关系的这一角色称为注册管理中心(Registration Authority,RA)。RA确保公开密钥和个人身份链接,可以防欺诈。在微软的公开密钥基础建设之下,注册管理中心(RA)又被叫做从属数字证书认证机构(Subordinate CA)。 28 | 29 | 更多内容参考[维基百科-公开密钥基础架构](https://zh.wikipedia.org/wiki/%E5%85%AC%E9%96%8B%E9%87%91%E9%91%B0%E5%9F%BA%E7%A4%8E%E5%BB%BA%E8%A8%AD) 30 | 31 | ## CA 32 | > 数字证书认证机构(英语:Certificate Authority,缩写为CA),也称为电子商务认证中心、电子商务认证授权机构,是负责发放和管理数字证书的权威机构,并作为电子商务交易中受信任的第三方,承担公钥体系中公钥的合法性检验的责任。 33 | 34 | 更多内容参考[维基百科-证书颁发机构](https://zh.wikipedia.org/wiki/%E8%AF%81%E4%B9%A6%E9%A2%81%E5%8F%91%E6%9C%BA%E6%9E%84) 35 | 36 | ## 数字证书 37 | > 公钥证书(英语:Public key certificate),又称数字证书(digital certificate)或身份证书(identity certificate)。是用于公开密钥基础建设的电子文件,用来证明公开密钥拥有者的身份。此文件包含了公钥信息、拥有者身份信息(主体)、以及数字证书认证机构(发行者)对这份文件的数字签名,以保证这个文件的整体内容正确无误。拥有者凭着此文件,可向电脑系统或其他用户表明身份,从而对方获得信任并授权访问或使用某些敏感的电脑服务。电脑系统或其他用户可以透过一定的程序核实证书上的内容,包括证书有否过期、数字签名是否有效,如果你信任签发的机构,就可以信任证书上的密钥,凭公钥加密与拥有者进行可靠的通信。 38 | 39 | > 公钥证书包括自签证书、根证书、中介证书、授权证书、终端实体证书(TLS服务器证书和TLS客户端证书)。 40 | 41 | 更多内容参考[维基百科-公钥证书](https://zh.wikipedia.org/wiki/%E5%85%AC%E9%96%8B%E9%87%91%E9%91%B0%E8%AA%8D%E8%AD%89) 42 | 43 | # 加密原理 44 | TLS/SSL 的功能实现主要依赖于三类基本算法:散列函数 Hash、对称加密和非对称加密。其利用非对称加密实现身份认证和密钥协商,对称加密算法采用协商的密钥对数据加密,基于散列函数验证信息的完整性。 45 | ![](http://cdn.voidking.com/@/imgs/ssl-tls/tls.jpg?imageView2/0/w/600) 46 | 47 | TLS 的基本工作方式是,客户端使用非对称加密与服务器进行通信,实现身份验证并协商对称加密使用的密钥,然后对称加密算法采用协商密钥对信息以及信息摘要进行加密通信,不同的节点之间采用的对称密钥不同,从而可以保证信息只能通信双方获取。 48 | 例如,在 HTTPS 协议中,客户端发出请求,服务端会将公钥发给客户端,客户端验证过后生成一个密钥再用公钥加密后发送给服务端(非对称加密),双方会在 TLS 握手过程中生成一个协商密钥(对称密钥),成功后建立加密连接。通信过程中客户端将请求数据用协商密钥加密后发送,服务端也用协商密钥解密,响应也用相同的协商密钥。后续的通信使用对称加密是因为对称加解密快,而握手过程中非对称加密可以保证加密的有效性,但是过程复杂,计算量相对来说也大。 49 | 50 | 更多内容参考[SSL/TLS 详解](https://juejin.im/post/5b88a93df265da43231f1451) 51 | 52 | # 自建CA并签发证书 53 | [《CentOS7安装配置GitLab》](https://www.voidking.com/dev-centos7-install-gitlab/)一文中,添加SSL一节详细描述了自建CA并签发SSL证书的过程。 54 | 更多内容,可以参考[基于OpenSSL自建CA和颁发SSL证书](https://www.yuanjies.com/?p=539)和[使用 OpenSSL 自建 CA 并签发证书](https://zhuanlan.zhihu.com/p/34788439)。 55 | 56 | # k8s中证书管理 57 | ## 查看证书 58 | 1、查看证书位置 59 | ``` 60 | ps aux | grep kubelet 61 | # find config file 62 | cat /var/lib/kubelet/config.yaml | grep staticPodPath 63 | cd /etc/kubernetes/manifests 64 | cat kube-apiserver.yaml 65 | ``` 66 | 67 | 2、查看证书详情 68 | ``` 69 | openssl x509 -in /etc/kubernetes/pki/apiserver.crt -text 70 | ``` 71 | 72 | ## 签名 73 | 签名,或者签名过期后重新签名 74 | ``` 75 | openssl x509 -req -in /etc/kubernetes/pki/apiserver-etcd-client.csr -CA /etc/kubernetes/pki/etcd/ca.crt -CAkey /etc/kubernetes/pki/etcd/ca.key -CAcreateserial -out /etc/kubernetes/pki/apiserver-etcd-client.crt 76 | ``` 77 | 78 | ## 通过API签名 79 | 1、为新用户创建证书 80 | ``` 81 | openssl genrsa -out jane.key 2048 82 | openssl req -new -key jane.key -subj "/CN=jane" -out jane.csr 83 | cat jane.csr | base64 | tr -d '\n' 84 | ``` 85 | 86 | 2、创建jane-csr.yaml文件 87 | ``` 
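# Note (added comment, not part of the original manifest): the certificates.k8s.io/v1beta1
# CSR API was removed in Kubernetes 1.22; on newer clusters use certificates.k8s.io/v1,
# which additionally requires spec.signerName (e.g. kubernetes.io/kube-apiserver-client).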
88 | apiVersion: certificates.k8s.io/v1beta1 89 | kind: CertificateSigningRequest 90 | metadata: 91 | name: jane 92 | spec: 93 | groups: 94 | - system:authenticated 95 | usages: 96 | - digital signature 97 | - key encipherment 98 | - server auth 99 | - client auth 100 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZEQ0NBVHdDQVFBd0R6RU5NQXNHQTFVRUF3d0VhbUZ1WlRDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRApnZ0VQQURDQ0FRb0NnZ0VCQUtxbWFIa3BJeE94dDN2UmxJT1FnSUFxSUFsekhQcTRRVTBDTDVhS04xbmY4NXRzCi9LU3o0eml1a1hEQ1NOSVNIT1pWbTY5NzVJa3RXcGFySmhaTXptc1B2eUFSeXFWbWY2L1h0bmwyeE0xblhaUzAKZGc0b0E1dXFuR0w2dHpaQzF3VFY4RVFIZnRlcWYzbUpTN2JtdlppaXFlak12a2UzVkk5RTNFK0xsUUttNnVXRwprS2RDZ2ZHNUszRGJFczR1VzR6M0lMdTdEa1BlamJodWFtYzlxYVZNRVpLSGZ0bnlBYlFITkZVLzhvWVYvR1VzCnRFVWZMRXBBTmlqUFc5U0pPWHJtNUg1NXhOdExXVHMwenU3YlRSZWE0ZjFVaDFCbkZuUkhWYUJqNysydHpITTgKaklJS01KakdWOS9rUVltRmo3UTJZUW1wYzdXWGpPZEFWcHBSc1kwQ0F3RUFBYUFBTUEwR0NTcUdTSWIzRFFFQgpDd1VBQTRJQkFRQUZ2ZUxrUmYxd0xDQmN6cWdMVkJIUGZBa0MzeU1CTDA3VXl0QUlCcVhkR3h1QWtyL3NQT1dkClNxTkhIRkNzQVNmU0lNVC96djBrQS9yN3Fnd25BMCtZREZJSjNzUlBKZkJmNm1Ic3FrbjlPd1htR1E3d0orNFQKWXVCc1lJSllnNWtzVWJoQVhiQkVZekk2OUY0Uk52U0d0K1ZLOHBBdUQzcXRvejJsd3liV0cvaUo4V3FESTZNegpuMURBeDBkRDZmRWhIKy9DTWdSREY5OExCL1ZqMWZOUUlqZ2k3Rmc1aTByU1NtZUdUMllOblJldERZYWN4aWlzCjNFN1B4STdYWDd2QjRjY3pITlUrTG92N3JnSkVXM3lRMXZRTXRCNTZlbWJaNGVnL01XZEhkeWliVXo2aDQ1ZW8KUGN5b3QxaW1wdFRyK3kwSkt0SmJ1YllQOGd2RG5FeFYKLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg== 101 | ``` 102 | 103 | 3、签名请求并通过 104 | ``` 105 | kubectl apply -f jane-csr.yaml 106 | kubectl get csr 107 | kubectl certificate approve jane 108 | kubectl get csr jane -o yaml 109 | kubectl get csr jane -o jsonpath='{.status.certificate}' | base64 --decode > jane.crt 110 | ``` 111 | 112 | 或者,直接使用openssl命令进行签名: 113 | ``` 114 | openssl x509 -req -in /root/jane.csr -CA /etc/kubernetes/pki/etcd/ca.crt -CAkey /etc/kubernetes/pki/etcd/ca.key -CAcreateserial -out /root/jane.crt 115 | ``` 116 | 117 | PS:查看签名用的CA 118 | ``` 119 | cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep ca.crt 120 | cat /etc/kubernetes/manifests/kube-controller-manager.yaml | grep ca.key 121 | ``` 122 | 123 | 更多内容,参考[Manage TLS Certificates in a Cluster](https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/)。 124 | 125 | # 证书格式转换 126 | X.509是一种证书标准,定义了证书中应该包含哪些内容,详情参考RFC5280,SSL使用的就是这种证书标准。 127 | 同样的X.509证书,可能有不同的编码格式,目前有以下两种编码格式。 128 | PEM:Privacy Enhanced Mail,BASE64编码,以"-----BEGIN-----"开头,"-----END-----"结尾。 129 | 查看PEM格式证书的信息: 130 | `openssl x509 -in cert.pem -text -noout` 131 | 132 | DER:Distinguished Encoding Rules,二进制格式,不可读。 133 | 查看DER格式证书的信息: 134 | `openssl x509 -in cert.der -inform der -text -noout` 135 | 136 | 问题来了,k8s中的证书,除了使用pem格式,还有就是crt格式,并没有der格式啊?这是因为,crt只是一个文件后缀,编码格式可能是pem也可能是der。 137 | 138 | 那么,pem和der怎样互相转换呢? 
139 | ``` 140 | # pem to der 141 | openssl x509 -in cert.crt -outform der -out cert.der 142 | # der to pem 143 | openssl x509 -in cert.crt -inform der -outform pem -out cert.pem 144 | ``` 145 | 146 | # 书签 147 | [OpenSSL 与 SSL 数字证书概念贴](https://segmentfault.com/a/1190000002568019) 148 | [SSL/TLS 原理详解](https://cloud.tencent.com/developer/article/1114555) 149 | -------------------------------------------------------------------------------- /networking/ingress-controller-mandatory.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: ingress-nginx 5 | labels: 6 | app.kubernetes.io/name: ingress-nginx 7 | app.kubernetes.io/part-of: ingress-nginx 8 | 9 | --- 10 | 11 | kind: ConfigMap 12 | apiVersion: v1 13 | metadata: 14 | name: nginx-configuration 15 | namespace: ingress-nginx 16 | labels: 17 | app.kubernetes.io/name: ingress-nginx 18 | app.kubernetes.io/part-of: ingress-nginx 19 | 20 | --- 21 | kind: ConfigMap 22 | apiVersion: v1 23 | metadata: 24 | name: tcp-services 25 | namespace: ingress-nginx 26 | labels: 27 | app.kubernetes.io/name: ingress-nginx 28 | app.kubernetes.io/part-of: ingress-nginx 29 | 30 | --- 31 | kind: ConfigMap 32 | apiVersion: v1 33 | metadata: 34 | name: udp-services 35 | namespace: ingress-nginx 36 | labels: 37 | app.kubernetes.io/name: ingress-nginx 38 | app.kubernetes.io/part-of: ingress-nginx 39 | 40 | --- 41 | apiVersion: v1 42 | kind: ServiceAccount 43 | metadata: 44 | name: nginx-ingress-serviceaccount 45 | namespace: ingress-nginx 46 | labels: 47 | app.kubernetes.io/name: ingress-nginx 48 | app.kubernetes.io/part-of: ingress-nginx 49 | 50 | --- 51 | apiVersion: rbac.authorization.k8s.io/v1beta1 52 | kind: ClusterRole 53 | metadata: 54 | name: nginx-ingress-clusterrole 55 | labels: 56 | app.kubernetes.io/name: ingress-nginx 57 | app.kubernetes.io/part-of: ingress-nginx 58 | rules: 59 | - apiGroups: 60 | - "" 61 | resources: 62 | - configmaps 63 | - endpoints 64 | - nodes 65 | - pods 66 | - secrets 67 | verbs: 68 | - list 69 | - watch 70 | - apiGroups: 71 | - "" 72 | resources: 73 | - nodes 74 | verbs: 75 | - get 76 | - apiGroups: 77 | - "" 78 | resources: 79 | - services 80 | verbs: 81 | - get 82 | - list 83 | - watch 84 | - apiGroups: 85 | - "" 86 | resources: 87 | - events 88 | verbs: 89 | - create 90 | - patch 91 | - apiGroups: 92 | - "extensions" 93 | - "networking.k8s.io" 94 | resources: 95 | - ingresses 96 | verbs: 97 | - get 98 | - list 99 | - watch 100 | - apiGroups: 101 | - "extensions" 102 | - "networking.k8s.io" 103 | resources: 104 | - ingresses/status 105 | verbs: 106 | - update 107 | 108 | --- 109 | apiVersion: rbac.authorization.k8s.io/v1beta1 110 | kind: Role 111 | metadata: 112 | name: nginx-ingress-role 113 | namespace: ingress-nginx 114 | labels: 115 | app.kubernetes.io/name: ingress-nginx 116 | app.kubernetes.io/part-of: ingress-nginx 117 | rules: 118 | - apiGroups: 119 | - "" 120 | resources: 121 | - configmaps 122 | - pods 123 | - secrets 124 | - namespaces 125 | verbs: 126 | - get 127 | - apiGroups: 128 | - "" 129 | resources: 130 | - configmaps 131 | resourceNames: 132 | # Defaults to "-" 133 | # Here: "-" 134 | # This has to be adapted if you change either parameter 135 | # when launching the nginx-ingress-controller. 
136 | - "ingress-controller-leader-nginx" 137 | verbs: 138 | - get 139 | - update 140 | - apiGroups: 141 | - "" 142 | resources: 143 | - configmaps 144 | verbs: 145 | - create 146 | - apiGroups: 147 | - "" 148 | resources: 149 | - endpoints 150 | verbs: 151 | - get 152 | 153 | --- 154 | apiVersion: rbac.authorization.k8s.io/v1beta1 155 | kind: RoleBinding 156 | metadata: 157 | name: nginx-ingress-role-nisa-binding 158 | namespace: ingress-nginx 159 | labels: 160 | app.kubernetes.io/name: ingress-nginx 161 | app.kubernetes.io/part-of: ingress-nginx 162 | roleRef: 163 | apiGroup: rbac.authorization.k8s.io 164 | kind: Role 165 | name: nginx-ingress-role 166 | subjects: 167 | - kind: ServiceAccount 168 | name: nginx-ingress-serviceaccount 169 | namespace: ingress-nginx 170 | 171 | --- 172 | apiVersion: rbac.authorization.k8s.io/v1beta1 173 | kind: ClusterRoleBinding 174 | metadata: 175 | name: nginx-ingress-clusterrole-nisa-binding 176 | labels: 177 | app.kubernetes.io/name: ingress-nginx 178 | app.kubernetes.io/part-of: ingress-nginx 179 | roleRef: 180 | apiGroup: rbac.authorization.k8s.io 181 | kind: ClusterRole 182 | name: nginx-ingress-clusterrole 183 | subjects: 184 | - kind: ServiceAccount 185 | name: nginx-ingress-serviceaccount 186 | namespace: ingress-nginx 187 | 188 | --- 189 | 190 | apiVersion: apps/v1 191 | kind: Deployment 192 | metadata: 193 | name: nginx-ingress-controller 194 | namespace: ingress-nginx 195 | labels: 196 | app.kubernetes.io/name: ingress-nginx 197 | app.kubernetes.io/part-of: ingress-nginx 198 | spec: 199 | replicas: 1 200 | selector: 201 | matchLabels: 202 | app.kubernetes.io/name: ingress-nginx 203 | app.kubernetes.io/part-of: ingress-nginx 204 | template: 205 | metadata: 206 | labels: 207 | app.kubernetes.io/name: ingress-nginx 208 | app.kubernetes.io/part-of: ingress-nginx 209 | annotations: 210 | prometheus.io/port: "10254" 211 | prometheus.io/scrape: "true" 212 | spec: 213 | # wait up to five minutes for the drain of connections 214 | terminationGracePeriodSeconds: 300 215 | serviceAccountName: nginx-ingress-serviceaccount 216 | nodeSelector: 217 | kubernetes.io/os: linux 218 | containers: 219 | - name: nginx-ingress-controller 220 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.30.0 221 | args: 222 | - /nginx-ingress-controller 223 | - --configmap=$(POD_NAMESPACE)/nginx-configuration 224 | - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services 225 | - --udp-services-configmap=$(POD_NAMESPACE)/udp-services 226 | - --publish-service=$(POD_NAMESPACE)/ingress-nginx 227 | - --annotations-prefix=nginx.ingress.kubernetes.io 228 | securityContext: 229 | allowPrivilegeEscalation: true 230 | capabilities: 231 | drop: 232 | - ALL 233 | add: 234 | - NET_BIND_SERVICE 235 | # www-data -> 101 236 | runAsUser: 101 237 | env: 238 | - name: POD_NAME 239 | valueFrom: 240 | fieldRef: 241 | fieldPath: metadata.name 242 | - name: POD_NAMESPACE 243 | valueFrom: 244 | fieldRef: 245 | fieldPath: metadata.namespace 246 | ports: 247 | - name: http 248 | containerPort: 80 249 | protocol: TCP 250 | - name: https 251 | containerPort: 443 252 | protocol: TCP 253 | livenessProbe: 254 | failureThreshold: 3 255 | httpGet: 256 | path: /healthz 257 | port: 10254 258 | scheme: HTTP 259 | initialDelaySeconds: 10 260 | periodSeconds: 10 261 | successThreshold: 1 262 | timeoutSeconds: 10 263 | readinessProbe: 264 | failureThreshold: 3 265 | httpGet: 266 | path: /healthz 267 | port: 10254 268 | scheme: HTTP 269 | periodSeconds: 10 270 | successThreshold: 1 271 | 
timeoutSeconds: 10 272 | lifecycle: 273 | preStop: 274 | exec: 275 | command: 276 | - /wait-shutdown 277 | 278 | --- 279 | 280 | apiVersion: v1 281 | kind: LimitRange 282 | metadata: 283 | name: ingress-nginx 284 | namespace: ingress-nginx 285 | labels: 286 | app.kubernetes.io/name: ingress-nginx 287 | app.kubernetes.io/part-of: ingress-nginx 288 | spec: 289 | limits: 290 | - min: 291 | memory: 90Mi 292 | cpu: 100m 293 | type: Container 294 | -------------------------------------------------------------------------------- /voidking/dev-jsonpath.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "JSONPath" 3 | toc: true 4 | date: 2020-03-18 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | - json 9 | categories: 10 | - [专业,运维,docker] 11 | --- 12 | 13 | # JSONPath简介 14 | > JSON (JavaScript Object Notation) allows for easy interchange of data, often between a program and a database. 15 | > JSONPath is a query language for JSON, similar to XPath for XML. 16 | 17 | 如上,json是一种常用的数据格式,jsonpath是json的查询语言,类似于XPath和SQL。 18 | 19 | 20 | 21 | # JSONPath语法 22 | 摘自 [json-path/JsonPath](https://github.com/json-path/JsonPath) 23 | ## 操作符 24 | 25 | | Operator | Description | 26 | | :------------------------ | :----------------------------------------------------------------- | 27 | | `$` | The root element to query. This starts all path expressions. | 28 | | `@` | The current node being processed by a filter predicate. | 29 | | `*` | Wildcard. Available anywhere a name or numeric are required. | 30 | | `..` | Deep scan. Available anywhere a name is required. | 31 | | `.` | Dot-notated child | 32 | | `['' (, '')]` | Bracket-notated child or children | 33 | | `[ (, )]` | Array index or indexes | 34 | | `[start:end]` | Array slice operator | 35 | | `[?()]` | Filter expression. Expression must evaluate to a boolean value. | 36 | 37 | ## 函数 38 | Functions can be invoked at the tail end of a path - the input to a function is the output of the path expression. 39 | The function output is dictated by the function itself. 40 | 41 | | Function | Description | Output | 42 | | :------------------------ | :------------------------------------------------------------------ |-----------| 43 | | min() | Provides the min value of an array of numbers | Double | 44 | | max() | Provides the max value of an array of numbers | Double | 45 | | avg() | Provides the average value of an array of numbers | Double | 46 | | stddev() | Provides the standard deviation value of an array of numbers | Double | 47 | | length() | Provides the length of an array | Integer | 48 | | sum() | Provides the sum value of an array of numbers | Double | 49 | 50 | ## 过滤器 51 | Filters are logical expressions used to filter arrays. A typical filter would be `[?(@.age > 18)]` where `@` represents the current item being processed. More complex filters can be created with logical operators `&&` and `||`. String literals must be enclosed by single or double quotes (`[?(@.color == 'blue')]` or `[?(@.color == "blue")]`). 
52 | 53 | | Operator | Description | 54 | | :----------------------- | :-------------------------------------------------------------------- | 55 | | == | left is equal to right (note that 1 is not equal to '1') | 56 | | != | left is not equal to right | 57 | | < | left is less than right | 58 | | <= | left is less or equal to right | 59 | | > | left is greater than right | 60 | | >= | left is greater than or equal to right | 61 | | =~ | left matches regular expression [?(@.name =~ /foo.*?/i)] | 62 | | in | left exists in right [?(@.size in ['S', 'M'])] | 63 | | nin | left does not exists in right | 64 | | subsetof | left is a subset of right [?(@.sizes subsetof ['S', 'M', 'L'])] | 65 | | anyof | left has an intersection with right [?(@.sizes anyof ['M', 'L'])] | 66 | | noneof | left has no intersection with right [?(@.sizes noneof ['M', 'L'])] | 67 | | size | size of left (array or string) should match right | 68 | | empty | left (array or string) should be empty | 69 | 70 | 71 | ## demo 72 | Given the json 73 | 74 | ```javascript 75 | { 76 | "store": { 77 | "book": [ 78 | { 79 | "category": "reference", 80 | "author": "Nigel Rees", 81 | "title": "Sayings of the Century", 82 | "price": 8.95 83 | }, 84 | { 85 | "category": "fiction", 86 | "author": "Evelyn Waugh", 87 | "title": "Sword of Honour", 88 | "price": 12.99 89 | }, 90 | { 91 | "category": "fiction", 92 | "author": "Herman Melville", 93 | "title": "Moby Dick", 94 | "isbn": "0-553-21311-3", 95 | "price": 8.99 96 | }, 97 | { 98 | "category": "fiction", 99 | "author": "J. R. R. Tolkien", 100 | "title": "The Lord of the Rings", 101 | "isbn": "0-395-19395-8", 102 | "price": 22.99 103 | } 104 | ], 105 | "bicycle": { 106 | "color": "red", 107 | "price": 19.95 108 | } 109 | }, 110 | "expensive": 10 111 | } 112 | ``` 113 | 114 | | JsonPath (click link to try)| Result | 115 | | :------- | :----- | 116 | | $.store.book[*].author| The authors of all books | 117 | | $..author | All authors | 118 | | $.store.* | All things, both books and bicycles | 119 | | $.store..price | The price of everything | 120 | | $..book[2] | The third book | 121 | | $..book[-2] | The second to last book | 122 | | $..book[0,1] | The first two books | 123 | | $..book[:2] | All books from index 0 (inclusive) until index 2 (exclusive) | 124 | | $..book[1:2] | All books from index 1 (inclusive) until index 2 (exclusive) | 125 | | $..book[-2:] | Last two books | 126 | | $..book[2:] | Book number two from tail | 127 | | $..book[?(@.isbn)] | All books with an ISBN number | 128 | | $.store.book[?(@.price < 10)] | All books in store cheaper than 10 | 129 | | $..book[?(@.price <= $['expensive'])] | All books in store that are not "expensive" | 130 | | $..book[?(@.author =~ /.*REES/i)] | All books matching regex (ignore case) | 131 | | $..* | Give me every thing 132 | | $..book.length() | The number of books | 133 | 134 | 135 | # 在线测试 136 | [JSONPath Online Evaluator](https://jsonpath.com/) 137 | 138 | # kubectl + JSONPath 139 | kubelet支持JSONPath,具体参考[JSONPath 支持](https://kubernetes.io/zh/docs/reference/kubectl/jsonpath/)。 140 | 除了标准jsonpath语法外,kubernetes jsonpath模板还额外支持以下语法: 141 | 142 | - 用""双引号来引用JSONPath表达式中的文本 143 | - 使用range和end来遍历集合 144 | - 使用负数来从尾部索引集合 145 | 146 | 查看node的cpu信息: 147 | ``` 148 | kubectl get nodes -o=jsonpath='{.items[*].metadata.name}' 149 | kubectl get nodes -o=jsonpath='{.items[*].status.capacity.cpu}' 150 | kubectl get nodes -o=jsonpath='{.items[*].metadata.name}{.items[*].status.capacity.cpu}' 151 | kubectl get nodes 
-o=jsonpath='{.items[*].metadata.name}{"\n"}{.items[*].status.capacity.cpu}' 152 | kubectl get nodes -o=jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.capacity.cpu}{end}' 153 | kubectl get nodes -o=custom-columns=NODE:.metadata.name,CPU:.status.capacity.cpu 154 | kubectl get nodes --sort-by=.metadata.name 155 | kubectl get nodes --sort-by=.status.capacity.cpu 156 | ``` 157 | 158 | # json2yaml 159 | json和yaml可以相互转化,这里推荐一个在线工具[JSON to YAML](https://www.json2yaml.com/)。 160 | 161 | 如果有python3环境,还可以使用python-json2yaml工具。 162 | ``` 163 | pip install PyYAML==5.1 164 | pip install python-json2yaml 165 | cat a.json | json2yaml > a.yaml 166 | cat a.yaml | yaml2json 167 | ``` 168 | 169 | 170 | 171 | 172 | -------------------------------------------------------------------------------- /mock/01-kodekloud-cka-mock.md: -------------------------------------------------------------------------------- 1 | # Lightning Labs 2 | ## Q1 3 | Upgrade the current version of kubernetes from 1.16 to 1.17.0 exactly using the kubeadm utility. Make sure that the upgrade is carried out one node at a time starting with the master node. To minimize downtime, the deployment gold-nginx should be rescheduled on an alternate node before upgrading each node. 4 | 5 | Upgrade master node first. Drain node01 before upgrading it. Pods for gold-nginx should run on the master node subsequently. 6 | 7 | ## Q2 8 | Print the names of all deployments in the admin2406 namespace in the following format: 9 | ``` 10 | DEPLOYMENT CONTAINER_IMAGE READY_REPLICAS NAMESPACE 11 | 12 | ``` 13 | . The data should be sorted by the increasing order of the deployment name. 14 | 15 | 16 | Example: 17 | DEPLOYMENT CONTAINER_IMAGE READY_REPLICAS NAMESPACE 18 | deploy0 nginx:alpine 1 admin2406 19 | Write the result to the file /opt/admin2406_data. 20 | 21 | Hint: Make use of -o custom-columns and --sort-by to print the data in the required format. 22 | 23 | ## Q3 24 | A kubeconfig file called admin.kubeconfig has been created in /root. There is something wrong with the configuration. Troubleshoot and fix it. 25 | 26 | ## Q4 27 | Create a new deployment called nginx-deploy, with image nginx:1.16 and 1 replica. Next upgrade the deployment to version 1.17 using rolling update. Make sure that the version upgrade is recorded in the resource annotation. 28 | 29 | ## Q5 30 | A new deployment called alpha-mysql has been deployed in the alpha namespace. However, the pods are not running. Troubleshoot and fix the issue. The deployment should make use of the persistent volume alpha-pv to be mounted at /var/lib/mysql and should use the environment variable MYSQL_ALLOW_EMPTY_PASSWORD=1 to make use of an empty root password. 31 | 32 | 33 | Important: Do not alter the persistent volume. 34 | 35 | ## Q6 36 | Take the backup of ETCD at the location /opt/etcd-backup.db on the master node 37 | 38 | ## Q7 39 | Create a pod called secret-1401 in the admin1401 namespace using the busybox image. The container within the pod should be called secret-admin and should sleep for 4800 seconds. 40 | 41 | The container should mount a read-only secret volume called secret-volume at the path /etc/secret-volume. The secret being mounted has already been created for you and is called dotfile-secret. 42 | 43 | # Mock Exam - 1 44 | ## Q1 45 | Deploy a pod named nginx-pod using the nginx:alpine image. 46 | 47 | ## Q2 48 | Deploy a messaging pod using the redis:alpine image with the labels set to tier=msg. 
49 | 50 | Pod Name: messaging 51 | Image: redis:alpine 52 | Labels: tier=msg 53 | 54 | ## Q3 55 | Create a namespace named apx-x9984574 56 | 57 | Namespace: apx-x9984574 58 | 59 | ## Q4 60 | Get the list of nodes in JSON format and store it in a file at /opt/outputs/nodes-z3444kd9.json 61 | 62 | ## Q5 63 | Create a service messaging-service to expose the messaging application within the cluster on port 6379. 64 | 65 | Use imperative commands 66 | 67 | Service: messaging-service 68 | Port: 6379 69 | Type: ClusterIp 70 | Use the right labels 71 | 72 | ## Q6 73 | Create a deployment named hr-web-app using the image kodekloud/webapp-color with 2 replicas 74 | 75 | Name: hr-web-app 76 | Image: kodekloud/webapp-color 77 | Replicas: 2 78 | 79 | ## Q7 80 | Create a static pod named static-busybox on the master node that uses the busybox image and the command sleep 1000 81 | 82 | Name: static-busybox 83 | Image: busybox 84 | 85 | ## Q8 86 | Create a POD in the finance namespace named temp-bus with the image redis:alpine. 87 | 88 | Name: temp-bus 89 | Image Name: redis:alpine 90 | 91 | ## Q9 92 | A new application orange is deployed. There is something wrong with it. Identify and fix the issue. 93 | 94 | ## Q10 95 | Expose the hr-web-app as service hr-web-app-service application on port 30082 on the nodes on the cluster 96 | 97 | 98 | The web application listens on port 8080 99 | 100 | Name: hr-web-app-service 101 | Type: NodePort 102 | Endpoints: 2 103 | Port: 8080 104 | NodePort: 30082 105 | 106 | ## Q11 107 | Use JSON PATH query to retrieve the osImages of all the nodes and store it in a file /opt/outputs/nodes_os_x43kj56.txt 108 | 109 | 110 | The osImages are under the nodeInfo section under status of each node. 111 | 112 | ## Q12 113 | Create a Persistent Volume with the given specification. 114 | 115 | Volume Name: pv-analytics 116 | Storage: 100Mi 117 | Access modes: ReadWriteMany 118 | Host Path: /pv/data-analytics 119 | 120 | # Mock Exam - 2 121 | ## Q1 122 | Take a backup of the etcd cluster and save it to /tmp/etcd-backup.db 123 | 124 | ## Q2 125 | Create a Pod called redis-storage with image: redis:alpine with a Volume of type emptyDir that lasts for the life of the Pod. Specs on the right. 126 | 127 | Pod named 'redis-storage' created 128 | Pod 'redis-storage' uses Volume type of emptyDir 129 | Pod 'redis-storage' uses volumeMount with mountPath = /data/redis 130 | 131 | ## Q3 132 | Create a new pod called super-user-pod with image busybox:1.28. Allow the pod to be able to set system_time 133 | 134 | 135 | The container should sleep for 4800 seconds 136 | 137 | Pod: super-user-pod 138 | Container Image: busybox:1.28 139 | SYS_TIME capabilities for the conatiner? 140 | 141 | ## Q4 142 | A pod definition file is created at /root/use-pv.yaml. Make use of this manifest file and mount the persistent volume called pv-1. Ensure the pod is running and the PV is bound. 143 | 144 | mountPath: /data persistentVolumeClaim Name: my-pvc 145 | 146 | persistentVolume Claim configured correctly 147 | pod using the correct mountPath 148 | pod using the persistent volume claim? 149 | 150 | ## Q5 151 | Create a new deployment called nginx-deploy, with image nginx:1.16 and 1 replica. Record the version. Next upgrade the deployment to version 1.17 using rolling update. Make sure that the version upgrade is recorded in the resource annotation. 152 | 153 | Deployment : nginx-deploy. 
Image: nginx:1.16 154 | Image: nginx:1.16 155 | Task: Upgrade the version of the deployment to 1:17 156 | Task: Record the changes for the image upgrade 157 | 158 | ## Q6 159 | Create a new user called john. Grant him access to the cluster. John should have permission to create, list, get, update and delete pods in the development namespace . The private key exists in the location: /root/john.key and csr at /root/john.csr 160 | 161 | CSR: john-developer Status:Approved 162 | Role Name: developer, namespace: development, Resource: Pods 163 | Access: User 'john' has appropriate permissions 164 | 165 | ## Q7 166 | Create an nginx pod called nginx-resolver using image nginx, expose it internally with a service called nginx-resolver-service. Test that you are able to look up the service and pod names from within the cluster. Use the image: busybox:1.28 for dns lookup. Record results in /root/nginx.svc and /root/nginx.pod 167 | 168 | Pod: nginx-resolver created 169 | Service DNS Resolution recorded correctly 170 | Pod DNS resolution recorded correctly 171 | 172 | ## Q8 173 | Create a static pod on node01 called nginx-critical with image nginx. Create this pod on node01 and make sure that it is recreated/restarted automatically in case of a failure. 174 | 175 | Use /etc/kubernetes/manifests as the Static Pod path for example. 176 | 177 | Kubelet Configured for Static Pods 178 | Pod nginx-critical-node01 is Up and running 179 | 180 | # Mock Exam - 3 181 | ## Q1 182 | Create a new service account with the name pvviewer. Grant this Service account access to list all PersistentVolumes in the cluster by creating an appropriate cluster role called pvviewer-role and ClusterRoleBinding called pvviewer-role-binding. 183 | Next, create a pod called pvviewer with the image: redis and serviceAccount: pvviewer in the default namespace 184 | 185 | ServiceAccount: pvviewer 186 | ClusterRole: pvviewer-role 187 | ClusterRoleBinding: pvviewer-role-binding 188 | Pod: pvviewer 189 | Pod configured to use ServiceAccount pvviewer ? 190 | 191 | ## Q2 192 | List the InternalIP of all nodes of the cluster. Save the result to a file /root/node_ips 193 | 194 | Answer should be in the format: InternalIP of masterInternalIP of node1InternalIP of node2InternalIP of node3 (in a single line) 195 | 196 | ## Q3 197 | Create a pod called multi-pod with two containers. 198 | Container 1, name: alpha, image: nginx 199 | Container 2: beta, image: busybox, command sleep 4800. 200 | 201 | Environment Variables: 202 | container 1: 203 | name: alpha 204 | 205 | Container 2: 206 | name: beta 207 | 208 | Pod Name: multi-pod 209 | Container 1: alpha 210 | Container 2: beta 211 | Container beta commands set correctly? 212 | Container 1 Environment Value Set 213 | Container 2 Environment Value Set 214 | 215 | ## Q4 216 | Create a Pod called non-root-pod , image: redis:alpine 217 | runAsUser: 1000 218 | fsGroup: 2000 219 | 220 | Pod `non-root-pod` fsGroup configured 221 | Pod `non-root-pod` runAsUser configured 222 | 223 | ## Q5 224 | We have deployed a new pod called np-test-1 and a service called np-test-service. Incoming connections to this service are not working. Troubleshoot and fix it. 225 | Create NetworkPolicy, by the name ingress-to-nptest that allows incoming connections to the service over port 80 226 | 227 | Important: Don't delete any current objects deployed. 228 | 229 | Important: Don't Alter Existing Objects! 230 | NetworkPolicy: Applied to All sources (Incoming traffic from all pods)? 231 | NetWorkPolicy: Correct Port? 
232 | NetWorkPolicy: Applied to correct Pod? 233 | 234 | ## Q6 235 | Taint the worker node node01 to be Unschedulable. Once done, create a pod called dev-redis, image redis:alpine to ensure workloads are not scheduled to this worker node. Finally, create a new pod called prod-redis and image redis:alpine with toleration to be scheduled on node01. 236 | 237 | key:env_type, value:production and operator:NoSchedule 238 | 239 | Key = env_type 240 | Value = production 241 | Effect = NoSchedule 242 | pod 'dev-redis' (no tolerations) is not scheduled on node01? 243 | Create a pod 'prod-redis' to run on node01 244 | 245 | ## Q7 246 | Create a pod called hr-pod in hr namespace belonging to the production environment and frontend tier . 247 | image: redis:alpine 248 | 249 | Use appropriate labels and create all the required objects if it does not exist in the system already. 250 | 251 | hr-pod labeled with environment production? 252 | hr-pod labeled with frontend tier? 253 | 254 | ## Q8 255 | A kubeconfig file called super.kubeconfig has been created in /root. There is something wrong with the configuration. Troubleshoot and fix it. 256 | 257 | Fix /root/super.kubeconfig 258 | 259 | ## Q9 260 | We have created a new deployment called nginx-deploy. scale the deployment to 3 replicas. Has the replica's increased? Troubleshoot the issue and fix it. 261 | -------------------------------------------------------------------------------- /mock/02-cka-exam.md: -------------------------------------------------------------------------------- 1 | 建议:在准备CKA、以及考试前都系统做下这些考题 2 | 据说,CKA80%的考题都是相似的 3 | 考试中,英文考题是可以切换为中文显示,但可能有差异,建议对比着看 4 | 5 | 1. 考试说明 6 | 7 | 第一题 8 | 9 | 2. Set configuration context $ kubectl config use-context k8s 10 | 11 | Monitor the logs of Pod foobar and 12 | Extract log lines corresponding to error file-not-found 13 | Write them to /opt/KULM00201/foobar 14 | 15 | Question weight 5% 16 | 17 | 第二题 18 | 19 | 3. Set configuration context $ kubectl config use-context k8s 20 | 21 | List all PVs sorted by name saving the full kubectl output to /opt/KUCC0010/my_volumes . Use kubectl’s own functionally for sorting the output, and do not manipulate it any further. 22 | 23 | Question weight 3% 24 | 25 | 第三题 26 | 27 | 4. Set configuration context $ kubectl config use-context k8s 28 | 29 | Ensure a single instance of Pod nginx is running on each node of the kubernetes cluster where nginx also represents the image name which has to be used. Do no override any taints currently in place. 30 | 31 | Use Daemonsets to complete this task and use ds.kusc00201 as Daemonset name. Question weight 3% 32 | 33 | 第四题 34 | 35 | 5. Set configuration context $ kubectl config use-context k8s 36 | 37 | Perform the following tasks 38 | 39 | Add an init container to lumpy–koala (Which has been defined in spec file /opt/kucc00100/pod-spec-KUCC00100.yaml) 40 | The init container should create an empty file named /workdir/calm.txt 41 | If /workdir/calm.txt is not detected, the Pod should exit 42 | Once the spec file has been updated with the init container definition, the Pod should be created. 43 | 44 | Question weight 7% 45 | 46 | 第五题 47 | 48 | 6. Set configuration context $ kubectl config use-context k8s 49 | 50 | Create a pod named kucc4 with a single container for each of the following images running inside (there may be between 1 and 4 images specified): nginx + redis + memcached + consul 51 | 52 | Question weight: 4% 53 | 54 | 第六题 55 | 56 | 7. 
Set configuration context $ kubectl config use-context k8s 57 | 58 | Schedule a Pod as follows: 59 | 60 | Name: nginx-kusc00101 61 | Image: nginx 62 | Node selector: disk=ssd 63 | 64 | Question weight: 2% 65 | 66 | 第七题 67 | 68 | 8. Set configuration context $ kubectl config use-context k8s 69 | 70 | Create a deployment as follows 71 | 72 | Name: nginx-app 73 | Using container nginx with version 1.10.2-alpine 74 | The deployment should contain 3 replicas 75 | 76 | Next, deploy the app with new version 1.13.0-alpine by performing a rolling update and record that update. 77 | 78 | Finally, rollback that update to the previous version 1.10.2-alpine 79 | 80 | Question weight: 4% 81 | 82 | 第八题 83 | 84 | 9. Set configuration context $ kubectl config use-context k8s 85 | 86 | Create and configure the service front-end-service so it’s accessible through NodePort and routes to the existing pod named front-end 87 | 88 | Question weight: 4% 89 | 90 | 第九题 91 | 92 | 10. Set configuration context $ kubectl config use-context k8s 93 | 94 | Create a Pod as follows: 95 | 96 | Name: jenkins 97 | Using image: jenkins 98 | In a new Kubenetes namespace named website-frontend 99 | Question weight 3% 100 | 101 | 第十题 102 | 103 | 11. Set configuration context $ kubectl config use-context k8s 104 | 105 | Create a deployment spec file that will: 106 | 107 | Launch 7 replicas of the redis image with the label: app_env_stage=dev 108 | Deployment name: kual00201 109 | 110 | Save a copy of this spec file to /opt/KUAL00201/deploy_spec.yaml (or .json) 111 | 112 | When you are done, clean up (delete) any new k8s API objects that you produced during this task 113 | 114 | Question weight: 3% 115 | 116 | 第十一题 117 | 118 | 12. Set configuration context $ kubectl config use-context k8s 119 | 120 | Create a file /opt/KUCC00302/kucc00302.txt that lists all pods that implement Service foo in Namespace production. 121 | 122 | The format of the file should be one pod name per line. 123 | 124 | Question weight: 3% 125 | 126 | 第十二题 127 | 128 | 13. Set configuration context $ kubectl config use-context k8s 129 | 130 | Create a Kubernetes Secret as follows: 131 | 132 | Name: super-secret 133 | Credential: alice or username:bob 134 | 135 | Create a Pod named pod-secrets-via-file using the redis image which mounts a secret named super-secret at /secrets 136 | 137 | Create a second Pod named pod-secrets-via-env using the redis image, which exports credential as TOPSECRET 138 | 139 | Question weight: 9% 140 | 141 | 第十三题 142 | 143 | 14. Set configuration context $ kubectl config use-context k8s 144 | 145 | Create a pad as follows: 146 | 147 | Name: non-persistent-redis 148 | Container image: redis 149 | Named-volume with name: cache-control 150 | Mount path: /data/redis 151 | 152 | It should launch in the pre-prod namespace and the volume MUST NOT be persistent. 153 | 154 | Question weight: 4% 155 | 156 | 第十四题 157 | 158 | 15. Set configuration context $ kubectl config use-context k8s 159 | 160 | Scale the deployment webserver to 6 pods 161 | 162 | Question weight: 1% 163 | 164 | 第十五题 165 | 166 | 16. Set configuration context $ kubectl config use-context k8s 167 | 168 | Check to see how many nodes are ready (not including nodes tainted NoSchedule) and write the number to /opt/nodenum 169 | 170 | Question weight: 2% 171 | 172 | 第十六题 173 | 174 | 17. 
Set configuration context $ kubectl config use-context k8s 175 | 176 | From the Pod label name=cpu-utilizer, find pods running high CPU workloads and write the name of the Pod consuming most CPU to the file /opt/cpu.txt (which already exists) 177 | 178 | Question weight: 2% 179 | 180 | 第十七题 181 | 182 | 18. Set configuration context $ kubectl config use-context k8s 183 | 184 | Create a deployment as follows 185 | 186 | Name: nginx-dns 187 | Exposed via a service: nginx-dns 188 | Ensure that the service & pod are accessible via their respective DNS records 189 | The container(s) within any Pod(s) running as a part of this deployment should use the nginx image 190 | 191 | Next, use the utility nslookup to look up the DNS records of the service & pod and write the output to /opt/service.dns and /opt/pod.dns respectively. 192 | 193 | Ensure you use the busybox:1.28 image(or earlier) for any testing, an the latest release has an unpstream bug which impacts thd use of nslookup. 194 | 195 | Question weight: 7% 196 | 197 | 第十八题 198 | 199 | 19. No configuration context change required for this item 200 | 201 | Create a snapshot of the etcd instance running at https://127.0.0.1:2379 saving the snapshot to the file path /data/backup/etcd-snapshot.db 202 | 203 | The etcd instance is running etcd version 3.1.10 204 | 205 | The following TLS certificates/key are supplied for connecting to the server with etcdctl 206 | 207 | CA certificate: /opt/KUCM00302/ca.crt 208 | Client certificate: /opt/KUCM00302/etcd-client.crt 209 | Clientkey:/opt/KUCM00302/etcd-client.key 210 | 211 | Question weight: 7% 212 | 213 | 第十九题 214 | 215 | 20. Set configuration context $ kubectl config use-context ek8s 216 | 217 | Set the node labelled with name=ek8s-node-1 as unavailable and reschedule all the pods running on it. 218 | 219 | Question weight: 4% 220 | 221 | 第二十题 222 | 223 | 21. Set configuration context $ kubectl config use-context wk8s 224 | 225 | A Kubernetes worker node, labelled with name=wk8s-node-0 is in state NotReady . Investigate why this is the case, and perform any appropriate steps to bring the node to a Ready state, ensuring that any changes are made permanent. 226 | 227 | Hints: 228 | 229 | You can ssh to the failed node using $ ssh wk8s-node-0 230 | You can assume elevated privileges on the node with the following command $ sudo -i Question weight: 4% 231 | 232 | 第二十一题 233 | 234 | 22. Set configuration context $ kubectl config use-context wk8s 235 | 236 | Configure the kubelet systemd managed service, on the node labelled with name=wk8s-node-1, to launch a Pod containing a single container of image nginx named myservice automatically. Any spec files required should be placed in the /etc/kubernetes/manifests directory on the node. 237 | 238 | Hints: 239 | 240 | You can ssh to the failed node using $ ssh wk8s-node-1 241 | You can assume elevated privileges on the node with the following command $ sudo -i Question weight: 4% 242 | 243 | 第二十二题 244 | 245 | 23. Set configuration context $ kubectl config use-context ik8s 246 | 247 | In this task, you will configure a new Node, ik8s-node-0, to join a Kubernetes cluster as follows: 248 | 249 | Configure kubelet for automatic certificate rotation and ensure that both server and client CSRs are automatically approved and signed as appropnate via the use of RBAC. 
250 | Ensure that the appropriate cluster-info ConfigMap is created and configured appropriately in the correct namespace so that future Nodes can easily join the cluster 251 | Your bootstrap kubeconfig should be created on the new Node at /etc/kubernetes/bootstrap-kubelet.conf (do not remove this file once your Node has successfully joined the cluster) 252 | The appropriate cluster-wide CA certificate is located on the Node at /etc/kubernetes/pki/ca.crt . You should ensure that any automatically issued certificates are installed to the node at /var/lib/kubelet/pki and that the kubeconfig file for kubelet will be rendered at /etc/kubernetes/kubelet.conf upon successful bootstrapping 253 | Use an additional group for bootstrapping Nodes attempting to join the cluster which should be called system:bootstrappers:cka:default-node-token 254 | Solution should start automatically on boot, with the systemd service unit file for kubelet available at /etc/systemd/system/kubelet.service 255 | 256 | To test your solution, create the appropriate resources from the spec file located at /opt/…./kube-flannel.yaml This will create the necessary supporting resources as well as the kube-flannel -ds DaemonSet . You should ensure that this DaemonSet is correctly deployed to the single node in the cluster. 257 | 258 | Hints: 259 | 260 | kubelet is not configured or running on ik8s-master-0 for this task, and you should not attempt to configure it. 261 | You will make use of TLS bootstrapping to complete this task. 262 | You can obtain the IP address of the Kubernetes API server via the following command $ ssh ik8s-node-0 getent hosts ik8s-master-0 263 | The API server is listening on the usual port, 6443/tcp, and will only server TLS requests 264 | The kubelet binary is already installed on ik8s-node-0 at /usr/bin/kubelet . You will not need to deploy kube-proxy to the cluster during this task. 265 | You can ssh to the new worker node using $ ssh ik8s-node-0 266 | You can ssh to the master node with the following command $ ssh ik8s-master-0 267 | No further configuration of control plane services running on ik8s-master-0 is required 268 | You can assume elevated privileges on both nodes with the following command $ sudo -i 269 | Docker is already installed and running on ik8s-node-0 270 | 271 | Question weight: 8% 272 | 273 | 第二十三题 274 | 275 | 24. Set configuration context $ kubectl config use-context bk8s 276 | 277 | Given a partially-functioning Kubenetes cluster, identify symptoms of failure on the cluster. Determine the node, the failing service and take actions to bring up the failed service and restore the health of the cluster. Ensure that any changes are made permanently. 278 | 279 | The worker node in this cluster is labelled with name=bk8s-node-0 Hints: 280 | 281 | You can ssh to the relevant nodes using $ ssh $(NODE) where $(NODE) is one of bk8s-master-0 or bk8s-node-0 282 | You can assume elevated privileges on any node in the cluster with the following command$ sudo -i 283 | 284 | Question weight: 4% 285 | 286 | 第二十四题 287 | 288 | 25. Set configuration context $ kubectl config use-context hk8s 289 | 290 | Create a persistent volume with name app-config of capacity 1Gi and access mode ReadWriteOnce. 
The type of volume is hostPath and its location is /srv/app-config 291 | 292 | Question weight: 3% -------------------------------------------------------------------------------- /mock/11-kodekloud-ckad-mock.md: -------------------------------------------------------------------------------- 1 | # Lightning Lab - 1 2 | ## Q1 3 | Create a Persistent Volume called log-volume. It should make use of a storage class name manual. It should use RWX as the access mode and have a size of 1Gi. The volume should use the hostPath /opt/volume/nginx 4 | 5 | Next, create a PVC called log-claim requesting a minimum of 200Mi of storage. This PVC should bind to log-volume. 6 | 7 | Mount this in a pod called logger at the location /var/www/nginx. This pod should use the image nginx:alpine. 8 | 9 | Tips: 10 | log-volume created with correct parameters? 11 | 12 | ## Q2 13 | We have deployed a new pod called secure-pod and a service called secure-service. Incoming or Outgoing connections to this pod are not working. 14 | Troubleshoot why this is happening. 15 | 16 | Make sure that incoming connection from the pod webapp-color are successful. 17 | 18 | 19 | Important: Don't delete any current objects deployed. 20 | 21 | Tips: 22 | Important: Don't Alter Existing Objects! 23 | Connectivity working? 24 | 25 | ## Q3 26 | Create a pod called time-check in the dvl1987 namespace. This pod should run a container called time-check that uses the busybox image. 27 | 1. Create a config map called time-config with the data TIME_FREQ=10 in the same namespace. 28 | 2. The time-check container should run the command: while true; do date; sleep $TIME_FREQ;done and write the result to the location /opt/time/time-check.log. 29 | 3. The path /opt/time on the pod should mount a volume that lasts the lifetime of this pod. 30 | 31 | Tips: 32 | Pod `time-check` configured correctly? 33 | 34 | ## Q4 35 | Create a new deployment called nginx-deploy, with one signle container called nginx, image nginx:1.16 and 4 replicas. The deployment should use RollingUpdate strategy with maxSurge=1, and maxUnavailable=2. 36 | Next upgrade the deployment to version 1.17 using rolling update. 37 | Finally, once all pods are updated, undo the update and go back to the previous version. 38 | 39 | Tips: 40 | Deployment created correctly? 41 | Was the deployment created with nginx:1.16? 42 | Was it upgraded to 1.17? 43 | Deployment rolled back to 1.16? 44 | 45 | ## Q5 46 | Create a redis deployment with the following parameters: 47 | Name of the deployment should be redis using the redis:alpine image. It should have exactly 1 replica. 48 | The container should request for .2 CPU. It should use the label app=redis. 49 | It should mount exactly 2 volumes: 50 | Make sure that the pod is scheduled on master node. 51 | 52 | 53 | a. An Empty directory volume called data at path /redis-master-data. 54 | b. A configmap volume called redis-config at path /redis-master. 55 | c.The container should expose the port 6379. 56 | 57 | 58 | The configmap has already been created. 59 | 60 | Tips: 61 | Deployment created correctly? 62 | 63 | # Lightning Lab - 2 64 | ## Q1 65 | We have deployed a few pods in this cluster in various namespaces. Inspect them and identify the pod which is not in a Ready state. Troubleshoot and fix the issue. 66 | 67 | Next, add a check to restart the container on the same pod if the command ls /var/www/html/file_check fails. This check should start after a delay of 10 seconds and run every 60 seconds. 
68 | 69 | 70 | You may delete and recreate the object. Ignore the warnings from the probe. 71 | 72 | Tips: 73 | Task completed correctly? 74 | 75 | ## Q2 76 | Create a cronjob called dice that runs every one minute. Use the Pod template located at /root/throw-a-dice. The image throw-dice randomly returns a value between 1 and 6. The result of 6 is considered success and all others are failure. 77 | The job should be non-parallel and complete the task once. Use a backoffLimit of 25. 78 | If the task is not completed within 20 seconds the job should fail and pods should be terminated. 79 | 80 | 81 | You don't have to wait for the job completion. As long as the cronjob has been created as per the requirements. 82 | 83 | Tips: 84 | Cronjob created correctly? 85 | 86 | ## Q3 87 | Create a pod called my-busybox in the dev2406 namespace using the busybox image. The container should be called secret and should sleep for 3600 seconds. 88 | 89 | The container should mount a read-only secret volume called secret-volume at the path /etc/secret-volume. The secret being mounted has already been created for you and is called dotfile-secret. 90 | 91 | Make sure that the pod is scheduled on master and no other node in the cluster. 92 | 93 | Tips: 94 | Pod created correctly? 95 | 96 | ## Q4 97 | Create a single ingress resource called ingress-vh-routing. The resource should route HTTP traffic to multiple hostnames as specified below: 98 | 99 | The service video-service should be accessible on http://watch.ecom-store.com:30093/video 100 | 101 | The service apparels-service should be accessible on http://apparels.ecom-store.com:30093/wear 102 | 103 | 104 | Here 30093 is the port used by the Ingress Controller 105 | 106 | Tips: 107 | Ingress resource configured correctly? 108 | 109 | ## Q5 110 | A pod called dev-pod-dind-878516 has been deployed in the default namespace. Inspect the logs for the container called log-x and redirect the warnings to /opt/dind-878516_logs.txt on the master node 111 | 112 | Tips: 113 | Redirect warnings to file 114 | 115 | 116 | 117 | # Mock Exam - 1 118 | ## Q1 119 | Deploy a pod named nginx-448839 using the nginx:alpine image. 120 | 121 | 122 | Once done, click on the Next Question button in the top right corner of this panel. You may navigate back and forth freely between all questions. Once done with all questions, click on End Exam. Your work will be validated at the end and score shown. Good Luck! 123 | 124 | Tips: 125 | Name: nginx-448839 126 | Image: nginx:alpine 127 | 128 | ## Q2 129 | Create a namespace named apx-z993845 130 | 131 | Tips: 132 | Namespace: apx-z993845 133 | 134 | ## Q3 135 | Create a new Deployment named httpd-frontend with 3 replicas using image httpd:2.4-alpine 136 | 137 | Tips: 138 | Name: httpd-frontend 139 | Replicas: 3 140 | Image: httpd:2.4-alpine 141 | 142 | ## Q4 143 | Deploy a messaging pod using the redis:alpine image with the labels set to tier=msg. 144 | 145 | Tips: 146 | Pod Name: messaging 147 | Image: redis:alpine 148 | Labels: tier=msg 149 | 150 | ## Q5 151 | A replicaset rs-d33393 is created. However the pods are not coming up. Identify and fix the issue. 152 | 153 | Once fixed, ensure the ReplicaSet has 4 Ready replicas. 154 | 155 | Tips: 156 | Replicas: 4 157 | 158 | ## Q6 159 | Create a service messaging-service to expose the redis deployment in the marketing namespace within the cluster on port 6379. 
160 | 161 | Use imperative commands 162 | 163 | Tips: 164 | Service: messaging-service 165 | Port: 6379 166 | Use the right type of Service 167 | Use the right labels 168 | 169 | ## Q7 170 | Update the environment variable on the pod webapp-color to use a green background 171 | 172 | Tips: 173 | Pod Name: webapp-color 174 | Label Name: webapp-color 175 | Env: APP_COLOR=green 176 | 177 | ## Q8 178 | Create a new ConfigMap named cm-3392845. Use the spec given on the right. 179 | 180 | Tips: 181 | ConfigName Name: cm-3392845 182 | Data: DB_NAME=SQL3322 183 | Data: DB_HOST=sql322.mycompany.com 184 | Data: DB_PORT=3306 185 | 186 | ## Q9 187 | Create a new Secret named db-secret-xxdf with the data given(on the right). 188 | 189 | Tips: 190 | Secret Name: db-secret-xxdf 191 | Secret 1: DB_Host=sql01 192 | Secret 2: DB_User=root 193 | Secret 3: DB_Password=password123 194 | 195 | ## Q10 196 | Update pod app-sec-kff3345 to run as Root user and with the SYS_TIME capability. 197 | 198 | Tips: 199 | Pod Name: app-sec-kff3345 200 | Image Name: ubuntu 201 | SecurityContext: Capability SYS_TIME 202 | 203 | ## Q11 204 | Export the logs of the e-com-1123 pod to the file /opt/outputs/e-com-1123.logs 205 | 206 | It is in a different namespace. Identify the namespace first. 207 | 208 | Tips: 209 | Task Completed 210 | 211 | ## Q12 212 | Create a Persistent Volume with the given specification. 213 | 214 | Tips: 215 | Volume Name: pv-analytics 216 | Storage: 100Mi 217 | Access modes: ReadWriteMany 218 | Host Path: /pv/data-analytics 219 | 220 | ## Q13 221 | Create a redis deployment using the image redis:alpine with 1 replica and label app=redis. Expose it via a ClusterIP service called redis on port 6379. Create a new Ingress Type NetworkPolicy called redis-access which allows only the pods with label access=redis to access the deployment. 222 | 223 | Tips: 224 | Image: redis:alpine 225 | Deployment created correctly? 226 | Service created correctly? 227 | Network Policy allows the correct pods? 228 | Network Policy applied on the correct pods? 229 | 230 | ## Q14 231 | Create a Pod called sega with two containers: 232 | 233 | Container 1: Name tails with image busybox and command: sleep 3600. 234 | Container 2: Name sonic with image nginx and Environment variable: NGINX_PORT with the value 8080. 235 | 236 | Tips: 237 | Container Sonic has the correct ENV name 238 | Container Sonic has the correct ENV value 239 | Container tails created correctly? 240 | 241 | # Mock Exam - 2 242 | ## Q1 243 | Create a deployment called my-webapp with image: nginx, label tier:frontend and 2 replicas. Expose the deployment as a NodePort service with name front-end-service , port: 80 and NodePort: 30083 244 | 245 | Tips: 246 | Deployment my-webapp created? 247 | image: nginx 248 | Replicas = 2 ? 249 | service front-end-service created? 250 | service Type created correctly? 251 | Correct node Port used? 252 | 253 | ## Q2 254 | Add a taint to the node node01 of the cluster. Use the specification below: 255 | 256 | key:app_type, value:alpha and effect:NoSchedule 257 | Create a pod called alpha, image:redis with toleration to node01 258 | 259 | Tips: 260 | node01 with the correct taint? 261 | Pod alpha has the correct toleration? 262 | 263 | ## Q3 264 | Apply a label app_type=beta to node node02. Create a new deployment called beta-apps with image:nginx and replicas:3. 
Set Node Affinity to the deployment to place the PODs on node02 only 265 | 266 | NodeAffinity: requiredDuringSchedulingIgnoredDuringExecution 267 | 268 | Tips: 269 | node02 has the correct labels? 270 | Deployment beta-apps: NodeAffinity set to requiredDuringSchedulingIgnoredDuringExecution ? 271 | Deployment beta-apps has correct Key for NodeAffinity? 272 | Deployment beta-apps has correct Value for NodeAffinity? 273 | Deployment beta-apps has pods running only on node02? 274 | Deployment beta-apps has 3 pods running? 275 | 276 | ## Q4 277 | Create a new Ingress Resource for the service: my-video-service to be made available at the URL: http://ckad-mock-exam-solution.com:30093/video. 278 | 279 | Create an ingress resource with host: ckad-mock-exam-solution.com 280 | path:/video 281 | Once set up, curl test of the URL from the nodes should be successful / HTTP 200 282 | 283 | Tips: 284 | Ingress resource configured correct and accessible via http://ckad-mock-exam-solution.com:30093/video 285 | 286 | ## Q5 287 | We have deployed a new pod called pod-with-rprobe. This Pod has an initial delay before it is Ready. Update the newly created pod pod-with-rprobe with a readinessProbe using the given spec 288 | 289 | httpGet path: /ready 290 | httpGet port: 8080 291 | 292 | Tips: 293 | readinessProbe with the correct httpGet path? 294 | readinessProbe with the correct httpGet port? 295 | 296 | ## Q6 297 | Create a new pod called nginx1401 in the default namespace with the image nginx. Add a livenessProbe to the container to restart it if the command ls /var/www/html/probe fails. This check should start after a delay of 10 seconds and run every 60 seconds. 298 | 299 | You may delete and recreate the object. Ignore the warnings from the probe. 300 | 301 | Tips: 302 | Pod created correctly with the livenessProbe? 303 | 304 | ## Q7 305 | Create a job called whalesay with image docker/whalesay and command "cowsay I am going to ace CKAD!". 306 | completions: 10 307 | backoffLimit: 6 308 | restartPolicy: Never 309 | 310 | This simple job runs the popular cowsay game that was modifed by docker… 311 | 312 | Tips: 313 | Job "whalsay" uses correct image? 314 | Job "whalesay" configured with completions = 10? 315 | Job "whalesay" with backoffLimit = 6 316 | Job run's the command "cowsay I am going to ace CKAD!"? 317 | Job "whalesay" completed successfully? 318 | 319 | ## Q8 320 | Create a pod called multi-pod with two containers. 321 | Container 1: name: jupiter, image: nginx 322 | Container 2: europa, image: busybox 323 | command: sleep 4800 324 | 325 | Environment Variables: Container 1: type: planet 326 | 327 | Container 2: type: moon 328 | 329 | Tips: 330 | Pod Name: multi-pod 331 | Container 1: jupiter 332 | Container 2: europa 333 | Container europa commands set correctly? 334 | Container 1 Environment Value Set 335 | Container 2 Environment Value Set 336 | 337 | ## Q9 338 | Create a PersistentVolume called custom-volume with size: 50MiB reclaim policy:retain, Access Modes: ReadWriteMany and hostPath: /opt/data 339 | 340 | Tips: 341 | PV custom-volume created? 342 | custom-volume uses the correct access mode? 343 | PV custom-volume has the correct storage capacity? 344 | PV custom-volume has the correct host path? 
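A minimal manifest sketch for Q9 (added here for reference; it is not part of the original mock and only restates the parameters listed above):

```
apiVersion: v1
kind: PersistentVolume
metadata:
  name: custom-volume
spec:
  capacity:
    storage: 50Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /opt/data
```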
-------------------------------------------------------------------------------- /voidking/dev-kubectl.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "kubectl常用命令" 3 | toc: true 4 | date: 2019-09-15 20:00:00 5 | tags: 6 | - docker 7 | - k8s 8 | categories: 9 | - [专业,运维,k8s] 10 | --- 11 | # kubectl简介 12 | 13 | > Kubectl is a command line interface for running commands against Kubernetes clusters. 14 | 15 | 没错,kubectl是一个命令行工具,用来控制K8S集群。kubectl该怎么读?可以参考[HowToPronounce-kubectl](http://www.howtopronounce.cc/kubectl),小编喜欢读作kubecontrol。 16 | 17 | kubectl命令格式为: 18 | ``` 19 | kubectl [command] [TYPE] [NAME] [flags] 20 | ``` 21 | 22 | 更多内容,参考[Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/)。 23 | 24 | [《K8S入门篇》](https://www.voidking.com/dev-k8s-start/)一文中,已经学习了kubectl的安装方法,并且使用了一些简单命令。本文整理一下kubectl的常用命令,方便记忆和复习。 25 | 26 | 27 | 28 | 29 | # 环境准备 30 | ## 指定配置文件 31 | ``` 32 | # 指定默认配置文件 33 | export KUBECONFIG=~/.kube/config 34 | 35 | # 查看kubectl配置 36 | kubectl config view 37 | 38 | # 指定配置文件和context 39 | kubectl config --kubeconfig=/root/vk-kube-config use-context voidking@kubenertes 40 | 41 | # 指定单次运行配置文件 42 | kubectl get deployments --kubeconfig=/root/.kube/config 43 | kubectl get deployments --kubeconfig /root/.kube/config 44 | 45 | # 指定默认namespace 46 | kubectl config set-context --current --namespace=voidking 47 | kubectl config set-context $(kubectl config current-context) --namespace=voidking 48 | ``` 49 | 50 | ## 命令自动补全 51 | ``` 52 | yum install -y bash-completion 53 | source /usr/share/bash-completion/bash_completion 54 | source <(kubectl completion bash) 55 | echo "source /usr/share/bash-completion/bash_completion" >> ~/.bashrc 56 | echo "source <(kubectl completion bash)" >> ~/.bashrc 57 | ``` 58 | 59 | ## 使用别名缩写 60 | ``` 61 | # kubectl缩写为k 62 | # alias k=kubectl 63 | alias k="/usr/local/bin/kubectl" 64 | complete -F __start_kubectl k 65 | 66 | # 可选alias 67 | alias kg="kubectl get" 68 | alisa kd="kubectl describe" 69 | ``` 70 | 建议把配置写入 .bashrc ,登录后别名自动生效。 71 | 72 | ## 查看集群信息 73 | ``` 74 | # 查看集群信息 75 | kubectl cluster-info 76 | kubectl cluster-info dump 77 | 78 | # 查看集群状态 79 | kubectl get cs 80 | 81 | # 查看node资源使用 82 | kubectl top node 83 | 84 | # 查看集群事件 85 | kubectl get ev 86 | ``` 87 | 88 | # 查看帮助 89 | ## 查看资源缩写 90 | ``` 91 | kubectl describe 92 | kubectl api-resources 93 | ``` 94 | 建议记住常用资源的SHORTNAMES,可以提升输入效率。 95 | 此外,记住常用资源的APIGROUP,可以提高编写yaml文件时的效率。 96 | 97 | ## 查看可用api版本 98 | `kubectl api-versions` 99 | 100 | ## yaml帮助 101 | yaml文件分成四部分,apiVersion、kind、metadata和spec。 102 | apiVersion和kind是关联的,参考`kubectl api-resources`。 103 | metadata必填name、namespace、labels。 104 | pod.spec主要填containers的name和image;deployment.spec主要填replicas、template和selector;service.spec主要填selector、ports和type。 105 | 106 | 编写yaml文件的过程中,如果忘记了某些结构和字段,可以使用kubectl explain命令来获取帮助。 107 | 108 | 1、查看资源包含哪些字段 109 | 以查看deployment的yaml包含哪些字段为例: 110 | ``` 111 | kubectl explain deployment 112 | kubectl explain deployment --api-version=apps/v1 113 | ``` 114 | 115 | 2、查看子字段 116 | 以查看节点亲和性字段为例: 117 | ``` 118 | kubectl explain deployment.spec.template.spec.affinity 119 | kubectl explain deployment.spec.template.spec.affinity.nodeAffinity 120 | ... 
121 | kubectl explain deployment.spec.template.spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms.matchExpressions 122 | ``` 123 | 124 | # 资源相关 125 | ## 查看资源 126 | 1、查看集群的所有资源 127 | ``` 128 | kubectl get all 129 | kubectl get all -o wide 130 | 131 | # 根据label筛选所有资源 132 | kubectl get all -l 'env=dev' 133 | ``` 134 | 135 | 2、查看deployment 136 | ``` 137 | # 查看deployment 138 | kubectl get deploy 139 | kubectl get deploy -n voidking -o wide 140 | kubectl get deploy -l 'env=dev' 141 | kubectl get deploy --selector='env=dev' 142 | kubectl get deploy --all-namespaces 143 | kubectl get deploy --show-labels 144 | 145 | # 查看deployment实时变化 146 | kubectl get deploy --watch 147 | 148 | # 查看指定deployment 149 | kubectl get deploy/deployment-name 150 | kubectl get deploy deployment-name 151 | 152 | # 指定namespace 153 | kubectl get deploy -n voidking 154 | 155 | # 根据label选择deployment 156 | kubectl get deploy --selector="name=nginx,type=frontend" 157 | 158 | # 查看deployment详细信息 159 | kubectl describe deploy 160 | ``` 161 | pod、service、node的查看方法和deployment相同。 162 | 163 | ## 创建资源 164 | ``` 165 | kubectl create -f deploy.yaml 166 | kubectl apply -f deploy.yaml 167 | ``` 168 | 169 | ## 更新资源 170 | ``` 171 | # 编辑集群中的资源 172 | kubectl edit deployment deployment-name 173 | 174 | # 比较manifest与集群中当前资源的不同 175 | kubectl diff -f deploy.yaml 176 | 177 | # 应用最新定义 178 | kubectl replace -f deploy.yaml 179 | kubectl apply -f deploy.yaml 180 | 181 | # 添加label 182 | kubectl label deployment deployment-name new-label=awesome 183 | 184 | # 添加annotation 185 | kubectl annotate deployment deployment-name icon-url=http://goo.gl/XXBTWq 186 | 187 | # 部分修改deployment 188 | kubectl patch deployment deployment-name --type json -p='[{"op": "remove", "path": "/spec/template/spec/containers/0/livenessProbe"}]' 189 | ``` 190 | 191 | ## 删除资源 192 | ``` 193 | kubectl delete deployment deployment-name 194 | ``` 195 | 196 | ## 扩缩容 197 | 方法一:通过扩缩容命令。 198 | ``` 199 | kubectl scale --replicas=2 deployment deployment-name 200 | ``` 201 | 202 | 方法二:通过更新yaml文件。 203 | 204 | ## 暴露服务 205 | ``` 206 | # 为deployment创建clusterip,暴露clusterip的80端口 207 | kubectl expose deployment deployment-name --port=80 --name svc-name 208 | # 为deployment创建clusterip,暴露clusterip的6789端口 209 | kubectl expose deployment deployment-name --target-port=80 --port=6789 210 | kubectl expose -f vk-deploy.yaml --target-port=80 --port=6789 211 | # 创建nodeport,暴露clusterip的80端口,暴露node的随机端口 212 | kubectl expose deployment deployment-name --port=80 --type=NodePort --name svc-name 213 | 214 | # 创建clusterip,暴露clusterip的80端口 215 | kubectl create service clusterip svc-name --tcp=80:80 216 | # 创建nodeport,暴露clusterip的80端口,暴露node的30080端口 217 | kubectl create service nodeport svc-name --tcp=80:80 --node-port=30080 218 | ``` 219 | 220 | ## 版本回退 221 | ``` 222 | # 查看发布历史 223 | kubectl rollout history deployment deployment-name 224 | # 查看发布状态 225 | kubectl rollout status deployment deployment-name 226 | # 回退到上一个版本 227 | kubectl rollout undo deployment deployment-name 228 | ``` 229 | 230 | ## cm和secret 231 | 1、创建configmap 232 | ``` 233 | kubectl create configmap special-config --from-literal=special.how=very --from-literal=special.type=charm 234 | kubectl create configmap game-config-3 --from-file== 235 | ``` 236 | 237 | 2、创建secret 238 | ``` 239 | # 在命令中指定key和value 240 | kubectl create secret generic db-user-pass --from-literal=username=voidking --from-literal=password='vkpassword' 241 | kubectl get secret db-user-pass -o yaml 242 | 243 | # 在文件中指定value 244 | echo -n 
'voidking' > ./username.txt 245 | echo -n 'vkpassword' > ./password.txt 246 | kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt 247 | kubectl create secret generic db-user-pass --from-file=username=./username.txt --from-file=password=./password.txt 248 | kubectl get secret db-user-pass -o yaml 249 | ``` 250 | 在yaml文件中看到的username和password,都是经过base64加密的字符串。 251 | ``` 252 | # 加密 253 | echo -n 'voidking' | base64 254 | # 解密 255 | echo 'dm9pZGtpbmc=' | base64 --decode 256 | ``` 257 | 258 | # yaml相关 259 | ## 导出yaml或json文件 260 | ``` 261 | # 导出deployment的yaml文件 262 | kubectl get deploy -o yaml > deploy.yaml 263 | 264 | # 导出deployment的json文件 265 | kubectl get deploy -o json > deploy.json 266 | 267 | # 导出指定deployment的yaml文件 268 | kubectl get deploy/deployment-name -o yaml > deploy-name.yaml 269 | 270 | # 导出指定deployment的json文件 271 | kubectl get deploy/deployment-name -o json > deploy-name.json 272 | ``` 273 | pod、service、node的yaml/json文件的导出方法和deployment相同。 274 | 275 | ## pod yaml 276 | 生成pod的yaml文件模板: 277 | ``` 278 | kubectl run vk-pod --image=nginx --generator=run-pod/v1 -l 'name=vk-pod,env=dev' --dry-run -o yaml 279 | kubectl run vk-pod --image=nginx --generator=run-pod/v1 --labels='name=vk-pod,env=dev' --dry-run -o yaml 280 | kubectl run vk-pod --image=busybox --generator=run-pod/v1 --dry-run -o yaml --command -- sleep 1000 281 | ``` 282 | 283 | 更多内容,参考[Kubernetes kubectl run 命令详解](http://docs.kubernetes.org.cn/468.html)。 284 | 285 | ## deployment yaml 286 | 1、生成deployment的yaml文件模板(历史方法): 287 | ``` 288 | kubectl run vk-deploy --image=nginx --dry-run -o yaml 289 | ``` 290 | 会出现提示: 291 | kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead. 292 | 因为官方不推荐使用 run-pod/v1 以外的其他生成器,其他生成器不久后就会弃用。更多内容参考[kubectl run](https://kubernetes.io/zh/docs/reference/kubectl/conventions/#kubectl-run)。 293 | 294 | 2、从已有K8S集群中已有资源中导出yaml模板文件(历史方法): 295 | ``` 296 | kubectl get deploy/deployment-name -o yaml --export > deploy-name.yaml 297 | ``` 298 | 也会出现提示: 299 | Flag --export has been deprecated, This flag is deprecated and will be removed in future. 300 | 很尴尬,--export 也要弃用了,且用且珍惜吧。 301 | 302 | 3、生成deployment的yaml文件模板(推荐方法): 303 | ``` 304 | kubectl create deployment vk-deploy --image=nginx --dry-run -o yaml 305 | ``` 306 | 307 | 更多内容,参考[Kubernetes kubectl create deployment 命令详解](http://docs.kubernetes.org.cn/535.html)。 308 | 309 | 注意,我们并不需要在deployment中指定容器对外暴露的ports,因为该字段只是一个提示作用。 310 | 311 | > List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated. 
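To illustrate the note above, here is a rough sketch of the template that `kubectl create deployment vk-deploy --image=nginx --dry-run -o yaml` generates (exact fields differ slightly between kubectl versions); the `ports` entry is added by hand purely to show where the informational containerPort would go:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: vk-deploy
  name: vk-deploy
spec:
  replicas: 1
  selector:
    matchLabels:
      app: vk-deploy
  template:
    metadata:
      labels:
        app: vk-deploy
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80   # informational only; omitting it does not stop the port from being exposed
```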
312 | 313 | ## service yaml 314 | 生成service的yaml文件模板(推荐方法): 315 | ``` 316 | kubectl create service clusterip vk-svc --tcp="5678:80" --dry-run -o yaml 317 | ``` 318 | 319 | 更多内容,参考[Kubernetes kubectl create service 命令详解](http://docs.kubernetes.org.cn/564.html)和[Service](https://kubernetes.io/docs/concepts/services-networking/service/)。 320 | 321 | ## yaml验证 322 | ``` 323 | kubectl create --validate -f deployment.yaml 324 | ``` 325 | 326 | # node相关 327 | ## taint和tolerations 328 | ``` 329 | # 添加taint 330 | kubectl taint nodes node1 key1=value1:NoSchedule 331 | kubectl taint nodes node1 key2=value2:PreferNoSchedule 332 | kubectl taint nodes node1 key3=value3:NoExecute 333 | 334 | # 删除taint 335 | kubectl taint nodes node1 value1:NoSchedule- 336 | ``` 337 | 338 | 同时,要在pod yaml的spec下添加tolerations字段: 339 | ``` 340 | spec: 341 | tolerations: 342 | - key: "key1" 343 | operator: "Equal" 344 | value: "value1" 345 | effect: "NoSchedule" 346 | - key: "key2" 347 | operator: "Exists" 348 | effect: "PreferNoSchedule" 349 | - key: "key3" 350 | operator: "Equal" 351 | value: "value3" 352 | effect: "NoExecute" 353 | ``` 354 | 355 | ## label和affinity 356 | ``` 357 | # 添加label 358 | kubectl label nodes node1 key1=value1,key2=value2 359 | 360 | # 删除label 361 | kubectl label nodes node1 key1- 362 | ``` 363 | 364 | 同时,要在pod yaml中添加tolerations字段: 365 | ``` 366 | nodeSelector: 367 | disktype: ssd 368 | ``` 369 | 或者,使用: 370 | ``` 371 | spec: 372 | affinity: 373 | nodeAffinity: 374 | requiredDuringSchedulingIgnoredDuringExecution: 375 | nodeSelectorTerms: 376 | - matchExpressions: 377 | - key: key1 378 | operator: In 379 | values: 380 | - value1 381 | - valuex 382 | preferredDuringSchedulingIgnoredDuringExecution: 383 | - weight: 1 384 | preference: 385 | matchExpressions: 386 | - key: key2 387 | operator: In 388 | values: 389 | - value2 390 | - valuey 391 | ``` 392 | 393 | ## node封锁 394 | 如果node存在问题,或者node需要升级维护,这时需要对node进行封锁,并且驱除pod。 395 | ``` 396 | # 封锁node,不允许分配pod 397 | kubectl cordon nodename 398 | 399 | # 从指定node驱除pod 400 | kubectl drain nodename --ignore-daemonsets 401 | 402 | # 解除node的封锁,允许分配pod 403 | kubectl uncordon nodename 404 | ``` 405 | 406 | # 容器交互 407 | ## 执行命令 408 | 1、登录容器 409 | ``` 410 | kubectl exec -it pod-name /bin/bash 411 | kubectl exec -it pod-name -c container-name /bin/bash 412 | kubectl exec -it pod-name -c container-name sh 413 | ``` 414 | 415 | 2、直接执行命令 416 | ``` 417 | kubectl exec pod-name env 418 | kubectl exec pod-name -- env 419 | kubectl exec pod-name -it -- env 420 | kubectl exec -n default pod-name -it -- env 421 | # 命令带参数时必须加双横线 422 | kubectl exec pod-name -- sh -c 'echo ${LANG}' 423 | ``` 424 | 425 | ## 拷贝文件 426 | ``` 427 | # 拷贝pod内容到宿主机 428 | kubectl cp podname-564949c96c-m986n:/path/filename . 429 | ``` 430 | 431 | # 故障排查 432 | 故障排查的第一步是先给问题分下类。这个问题是什么?Pods,Replication Controller或者Service? 
433 | 更多内容参考[应用故障排查](https://kubernetes.io/zh/docs/tasks/debug-application-cluster/debug-application/)。 434 | 435 | ## Pods排查 436 | ``` 437 | # 查看pod详细信息 438 | kubectl describe pods ${POD_NAME} 439 | 440 | # 查看容器日志 441 | kubectl logs ${POD_NAME} ${CONTAINER_NAME} 442 | 443 | # 查看crashed容器日志 444 | kubectl logs --previous ${POD_NAME} ${CONTAINER_NAME} 445 | 446 | # 查看运行的容器内部日志 447 | kubectl exec ${POD_NAME} -c ${CONTAINER_NAME} -- cat /var/log/cassandra/system.log 448 | 449 | # 查看运行的容器内部日志(pod只有一个容器) 450 | # kubectl exec ${POD_NAME} -- cat /var/log/cassandra/system.log 451 | ``` 452 | 453 | ## RC排查 454 | Replication Controllers排查 455 | ``` 456 | # 监控rc相关事件 457 | kubectl describe rc ${CONTROLLER_NAME} 458 | ``` 459 | 460 | ## Services排查 461 | ``` 462 | # 查看endpoints资源,service选择到了哪些pod和端口 463 | kubectl get endpoints ${SERVICE_NAME} 464 | ``` 465 | 466 | # 小技巧 467 | ## service cidr 468 | 怎样查看一个k8s集群的service ip范围? 469 | ``` 470 | kubeadm config view | grep Subnet 471 | kubectl get pods -n kube-system kube-apiserver-master -oyaml | grep service-cluster-ip-range 472 | ``` 473 | 474 | ## pod cidr 475 | 怎样查看一个k8s集群的pod ip范围? 476 | ``` 477 | kubeadm config view | grep Subnet 478 | kubectl cluster-info dump | grep -i cidr 479 | ``` 480 | 481 | 如果上面两个方法都找不到,那么还可以通过网络组件的日志来查看,以weave为例。 482 | ``` 483 | docker ps | grep weave 484 | docker logs | grep ipalloc-range 485 | ``` 486 | 487 | # k8s工具箱 488 | [voidking/k8s-tool](https://github.com/voidking/k8s-tool) 489 | 490 | # 书签 491 | [kubectl Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) 492 | 493 | [Overview of kubectl](https://kubernetes.io/docs/reference/kubectl/overview/) 494 | 495 | [Kubernetes kubectl 命令表](http://docs.kubernetes.org.cn/683.html) -------------------------------------------------------------------------------- /mock/04-cka-collection.md: -------------------------------------------------------------------------------- 1 | 2 | Questions are not from any actual exam!!! 
3 | 4 | 5 | Q: Create a secret that has the following username password data: 6 | username=missawesome 7 | password=123kube321 8 | Create a pod running nginx that has access to those data items in a volume mount path at /tmp/secret-volume 9 | log into the nginx pod you created and list the items and cat the output of the data items to a file "credentials.txt" 10 | 11 | echo -n 123kube321 | base64 12 | MTIza3ViZTMyMQ== 13 | 14 | kubectl create secret generic test-secret --from-literal=username=missawesome --from-literal=password=MTIza3ViZTMyMQ== -o yaml --dry-run > test-secret.yaml 15 | 16 | 17 | 18 | apiVersion: v1 19 | data: 20 | username: missawesome 21 | password: MTIza3ViZTMyMQ== 22 | kind: Secret 23 | metadata: 24 | creationTimestamp: null 25 | name: test-secret 26 | 27 | ## create the secret, then create the pod with volume reference to it: 28 | 29 | apiVersion: v1 30 | kind: Pod 31 | metadata: 32 | creationTimestamp: null 33 | labels: 34 | run: secret-pod 35 | name: secret-pod 36 | spec: 37 | containers: 38 | - image: nginx 39 | name: secret-pod 40 | volumeMounts: 41 | - name: secret-volume 42 | mountPath: /tmp/secret-volume 43 | resources: {} 44 | volumes: 45 | - name: secret-volume 46 | secret: 47 | secretName: test-secret 48 | status: {} 49 | 50 | ### go into the pod, then list the contents of /tmp/secret-volume 51 | 52 | kubectl exec -it secret-pod /bin/bash 53 | 54 | 55 | 56 | 57 | 58 | Q: Create a job that calculates pi to 2000 decimal points using the container with the image named perl 59 | and the following commands issued to the container: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 60 | Once the job has completed, check the logs to and export the result to pi-result.txt. 61 | 62 | Solution: 63 | 64 | kc job pi2000 --image=perl -o yaml --dry-run > pi2000.yaml 65 | 66 | ### edit the file, edit the name, remove any ID references and include the command argument under container spec. 67 | 68 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 69 | 70 | 71 | aapiVersion: batch/v1 72 | kind: Job 73 | metadata: 74 | creationTimestamp: null 75 | name: pi2000 76 | spec: 77 | template: 78 | metadata: 79 | creationTimestamp: null 80 | spec: 81 | containers: 82 | - image: perl 83 | name: pi2000 84 | command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 85 | resources: {} 86 | restartPolicy: Never 87 | status: {} 88 | 89 | 90 | kc -f pi2000.yaml 91 | 92 | ### get the output from the logs and export them to text file 93 | 94 | gl pi2000-xvrpt > pi-result.txt 95 | 96 | 97 | 98 | Q. Create a yaml file called nginx-deploy.yaml for a deployment of three replicas of nginx, listening on the container's port 80. 99 | They should have the labels role=webserver and app=nginx. The deployment should be named nginx-deploy. 100 | Expose the deployment with a load balancer and use a curl statement on the IP address of the load balancer 101 | to export the output to a file titled output.txt. 102 | 103 | 104 | Solution: 105 | 106 | sudo kubectl run nginx-deploy --labels="role=webserver,app=nginx" --image=nginx --replicas=3 --port=80 -o yaml > nginx-deployment.yaml 107 | 108 | ### expose the deployment with a loadbalancer type, call it nginx-service 109 | 110 | kubectl expose deployment nginx-deploy --type=LoadBalancer --name=nginx-service 111 | 112 | ### use a curl statement that connects to the IP endpoint of the nginx-service and save the output to a file called output.txt 113 | 114 | curl IP > output.txt 115 | 116 | 117 | 118 | 119 | Q. 
Scale the deployment you just made down to 2 replicas 120 | 121 | Solution: 122 | 123 | sudo kubectl scale deployment nginx-deploy --replicas=2 124 | 125 | 126 | 127 | Q. Create a pod called "haz-docs" with an nginx image listening on port 80. 128 | Attach the pod to emptyDir storage, mounted to /tmp in the container. 129 | Connect to the pod and create a file with zero bytes in the /tmp directory called my-doc.txt. 130 | 131 | Solution: 132 | 133 | apiVersion: apps/v1beta1 134 | kind: Deployment 135 | metadata: 136 | creationTimestamp: null 137 | labels: 138 | run: haz-docs 139 | name: haz-docs 140 | spec: 141 | replicas: 1 142 | selector: 143 | matchLabels: 144 | run: haz-docs 145 | strategy: {} 146 | template: 147 | metadata: 148 | creationTimestamp: null 149 | labels: 150 | run: haz-docs 151 | spec: 152 | containers: 153 | - image: nginx 154 | name: haz-docs 155 | volumeMounts: 156 | - mountPath: /tmp 157 | name: tmpvolume 158 | ports: 159 | - containerPort: 80 160 | resources: {} 161 | volumes: 162 | - name: tmpvolume 163 | emptyDir: {} 164 | status: {} 165 | 166 | 167 | sudo kubectl exec -it haz-docs-5b49cb4d87-2lm5g /bin/bash 168 | 169 | root@haz-docs-5b49cb4d87-2lm5g:/# cd /tmp/ 170 | root@haz-docs-5b49cb4d87-2lm5g:/tmp# touch my-doc.txt 171 | root@haz-docs-5b49cb4d87-2lm5g:/tmp# ls 172 | my-doc.txt 173 | 174 | 175 | Q. Label the worker node of your cluster with rack=qa. 176 | 177 | Solution: 178 | 179 | sudo kubectl label node texasdave2c.mylabserver.com rack=qa 180 | 181 | 182 | Q. Create a file called counter.yaml in your home directory and paste the following yaml into it: 183 | 184 | Solution: 185 | 186 | apiVersion: v1 187 | kind: Pod 188 | metadata: 189 | name: counter 190 | spec: 191 | containers: 192 | - name: count 193 | image: busybox 194 | args: [/bin/sh, -c, 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1; done'] 195 | 196 | Start this pod. 197 | Once its logs exceed a count of 20 (no need to be precise — any time after it has reached 20 is fine), 198 | save the logs into a file in your home directory called count.result.txt. 199 | 200 | 201 | Q. 202 | Create a new namespace called "cloud9". 203 | Create a pod running k8s.gcr.io/liveness with a liveliness probe that uses httpGet to 204 | probe an endpoint path located at /cloud-health on port 8080. 205 | The httpHeaders are name=Custom-Header and value=Awesome. 206 | The initial delay is 3 seconds and the period is 3. 207 | 208 | 209 | 210 | Q. Create a deployment with two replicas of nginx:1.7.9. 211 | The container listens on port 80. It should be named "web-dep" and be labeled 212 | with tier=frontend with an annotation of AppVersion=3.4. 213 | The containers must be running with the UID of 1000. 214 | 215 | Solution: 216 | 217 | kubectl run web-dep --labels="tier=frontend" --image=nginx --replicas=2 --port=80 -o yaml > web-dep.yaml 218 | 219 | ### edit the page to add the annotation in the metadata section: 220 | 221 | apiVersion: apps/v1beta1 222 | kind: Deployment 223 | metadata: 224 | annotations: 225 | AppVersion: "3.4" 226 | creationTimestamp: 2019-03-02T18:17:19Z 227 | generation: 1 228 | labels: 229 | tier: frontend 230 | 231 | ### output the description of the deployment to the file web-dep-description.txt 232 | 233 | sudo kubectl describe deploy/web-dep > web-dep-description.txt 234 | 235 | 236 | Q. Upgrade the image in use by the web-dep deployment to nginx:1.9. 
237 | 238 | Solution: 239 | 240 | kubectl --record deployment/web-dep set image deployment/web-dep nginx=nginx:1.9.1 241 | 242 | 243 | Q. Roll the image in use by the web-dep deployment to the previous version. 244 | Do not set the version number of the image explicitly for this command. 245 | 246 | kubectl rollout history deployment/web-dep 247 | 248 | kubectl rollout undo deployment/web-dep 249 | 250 | 251 | 252 | Q. Expose the web-dep deployment as a service using a NodePort. 253 | 254 | Solution: 255 | 256 | kubectl expose deployment/web-dep --type=NodePort 257 | 258 | 259 | Q. Configure a DaemonSet to run the image k8s.gcr.io/pause:2.0 in the cluster. 260 | 261 | Solution: 262 | 263 | kubectl run testds --image=k8s.gcr.io/pause:2.0 -o yaml > testds.yaml 264 | 265 | then edited it as Daemonset to get it running, you don't do replicas in a daemonset, it runs on all nodes 266 | 267 | 268 | 269 | Q. Configure the cluster to use 8.8.8.8 and 8.8.4.4 as upstream DNS servers. 270 | 271 | Solution: 272 | 273 | The answer can be found at: https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/ 274 | 275 | apiVersion: v1 276 | kind: ConfigMap 277 | metadata: 278 | name: kube-dns 279 | namespace: kube-system 280 | data: 281 | stubDomains: | 282 | {"acme.local": ["1.2.3.4"]} 283 | upstreamNameservers: | 284 | ["8.8.8.8", "8.8.4.4"] 285 | 286 | 287 | 288 | Q. An app inside a container needs the IP address of the web-dep endpoint to be passed to it as an 289 | environment variable called "ULTIMA". Save the yaml as env-ultima.yaml 290 | 291 | Solution: 292 | 293 | ### get the IP address of the web-dep service 294 | 295 | sudo kubectl get svc 296 | 297 | 298 | apiVersion: apps/v1beta2 299 | kind: Deployment 300 | metadata: 301 | name: ultima-dep 302 | namespace: default 303 | spec: 304 | selector: 305 | matchLabels: 306 | app: ultima-app 307 | template: 308 | metadata: 309 | labels: 310 | app: ultima-app 311 | spec: 312 | containers: 313 | - name: pause-pod 314 | image: k8s.gcr.io/pause:2.0 315 | env: 316 | - name: ULTIMA 317 | value: 55.55.58.23 318 | 319 | kc -f env-ultima.yaml 320 | 321 | Q. Figure out a way to create a pod with 3 replicas using the the nginx container that can have pods deployed 322 | on a worker node and the master node if needed. 323 | 324 | Solution: 325 | 326 | sudo kubectl get nodes 327 | 328 | sudo kubectl describe node MASTERNODE 329 | 330 | ### notice the taint on the master node: 331 | 332 | Taints: node-role.kubernetes.io/master:NoSchedule 333 | 334 | ### add the toleration to the yaml file 335 | 336 | apiVersion: apps/v1 337 | kind: Deployment 338 | metadata: 339 | name: nginx 340 | namespace: default 341 | spec: 342 | replicas: 3 343 | selector: 344 | matchLabels: 345 | app: nginx 346 | template: 347 | metadata: 348 | labels: 349 | app: nginx 350 | spec: 351 | containers: 352 | - name: nginx 353 | image: nginx 354 | tolerations: 355 | - key: "node-role.kubernetes.io/master" 356 | operator: "Equal" 357 | effect: "NoSchedule" 358 | 359 | 360 | 361 | Q. Copy all Kubernetes scheduler logs into a logs directory in your home directory. 362 | 363 | Solution: 364 | 365 | gp --namespace=kube-system 366 | 367 | gl --namespace=kube-system kube-scheduler-ubuntu.local > test-log.txt 368 | 369 | 370 | 371 | Q. Run the pod below until the counter in exceeds 30, export the log file into a file called counter-log.txt. 
372 | 373 | Solution: 374 | 375 | apiVersion: v1 376 | kind: Pod 377 | metadata: 378 | name: counter 379 | spec: 380 | containers: 381 | - name: count 382 | image: busybox 383 | args: [/bin/sh, -c, 'i=0; while true; do echo "$i: $(date)"; echo "$(date) - File - $i" >> /var/www/countlog; i=$((i+1)); sleep 3; done'] 384 | 385 | 386 | gl POD > counter-log.txt 387 | 388 | 389 | 390 | Q. Create a yaml file called db-secret.yaml for a secret called db-user-pass. 391 | The secret should have two fields: a username and password. 392 | The username should be "superadmin" and the password should be "imamazing". 393 | 394 | Solution: 395 | 396 | echo -n 'superadmin' > ./username.txt 397 | echo -n 'imamazing' > ./password.txt 398 | 399 | kc secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt -o yaml > db-secret.yaml 400 | 401 | apiVersion: v1 402 | data: 403 | password.txt: aWhlYXJ0a2l0dGVucw== 404 | username.txt: YWRtaW4= 405 | kind: Secret 406 | metadata: 407 | creationTimestamp: 2019-03-03T00:21:16Z 408 | name: db-user-pass 409 | namespace: default 410 | resourceVersion: "30182" 411 | selfLink: /api/v1/namespaces/default/secrets/db-user-pass 412 | uid: 42b979da-3d4a-11e9-8f41-06f514f6b3f0 413 | type: Opaque 414 | 415 | 416 | Q. 417 | Create a ConfigMap called web-config that contains the following two entries: 418 | 'web_port' set to 'localhost:8080' 419 | 'external_url' set to 'reddit.com' 420 | Run a pod called web-config-pod running nginx, expose the configmap settings as environment variables inside the nginx container. 421 | 422 | ### this has to be done in several steps, first create configmap from literals on command line 423 | 424 | sudo kubectl create configmap test-cm --from-literal=web_port='localhost:8080' --from-literal=external_url='reddit.com' 425 | 426 | ### double check the configmap 427 | 428 | sudo kubectl describe cm test-cm 429 | 430 | Name: test-cm 431 | Namespace: default 432 | Labels: 433 | Annotations: 434 | 435 | Data 436 | ==== 437 | external_url: 438 | ---- 439 | reddit.com 440 | web_port: 441 | ---- 442 | localhost:8080 443 | Events: 444 | 445 | 446 | ### create the pod deployment yaml and then edit the file 447 | 448 | sudo kubectl run web-config-pod --image=nginx -o yaml > web-config-pod.yaml 449 | 450 | ## edit the file: 451 | 452 | spec: 453 | containers: 454 | - image: nginx 455 | env: 456 | - name: WEB_PORT 457 | valueFrom: 458 | configMapKeyRef: 459 | name: test-cm 460 | key: web_port 461 | - name: EXTERNAL_URL 462 | valueFrom: 463 | configMapKeyRef: 464 | name: test-cm 465 | key: external_url 466 | imagePullPolicy: Always 467 | name: web-config-pod 468 | resources: {} 469 | terminationMessagePath: /dev/termination-log 470 | terminationMessagePolicy: File 471 | 472 | 473 | ## delete the UID references in the file 474 | ## delete and then reinstall deployment 475 | vi web-config-pod.yaml 476 | sudo kubectl delete deploy/web-config-pod 477 | sudo kubectl create -f web-config-pod.yaml 478 | 479 | ## get env vars from nginx pod: 480 | 481 | sudo kubectl exec nginx-deploy-868c8d4b79-czc2j env 482 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 483 | HOSTNAME=nginx-deploy-868c8d4b79-czc2j 484 | EXTERNAL_URL=reddit.com 485 | WEB_PORT=localhost:8080 486 | KUBERNETES_PORT_443_TCP_PROTO=tcp 487 | KUBERNETES_PORT_443_TCP_PORT=443 488 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 489 | KUBERNETES_SERVICE_HOST=10.96.0.1 490 | KUBERNETES_SERVICE_PORT=443 491 | KUBERNETES_SERVICE_PORT_HTTPS=443 492 | 
KUBERNETES_PORT=tcp://10.96.0.1:443 493 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 494 | NGINX_VERSION=1.15.9-1~stretch 495 | NJS_VERSION=1.15.9.0.2.8-1~stretch 496 | HOME=/root 497 | 498 | 499 | 500 | Q. Create a namespace called awsdb in your cluster. 501 | Create a pod called db-deploy that has one container running mysql image, and one container running nginx:1.7.9 502 | In the same namespace create a pod called nginx-deploy with a single container running the image nginx:1.9.1. 503 | Export the output of kubectl get pods for the awsdb namespace into a file called "pod-list.txt" 504 | 505 | ## mysql requires a pv and pvc with the yaml to create them found here: 506 | https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/#deploy-mysql 507 | 508 | ## create the deployment yaml 509 | ## make sure the workers have the correct file host paths /mnt/data 510 | 511 | 512 | 513 | Q. 514 | Create a pod running k8s.gcr.io/liveness with the following arguments: 515 | 516 | args: 517 | - /bin/sh 518 | - -c 519 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 520 | 521 | and the following commands: 522 | 523 | command: 524 | - cat 525 | - /tmp/healthy 526 | 527 | with an initial delay of 5 seconds and a probe period of 5 seconds 528 | 529 | Output the events of the description showing that the container started and then the health check failed. 530 | 531 | 532 | Q. This requires having a cluster with 2 worker nodes 533 | Safely remove one node from the cluster. Print the output of the node status into a file "worker-removed.txt". 534 | Reboot the worker node. 535 | Print the output of node status showing worker unable to be scheduled to "rebooted-worker.txt" 536 | Now bring the node back into the cluster and schedule several nginx pods to it, print the get pods wide output showing at least 537 | one pod is on the node you rebooted. 538 | 539 | 540 | 541 | Q. Create a deployment running nginx, mount a volume called "hostvolume" with a container volume mount at /tmp 542 | and mounted to the host at /data. If the directory isn't there make sure it is created in the pod spec at run time. 543 | Go into the container and create an empty file called "my-doc.txt" inside the /tmp directory. On the worker node 544 | that it was scheduled to, go into the /data directory and output a list of the contents to list-output.txt showing 545 | the file exists. 
546 | 547 | apiVersion: apps/v1beta1 548 | kind: Deployment 549 | metadata: 550 | creationTimestamp: null 551 | labels: 552 | run: haz-docs2 553 | name: haz-docs2 554 | spec: 555 | replicas: 1 556 | selector: 557 | matchLabels: 558 | run: haz-docs2 559 | strategy: {} 560 | template: 561 | metadata: 562 | creationTimestamp: null 563 | labels: 564 | run: haz-docs2 565 | spec: 566 | containers: 567 | - image: nginx 568 | name: haz-docs2 569 | volumeMounts: 570 | - mountPath: /tmp 571 | name: hostvolume 572 | ports: 573 | - containerPort: 80 574 | resources: {} 575 | volumes: 576 | - name: hostvolume 577 | hostPath: 578 | path: /data 579 | type: DirectoryOrCreate 580 | status: {} 581 | 582 | 583 | 584 | ### interesting way to quickly spin up busy box pod and run shell temporarily 585 | kubectl run -i --tty --image busybox dns-test --restart=Never --rm /bin/sh 586 | 587 | ### useful tool sorting output from get objects: 588 | 589 | 590 | ### to get a list of any of the jsonpath objects, just do a kubectl get pods -o json and it will 591 | list them like this, these are the json path items you can get under status: 592 | 593 | 594 | } 595 | } 596 | ], 597 | "hostIP": "192.168.0.3", 598 | "phase": "Running", 599 | "podIP": "10.244.2.33", 600 | "qosClass": "BestEffort", 601 | "startTime": "2019-03-09T22:52:09Z" 602 | } 603 | 604 | 605 | use status.phase as example: kubectl get pods --sort-by=.status.phase 606 | 607 | 608 | kubectl get pvc --sort-by=.spec.resources.requests.storage 609 | 610 | kubectl get namespaces --sort-by=.metadata.name 611 | 612 | https://elastisys.com/2018/12/10/backup-kubernetes-how-and-why/ 613 | 614 | --------------------------------------------------------------------------------