├── pipeline-sample ├── test.sh ├── test.py ├── Dockerfile ├── app.py └── README.md ├── book-img.png ├── chapters ├── 10 │ ├── my-pvc-sc.yaml │ ├── hostpath-pv.yaml │ ├── aws-ebs.yaml │ ├── nfs-sc.yaml │ ├── use-pvc.yaml │ ├── my-nfs.yaml │ ├── use-pvc-sc.yaml │ ├── use-nfs-sc.yaml │ └── README.md ├── 11 │ ├── no-tolerate.yaml │ ├── heavy-load.yaml │ ├── tolerate.yaml │ ├── hpa.yaml │ ├── badsector.yaml │ ├── node-affinity.yaml │ ├── pod-affinity.yaml │ ├── heavy-cal.yaml │ ├── pod-antiaffinity.yaml │ ├── redis-cache.yaml │ ├── web-server.yaml │ └── README.md ├── 12 │ ├── nginx-pdb.yaml │ ├── res-quota.yaml │ ├── pod-exceed.yaml │ ├── limit-range.yaml │ └── README.md ├── 13 │ ├── deny-all.yaml │ ├── nginx-sa.yaml │ ├── dont-leave-dev.yaml │ ├── role.yaml │ ├── read-pods.yaml │ ├── block-metadata.yaml │ ├── allow-from-web.yaml │ ├── web-open.yaml │ ├── allow-dmz.yaml │ └── db-accessable.yaml ├── 14 │ └── README.md ├── 15 │ ├── Dockerfile │ ├── pod.yaml │ ├── app.py │ ├── argocd-ingress.yaml │ ├── Jenkinsfile │ └── README.md ├── 16 │ ├── myHelmRelease.yaml │ ├── mypod-crd.yaml │ ├── jenkins.yaml │ ├── minio-instance.yaml │ └── README.md ├── 17 │ ├── single-job.yaml │ ├── param.yaml │ ├── error-handlers.yaml │ ├── serial-step.yaml │ ├── parallel-steps.yaml │ ├── dag-diamond.yaml │ └── README.md ├── 05 │ ├── game.properties │ ├── user-info.properties │ ├── user-info-stringdata.yaml │ ├── env.yaml │ ├── user-info.yaml │ ├── monster-config.yaml │ ├── mynginx.yaml │ ├── node-selector.yaml │ ├── liveness.yaml │ ├── readiness.yaml │ ├── requests.yaml │ ├── cmd.yaml │ ├── readiness-cmd.yaml │ ├── second.yaml │ ├── volume-empty.yaml │ ├── resources.yaml │ ├── secret-envfrom.yaml │ ├── monster-env.yaml │ ├── limits.yaml │ ├── volume.yaml │ ├── special-env.yaml │ ├── game-volume.yaml │ ├── secret-volume.yaml │ ├── secret-env.yaml │ ├── downward-volume.yaml │ ├── init-container.yaml │ └── downward-env.yaml ├── 04 │ ├── mynginx.yaml │ └── README.md ├── 01 │ ├── hello.py │ ├── 
Dockerfile │ └── README.md ├── 06 │ ├── external.yaml │ ├── myservice.yaml │ ├── cluster-ip.yaml │ ├── load-bal.yaml │ ├── node-port.yaml │ └── README.md ├── 07 │ ├── Dockerfile │ ├── job.yaml │ ├── job-bug.yaml │ ├── myreplicaset.yaml │ ├── cronjob.yaml │ ├── mydeploy.yaml │ ├── fluentd.yaml │ ├── mysts.yaml │ └── train.py ├── 09 │ ├── http-issuer.yaml │ ├── mynginx-ingress.yaml │ ├── apache-tls.yaml │ ├── apache-auth.yaml │ ├── apache-tls-issuer.yaml │ ├── domain-based-ingress.yaml │ ├── path-based-ingress.yaml │ └── README.md ├── 02 │ └── README.md ├── 03 │ └── README.md └── 08 │ └── README.md ├── gitops ├── service.yaml ├── deployment.yaml └── README.md └── README.md /pipeline-sample/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | py.test /work/test.py 3 | -------------------------------------------------------------------------------- /book-img.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bjpublic/core_kubernetes/HEAD/book-img.png -------------------------------------------------------------------------------- /chapters/05/game.properties: -------------------------------------------------------------------------------- 1 | # game.properties 2 | weapon=gun 3 | health=3 4 | potion=5 -------------------------------------------------------------------------------- /chapters/05/user-info.properties: -------------------------------------------------------------------------------- 1 | # user-info.properties 2 | username=admin 3 | password=password123 -------------------------------------------------------------------------------- /chapters/15/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.7 3 | 4 | RUN pip install flask 5 | ADD app.py . 
6 | 7 | ENTRYPOINT ["python", "app.py"] -------------------------------------------------------------------------------- /chapters/04/mynginx.yaml: -------------------------------------------------------------------------------- 1 | # mynginx.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: mynginx 6 | spec: 7 | containers: 8 | - name: mynginx 9 | image: nginx -------------------------------------------------------------------------------- /chapters/01/hello.py: -------------------------------------------------------------------------------- 1 | # hello.py 2 | import os 3 | import sys 4 | 5 | my_ver = os.environ["my_ver"] 6 | arg = sys.argv[1] 7 | 8 | print("hello %s, my version is %s!" % (arg, my_ver)) 9 | -------------------------------------------------------------------------------- /chapters/11/no-tolerate.yaml: -------------------------------------------------------------------------------- 1 | # no-tolerate.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: no-tolerate 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx -------------------------------------------------------------------------------- /chapters/06/external.yaml: -------------------------------------------------------------------------------- 1 | # external.yaml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: google-svc # 별칭 6 | spec: 7 | type: ExternalName 8 | externalName: google.com # 외부 DNS -------------------------------------------------------------------------------- /chapters/07/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM python:3.6.8-stretch 3 | 4 | RUN pip install tensorflow==1.5 keras==2.0.8 h5py==2.7.1 5 | 6 | COPY train.py . 
7 | 8 | ENTRYPOINT ["python", "train.py"] -------------------------------------------------------------------------------- /chapters/13/deny-all.yaml: -------------------------------------------------------------------------------- 1 | # deny-all.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: deny-all 6 | namespace: default 7 | spec: 8 | podSelector: {} 9 | ingress: [] -------------------------------------------------------------------------------- /chapters/15/pod.yaml: -------------------------------------------------------------------------------- 1 | # pod.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: skaffold-flask 6 | spec: 7 | containers: 8 | - image: /flask # 각 사용자 docker hub 계정을 입력합니다. 9 | name: flask -------------------------------------------------------------------------------- /gitops/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mynginx 5 | spec: 6 | ports: 7 | - port: 80 8 | protocol: TCP 9 | targetPort: 80 10 | selector: 11 | run: mynginx -------------------------------------------------------------------------------- /chapters/15/app.py: -------------------------------------------------------------------------------- 1 | # app.py 2 | from flask import Flask 3 | app = Flask(__name__) 4 | 5 | 6 | @app.route('/') 7 | def hello(): 8 | return "Hello World!" 
9 | 10 | if __name__ == '__main__': 11 | app.run() -------------------------------------------------------------------------------- /chapters/05/user-info-stringdata.yaml: -------------------------------------------------------------------------------- 1 | # user-info-stringdata.yaml 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: user-info-stringdata 6 | type: Opaque 7 | stringData: 8 | username: admin 9 | password: password123 -------------------------------------------------------------------------------- /pipeline-sample/test.py: -------------------------------------------------------------------------------- 1 | import app 2 | 3 | 4 | def test_answer(): 5 | assert app.func(3) == 4 6 | 7 | 8 | def test_hello(): 9 | ret = app.func(2) 10 | assert app.hello() == "Hello World!: " + str(ret) 11 | -------------------------------------------------------------------------------- /chapters/05/env.yaml: -------------------------------------------------------------------------------- 1 | # env.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: env 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | env: 11 | - name: hello 12 | value: "world!" 
-------------------------------------------------------------------------------- /chapters/05/user-info.yaml: -------------------------------------------------------------------------------- 1 | # user-info.yaml 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: user-info 6 | type: Opaque 7 | data: 8 | username: YWRtaW4= # admin 9 | password: cGFzc3dvcmQxMjM= # password123 -------------------------------------------------------------------------------- /chapters/12/nginx-pdb.yaml: -------------------------------------------------------------------------------- 1 | # nginx-pdb.yaml 2 | apiVersion: policy/v1beta1 3 | kind: PodDisruptionBudget 4 | metadata: 5 | name: nginx-pdb 6 | spec: 7 | minAvailable: 9 8 | selector: 9 | matchLabels: 10 | app: nginx -------------------------------------------------------------------------------- /chapters/05/monster-config.yaml: -------------------------------------------------------------------------------- 1 | # monster-config.yaml 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: monster-config 6 | namespace: default 7 | data: 8 | monsterType: fire 9 | monsterNum: "5" 10 | monsterLife: "3" -------------------------------------------------------------------------------- /chapters/05/mynginx.yaml: -------------------------------------------------------------------------------- 1 | # mynginx.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | labels: 6 | run: mynginx 7 | name: mynginx 8 | spec: 9 | containers: 10 | - image: nginx 11 | name: mynginx 12 | restartPolicy: Never -------------------------------------------------------------------------------- /chapters/13/nginx-sa.yaml: -------------------------------------------------------------------------------- 1 | # nginx-sa.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: nginx-sa 6 | spec: 7 | containers: 8 | - image: nginx 9 | name: nginx 10 | # mysa ServiceAccount 사용 11 | serviceAccountName: mysa 
-------------------------------------------------------------------------------- /pipeline-sample/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7 2 | 3 | RUN pip install flask pytest 4 | 5 | WORKDIR /work 6 | COPY test.sh /test 7 | COPY test.py test.py 8 | COPY app.py app.py 9 | 10 | RUN chmod +x /test 11 | 12 | CMD ["python", "app.py"] 13 | -------------------------------------------------------------------------------- /chapters/05/node-selector.yaml: -------------------------------------------------------------------------------- 1 | # node-selector.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: node-selector 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | # 특정 노드 라벨 선택 11 | nodeSelector: 12 | disktype: ssd -------------------------------------------------------------------------------- /chapters/12/res-quota.yaml: -------------------------------------------------------------------------------- 1 | # res-quota.yaml 2 | apiVersion: v1 3 | kind: ResourceQuota 4 | metadata: 5 | name: res-quota 6 | spec: 7 | hard: 8 | limits.cpu: 700m 9 | limits.memory: 800Mi 10 | requests.cpu: 500m 11 | requests.memory: 700Mi -------------------------------------------------------------------------------- /chapters/13/dont-leave-dev.yaml: -------------------------------------------------------------------------------- 1 | # dont-leave-dev.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: dont-leave-dev 6 | namespace: dev 7 | spec: 8 | podSelector: {} 9 | egress: 10 | - to: 11 | - podSelector: {} -------------------------------------------------------------------------------- /chapters/01/Dockerfile: -------------------------------------------------------------------------------- 1 | # Dockerfile 2 | FROM ubuntu:20.04 3 | 4 | RUN apt-get update \ 5 | && apt-get install -y \ 6 | curl \ 7 | python-dev 8 | 9 | WORKDIR /root 10 | COPY hello.py . 
11 | ENV my_ver 1.0 12 | 13 | CMD ["python", "hello.py", "guest"] -------------------------------------------------------------------------------- /chapters/05/liveness.yaml: -------------------------------------------------------------------------------- 1 | # liveness.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: liveness 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | livenessProbe: 11 | httpGet: 12 | path: /live 13 | port: 80 -------------------------------------------------------------------------------- /chapters/05/readiness.yaml: -------------------------------------------------------------------------------- 1 | # readiness.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: readiness 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | readinessProbe: 11 | httpGet: 12 | path: /ready 13 | port: 80 -------------------------------------------------------------------------------- /chapters/05/requests.yaml: -------------------------------------------------------------------------------- 1 | # requests.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: requests 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | resources: 11 | requests: 12 | cpu: "250m" 13 | memory: "500Mi" -------------------------------------------------------------------------------- /chapters/06/myservice.yaml: -------------------------------------------------------------------------------- 1 | # myservice.yaml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | hello: world 7 | name: myservice 8 | spec: 9 | ports: 10 | - port: 8080 11 | protocol: TCP 12 | targetPort: 80 13 | selector: 14 | run: mynginx -------------------------------------------------------------------------------- /chapters/10/my-pvc-sc.yaml: -------------------------------------------------------------------------------- 1 | # my-pvc-sc.yaml 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: 
my-pvc-sc 6 | spec: 7 | storageClassName: local-path 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 1Gi -------------------------------------------------------------------------------- /chapters/05/cmd.yaml: -------------------------------------------------------------------------------- 1 | # cmd.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: cmd 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | # 실행명령 12 | command: ["/bin/echo"] 13 | # 파라미터 14 | args: ["hello"] -------------------------------------------------------------------------------- /chapters/10/hostpath-pv.yaml: -------------------------------------------------------------------------------- 1 | # hostpath-pv.yaml 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: my-volume 6 | spec: 7 | storageClassName: manual 8 | capacity: 9 | storage: 1Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | hostPath: 13 | path: /tmp -------------------------------------------------------------------------------- /chapters/10/aws-ebs.yaml: -------------------------------------------------------------------------------- 1 | # aws-ebs.yaml 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: aws-ebs 6 | spec: 7 | capacity: 8 | storage: 1Gi 9 | accessModes: 10 | - ReadWriteOnce 11 | awsElasticBlockStore: 12 | volumeID: 13 | fsType: ext4 -------------------------------------------------------------------------------- /chapters/11/heavy-load.yaml: -------------------------------------------------------------------------------- 1 | # heavy-load.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: heavy-load 6 | spec: 7 | containers: 8 | - name: busybox 9 | image: busybox 10 | command: ["/bin/sh"] 11 | args: ["-c", "while true; do wget -q -O- http://heavy-cal; done"] -------------------------------------------------------------------------------- /chapters/11/tolerate.yaml: 
-------------------------------------------------------------------------------- 1 | # tolerate.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: tolerate 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | tolerations: 11 | - key: "project" 12 | value: "A" 13 | operator: "Equal" 14 | effect: "NoSchedule" -------------------------------------------------------------------------------- /chapters/05/readiness-cmd.yaml: -------------------------------------------------------------------------------- 1 | # readiness-cmd.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: readiness-cmd 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | readinessProbe: 11 | exec: 12 | command: 13 | - cat 14 | - /tmp/ready -------------------------------------------------------------------------------- /chapters/13/role.yaml: -------------------------------------------------------------------------------- 1 | # role.yaml 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: pod-viewer 6 | namespace: default 7 | rules: 8 | - apiGroups: [""] # ""은 core API group을 나타냅니다. 
9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - watch 14 | - list -------------------------------------------------------------------------------- /chapters/07/job.yaml: -------------------------------------------------------------------------------- 1 | # job.yaml 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: myjob 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: ml 11 | image: $USERNAME/train 12 | args: ['3', 'softmax', '0.5'] 13 | restartPolicy: Never 14 | backoffLimit: 2 -------------------------------------------------------------------------------- /chapters/05/second.yaml: -------------------------------------------------------------------------------- 1 | # second.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: second 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | - name: curl 11 | image: curlimages/curl 12 | command: ["/bin/sh"] 13 | args: ["-c", "while true; do sleep 5; curl -s localhost; done"] -------------------------------------------------------------------------------- /chapters/11/hpa.yaml: -------------------------------------------------------------------------------- 1 | # hpa.yaml 2 | apiVersion: autoscaling/v1 3 | kind: HorizontalPodAutoscaler 4 | metadata: 5 | name: heavy-cal 6 | spec: 7 | maxReplicas: 50 8 | minReplicas: 1 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: heavy-cal 13 | targetCPUUtilizationPercentage: 50 -------------------------------------------------------------------------------- /chapters/13/read-pods.yaml: -------------------------------------------------------------------------------- 1 | # read-pods.yaml 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: read-pods 6 | namespace: default 7 | subjects: 8 | - kind: ServiceAccount 9 | name: mysa 10 | roleRef: 11 | kind: Role 12 | name: pod-viewer 13 | apiGroup: rbac.authorization.k8s.io 
-------------------------------------------------------------------------------- /chapters/10/nfs-sc.yaml: -------------------------------------------------------------------------------- 1 | # nfs-sc.yaml 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: nfs-sc 6 | spec: 7 | # 기존 local-path에서 nfs로 변경 8 | storageClassName: nfs 9 | # accessModes를 ReadWriteMany로 변경 10 | accessModes: 11 | - ReadWriteMany 12 | resources: 13 | requests: 14 | storage: 1Gi -------------------------------------------------------------------------------- /chapters/05/volume-empty.yaml: -------------------------------------------------------------------------------- 1 | # volume-empty.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: volume-empty 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | volumeMounts: 11 | - mountPath: /container-volume 12 | name: my-volume 13 | volumes: 14 | - name: my-volume 15 | emptyDir: {} -------------------------------------------------------------------------------- /chapters/13/block-metadata.yaml: -------------------------------------------------------------------------------- 1 | # block-metadata.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: block-metadata 6 | namespace: default 7 | spec: 8 | podSelector: {} 9 | egress: 10 | - to: 11 | - ipBlock: 12 | cidr: 0.0.0.0/0 13 | except: 14 | - 169.254.169.254/32 -------------------------------------------------------------------------------- /chapters/05/resources.yaml: -------------------------------------------------------------------------------- 1 | # resources.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: resources 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | resources: 11 | requests: 12 | cpu: "250m" 13 | memory: "500Mi" 14 | limits: 15 | cpu: "500m" 16 | memory: "1Gi" -------------------------------------------------------------------------------- 
/chapters/05/secret-envfrom.yaml: -------------------------------------------------------------------------------- 1 | # secret-envfrom.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: secret-envfrom 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: secret-envfrom 10 | image: k8s.gcr.io/busybox 11 | command: [ "printenv" ] 12 | envFrom: 13 | - secretRef: 14 | name: user-info -------------------------------------------------------------------------------- /chapters/10/use-pvc.yaml: -------------------------------------------------------------------------------- 1 | # use-pvc.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: use-pvc 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | volumeMounts: 11 | - mountPath: /test-volume 12 | name: vol 13 | volumes: 14 | - name: vol 15 | persistentVolumeClaim: 16 | claimName: my-pvc -------------------------------------------------------------------------------- /chapters/12/pod-exceed.yaml: -------------------------------------------------------------------------------- 1 | # pod-exceed.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: pod-exceed 6 | spec: 7 | containers: 8 | - image: nginx 9 | name: nginx 10 | resources: 11 | limits: 12 | cpu: "700m" 13 | memory: "700Mi" 14 | requests: 15 | cpu: "300m" 16 | memory: "256Mi" -------------------------------------------------------------------------------- /chapters/13/allow-from-web.yaml: -------------------------------------------------------------------------------- 1 | # allow-from-web.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: allow-from-web 6 | namespace: default 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | run: app 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | run: web -------------------------------------------------------------------------------- /chapters/13/web-open.yaml: 
-------------------------------------------------------------------------------- 1 | # web-open.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: web-open 6 | namespace: default 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | run: web 11 | ingress: 12 | - from: 13 | - podSelector: {} 14 | ports: 15 | - protocol: TCP 16 | port: 80 -------------------------------------------------------------------------------- /chapters/11/badsector.yaml: -------------------------------------------------------------------------------- 1 | # badsector.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: badsector 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | tolerations: 11 | - key: "project" 12 | value: "A" 13 | operator: "Equal" 14 | effect: "NoSchedule" 15 | - key: "badsector" 16 | operator: "Exists" -------------------------------------------------------------------------------- /pipeline-sample/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | app = Flask(__name__) 3 | 4 | 5 | def func(x): 6 | return x + 1 7 | 8 | 9 | def func(x): 10 | return x + 1 11 | 12 | 13 | @app.route('/') 14 | def hello(): 15 | ret = func(2) 16 | return "Hello World!: " + str(ret) 17 | 18 | 19 | if __name__ == '__main__': 20 | app.run() 21 | -------------------------------------------------------------------------------- /chapters/10/my-nfs.yaml: -------------------------------------------------------------------------------- 1 | # my-nfs.yaml 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: my-nfs 6 | spec: 7 | storageClassName: nfs 8 | capacity: 9 | storage: 5Gi 10 | accessModes: 11 | - ReadWriteMany 12 | mountOptions: 13 | - hard 14 | - nfsvers=4.1 15 | nfs: 16 | path: /tmp 17 | server: -------------------------------------------------------------------------------- /chapters/10/use-pvc-sc.yaml: 
-------------------------------------------------------------------------------- 1 | # use-pvc-sc.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: use-pvc-sc 6 | spec: 7 | volumes: 8 | - name: vol 9 | persistentVolumeClaim: 10 | claimName: my-pvc-sc 11 | containers: 12 | - name: nginx 13 | image: nginx 14 | volumeMounts: 15 | - mountPath: "/usr/share/nginx/html" 16 | name: vol -------------------------------------------------------------------------------- /chapters/16/myHelmRelease.yaml: -------------------------------------------------------------------------------- 1 | # myHelmRelease.yaml 2 | apiVersion: helm.fluxcd.io/v1 3 | kind: HelmRelease 4 | metadata: 5 | name: rabbit 6 | namespace: default 7 | spec: 8 | releaseName: rabbitmq 9 | chart: 10 | repository: https://kubernetes-charts.storage.googleapis.com/ 11 | name: rabbitmq 12 | version: 3.3.6 13 | values: 14 | replicas: 1 -------------------------------------------------------------------------------- /chapters/05/monster-env.yaml: -------------------------------------------------------------------------------- 1 | # monster-env.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: monster-env 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: monster-env 10 | image: k8s.gcr.io/busybox 11 | command: [ "printenv" ] 12 | # env 대신에 envFrom 사용 13 | envFrom: 14 | - configMapRef: 15 | name: monster-config -------------------------------------------------------------------------------- /chapters/07/job-bug.yaml: -------------------------------------------------------------------------------- 1 | # job-bug.yaml 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: myjob-bug 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: ml 11 | image: $USERNAME/train 12 | # int 타입이 아닌 string 타입 전달 13 | args: ['bug-string', 'softmax', '0.5'] 14 | restartPolicy: Never 15 | backoffLimit: 2 -------------------------------------------------------------------------------- 
/chapters/07/myreplicaset.yaml: -------------------------------------------------------------------------------- 1 | # myreplicaset.yaml 2 | apiVersion: apps/v1 3 | kind: ReplicaSet 4 | metadata: 5 | name: myreplicaset 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | run: nginx-rs 11 | template: 12 | metadata: 13 | labels: 14 | run: nginx-rs 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx -------------------------------------------------------------------------------- /chapters/09/http-issuer.yaml: -------------------------------------------------------------------------------- 1 | # http-issuer.yaml 2 | apiVersion: cert-manager.io/v1alpha2 3 | kind: ClusterIssuer 4 | metadata: 5 | name: http-issuer 6 | spec: 7 | acme: 8 | email: 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | privateKeySecretRef: 11 | name: issuer-key 12 | solvers: 13 | - http01: 14 | ingress: 15 | class: nginx -------------------------------------------------------------------------------- /chapters/09/mynginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | # mynginx-ingress.yaml 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | name: mynginx 8 | spec: 9 | rules: 10 | - host: 10.0.1.1.sslip.io 11 | http: 12 | paths: 13 | - path: / 14 | backend: 15 | serviceName: mynginx 16 | servicePort: 80 -------------------------------------------------------------------------------- /chapters/05/limits.yaml: -------------------------------------------------------------------------------- 1 | # limits.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: limits 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: mynginx 10 | image: python:3.7 11 | command: [ "python" ] 12 | args: [ "-c", "arr = []\nwhile True: arr.append(range(1000))" ] 13 | resources: 14 | limits: 15 | cpu: "500m" 16 | memory: "1Gi" 
-------------------------------------------------------------------------------- /chapters/13/allow-dmz.yaml: -------------------------------------------------------------------------------- 1 | # allow-dmz.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: allow-dmz 6 | namespace: default 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | run: web 11 | ingress: 12 | - from: 13 | - namespaceSelector: 14 | matchLabels: 15 | zone: dmz 16 | ports: 17 | - protocol: TCP 18 | port: 80 -------------------------------------------------------------------------------- /gitops/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mynginx 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | run: mynginx 10 | template: 11 | metadata: 12 | labels: 13 | run: mynginx 14 | spec: 15 | containers: 16 | - image: nginx 17 | name: mynginx 18 | ports: 19 | - containerPort: 80 -------------------------------------------------------------------------------- /chapters/05/volume.yaml: -------------------------------------------------------------------------------- 1 | # volume.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: volume 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | 11 | # 컨테이너 내부의 연결 위치 지정 12 | volumeMounts: 13 | - mountPath: /container-volume 14 | name: my-volume 15 | 16 | # host 서버의 연결 위치 지정 17 | volumes: 18 | - name: my-volume 19 | hostPath: 20 | path: /home -------------------------------------------------------------------------------- /chapters/16/mypod-crd.yaml: -------------------------------------------------------------------------------- 1 | # mypod-crd.yaml 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: mypods.crd.example.com 6 | spec: 7 | group: crd.example.com 8 | version: v1 9 | scope: Namespaced 10 | names: 11 | plural: 
mypods # 복수 이름 12 | singular: mypod # 단수 이름 13 | kind: MyPod # Kind 이름 14 | shortNames: # 축약 이름 15 | - mp -------------------------------------------------------------------------------- /chapters/13/db-accessable.yaml: -------------------------------------------------------------------------------- 1 | # db-accessable.yaml 2 | kind: NetworkPolicy 3 | apiVersion: networking.k8s.io/v1 4 | metadata: 5 | name: db-accessable 6 | namespace: default 7 | spec: 8 | podSelector: 9 | matchLabels: 10 | run: db 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | db-accessable: "true" 16 | ports: 17 | - protocol: TCP 18 | port: 80 -------------------------------------------------------------------------------- /chapters/12/limit-range.yaml: -------------------------------------------------------------------------------- 1 | # limit-range.yaml 2 | apiVersion: v1 3 | kind: LimitRange 4 | metadata: 5 | name: limit-range 6 | spec: 7 | limits: 8 | - default: 9 | cpu: 400m 10 | memory: 512Mi 11 | defaultRequest: 12 | cpu: 300m 13 | memory: 256Mi 14 | max: 15 | cpu: 600m 16 | memory: 600Mi 17 | min: 18 | cpu: 200m 19 | memory: 200Mi 20 | type: Container -------------------------------------------------------------------------------- /chapters/09/apache-tls.yaml: -------------------------------------------------------------------------------- 1 | # apache-tls.yaml 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: apache-tls 6 | spec: 7 | tls: 8 | - hosts: 9 | - apache-tls.10.0.1.1.sslip.io 10 | secretName: my-tls-certs 11 | rules: 12 | - host: apache-tls.10.0.1.1.sslip.io 13 | http: 14 | paths: 15 | - path: / 16 | backend: 17 | serviceName: apache 18 | servicePort: 80 -------------------------------------------------------------------------------- /chapters/17/single-job.yaml: -------------------------------------------------------------------------------- 1 | # single-job.yaml 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Workflow 4 
| metadata: 5 | generateName: hello-world- 6 | namespace: default 7 | spec: 8 | entrypoint: whalesay 9 | templates: 10 | - name: whalesay 11 | container: 12 | image: docker/whalesay 13 | command: [cowsay] 14 | args: ["hello world"] 15 | resources: 16 | limits: 17 | memory: 32Mi 18 | cpu: 100m -------------------------------------------------------------------------------- /chapters/05/special-env.yaml: -------------------------------------------------------------------------------- 1 | # special-env.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: special-env 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: special-env 10 | image: k8s.gcr.io/busybox 11 | command: [ "printenv" ] 12 | args: [ "special_env" ] 13 | env: 14 | - name: special_env 15 | valueFrom: 16 | configMapKeyRef: 17 | name: special-config 18 | key: special.power -------------------------------------------------------------------------------- /chapters/11/node-affinity.yaml: -------------------------------------------------------------------------------- 1 | # node-affinity.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: node-affinity 6 | spec: 7 | containers: 8 | - name: nginx 9 | image: nginx 10 | affinity: 11 | nodeAffinity: 12 | requiredDuringSchedulingIgnoredDuringExecution: 13 | nodeSelectorTerms: 14 | - matchExpressions: 15 | - key: disktype 16 | operator: In 17 | values: 18 | - ssd -------------------------------------------------------------------------------- /chapters/05/game-volume.yaml: -------------------------------------------------------------------------------- 1 | # game-volume.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: game-volume 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: game-volume 10 | image: k8s.gcr.io/busybox 11 | command: [ "/bin/sh", "-c", "cat /etc/config/game.properties" ] 12 | volumeMounts: 13 | - name: game-volume 14 | mountPath: /etc/config 15 | volumes: 16 | - name: game-volume 17 
| configMap: 18 | name: game-config -------------------------------------------------------------------------------- /chapters/05/secret-volume.yaml: -------------------------------------------------------------------------------- 1 | # secret-volume.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: secret-volume 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: secret-volume 10 | image: k8s.gcr.io/busybox 11 | command: [ "sh" ] 12 | args: ["-c", "ls /secret; cat /secret/username"] 13 | volumeMounts: 14 | - name: secret 15 | mountPath: "/secret" 16 | volumes: 17 | - name: secret 18 | secret: 19 | secretName: user-info -------------------------------------------------------------------------------- /chapters/07/cronjob.yaml: -------------------------------------------------------------------------------- 1 | # cronjob.yaml 2 | apiVersion: batch/v1beta1 3 | kind: CronJob 4 | metadata: 5 | name: hello 6 | spec: 7 | schedule: "*/1 * * * *" 8 | jobTemplate: 9 | spec: 10 | template: 11 | spec: 12 | containers: 13 | - name: hello 14 | image: busybox 15 | args: 16 | - /bin/sh 17 | - -c 18 | - date; echo Hello from the Kubernetes cluster 19 | restartPolicy: OnFailure -------------------------------------------------------------------------------- /chapters/07/mydeploy.yaml: -------------------------------------------------------------------------------- 1 | # mydeploy.yaml 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: mydeploy 6 | spec: 7 | replicas: 10 8 | selector: 9 | matchLabels: 10 | run: nginx 11 | strategy: 12 | type: RollingUpdate 13 | rollingUpdate: 14 | maxUnavailable: 25% 15 | maxSurge: 25% 16 | template: 17 | metadata: 18 | labels: 19 | run: nginx 20 | spec: 21 | containers: 22 | - name: nginx 23 | image: nginx:1.7.9 -------------------------------------------------------------------------------- /chapters/06/cluster-ip.yaml: -------------------------------------------------------------------------------- 1 | # 
cluster-ip.yaml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: cluster-ip 6 | spec: 7 | # type: ClusterIP # 생략되어 있음 8 | ports: 9 | - port: 8080 10 | protocol: TCP 11 | targetPort: 80 12 | selector: 13 | run: cluster-ip 14 | --- 15 | apiVersion: v1 16 | kind: Pod 17 | metadata: 18 | labels: 19 | run: cluster-ip 20 | name: cluster-ip 21 | spec: 22 | containers: 23 | - image: nginx 24 | name: nginx 25 | ports: 26 | - containerPort: 80 -------------------------------------------------------------------------------- /chapters/05/secret-env.yaml: -------------------------------------------------------------------------------- 1 | # secret-env.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: secret-env 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: secret-env 10 | image: k8s.gcr.io/busybox 11 | command: [ "printenv" ] 12 | env: 13 | - name: USERNAME 14 | valueFrom: 15 | secretKeyRef: 16 | name: user-info 17 | key: username 18 | - name: PASSWORD 19 | valueFrom: 20 | secretKeyRef: 21 | name: user-info 22 | key: password -------------------------------------------------------------------------------- /chapters/16/jenkins.yaml: -------------------------------------------------------------------------------- 1 | # jenkins.yaml 2 | apiVersion: helm.fluxcd.io/v1 3 | kind: HelmRelease 4 | metadata: 5 | name: jenkins 6 | namespace: default 7 | spec: 8 | releaseName: jenkins 9 | chart: 10 | repository: https://kubernetes-charts.storage.googleapis.com 11 | name: jenkins 12 | version: 2.3.0 13 | values: 14 | master: 15 | adminUser: "jenkins" 16 | resources: 17 | limits: 18 | cpu: "500m" 19 | memory: "512Mi" 20 | serviceType: LoadBalancer 21 | servicePort: 8080 -------------------------------------------------------------------------------- /chapters/06/load-bal.yaml: -------------------------------------------------------------------------------- 1 | # load-bal.yaml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: load-bal 6 
| spec: 7 | type: LoadBalancer # 타입 LoadBalancer 8 | ports: 9 | - port: 8080 10 | protocol: TCP 11 | targetPort: 80 12 | nodePort: 30088 # 30088로 변경 13 | selector: 14 | run: load-bal 15 | --- 16 | apiVersion: v1 17 | kind: Pod 18 | metadata: 19 | labels: 20 | run: load-bal 21 | name: load-bal 22 | spec: 23 | containers: 24 | - image: nginx 25 | name: nginx 26 | ports: 27 | - containerPort: 80 -------------------------------------------------------------------------------- /chapters/06/node-port.yaml: -------------------------------------------------------------------------------- 1 | # node-port.yaml 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: node-port 6 | spec: 7 | type: NodePort # type 추가 8 | ports: 9 | - port: 8080 10 | protocol: TCP 11 | targetPort: 80 12 | nodePort: 30080 # 호스트(노드)의 포트 지정 13 | selector: 14 | run: node-port 15 | --- 16 | apiVersion: v1 17 | kind: Pod 18 | metadata: 19 | labels: 20 | run: node-port 21 | name: node-port 22 | spec: 23 | containers: 24 | - image: nginx 25 | name: nginx 26 | ports: 27 | - containerPort: 80 -------------------------------------------------------------------------------- /chapters/09/apache-auth.yaml: -------------------------------------------------------------------------------- 1 | # apache-auth.yaml 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | nginx.ingress.kubernetes.io/auth-type: basic 8 | nginx.ingress.kubernetes.io/auth-secret: basic-auth 9 | nginx.ingress.kubernetes.io/auth-realm: 'Authentication Required - foo' 10 | name: apache-auth 11 | spec: 12 | rules: 13 | - host: apache-auth.10.0.1.1.sslip.io 14 | http: 15 | paths: 16 | - backend: 17 | serviceName: apache 18 | servicePort: 80 19 | path: / -------------------------------------------------------------------------------- /chapters/17/param.yaml: -------------------------------------------------------------------------------- 1 | # param.yaml 2 | apiVersion: 
argoproj.io/v1alpha1 3 | kind: Workflow 4 | metadata: 5 | generateName: hello-world-parameters- 6 | namespace: default 7 | spec: 8 | entrypoint: whalesay 9 | arguments: 10 | parameters: 11 | - name: message 12 | value: hello world through param 13 | 14 | templates: 15 | ############### 16 | # entrypoint 17 | ############### 18 | - name: whalesay 19 | inputs: 20 | parameters: 21 | - name: message 22 | container: 23 | image: docker/whalesay 24 | command: [cowsay] 25 | args: ["{{inputs.parameters.message}}"] -------------------------------------------------------------------------------- /chapters/05/downward-volume.yaml: -------------------------------------------------------------------------------- 1 | # downward-volume.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: downward-volume 6 | labels: 7 | zone: ap-north-east 8 | cluster: cluster1 9 | spec: 10 | restartPolicy: OnFailure 11 | containers: 12 | - name: downward 13 | image: k8s.gcr.io/busybox 14 | command: ["sh", "-c"] 15 | args: ["cat /etc/podinfo/labels"] 16 | volumeMounts: 17 | - name: podinfo 18 | mountPath: /etc/podinfo 19 | volumes: 20 | - name: podinfo 21 | downwardAPI: 22 | items: 23 | - path: "labels" 24 | fieldRef: 25 | fieldPath: metadata.labels -------------------------------------------------------------------------------- /chapters/09/apache-tls-issuer.yaml: -------------------------------------------------------------------------------- 1 | # apache-tls-issuer.yaml 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | # 앞서 생성한 발급자 지정 8 | cert-manager.io/cluster-issuer: http-issuer 9 | name: apache-tls-issuer 10 | spec: 11 | rules: 12 | # 10.0.1.1을 공인IP로 변경해 주세요. 13 | - host: apache-issuer.10.0.1.1.sslip.io 14 | http: 15 | paths: 16 | - backend: 17 | serviceName: apache 18 | servicePort: 80 19 | path: / 20 | tls: 21 | - hosts: 22 | # 10.0.1.1을 공인IP로 변경해 주세요. 
23 | - apache-issuer.10.0.1.1.sslip.io 24 | secretName: apache-tls -------------------------------------------------------------------------------- /chapters/07/fluentd.yaml: -------------------------------------------------------------------------------- 1 | # fluentd.yaml 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: fluentd 6 | spec: 7 | selector: 8 | matchLabels: 9 | name: fluentd 10 | template: 11 | metadata: 12 | labels: 13 | name: fluentd 14 | spec: 15 | containers: 16 | - name: fluentd 17 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 18 | volumeMounts: 19 | - name: varlibdockercontainers 20 | mountPath: /var/lib/docker/containers 21 | readOnly: true 22 | volumes: 23 | - name: varlibdockercontainers 24 | hostPath: 25 | path: /var/lib/docker/containers -------------------------------------------------------------------------------- /chapters/05/init-container.yaml: -------------------------------------------------------------------------------- 1 | # init-container.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: init-container 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: busybox 10 | image: k8s.gcr.io/busybox 11 | command: [ "ls" ] 12 | args: [ "/tmp/moby" ] 13 | volumeMounts: 14 | - name: workdir 15 | mountPath: /tmp 16 | initContainers: 17 | - name: git 18 | image: alpine/git 19 | command: ["sh"] 20 | args: 21 | - "-c" 22 | - "git clone https://github.com/moby/moby.git /tmp/moby" 23 | volumeMounts: 24 | - name: workdir 25 | mountPath: "/tmp" 26 | volumes: 27 | - name: workdir 28 | emptyDir: {} -------------------------------------------------------------------------------- /chapters/05/downward-env.yaml: -------------------------------------------------------------------------------- 1 | # downward-env.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: downward-env 6 | spec: 7 | restartPolicy: OnFailure 8 | containers: 9 | - name: downward 10 | image: k8s.gcr.io/busybox 11 | 
command: [ "printenv"] 12 | env: 13 | - name: NODE_NAME 14 | valueFrom: 15 | fieldRef: 16 | fieldPath: spec.nodeName 17 | - name: POD_NAME 18 | valueFrom: 19 | fieldRef: 20 | fieldPath: metadata.name 21 | - name: POD_NAMESPACE 22 | valueFrom: 23 | fieldRef: 24 | fieldPath: metadata.namespace 25 | - name: POD_IP 26 | valueFrom: 27 | fieldRef: 28 | fieldPath: status.podIP -------------------------------------------------------------------------------- /chapters/11/pod-affinity.yaml: -------------------------------------------------------------------------------- 1 | # pod-affinity.yaml 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: pod-affinity 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: affinity 10 | replicas: 2 11 | template: 12 | metadata: 13 | labels: 14 | app: affinity 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | affinity: 20 | podAffinity: 21 | requiredDuringSchedulingIgnoredDuringExecution: 22 | - labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - affinity 28 | topologyKey: "kubernetes.io/hostname" -------------------------------------------------------------------------------- /chapters/11/heavy-cal.yaml: -------------------------------------------------------------------------------- 1 | # heavy-cal.yaml 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: heavy-cal 6 | spec: 7 | selector: 8 | matchLabels: 9 | run: heavy-cal 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | run: heavy-cal 15 | spec: 16 | containers: 17 | - name: heavy-cal 18 | image: k8s.gcr.io/hpa-example 19 | ports: 20 | - containerPort: 80 21 | resources: 22 | limits: 23 | cpu: 500m 24 | requests: 25 | cpu: 300m 26 | --- 27 | apiVersion: v1 28 | kind: Service 29 | metadata: 30 | name: heavy-cal 31 | spec: 32 | ports: 33 | - port: 80 34 | selector: 35 | run: heavy-cal -------------------------------------------------------------------------------- 
/chapters/15/argocd-ingress.yaml: -------------------------------------------------------------------------------- 1 | # argocd-ingress.yaml 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: argocd 6 | namespace: argocd 7 | annotations: 8 | cert-manager.io/cluster-issuer: http-issuer 9 | kubernetes.io/ingress.class: nginx 10 | kubernetes.io/tls-acme: "true" 11 | nginx.ingress.kubernetes.io/backend-protocol: HTTPS 12 | nginx.ingress.kubernetes.io/ssl-passthrough: "true" 13 | spec: 14 | rules: 15 | - host: argocd.10.0.1.1.sslip.io 16 | http: 17 | paths: 18 | - path: / 19 | backend: 20 | serviceName: argocd-server 21 | servicePort: https 22 | tls: 23 | - hosts: 24 | - argocd.10.0.1.1.sslip.io 25 | secretName: argocd-tls -------------------------------------------------------------------------------- /chapters/11/pod-antiaffinity.yaml: -------------------------------------------------------------------------------- 1 | # pod-antiaffinity.yaml 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: pod-antiaffinity 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: antiaffinity 10 | replicas: 2 11 | template: 12 | metadata: 13 | labels: 14 | app: antiaffinity 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | affinity: 20 | podAntiAffinity: 21 | requiredDuringSchedulingIgnoredDuringExecution: 22 | - labelSelector: 23 | matchExpressions: 24 | - key: app 25 | operator: In 26 | values: 27 | - antiaffinity 28 | topologyKey: "kubernetes.io/hostname" -------------------------------------------------------------------------------- /chapters/11/redis-cache.yaml: -------------------------------------------------------------------------------- 1 | # redis-cache.yaml 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: redis-cache 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: store 10 | replicas: 2 11 | template: 12 | metadata: 13 | labels: 14 | app: store 15 | spec: 16 | affinity: 17 | # cache 
서버끼리 멀리 스케줄링 18 | # app=store 라벨을 가진 Pod끼리 멀리 스케줄링 19 | podAntiAffinity: 20 | requiredDuringSchedulingIgnoredDuringExecution: 21 | - labelSelector: 22 | matchExpressions: 23 | - key: app 24 | operator: In 25 | values: 26 | - store 27 | topologyKey: "kubernetes.io/hostname" 28 | containers: 29 | - name: redis-server 30 | image: redis -------------------------------------------------------------------------------- /chapters/09/domain-based-ingress.yaml: -------------------------------------------------------------------------------- 1 | # domain-based-ingress.yaml 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | name: apache-domain 8 | spec: 9 | rules: 10 | # apache 서브 도메인 11 | - host: apache.10.0.1.1.sslip.io 12 | http: 13 | paths: 14 | - backend: 15 | serviceName: apache 16 | servicePort: 80 17 | path: / 18 | --- 19 | apiVersion: extensions/v1beta1 20 | kind: Ingress 21 | metadata: 22 | annotations: 23 | kubernetes.io/ingress.class: nginx 24 | name: nginx-domain 25 | spec: 26 | rules: 27 | # nginx 서브 도메인 28 | - host: nginx.10.0.1.1.sslip.io 29 | http: 30 | paths: 31 | - backend: 32 | serviceName: nginx 33 | servicePort: 80 34 | path: / -------------------------------------------------------------------------------- /chapters/10/use-nfs-sc.yaml: -------------------------------------------------------------------------------- 1 | # use-nfs-sc.yaml 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: use-nfs-sc-master 6 | spec: 7 | volumes: 8 | - name: vol 9 | persistentVolumeClaim: 10 | claimName: nfs-sc 11 | containers: 12 | - name: nginx 13 | image: nginx 14 | volumeMounts: 15 | - mountPath: "/usr/share/nginx/html" 16 | name: vol 17 | nodeSelector: 18 | kubernetes.io/hostname: master 19 | --- 20 | apiVersion: v1 21 | kind: Pod 22 | metadata: 23 | name: use-nfs-sc-worker 24 | spec: 25 | volumes: 26 | - name: vol 27 | persistentVolumeClaim: 28 | claimName: nfs-sc 29 | containers: 30 | - 
name: nginx 31 | image: nginx 32 | volumeMounts: 33 | - mountPath: "/usr/share/nginx/html" 34 | name: vol 35 | nodeSelector: 36 | kubernetes.io/hostname: worker -------------------------------------------------------------------------------- /chapters/09/path-based-ingress.yaml: -------------------------------------------------------------------------------- 1 | # path-based-ingress.yaml 2 | apiVersion: extensions/v1beta1 3 | kind: Ingress 4 | metadata: 5 | annotations: 6 | kubernetes.io/ingress.class: nginx 7 | nginx.ingress.kubernetes.io/rewrite-target: / 8 | name: apache-path 9 | spec: 10 | rules: 11 | - host: 10.0.1.1.sslip.io 12 | http: 13 | paths: 14 | - backend: 15 | serviceName: apache 16 | servicePort: 80 17 | path: /apache 18 | --- 19 | apiVersion: extensions/v1beta1 20 | kind: Ingress 21 | metadata: 22 | annotations: 23 | kubernetes.io/ingress.class: nginx 24 | nginx.ingress.kubernetes.io/rewrite-target: / 25 | name: nginx-path 26 | spec: 27 | rules: 28 | - host: 10.0.1.1.sslip.io 29 | http: 30 | paths: 31 | - backend: 32 | serviceName: nginx 33 | servicePort: 80 34 | path: /nginx -------------------------------------------------------------------------------- /chapters/07/mysts.yaml: -------------------------------------------------------------------------------- 1 | # mysts.yaml 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: mysts 6 | spec: 7 | serviceName: mysts 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | run: nginx 12 | template: 13 | metadata: 14 | labels: 15 | run: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | volumeMounts: 21 | - name: vol 22 | mountPath: /usr/share/nginx/html 23 | volumeClaimTemplates: 24 | - metadata: 25 | name: vol 26 | spec: 27 | accessModes: [ "ReadWriteOnce" ] 28 | resources: 29 | requests: 30 | storage: 1Gi 31 | --- 32 | apiVersion: v1 33 | kind: Service 34 | metadata: 35 | name: mysts 36 | spec: 37 | clusterIP: None 38 | ports: 39 | - port: 8080 40 | protocol: 
TCP 41 | targetPort: 80 42 | selector: 43 | run: nginx -------------------------------------------------------------------------------- /chapters/17/error-handlers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Workflow 3 | metadata: 4 | generateName: error-handlers- 5 | namespace: default 6 | spec: 7 | entrypoint: intentional-fail 8 | # 에러 핸들러 작업 지정 9 | onExit: error-handler 10 | 11 | templates: 12 | 13 | ############### 14 | # template job 15 | ############### 16 | - name: send-email 17 | container: 18 | image: alpine:latest 19 | command: [sh, -c] 20 | args: ["echo send e-mail: {{workflow.name}} {{workflow.status}}"] 21 | 22 | ############### 23 | # 종료 핸들러 24 | ############### 25 | - name: error-handler 26 | steps: 27 | - - name: notify 28 | template: send-email 29 | 30 | ############### 31 | # entrypoint 32 | ############### 33 | - name: intentional-fail 34 | container: 35 | image: alpine:latest 36 | command: [sh, -c] 37 | args: ["echo intentional failure; exit 1"] -------------------------------------------------------------------------------- /gitops/README.md: -------------------------------------------------------------------------------- 1 | # gitops 2 | 3 | 단일 진실의 원천 예제 디렉토리입니다. GitOps 구현체인 FluxCD, ArgoCD에서 해당 디렉토리를 지속적으로 관찰하다가 새로운 YAML 정의서가 생성되거나 기존의 값이 변경될 때, 그에 따라 새롭게 배포합니다. GitOps 설정 후, 다음과 같은 리소스가 나의 클러스터에 정상적으로 배포되었는지 확인해보시기 바랍니다. 
4 | 5 | - `deployment.yaml` 6 | - `service.yaml` 7 | 8 | ```yaml 9 | # deployment.yaml 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | metadata: 13 | name: mynginx 14 | spec: 15 | replicas: 1 16 | selector: 17 | matchLabels: 18 | run: mynginx 19 | template: 20 | metadata: 21 | labels: 22 | run: mynginx 23 | spec: 24 | containers: 25 | - image: nginx 26 | name: mynginx 27 | ports: 28 | - containerPort: 80 29 | ``` 30 | 31 | 32 | ```yaml 33 | # service.yaml 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | name: mynginx 38 | spec: 39 | ports: 40 | - port: 80 41 | protocol: TCP 42 | targetPort: 80 43 | selector: 44 | run: mynginx 45 | ``` -------------------------------------------------------------------------------- /pipeline-sample/README.md: -------------------------------------------------------------------------------- 1 | # pipeline-sample 2 | 3 | ## CI 파이프라인 4 | 5 | Jenkins CI 파이프라인을 위한 샘플코드입니다. 6 | 7 | 다음과 같은 파일들이 있습니다. 8 | 9 | - `app.py`: CI/CD를 통해 쿠버네티스 클러스터로 배포하려는 어플리케이션 소스코드입니다. 10 | - `Dockerfile`: 소스코드를 실행가능한 파일(도커 이미지)로 변환하는 도커파일입니다. 11 | - `test.py`: CI 파이프라인에서 테스트를 담당하는 스크립트입니다. 12 | - `test.sh`: `docker run --entrypoint=/test` 명령을 이용하여 테스트 수행 시, 실제 실행되는 스크립트입니다. 13 | 14 | CI 파이프라인은 다음과 같은 흐름을 가집니다. 15 | 16 | ```bash 17 | # 소스코드 checkout 18 | git clone $PROJECT_NAME 19 | git checkout $BRANCH 20 | 21 | # 빌드 Artifact 생성 22 | docker build . -t $PROJECT_NAME 23 | 24 | # 빌드 결과물 테스트 25 | docker run --entrypoint=/test $PROJECT_NAME 26 | 27 | # 빌드 결과물 저장 28 | docker push docker.io/$PROJECT_NAME 29 | ``` 30 | 31 | ## 도커 안에 도커 32 | 33 | 도커 안에서 호스트 서버의 도커 데몬을 접근하기 위해서는 다음과 같이 실행합니다. 34 | 35 | ```bash 36 | docker run --rm -it -v /var/run/docker.sock:/var/run/docker.sock docker 37 | ``` 38 | 39 | 호스트 도커 데몬과 통신할 수 있는 소켓파일을 볼륨으로 넘겨줌으로써 컨테이너 안에서 호스트 도커 데몬을 호출할 수 있게 실행합니다. 
40 | 41 | [Docker in Docker 읽을거리](https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/) 42 | -------------------------------------------------------------------------------- /chapters/17/serial-step.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Workflow 3 | metadata: 4 | generateName: serial-step- 5 | namespace: default 6 | spec: 7 | entrypoint: hello-step 8 | templates: 9 | 10 | ############### 11 | # template job 12 | ############### 13 | - name: whalesay 14 | inputs: 15 | parameters: 16 | - name: message 17 | container: 18 | image: docker/whalesay 19 | command: [cowsay] 20 | args: ["{{inputs.parameters.message}}"] 21 | 22 | ############### 23 | # entrypoint 24 | ############### 25 | - name: hello-step 26 | # 순차 실행 27 | steps: 28 | - - name: hello1 29 | template: whalesay 30 | arguments: 31 | parameters: 32 | - name: message 33 | value: "hello1" 34 | - - name: hello2 35 | template: whalesay 36 | arguments: 37 | parameters: 38 | - name: message 39 | value: "hello2" 40 | - - name: hello3 41 | template: whalesay 42 | arguments: 43 | parameters: 44 | - name: message 45 | value: "hello3" -------------------------------------------------------------------------------- /chapters/02/README.md: -------------------------------------------------------------------------------- 1 | # 2. 쿠버네티스 소개 2 | 3 | ## 쿠버네티스란? 
4 | 5 | - 쿠버네티스 공식 웹사이트 : [https://kubernetes.io/](https://kubernetes.io/) 6 | - 쿠버네티스 깃허브 : [https://github.com/kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) 7 | 8 | 9 | ## 쿠버네티스 읽을거리 10 | 11 | ### 블로그 12 | 13 | - [쿠버네티스 전신, Borg](https://kubernetes.io/blog/2015/04/borg-predecessor-to-kubernetes) 14 | - [쿠버네티스란 무엇인가?](https://subicura.com/2019/05/19/kubernetes-basic-1.html) 15 | - [쿠버네티스 관련 블로그: coffeewhale.com](https://coffeewhale.com) 16 | 17 | ### 데모 클러스터 18 | - [Play with k8s](https://labs.play-with-k8s.com/) 19 | - [Playground](https://www.katacoda.com/courses/kubernetes/playground) 20 | 21 | ### Examples & Tutorials 22 | - [쿠버네티스 공식 튜토리얼](https://kubernetes.io/docs/tutorials/) 23 | - [쿠버네티스 example](https://kubernetesbyexample.com/) 24 | 25 | ### 책 26 | - [The Kubernetes Book](https://www.amazon.com/Kubernetes-Book-Version-January-2018-ebook/dp/B072TS9ZQZ/ref=sr_1_3?ie=UTF8&qid=1528625195&sr=8-3&keywords=kubernetes&dpID=41SyKBO3UcL&preST=_SX342_QL70_&dpSrc=srch) 27 | - [Designing Distributed System](https://azure.microsoft.com/en-us/resources/designing-distributed-systems/en-us/) 28 | 29 | -------------------------------------------------------------------------------- /chapters/17/parallel-steps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Workflow 3 | metadata: 4 | generateName: parallel-steps- 5 | namespace: default 6 | spec: 7 | entrypoint: hello-step 8 | templates: 9 | 10 | ############### 11 | # template job 12 | ############### 13 | - name: whalesay 14 | inputs: 15 | parameters: 16 | - name: message 17 | container: 18 | image: docker/whalesay 19 | command: [cowsay] 20 | args: ["{{inputs.parameters.message}}"] 21 | 22 | ############### 23 | # entrypoint 24 | ############### 25 | - name: hello-step 26 | # 병렬 실행 27 | steps: 28 | - - name: hello1 29 | template: whalesay 30 | arguments: 31 | parameters: 32 | - name: message 33 | value: "hello1" 34 | 
- - name: hello2 35 | template: whalesay 36 | arguments: 37 | parameters: 38 | - name: message 39 | value: "hello2" 40 | - name: hello3 # 기존 double dash에서 single dash로 변경 41 | template: whalesay 42 | arguments: 43 | parameters: 44 | - name: message 45 | value: "hello3" -------------------------------------------------------------------------------- /chapters/17/dag-diamond.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Workflow 3 | metadata: 4 | generateName: dag-diamond- 5 | namespace: default 6 | spec: 7 | entrypoint: diamond 8 | templates: 9 | 10 | ############### 11 | # template job 12 | ############### 13 | - name: echo 14 | inputs: 15 | parameters: 16 | - name: message 17 | container: 18 | image: alpine:3.7 19 | command: [echo, "{{inputs.parameters.message}}"] 20 | 21 | ############### 22 | # entrypoint 23 | ############### 24 | - name: diamond 25 | # DAG 구성 26 | dag: 27 | tasks: 28 | - name: A 29 | template: echo 30 | arguments: 31 | parameters: [{name: message, value: A}] 32 | - name: B 33 | dependencies: [A] 34 | template: echo 35 | arguments: 36 | parameters: [{name: message, value: B}] 37 | - name: C 38 | dependencies: [A] 39 | template: echo 40 | arguments: 41 | parameters: [{name: message, value: C}] 42 | - name: D 43 | dependencies: [B, C] 44 | template: echo 45 | arguments: 46 | parameters: [{name: message, value: D}] -------------------------------------------------------------------------------- /chapters/11/web-server.yaml: -------------------------------------------------------------------------------- 1 | # web-server.yaml 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: web-server 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: web-store 10 | replicas: 2 11 | template: 12 | metadata: 13 | labels: 14 | app: web-store 15 | spec: 16 | affinity: 17 | # web 서버끼리 멀리 스케줄링 18 | # app=web-store 라벨을 가진 Pod끼리 멀리 스케줄링 19 | podAntiAffinity: 20 | 
requiredDuringSchedulingIgnoredDuringExecution: 21 | - labelSelector: 22 | matchExpressions: 23 | - key: app 24 | operator: In 25 | values: 26 | - web-store 27 | topologyKey: "kubernetes.io/hostname" 28 | # web-cache 서버끼리 가까이 스케줄링 29 | # app=store 라벨을 가진 Pod끼리 가까이 스케줄링 30 | podAffinity: 31 | requiredDuringSchedulingIgnoredDuringExecution: 32 | - labelSelector: 33 | matchExpressions: 34 | - key: app 35 | operator: In 36 | values: 37 | - store 38 | topologyKey: "kubernetes.io/hostname" 39 | containers: 40 | - name: web-app 41 | image: nginx -------------------------------------------------------------------------------- /chapters/07/train.py: -------------------------------------------------------------------------------- 1 | # train.py 2 | import os, sys, json 3 | import keras 4 | from keras.datasets import mnist 5 | from keras.models import Sequential 6 | from keras.layers import Dense, Dropout 7 | from keras.optimizers import RMSprop 8 | 9 | ##################### 10 | # parameters 11 | ##################### 12 | epochs = int(sys.argv[1]) 13 | activate = sys.argv[2] 14 | dropout = float(sys.argv[3]) 15 | print(sys.argv) 16 | ##################### 17 | 18 | batch_size, num_classes, hidden = (128, 10, 512) 19 | loss_func = "categorical_crossentropy" 20 | opt = RMSprop() 21 | 22 | # preprocess 23 | (x_train, y_train), (x_test, y_test) = mnist.load_data() 24 | x_train = x_train.reshape(60000, 784) 25 | x_test = x_test.reshape(10000, 784) 26 | x_train = x_train.astype('float32') / 255 27 | x_test = x_test.astype('float32') / 255 28 | 29 | # convert class vectors to binary class matrices 30 | y_train = keras.utils.to_categorical(y_train, num_classes) 31 | y_test = keras.utils.to_categorical(y_test, num_classes) 32 | 33 | # build model 34 | model = Sequential() 35 | model.add(Dense(hidden, activation='relu', input_shape=(784,))) 36 | model.add(Dropout(dropout)) 37 | model.add(Dense(num_classes, activation=activate)) 38 | model.summary() 39 | 40 | 
model.compile(loss=loss_func, optimizer=opt, metrics=['accuracy']) 41 | 42 | # train 43 | history = model.fit(x_train, y_train, batch_size=batch_size, 44 | epochs=epochs, validation_data=(x_test, y_test)) 45 | 46 | score = model.evaluate(x_test, y_test, verbose=0) 47 | print('Test loss:', score[0]) 48 | print('Test accuracy:', score[1]) -------------------------------------------------------------------------------- /chapters/16/minio-instance.yaml: -------------------------------------------------------------------------------- 1 | # minio-instance.yaml 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: minio-creds-secret 6 | type: Opaque 7 | data: 8 | accesskey: bWluaW8= # minio 9 | secretkey: bWluaW8xMjM= # minio123 10 | --- 11 | # MinIO Service 리소스 12 | apiVersion: v1 13 | kind: Service 14 | metadata: 15 | name: minio-service 16 | spec: 17 | type: ClusterIP 18 | ports: 19 | - port: 9000 20 | targetPort: 9000 21 | protocol: TCP 22 | selector: 23 | app: minio-dev 24 | --- 25 | # MinIO 사용자 정의 리소스 26 | apiVersion: operator.min.io/v1 27 | kind: MinIOInstance 28 | metadata: 29 | name: minio-dev 30 | spec: 31 | metadata: 32 | labels: 33 | app: minio-dev 34 | annotations: 35 | prometheus.io/path: /minio/prometheus/metrics 36 | prometheus.io/port: "9000" 37 | prometheus.io/scrape: "true" 38 | image: minio/minio:RELEASE.2020-06-03T22-13-49Z 39 | serviceName: minio-internal-service 40 | zones: 41 | - name: "zone-0" 42 | servers: 1 43 | volumesPerServer: 1 44 | mountPath: /export 45 | volumeClaimTemplate: 46 | metadata: 47 | name: data 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 1Gi 54 | credsSecret: 55 | name: minio-creds-secret 56 | podManagementPolicy: Parallel 57 | requestAutoCert: false 58 | certConfig: 59 | commonName: "" 60 | organizationName: [] 61 | dnsNames: [] 62 | liveness: 63 | initialDelaySeconds: 120 64 | periodSeconds: 60 65 | readiness: 66 | initialDelaySeconds: 120 67 | periodSeconds: 60 
-------------------------------------------------------------------------------- /chapters/15/Jenkinsfile: -------------------------------------------------------------------------------- 1 | // Jenkinsfile 2 | pipeline { 3 | agent { 4 | kubernetes { 5 | yaml ''' 6 | apiVersion: v1 7 | kind: Pod 8 | spec: 9 | containers: 10 | - name: git 11 | image: alpine/git 12 | tty: true 13 | command: ["cat"] 14 | env: 15 | - name: PROJECT_URL 16 | value: https://github.com/bjpublic/core_kubernetes.git 17 | - name: docker 18 | image: docker 19 | tty: true 20 | command: ["cat"] 21 | env: 22 | - name: PROJECT_NAME 23 | value: jenkins-pipeline-sample 24 | volumeMounts: 25 | - mountPath: /var/run/docker.sock 26 | name: docker-socket 27 | volumes: 28 | - name: docker-socket 29 | hostPath: 30 | path: /var/run/docker.sock 31 | ''' 32 | } 33 | } 34 | stages { 35 | stage('Checkout') { 36 | steps { 37 | container('git') { 38 | // 소스코드 checkout 39 | sh "git clone \$PROJECT_URL" 40 | } 41 | } 42 | } 43 | stage('Build') { 44 | steps { 45 | container('docker') { 46 | // 도커 빌드 47 | sh """ 48 | cd \$PROJECT_NAME/pipeline-sample 49 | docker build -t \$PROJECT_NAME . 
50 | """ 51 | } 52 | } 53 | } 54 | stage('Test') { 55 | steps { 56 | container('docker') { 57 | // 이미지 테스트 58 | sh "docker run --entrypoint /test \$PROJECT_NAME" 59 | } 60 | } 61 | } 62 | stage('Push') { 63 | steps { 64 | container('docker') { 65 | // 도커헙 계정 정보 가져오기 66 | withCredentials([[$class: 'UsernamePasswordMultiBinding', 67 | credentialsId: 'dockerhub', 68 | usernameVariable: 'DOCKERHUB_USER', 69 | passwordVariable: 'DOCKERHUB_PASSWORD']]) { 70 | // 이미지 패키징 & 업로드 71 | sh """ 72 | docker login -u ${DOCKERHUB_USER} -p ${DOCKERHUB_PASSWORD} 73 | docker tag \$PROJECT_NAME ${DOCKERHUB_USER}/\$PROJECT_NAME 74 | docker push ${DOCKERHUB_USER}/\$PROJECT_NAME 75 | """ 76 | } 77 | } 78 | } 79 | } 80 | } 81 | } -------------------------------------------------------------------------------- /chapters/03/README.md: -------------------------------------------------------------------------------- 1 | # 3. 쿠버네티스 설치 2 | 3 | 4 | ### 3.2.2 마스터 노드 설치 5 | 6 | ```bash 7 | sudo apt update 8 | sudo apt install -y docker.io nfs-common dnsutils curl 9 | 10 | # k3s 마스터 설치 11 | curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="\ 12 | --disable traefik \ 13 | --disable metrics-server \ 14 | --node-name master --docker" \ 15 | INSTALL_K3S_VERSION="v1.18.6+k3s1" sh -s - 16 | 17 | # 마스터 통신을 위한 설정 18 | mkdir ~/.kube 19 | sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config 20 | sudo chown -R $(id -u):$(id -g) ~/.kube 21 | echo "export KUBECONFIG=~/.kube/config" >> ~/.bashrc 22 | source ~/.bashrc 23 | 24 | # 설치 확인 25 | kubectl cluster-info 26 | # Kubernetes master is running at https://127.0.0.1:6443 27 | # CoreDNS is running at https://127.0.0.1:6443/api/v1/namespaces... 28 | # 29 | # To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 30 | 31 | kubectl get node -o wide 32 | # NAME STATUS ROLES AGE VERSION INTERNAL-IP ... 33 | # master Ready master 27m v1.18.6+k3s1 10.0.1.1 ... 
34 | ``` 35 | 36 | ```bash 37 | # 마스터 노드 토큰 확인 38 | NODE_TOKEN=$(sudo cat /var/lib/rancher/k3s/server/node-token) 39 | echo $NODE_TOKEN 40 | # K10e6f5a983710a836b9ad21ca4a99fcxx::server:c8ae61726384c19726022879xx 41 | 42 | MASTER_IP=$(kubectl get node master -ojsonpath="{.status.addresses[0].address}") 43 | echo $MASTER_IP 44 | # 10.0.1.1 45 | ``` 46 | 47 | 48 | ### 3.2.3 워커 노드 추가 49 | 50 | ```bash 51 | NODE_TOKEN=<마스터에서 확인한 토큰 입력> 52 | MASTER_IP=<마스터에서 얻은 내부IP 입력> 53 | 54 | sudo apt update 55 | sudo apt install -y docker.io nfs-common curl 56 | 57 | # k3s 워커 노드 설치 58 | curl -sfL https://get.k3s.io | K3S_URL=https://$MASTER_IP:6443 \ 59 | K3S_TOKEN=$NODE_TOKEN \ 60 | INSTALL_K3S_EXEC="--node-name worker --docker" \ 61 | INSTALL_K3S_VERSION="v1.18.6+k3s1" sh -s - 62 | ``` 63 | 64 | 65 | ### 3.2.4 설치문제 해결 방법 66 | 67 | #### 1) 마스터 노드 로그 확인 68 | 69 | ```bash 70 | # 마스터 노드 상태 확인 71 | sudo systemctl status k3s.service 72 | # * k3s.service - Lightweight Kubernetes 73 | # ... 74 | # CGroup: /system.slice/k3s.service 75 | # └─955 /usr/local/bin/k3s server --disable traefik \ 76 | # --disable metrics-server \ 77 | # --node-name master \ 78 | # --docker 79 | # 80 | # Aug 11 17:37:09 ip-10-0-1-1 k3s[955]: I0811 17:37:09.189289 ... 81 | # Aug 11 17:37:14 ip-10-0-1-1 k3s[955]: I0811 17:37:14.190442 ... 82 | # ... 83 | 84 | # journald 로그 확인 85 | sudo journalctl -u k3s.service 86 | # Jul 13 17:51:08 ip-10-0-1-1 k3s[955]: W0713 17:51:08.168244 ... 87 | # Jul 13 17:51:08 ip-10-0-1-1 k3s[955]: I0713 17:51:08.649295 ... 88 | # ... 89 | ``` 90 | 91 | 에러 메세지나 exception 메세지를 확인합니다. 92 | 93 | #### 2) 워커 노드 로그 확인 94 | 95 | ```bash 96 | # 워커 노드 상태 확인 97 | sudo systemctl status k3s-agent.service 98 | # * k3s-agent.service - Lightweight Kubernetes 99 | # ... 
100 | # CGroup: /system.slice/k3s-agent.service 101 | # └─955 /usr/local/bin/k3s agent --token K10e6f5a983710 ..\ 102 | # --server 10.0.1.1 \ 103 | # --node-name worker \ 104 | # --docker 105 | # 106 | # Aug 11 17:37:09 ip-10-0-1-2 k3s[955]: I0811 17:37:09.189289 ... 107 | # Aug 11 17:37:14 ip-10-0-1-2 k3s[955]: I0811 17:37:14.190442 ... 108 | # ... 109 | 110 | # journald 로그 확인 111 | sudo journalctl -u k3s-agent.service 112 | # Jul 13 17:51:08 ip-10-0-1-2 k3s[955]: W0713 17:51:08.168244 ... 113 | # Jul 13 17:51:08 ip-10-0-1-2 k3s[955]: I0713 17:51:08.649295 ... 114 | # ... 115 | ``` 116 | 117 | 체크리스트 118 | 119 | - `NODE_TOKEN` 값이 제대로 설정 되었나요? 120 | - `MASTER_IP`가 제대로 설정 되었나요? 121 | - 워커 노드에서 마스터 노드로 IP 연결이 가능한가요? 122 | - 마스터, 워커 노드에 적절한 포트가 열려 있나요? 123 | 124 | 125 | #### 3) 마스터 & 워커 노드 재설치 126 | 127 | 마스터 노드에서 다음 명령을 수행하여 마스터를 제거하시기 바랍니다. 128 | 129 | ```bash 130 | /usr/local/bin/k3s-uninstall.sh 131 | ``` 132 | 133 | 워커 노드에서 다음 명령을 수행하여 워커를 제거하시기 바랍니다. 134 | 135 | ```bash 136 | /usr/local/bin/k3s-agent-uninstall.sh 137 | ``` 138 | 139 | 삭제 완료 후, 처음부터 다시 재설치를 진행합니다. 140 | 141 | 142 | #### 4) 공식문서 참고 143 | 144 | [https://rancher.com/docs/k3s/latest/en/installation](https://rancher.com/docs/k3s/latest/en/installation) -------------------------------------------------------------------------------- /chapters/14/README.md: -------------------------------------------------------------------------------- 1 | # 14. 로깅과 모니터링 2 | 3 | ## 14.1 로깅 시스템 구축 4 | 5 | ### 14.1.2 클러스터 레벨 로깅 원리 6 | 7 | ```bash 8 | docker logs 9 | ``` 10 | 11 | ```bash 12 | /var/lib/docker/containers//-json.log 13 | ``` 14 | 15 | ```bash 16 | # nginx라는 컨테이너를 하나 실행하고 CONTAINER_ID 값을 복사합니다. 17 | docker run -d nginx 18 | # 4373b7e095215c23057b1dc4423527239e56a33dbd 19 | 20 | # docker 명령을 통한 로그 확인 21 | docker logs 4373b7e095215c23057b1dc4423527239e56a33dbd 22 | # /docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, will ... 23 | # /docker-entrypoint.sh: Looking for shell scripts in /docker-... 
24 | # /docker-entrypoint.sh: Launching /docker-entrypoint.d/... 25 | # ... 26 | 27 | # 호스트 서버의 로그 파일 확인 28 | sudo tail /var/lib/docker/containers/4373b7e095215c23057b1dc4423527239e56a33dbd/4373b7e095215c23057b1dc4423527239e56a33dbd-json.log 29 | # {"log":"/docker-entrypoint.sh: /docker-entrypoint.d/ is not empty, \ 30 | # will attempt to perform configuration\n","stream":"stdout",\ 31 | # "time":"2020-07-11T03:22:11.817939191Z"} 32 | # ... 33 | 34 | # 컨테이너 정리 35 | docker stop 4373b7e095215c23057b1dc4423527239e56a33dbd 36 | docker rm 4373b7e095215c23057b1dc4423527239e56a33dbd 37 | ``` 38 | 39 | ### 14.1.6 EFK Stack 40 | 41 | ```bash 42 | # fetch stable repository의 elastic-stack 43 | helm fetch --untar stable/elastic-stack --version 2.0.1 44 | 45 | vim elastic-stack/values.yaml 46 | ``` 47 | 48 | ```yaml 49 | # elastic-stack/values.yaml 50 | 51 | # 약 12줄 52 | logstash: 53 | enabled: false # 기존 true 54 | 55 | # 약 29줄 56 | fluent-bit: 57 | enabled: true # 기존 false 58 | ``` 59 | 60 | ```bash 61 | # elasticsearch 수정 62 | vim elastic-stack/charts/elasticsearch/values.yaml 63 | ``` 64 | 65 | ```yaml 66 | # elastic-stack/charts/elasticsearch/values.yaml 67 | # ... 68 | # 약 110줄 69 | client: 70 | replicas: 1 # 기존 2 71 | # ... 72 | # 약 171줄 73 | master: 74 | replicas: 2 # 기존 3 75 | # ... 76 | # 약 225줄 77 | data: 78 | replicas: 1 # 기존 2 79 | ``` 80 | 81 | ```bash 82 | # fluent-bit 수정 83 | vim elastic-stack/charts/fluent-bit/values.yaml 84 | ``` 85 | 86 | ```yaml 87 | # elastic-stack/charts/fluent-bit/values.yaml 88 | # ... 89 | # 약 45줄 90 | backend: 91 | type: es # 기존 forward 92 | # ... 93 | es: 94 | host: efk-elasticsearch-client # 기존 elasticsearch -> host 변경 95 | 96 | # ... 
97 | 98 | # 약 226줄 99 | input: 100 | tail: 101 | memBufLimit: 5MB 102 | parser: docker 103 | path: /var/log/containers/*.log 104 | ignore_older: "" 105 | systemd: 106 | enabled: true # 기존 false 107 | filters: 108 | systemdUnit: 109 | - docker.service 110 | - k3s.service # 기존 kubelet.service 111 | # - node-problem-detector.service --> 주석처리 112 | ``` 113 | 114 | ```bash 115 | # kibana ingress 수정 116 | vim elastic-stack/charts/kibana/values.yaml 117 | ``` 118 | 119 | ```yaml 120 | # elastic-stack/charts/kibana/values.yaml 121 | # ... 122 | # 약 79줄 - kibana ingress 설정하기 123 | ingress: 124 | enabled: true # 기존 false 125 | hosts: 126 | - kibana.10.0.1.1.sslip.io # 공인IP 입력 127 | annotations: 128 | kubernetes.io/ingress.class: nginx 129 | ``` 130 | 131 | ```bash 132 | helm install efk ./elastic-stack 133 | # NAME: efk 134 | # LAST DEPLOYED: Sat Jul 11 07:17:06 2020 135 | # NAMESPACE: default 136 | # STATUS: deployed 137 | # REVISION: 1 138 | # NOTES: 139 | # The elasticsearch cluster and associated extras have been installed. 140 | # Kibana can be accessed: 141 | # ... 142 | 143 | # 모든 Pod가 다 실행되기까지 wait 144 | watch kubectl get pod,svc 145 | ``` 146 | 147 | index를 생성하는 방법은 다음과 같습니다. 148 | 149 | 1. `Explore on my own` 클릭 150 | 2. 왼쪽 패널 `Discover` 클릭 151 | 3. Index pattern에 `kubernetes_cluster-*` 입력 > Next step 152 | 4. Time Filter field name에 `@timestamp` 선택 > Create index pattern 153 | 5. 다시 `Discover` 패널로 가면 `Pod`들의 로그들을 볼 수 있습니다. 154 | 155 | ### Clean up 156 | 157 | ```bash 158 | helm delete efk 159 | ``` 160 | 161 | ## 14.2 리소스 모니터링 시스템 구축 162 | 163 | ### 14.2.2 컨테이너 메트릭 정보 수집 원리 164 | 165 | ```bash 166 | docker run -d nginx 167 | # 4373b7e095215c23057b1dc4423527239e56a33dbd 168 | 169 | docker stats 4373b7e095215c23057b1dc4423527239e56a33dbd 170 | # CONTAINER ID NAME CPU % MEM USAGE / LIMIT MEM ... 171 | # 4af9f73eb06f dreamy 0.00% 3.227MiB / 7.773GiB 0.04% ... 
172 | 173 | docker stop 4373b7e095215c23057b1dc4423527239e56a33dbd 174 | docker rm 4373b7e095215c23057b1dc4423527239e56a33dbd 175 | ``` 176 | 177 | ### 14.2.3 Prometheus & Grafana 구축 178 | 179 | ```bash 180 | helm fetch --untar stable/prometheus-operator --version 8.16.1 181 | 182 | vim prometheus-operator/values.yaml 183 | ``` 184 | 185 | ```yaml 186 | # 약 495줄 - grafana ingress 설정하기 187 | grafana: 188 | ... 189 | ingress: 190 | enabled: true # 기존 false 191 | annotations: 192 | kubernetes.io/ingress.class: nginx # 추가 193 | hosts: 194 | - grafana.10.0.1.1.sslip.io # 공인IP 입력 195 | ``` 196 | 197 | ```bash 198 | helm install mon ./prometheus-operator 199 | # manifest_sorter.go:192: info: skipping unknown hook: "crd-install" 200 | # manifest_sorter.go:192: info: skipping unknown hook: "crd-install" 201 | # manifest_sorter.go:192: info: skipping unknown hook: "crd-install" 202 | # manifest_sorter.go:192: info: skipping unknown hook: "crd-install" 203 | # manifest_sorter.go:192: info: skipping unknown hook: "crd-install" 204 | # manifest_sorter.go:192: info: skipping unknown hook: "crd-install" 205 | # NAME: mon 206 | # LAST DEPLOYED: Thu Jul 16 08:44:38 2020 207 | # ... 208 | 209 | watch kubectl get pod 210 | ``` 211 | 212 | 웹 브라우저를 통해 grafana를 접근합니다. 213 | 214 | - `username`: admin 215 | - `password`: prom-operator 216 | 217 | 좌측 상단의 `Home`을 누르면 다양한 대시보드가 생성된 것을 볼 수 있습니다. 218 | 219 | ### Clean up 220 | 221 | ```bash 222 | helm delete mon 223 | ``` 224 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 6 | # 핵심만 콕! 
쿠버네티스 7 | 8 | - 부제: 쿠버네티스의 핵심을 실습하고 이해하는 9 | - 저자: 유홍근 ([커피고래](https://coffeewhale.com)) 10 | - 출간: 2020년 9월 18일 11 | - 정가: 32,000원 12 | - 페이지: 504 13 | 14 | ## 구매 링크 15 | 16 | - [yes24](http://www.yes24.com/Product/Goods/92426926?OzSrank=2) 17 | - [교보문고](http://www.kyobobook.co.kr/product/detailViewKor.laf?ejkGb=KOR&mallGb=KOR&barcode=9791165920180&orderClick=LAG&Kc=) 18 | - [알라딘](https://www.aladin.co.kr/shop/wproduct.aspx?ItemId=250937523) 19 | - [인터파크](http://book.interpark.com/product/BookDisplay.do?_method=detail&sc.shopNo=0000400000&sc.prdNo=338936080&sc.saNo=003002001&bid1=search&bid2=product&bid3=title&bid4=001) 20 | 21 | ![쿠버네티스_입체이미지](book-img.png) 22 | 23 | ## 책 소개 24 | 25 | > 쿠버네티스 첫 시작을 위한 최고의 선택 26 | 27 | 쿠버네티스를 처음 접하시는 분을 위해 준비하였습니다. 컨테이너, 쿠버네티스 기술에 대해 잘 모르더라도 이 책의 예제를 따라하다보면 어느새 쿠버네티스의 매력에 푹 빠질 것입니다. 쿠버네티스 입문서로 시작하기에 최고의 선택, 여러분도 쿠버네티스라는 거인의 어깨 위에 올라서서 클라우드 네이티브가 꿈꾸는 세상을 바라보시기 바랍니다. 28 | 29 | ## 이 책의 특징 30 | 31 | - 핵심을 위주로 설명하여 쿠버네티스의 큰 그림을 빠르게 이해할 수 있습니다. 32 | - 컨테이너 기술에 대한 기본적인 이해와 장점을 파악할 수 있습니다. 33 | - 예제를 직접 따라 하면서 사용법을 익힐 수 있습니다. 34 | 35 | ## 이 책이 필요한 독자 36 | 37 | - 쿠버네티스를 처음 접해보시는 분 38 | - 클라우드 네이티브 기술에 관심이 많으신 분 39 | - 효율적인 운영 환경을 고민하시는 분 40 | 41 | ## 출판사 리뷰 42 | 43 | 이 책에서는 방대한 시스템인 쿠버네티스를 처음 접할 때 어떤 부분을 집중적으로 살펴볼지 설명하고 최적의 학습 경로를 따라가면서, 단기간에 쿠버네티스에 대해서 이해하고 현실적으로 활용해 볼 수 있는 방법을 제공합니다. 이 책은 쿠버네티스의 모든 내용을 상세히 다루는 레퍼런스 북 형태라기보다는, 전반적인 내용에 대해서 핵심 부분만을 설명하고 직접 실습해 보면서 쿠버네티스의 큰 그림을 이해하는 것에 초점을 맞췄습니다. 각 챕터마다 마무리할 수 있도록 도와주며 참고와 주의를 통해 꿀팁을 확인해볼 수 있습니다. 제목처럼 핵심만 콕! 학습하여 이해해보시기 바랍니다. 44 | 45 | ## 목차 및 예제코드 46 | 47 | 1. [도커 기초](chapters/01) 48 | - 1.1 도커 소개 49 | - 1.2 도커 기본 명령 50 | - 1.3 도커 저장소 51 | - 1.4 도커 파일 작성 52 | - 1.5 도커 실행 고급 53 | - 1.6 마치며 54 | 2. [쿠버네티스 소개](chapters/02) 55 | - 2.1 쿠버네티스란? 56 | - 2.2 쿠버네티스의 기본 개념 57 | - 2.3 아키텍처 58 | - 2.4 장점 59 | - 2.5 마치며 60 | 3. [쿠버네티스 설치](chapters/03) 61 | - 3.1 k3s 소개 62 | - 3.2 k3s 설치하기 63 | - 3.3 마치며 64 | 4. [쿠버네티스 첫 만남](chapters/04) 65 | - 4.1 기본 명령 66 | - 4.2 고급 명령 67 | - 4.3 마치며 68 | 5. 
[Pod 살펴보기](chapters/05) 69 | - 5.1 Pod 소개 70 | - 5.2 라벨링 시스템 71 | - 5.3 실행 명령 및 파라미터 지정 72 | - 5.4 환경변수 설정 73 | - 5.5 볼륨 연결 74 | - 5.6 리소스 관리 75 | - 5.7 상태 확인 76 | - 5.8 2개 컨테이너 실행 77 | - 5.9 초기화 컨테이너 78 | - 5.10 Config 설정 79 | - 5.11 민감 데이터 관리 80 | - 5.12 메타데이터 전달 81 | - 5.13 마치며 82 | 6. [쿠버네티스 네트워킹](chapters/06) 83 | - 6.1 Service 소개 84 | - 6.2 Service 종류 85 | - 6.3 네트워크 모델 86 | - 6.4 마치며 87 | 7. [쿠버네티스 컨트롤러](chapters/07) 88 | - 7.1 컨트롤러란? 89 | - 7.2 ReplicaSet 90 | - 7.3 Deployment 91 | - 7.4 StatefulSet 92 | - 7.5 DaemonSet 93 | - 7.6 Job & CronJob 94 | - 7.7 마치며 95 | 8. [helm 패키지 매니저](chapters/08) 96 | - 8.1 helm이란? 97 | - 8.2 원격 리파지토리(repository) 98 | - 8.3 외부 chart 설치(WordPress) 99 | - 8.4 마치며 100 | 9. [Ingress 리소스](chapters/09) 101 | - 9.1 Ingress란? 102 | - 9.2 Ingress 기본 사용법 103 | - 9.3 Basic Auth 설정 104 | - 9.4 TLS 설정 105 | - 9.5 마치며 106 | 10. [스토리지](chapters/10) 107 | - 10.1 PersistentVolume 108 | - 10.2 PersistentVolumeClaim 109 | - 10.3 StorageClass 110 | - 10.4 쿠버네티스 스토리지 활용 111 | - 10.5 마치며 112 | 11. [고급 스케줄링](chapters/11) 113 | - 11.1 고가용성 확보 – Pod 레벨 114 | - 11.2 고가용성 확보 – Node 레벨 115 | - 11.3 Taint & Toleration 116 | - 11.4 Affinity & AntiAffinity 117 | - 11.5 마치며 118 | 12. [클러스터 관리](chapters/12) 119 | - 12.1 리소스 관리 120 | - 12.2 노드 관리 121 | - 12.3 Pod 개수 유지 122 | - 12.4 마치며 123 | 13. [접근 제어](chapters/13) 124 | - 13.1 사용자 인증(Authentication) 125 | - 13.2 역할 기반 접근 제어(RBAC) 126 | - 13.2.1 Role (ClusterRole) 127 | - 13.3 네트워크 접근 제어(Network Policy) 128 | - 13.4 마치며 129 | 14. [로깅과 모니터링](chapters/14) 130 | - 14.1 로깅 시스템 구축 131 | - 14.2 리소스 모니터링 시스템 구축 132 | - 14.3 마치며 133 | 15. [CI/CD](chapters/15) 134 | - 15.1 DevOps와 CI/CD 135 | - 15.2 CI 파이프라인 136 | - 15.3 GitOps를 이용한 CD 137 | - 15.4 로컬 쿠버네티스 개발 138 | - 15.5 마치며 139 | 16. [사용자 정의 리소스](chapters/16) 140 | - 16.1 사용자 정의 리소스란? 141 | - 16.2 Operator 패턴 142 | - 16.3 유용한 Operators 143 | - 16.4 마치며 144 | 17. 
[Workflow 관리](chapters/17) 145 | - 17.1 Argo workflow 소개 146 | - 17.2 Workflow 구성하기 147 | - 17.3 활용 방법 소개 148 | - 17.4 마치며 149 | - 부록 쿠버네티스의 미래 150 | - 클라우드 플랫폼 표준 151 | - 애플리케이션 배포 표준화 152 | - 범용 클러스터 플랫폼 153 | - 마치며 154 | 155 | ## 참고자료 156 | 157 | ### VirtualBox를 이용한 k3s 클러스터 구축 방법 소개 158 | 159 | Chapter 3 `쿠버네티스 설치`에 대한 참고자료입니다. 내 로컬 PC(윈도우)에서 클러스터를 구축하기 위해 VirtualBox를 이용하여 k3s 클러스터를 구축하는 방법에 대해서 소개합니다. 160 | 161 | - [VirtualBox를 이용한 k3s 클러스터 구축](https://coffeewhale.com/kubernetes/cluster/virtualbox/2020/08/31/k8s-virtualbox) 162 | 163 | ### 클라우드 서비스별 클러스터 구축 방법 소개 164 | 165 | Chapter 11 `고급 스케줄링`에서 Node 레벨 고가용성 확보를 위한 Cluster Auto Scaler 예제를 따라하기 위한 클라우드 서비스별 클러스터 구축 방법을 설명드립니다. 166 | 167 | - [AWS EKS 클러스터 구축](https://coffeewhale.com/kubernetes/cluster/eks/2020/09/03/k8s-eks/) 168 | - [GCP GKE 클러스터 구축](https://coffeewhale.com/kubernetes/cluster/gke/2020/09/04/k8s-gke/) 169 | 170 | ### CI Pipeline 샘플코드 171 | 172 | Chapter 15 `CI/CD`에서 Jenkins CI pipeline으로 활용하는 샘플코드입니다. 173 | 174 | - [pipeline-sample/](pipeline-sample/) 디렉토리 참조 바랍니다. 175 | 176 | 177 | ### GitOps 단일 진실의 원천 배포 디렉토리 178 | 179 | Chapter 15 `CI/CD`에서 FluxCD, ArgoCD에서 단일 진실의 원천으로 사용하는 샘플코드입니다. 180 | 181 | - [gitops/](gitops/) 디렉토리 참조 바랍니다. 182 | 183 | ## 오탈자 제보 및 문의 사항 184 | 185 | 다음 2가지 방법을 이용하여 연락주시기 바랍니다. 186 | 187 | - 깃허브 리파지토리 [issue 생성](https://github.com/bjpublic/core_kubernetes/issues/new) 188 | - `hongkunyoo (at) gmail.com` (저자, 유홍근)으로 메일 전송 189 | -------------------------------------------------------------------------------- /chapters/08/README.md: -------------------------------------------------------------------------------- 1 | # 8. 
helm 패키지 매니저 2 | 3 | ## 8.1 `helm`이란 4 | 5 | ### 8.1.1 `helm` 설치 6 | 7 | ```bash 8 | curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash -s -- --version v3.2.2 9 | ``` 10 | 11 | ### 8.1.2 `chart` 생성 12 | 13 | ```bash 14 | helm create mychart 15 | # Creating mychart 16 | 17 | ls mychart 18 | # Chart.yaml charts templates values.yaml 19 | ``` 20 | 21 | ```bash 22 | ls mychart/templates 23 | # NOTES.txt 24 | # _helpers.tpl 25 | # deployment.yaml 26 | # ingress.yaml 27 | # service.yaml 28 | # serviceaccount.yaml 29 | # tests/ 30 | ``` 31 | 32 | ```yaml 33 | # mychart/templates/service.yaml 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | name: {{ include "mychart.fullname" . }} 38 | labels: 39 | {{- include "mychart.labels" . | nindent 4 }} 40 | spec: 41 | type: {{ .Values.service.type }} # 서비스 타입 지정 42 | ports: 43 | - port: {{ .Values.service.port }} # 서비스 포트 지정 44 | targetPort: http 45 | protocol: TCP 46 | name: http 47 | selector: 48 | {{- include "mychart.selectorLabels" . | nindent 4 }} 49 | ``` 50 | 51 | ```yaml 52 | # values.yaml 53 | replicaCount: 1 54 | 55 | image: 56 | repository: nginx 57 | pullPolicy: IfNotPresent 58 | 59 | imagePullSecrets: [] 60 | nameOverride: "" 61 | fullnameOverride: "" 62 | 63 | ... 64 | # 약 40줄 65 | service: 66 | type: LoadBalancer # 기존 ClusterIP 67 | port: 8888 # 기존 80 68 | 69 | ... 70 | ``` 71 | 72 | ### 8.1.3 chart 설치 73 | 74 | ```bash 75 | helm install foo ./mychart 76 | # NAME: foo 77 | # LAST DEPLOYED: Tue Mar 10 14:26:02 2020 78 | # NAMESPACE: default 79 | # STATUS: deployed 80 | # REVISION: 1 81 | # NOTES: 82 | # .... 83 | ``` 84 | 85 | ```bash 86 | # service 리소스를 조회합니다. 
87 | kubectl get svc 88 | # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) 89 | # kubernetes ClusterIP 10.43.0.1 443/TCP 90 | # foo-mychart LoadBalancer 10.43.142.107 10.0.1.1 8888:32597/TCP 91 | ``` 92 | 93 | ### 8.1.4 `chart` 리스트 조회 94 | 95 | ```bash 96 | # 설치된 chart 리스트 확인하기 97 | helm list 98 | # NAME NAMESPACE REVISION UPDATED STATUS CHART APP VER 99 | # foo default 1 2020-3-1 deployed mychart-0.1.0 1.16.0 100 | 101 | # 다른 네임스페이스에는 설치된 chart가 없습니다. 102 | helm list -n kube-system 103 | # NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION 104 | ``` 105 | 106 | ### 8.1.5 chart 랜더링 107 | 108 | ```bash 109 | helm template foo ./mychart > foo-output.yaml 110 | 111 | cat foo-output.yaml 112 | # 전체 YAML 정의서 출력 113 | ``` 114 | 115 | ### 8.1.6 chart 업그레이드 116 | 117 | ```yaml 118 | # values.yaml 119 | ... 120 | 121 | service: 122 | type: NodePort # 기존 LoadBalancer 123 | port: 8888 124 | ... 125 | ``` 126 | 127 | ```bash 128 | helm upgrade foo ./mychart 129 | # Release "foo" has been upgraded. Happy Helming! 130 | # NAME: foo 131 | # LAST DEPLOYED: Mon Jul 6 19:26:35 2020 132 | # NAMESPACE: default 133 | # STATUS: deployed 134 | # REVISION: 2 135 | # ... 136 | 137 | kubectl get svc 138 | # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) 139 | # kubernetes ClusterIP 10.43.0.1 443/TCP 140 | # foo NodePort 10.43.155.85 8888:32160/TCP 141 | 142 | helm list 143 | # NAME NAMESPACE REVISION UPDATED STATUS CHART 144 | # foo default 2 2020-3-2 deployed mychart-0.1.0 145 | ``` 146 | 147 | ### 8.1.7 chart 배포상태 확인 148 | 149 | ```bash 150 | helm status foo 151 | # Release "foo" has been upgraded. Happy Helming! 152 | # NAME: foo 153 | # LAST DEPLOYED: Mon Jul 6 19:26:35 2020 154 | # NAMESPACE: default 155 | # STATUS: deployed 156 | # REVISION: 2 157 | # ... 
158 | ``` 159 | 160 | ### 8.1.8 `chart` 삭제 161 | 162 | ```bash 163 | helm delete foo 164 | # release "foo" uninstalled 165 | 166 | helm list 167 | # NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION 168 | ``` 169 | 170 | ## 8.2 원격 레포지토리 171 | 172 | ### 8.2.1 레포지토리 추가 173 | 174 | ```bash 175 | # stable repo 추가 176 | helm repo add stable https://kubernetes-charts.storage.googleapis.com 177 | ``` 178 | 179 | ### 8.2.2 레포지토리 업데이트 180 | 181 | ```bash 182 | # repo update 183 | helm repo update 184 | # ...Successfully got an update from the "stable" chart repository 185 | # Update Complete. ⎈ Happy Helming!⎈ 186 | ``` 187 | 188 | ### 8.2.3 레포지토리 조회 189 | 190 | ```bash 191 | # 현재 등록된 repo 리스트 192 | helm repo list 193 | # NAME URL 194 | # stable https://kubernetes-charts.storage.googleapis.com 195 | ``` 196 | 197 | ### 8.2.4 레포지토리내 chart 조회 198 | 199 | ```bash 200 | # stable 레포 안의 chart 리스트 201 | helm search repo stable 202 | # NAME CHART VERSION APP VERSION DESCRIPTION 203 | # stable/aerospike 0.3.2 v4.5.0.5 A Helm chart .. 204 | # stable/airflow 7.1.4 1.10.10 Airflow is a .. 205 | # stable/ambassador 5.3.2 0.86.1 DEPRECATED ... 206 | # stable/anchore-engine 1.6.8 0.7.2 Anchore container 207 | # stable/apm-server 2.1.5 7.0.0 The server ... 208 | # ... 209 | 210 | helm search repo stable/airflow 211 | # NAME CHART VERSION APP VERSION DESCRIPTION 212 | # stable/airflow 7.2.0 1.10.10 Airflow is a plat... 213 | ``` 214 | 215 | 다음 주소에서 `stable` 레포지토리 외에 다양한 원격 저장소를 조회해 볼 수 있습니다. 
216 | 217 | helm 허브: [https://hub.helm.sh/charts](https://hub.helm.sh/charts) 218 | 219 | ## 8.3 외부 chart 설치 (WordPress) 220 | 221 | ### 8.3.1 `chart install` 222 | 223 | ```bash 224 | helm install wp stable/wordpress \ 225 | --version 9.0.3 \ 226 | --set service.port=8080 \ 227 | --namespace default 228 | # WARNING: This chart is deprecated 229 | # NAME: wp 230 | # LAST DEPLOYED: Mon Jul 6 20:44:55 2020 231 | # NAMESPACE: default 232 | # STATUS: deployed 233 | # REVISION: 1 234 | # NOTES: 235 | # ... 236 | 237 | kubectl get pod 238 | # NAME READY STATUS RESTARTS AGE 239 | # svclb-wp-xv6b6 2/2 Running 0 6s 240 | # wp-mariadb-0 0/1 ContainerCreating 0 6s 241 | # wp-6d78b5c456 0/1 Running 0 6s 242 | 243 | kubectl get svc 244 | # NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) 245 | # kubernetes ClusterIP 10.43.0.1 443/TCP 246 | # wp-mariadb ClusterIP 10.43.90.229 3306/TCP 247 | # wp LoadBalancer 10.43.167.4 10.0.1.1 8080:30887/TCP,... 248 | ``` 249 | 250 | ```yaml 251 | # values.yaml 252 | ... 253 | service: 254 | port: 80 --> 8080 255 | ... 256 | ``` 257 | 258 | ```bash 259 | # curl로 접근해 봅니다. 260 | curl localhost:8080 261 | ``` 262 | 263 | ### 8.3.2 `chart fetch` 264 | 265 | ```bash 266 | helm fetch --untar stable/wordpress --version 9.0.3 267 | 268 | ls wordpress/ 269 | # Chart.yaml README.md charts requirements.lock 270 | # requirements.yaml templates values.schema.json values.yaml 271 | 272 | # 사용자 입맛에 따라 세부 설정값 변경 273 | vim wordpress/values.yaml 274 | # ... 275 | 276 | helm install wp-fetch ./wordpress 277 | # WARNING: This chart is deprecated 278 | # NAME: wp-fetch 279 | # LAST DEPLOYED: Mon Jul 6 20:44:55 2020 280 | # NAMESPACE: default 281 | # STATUS: deployed 282 | # REVISION: 1 283 | # NOTES: 284 | # ... 
285 | ``` 286 | 287 | ### Clean up 288 | 289 | ```bash 290 | helm delete wp 291 | helm delete wp-fetch 292 | kubectl delete pvc data-wp-mariadb-0 data-wp-fetch-mariadb-0 293 | ``` 294 | -------------------------------------------------------------------------------- /chapters/17/README.md: -------------------------------------------------------------------------------- 1 | # 17. 워크플로우 관리 2 | 3 | ## 17.1 Argo workflow 소개 4 | 5 | ### 17.1.4 설치 6 | 7 | ```bash 8 | kubectl create namespace argo 9 | # namespace/argo created 10 | 11 | # Workflow CRD 및 Argo controller 설치 12 | kubectl apply -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.8.1/manifests/install.yaml 13 | # customresourcedefinition.apiextensions.k8s.io/clusterworkflowtemplates.argoproj.io created 14 | # customresourcedefinition.apiextensions.k8s.io/cronworkflows.argoproj.io created 15 | # customresourcedefinition.apiextensions.k8s.io/workflows.argoproj.io created 16 | # ... 17 | 18 | # default 서비스계정에 admin 권한 부여 19 | kubectl create rolebinding default-admin --clusterrole=admin \ 20 | --serviceaccount=default:default 21 | # rolebinding.rbac.authorization.k8s.io/default-admin created 22 | 23 | # ingress 설정 24 | cat << EOF | kubectl apply -f - 25 | apiVersion: extensions/v1beta1 26 | kind: Ingress 27 | metadata: 28 | annotations: 29 | kubernetes.io/ingress.class: nginx 30 | name: argo-server 31 | namespace: argo 32 | spec: 33 | rules: 34 | - host: argo.10.0.1.1.sslip.io 35 | http: 36 | paths: 37 | - backend: 38 | serviceName: argo-server 39 | servicePort: 2746 40 | path: / 41 | EOF 42 | ``` 43 | 44 | - `SUBMIT NEW WORKFLOW` 클릭 45 | - `SUBMIT` 클릭 46 | - 노란색 (혹은 파란색) Workflow 클릭 (예제 이름: `fantastic-tiger`) 47 | - `YAML`과 `LOGS` 클릭하여 Workflow 정보 확인 48 | 49 | ```bash 50 | kubectl get workflow # wf 51 | # NAME STATUS AGE 52 | # fantastic-tiger Succeeded 10m 53 | 54 | kubectl describe workflow fantastic-tiger 55 | # ... 
56 | ``` 57 | 58 | ## 17.2 Workflow 구성하기 59 | 60 | ### 17.2.1 단일 Job 실행 61 | 62 | ```yaml 63 | # single-job.yaml 64 | apiVersion: argoproj.io/v1alpha1 65 | kind: Workflow 66 | metadata: 67 | generateName: hello-world- 68 | namespace: default 69 | spec: 70 | entrypoint: whalesay 71 | templates: 72 | - name: whalesay 73 | container: 74 | image: docker/whalesay 75 | command: [cowsay] 76 | args: ["hello world"] 77 | resources: 78 | limits: 79 | memory: 32Mi 80 | cpu: 100m 81 | ``` 82 | 83 | ```bash 84 | # Workflow 생성 85 | kubectl create -f single-job.yaml 86 | # workflow.argoproj.io/hello-world-tcnjj created 87 | 88 | kubectl get wf 89 | # NAME STATUS AGE 90 | # wonderful-dragon Succeeded 8m21s 91 | # hello-world-tcnjj Succeeded 17s 92 | 93 | kubectl get pod 94 | # NAME READY STATUS RESTARTS AGE 95 | # wonderful-dragon 0/2 Completed 0 8m34s 96 | # hello-world-tcnjj 0/2 Completed 0 31s 97 | 98 | kubectl logs hello-world-tcnjj -c main 99 | # _____________ 100 | # < hello world > 101 | # ------------- 102 | # \ 103 | # \ 104 | # \ 105 | # ## . 106 | # ## ## ## == 107 | # ## ## ## ## === 108 | # /""""""""""""""""___/ === 109 | # ~~~ {~~ ~~~~ ~~~ ~~~~ ~~ ~ / ===- ~~~ 110 | # \______ o __/ 111 | # \ \ __/ 112 | # \____\______/ 113 | # 114 | # 115 | # Hello from Docker! 
116 | # This message shows that your installation appears to be working 117 | ``` 118 | 119 | ### 17.2.2 파라미터 전달 120 | 121 | ```yaml 122 | # param.yaml 123 | apiVersion: argoproj.io/v1alpha1 124 | kind: Workflow 125 | metadata: 126 | generateName: hello-world-parameters- 127 | namespace: default 128 | spec: 129 | entrypoint: whalesay 130 | arguments: 131 | parameters: 132 | - name: message 133 | value: hello world through param 134 | 135 | templates: 136 | ############### 137 | # entrypoint 138 | ############### 139 | - name: whalesay 140 | inputs: 141 | parameters: 142 | - name: message 143 | container: 144 | image: docker/whalesay 145 | command: [cowsay] 146 | args: ["{{inputs.parameters.message}}"] 147 | ``` 148 | 149 | ### 17.2.3 Serial step 실행 150 | 151 | ```yaml 152 | apiVersion: argoproj.io/v1alpha1 153 | kind: Workflow 154 | metadata: 155 | generateName: serial-step- 156 | namespace: default 157 | spec: 158 | entrypoint: hello-step 159 | templates: 160 | 161 | ############### 162 | # template job 163 | ############### 164 | - name: whalesay 165 | inputs: 166 | parameters: 167 | - name: message 168 | container: 169 | image: docker/whalesay 170 | command: [cowsay] 171 | args: ["{{inputs.parameters.message}}"] 172 | 173 | ############### 174 | # entrypoint 175 | ############### 176 | - name: hello-step 177 | # 순차 실행 178 | steps: 179 | - - name: hello1 180 | template: whalesay 181 | arguments: 182 | parameters: 183 | - name: message 184 | value: "hello1" 185 | - - name: hello2 186 | template: whalesay 187 | arguments: 188 | parameters: 189 | - name: message 190 | value: "hello2" 191 | - - name: hello3 192 | template: whalesay 193 | arguments: 194 | parameters: 195 | - name: message 196 | value: "hello3" 197 | ``` 198 | 199 | ### 17.2.4 Parallel step 실행 200 | 201 | ```yaml 202 | apiVersion: argoproj.io/v1alpha1 203 | kind: Workflow 204 | metadata: 205 | generateName: parallel-steps- 206 | namespace: default 207 | spec: 208 | entrypoint: hello-step 209 | 
templates: 210 | 211 | ############### 212 | # template job 213 | ############### 214 | - name: whalesay 215 | inputs: 216 | parameters: 217 | - name: message 218 | container: 219 | image: docker/whalesay 220 | command: [cowsay] 221 | args: ["{{inputs.parameters.message}}"] 222 | 223 | ############### 224 | # entrypoint 225 | ############### 226 | - name: hello-step 227 | # 병렬 실행 228 | steps: 229 | - - name: hello1 230 | template: whalesay 231 | arguments: 232 | parameters: 233 | - name: message 234 | value: "hello1" 235 | - - name: hello2 236 | template: whalesay 237 | arguments: 238 | parameters: 239 | - name: message 240 | value: "hello2" 241 | - name: hello3 # 기존 double dash에서 single dash로 변경 242 | template: whalesay 243 | arguments: 244 | parameters: 245 | - name: message 246 | value: "hello3" 247 | ``` 248 | 249 | ### 17.2.5 복잡한 DAG 실행 250 | 251 | ```yaml 252 | apiVersion: argoproj.io/v1alpha1 253 | kind: Workflow 254 | metadata: 255 | generateName: dag-diamond- 256 | namespace: default 257 | spec: 258 | entrypoint: diamond 259 | templates: 260 | 261 | ############### 262 | # template job 263 | ############### 264 | - name: echo 265 | inputs: 266 | parameters: 267 | - name: message 268 | container: 269 | image: alpine:3.7 270 | command: [echo, "{{inputs.parameters.message}}"] 271 | 272 | ############### 273 | # entrypoint 274 | ############### 275 | - name: diamond 276 | # DAG 구성 277 | dag: 278 | tasks: 279 | - name: A 280 | template: echo 281 | arguments: 282 | parameters: [{name: message, value: A}] 283 | - name: B 284 | dependencies: [A] 285 | template: echo 286 | arguments: 287 | parameters: [{name: message, value: B}] 288 | - name: C 289 | dependencies: [A] 290 | template: echo 291 | arguments: 292 | parameters: [{name: message, value: C}] 293 | - name: D 294 | dependencies: [B, C] 295 | template: echo 296 | arguments: 297 | parameters: [{name: message, value: D}] 298 | ``` 299 | 300 | ### 17.2.6 종료 핸들링 301 | 302 | ```yaml 303 | apiVersion: 
argoproj.io/v1alpha1 304 | kind: Workflow 305 | metadata: 306 | generateName: error-handlers- 307 | namespace: default 308 | spec: 309 | entrypoint: intentional-fail 310 | # 에러 핸들러 작업 지정 311 | onExit: error-handler 312 | 313 | templates: 314 | 315 | ############### 316 | # template job 317 | ############### 318 | - name: send-email 319 | container: 320 | image: alpine:latest 321 | command: [sh, -c] 322 | args: ["echo send e-mail: {{workflow.name}} {{workflow.status}}"] 323 | 324 | ############### 325 | # 종료 핸들러 326 | ############### 327 | - name: error-handler 328 | steps: 329 | - - name: notify 330 | template: send-email 331 | 332 | ############### 333 | # entrypoint 334 | ############### 335 | - name: intentional-fail 336 | container: 337 | image: alpine:latest 338 | command: [sh, -c] 339 | args: ["echo intentional failure; exit 1"] 340 | ``` 341 | 342 | ### Clean up 343 | 344 | ```bash 345 | kubectl delete wf --all 346 | kubectl delete -n argo -f https://raw.githubusercontent.com/argoproj/argo/v2.8.1/manifests/install.yaml 347 | ``` 348 | -------------------------------------------------------------------------------- /chapters/06/README.md: -------------------------------------------------------------------------------- 1 | # 6. 쿠버네티스 네트워킹 2 | 3 | ## 6.1 Service 소개 4 | 5 | ```bash 6 | kubectl run mynginx --image nginx 7 | # pod/mynginx created 8 | 9 | # Pod IP는 사용자마다 다릅니다. 10 | kubectl get pod -owide 11 | # NAME READY STATUS RESTARTS AGE IP NODE ... 12 | # mynginx 1/1 Running 0 12d 10.42.0.26 master ... 13 | 14 | kubectl exec mynginx -- curl -s 10.42.0.26 15 | # 16 | # 17 | # Welcome to nginx! 18 | #