├── _config.yml
├── assets
│   ├── ghost.png
│   ├── minio.png
│   ├── nginx-web.png
│   ├── dashboard-lb.png
│   ├── ceph-replicapool.png
│   ├── dashboard-intro.png
│   ├── ceph-dashboard-fs.png
│   └── dashboard-overview.png
├── kubernetes
│   ├── dashboard-service-account.yaml
│   ├── dashboard-clusterrolebinding.yaml
│   └── kubernetes-dashboard.yaml
├── docker
│   ├── load_docker_images.py
│   └── download-images.py
├── ghost
│   ├── ghost-svc.yaml
│   └── ghost-deploy.yaml
├── mysql
│   ├── mysql-svc.yaml
│   ├── mysql-pvc.yaml
│   └── mysql-deploy.yaml
├── minio
│   ├── minio-svc.yaml
│   └── minio-deploy.yaml
├── metallb
│   ├── layer2-config.yaml
│   └── metallb.yaml
├── nginx
│   ├── nginx-svc.yaml
│   └── nginx-deploy.yaml
├── rook
│   ├── filesystem.yaml
│   ├── storageclass.yaml
│   ├── dashboard-external-https.yaml
│   ├── dashboard-external-http.yaml
│   ├── pool.yaml
│   ├── nginx-fs-deploy.yaml
│   ├── toolbox.yaml
│   ├── cluster.yaml
│   └── operator.yaml
├── helm
│   ├── rbac-config.yaml
│   └── get_helm.sh
├── redis
│   ├── redis-svc.yaml
│   └── redis-storage-pv.yaml
├── Prepare-hands-on.md
├── cni
│   ├── rbac-kdd.yaml
│   └── calico.yaml
├── Vagrantfile
└── README.md
/_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-leap-day --------------------------------------------------------------------------------
/assets/ghost.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/ghost.png --------------------------------------------------------------------------------
/assets/minio.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/minio.png --------------------------------------------------------------------------------
/assets/nginx-web.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/nginx-web.png --------------------------------------------------------------------------------
/assets/dashboard-lb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/dashboard-lb.png --------------------------------------------------------------------------------
/assets/ceph-replicapool.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/ceph-replicapool.png --------------------------------------------------------------------------------
/assets/dashboard-intro.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/dashboard-intro.png --------------------------------------------------------------------------------
/assets/ceph-dashboard-fs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/ceph-dashboard-fs.png --------------------------------------------------------------------------------
/assets/dashboard-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chanshik/kubernetes-201811-meetup/HEAD/assets/dashboard-overview.png --------------------------------------------------------------------------------
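The manifests in this tree build up the hands-on cluster in stages: CNI, dashboard, MetalLB, Rook/Ceph storage, and finally the demo applications. As a rough sketch, assuming the Vagrant VMs are already up, the control plane has been initialized with kubeadm (not shown in this dump), and commands are run from the repository root, they can be applied with `kubectl` in roughly this order; the exact sequence used in the session may differ:

```bash
# CNI first, so the nodes become Ready (Calico with the Kubernetes datastore)
kubectl apply -f cni/rbac-kdd.yaml
kubectl apply -f cni/calico.yaml

# Kubernetes dashboard plus an admin-user ServiceAccount for logging in
kubectl apply -f kubernetes/kubernetes-dashboard.yaml
kubectl apply -f kubernetes/dashboard-service-account.yaml
kubectl apply -f kubernetes/dashboard-clusterrolebinding.yaml

# MetalLB and its layer-2 address pool (10.254.1.150-250, inside the Vagrant subnet)
kubectl apply -f metallb/metallb.yaml
kubectl apply -f metallb/layer2-config.yaml

# Rook operator, then the Ceph cluster, block pool/StorageClass, and shared filesystem
kubectl apply -f rook/operator.yaml
kubectl apply -f rook/cluster.yaml
kubectl apply -f rook/storageclass.yaml
kubectl apply -f rook/filesystem.yaml
```

The application manifests (mysql, ghost, minio, nginx, redis) come last, since they depend on the `rook-ceph-block` StorageClass, the `k8s-fs` Ceph filesystem, and the LoadBalancer IPs handed out by MetalLB.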
/kubernetes/dashboard-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: admin-user 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /docker/load_docker_images.py: -------------------------------------------------------------------------------- 1 | import os 2 | import glob 3 | 4 | files = glob.glob("*.docker") 5 | for filename in files: 6 | os.system("sudo docker load -i {}".format(filename)) 7 | -------------------------------------------------------------------------------- /ghost/ghost-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ghost-svc 5 | labels: 6 | k8s-app: ghost 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 2368 11 | selector: 12 | k8s-app: ghost 13 | -------------------------------------------------------------------------------- /mysql/mysql-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql 5 | labels: 6 | k8s-app: mysql 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 3306 11 | selector: 12 | k8s-app: mysql 13 | -------------------------------------------------------------------------------- /minio/minio-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: minio-svc 5 | spec: 6 | type: LoadBalancer 7 | ports: 8 | - port: 9000 9 | targetPort: 9000 10 | protocol: TCP 11 | selector: 12 | k8s-app: minio 13 | -------------------------------------------------------------------------------- /metallb/layer2-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: default 10 | protocol: layer2 11 | addresses: 12 | - 10.254.1.150-10.254.1.250 13 | -------------------------------------------------------------------------------- /mysql/mysql-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mysql-pvc 5 | labels: 6 | k8s-app: mysql 7 | spec: 8 | storageClassName: rook-ceph-block 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 5Gi 14 | -------------------------------------------------------------------------------- /nginx/nginx-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | k8s-app: nginx 6 | name: nginx-svc 7 | namespace: default 8 | spec: 9 | ports: 10 | - port: 80 11 | protocol: TCP 12 | targetPort: 80 13 | nodePort: 31000 14 | selector: 15 | k8s-app: nginx 16 | type: NodePort 17 | -------------------------------------------------------------------------------- /kubernetes/dashboard-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: admin-user 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: 
ServiceAccount 11 | name: admin-user 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /rook/filesystem.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1beta1 2 | kind: Filesystem 3 | metadata: 4 | name: k8s-fs 5 | namespace: rook-ceph 6 | spec: 7 | metadataPool: 8 | replicated: 9 | size: 2 10 | dataPools: 11 | - failureDomain: osd 12 | replicated: 13 | size: 2 14 | metadataServer: 15 | activeCount: 1 16 | activeStandby: true 17 | placement: 18 | resources: 19 | -------------------------------------------------------------------------------- /rook/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1beta1 2 | kind: Pool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph 6 | spec: 7 | replicated: 8 | size: 2 9 | --- 10 | apiVersion: storage.k8s.io/v1 11 | kind: StorageClass 12 | metadata: 13 | name: rook-ceph-block 14 | provisioner: ceph.rook.io/block 15 | parameters: 16 | pool: replicapool 17 | clusterNamespace: rook-ceph 18 | fstype: xfs 19 | -------------------------------------------------------------------------------- /helm/rbac-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: tiller 6 | namespace: kube-system 7 | 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRoleBinding 11 | metadata: 12 | name: tiller 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: cluster-admin 17 | subjects: 18 | - kind: ServiceAccount 19 | name: tiller 20 | namespace: kube-system 21 | -------------------------------------------------------------------------------- /rook/dashboard-external-https.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rook-ceph-mgr-dashboard-external-https 5 | namespace: rook-ceph 6 | labels: 7 | app: rook-ceph-mgr 8 | rook_cluster: rook-ceph 9 | spec: 10 | ports: 11 | - name: dashboard 12 | port: 8443 13 | protocol: TCP 14 | targetPort: 8443 15 | selector: 16 | app: rook-ceph-mgr 17 | rook_cluster: rook-ceph 18 | sessionAffinity: None 19 | type: NodePort 20 | -------------------------------------------------------------------------------- /rook/dashboard-external-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: rook-ceph-mgr-dashboard-external-http 5 | namespace: rook-ceph 6 | labels: 7 | app: rook-ceph-mgr 8 | rook_cluster: rook-ceph 9 | spec: 10 | ports: 11 | - name: dashboard 12 | port: 7000 13 | protocol: TCP 14 | targetPort: 7000 15 | selector: 16 | app: rook-ceph-mgr 17 | rook_cluster: rook-ceph 18 | sessionAffinity: None 19 | type: LoadBalancer 20 | -------------------------------------------------------------------------------- /nginx/nginx-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | k8s-app: nginx 6 | name: nginx 7 | namespace: default 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | k8s-app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: nginx 17 | name: nginx 18 | spec: 19 | containers: 20 | - image: nginx 21 | name: nginx 
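# The nodeSelector below pins this Deployment (and rook/nginx-fs-deploy.yaml, which uses the same selector)
# to nodes carrying the label app=yes, so label at least one VM first,
# for example: kubectl label node k8s-1 app=yes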
22 | nodeSelector: 23 | app: "yes" 24 | -------------------------------------------------------------------------------- /redis/redis-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: redis-ha 6 | chart: redis-ha-3.0.1 7 | heritage: Tiller 8 | release: redis-k8s 9 | name: redis-k8s-redis-ha-svc 10 | namespace: redis 11 | spec: 12 | ports: 13 | - name: server 14 | port: 6379 15 | protocol: TCP 16 | targetPort: redis 17 | - name: sentinel 18 | port: 26379 19 | protocol: TCP 20 | targetPort: sentinel 21 | selector: 22 | app: redis-ha 23 | release: redis-k8s 24 | type: LoadBalancer 25 | -------------------------------------------------------------------------------- /mysql/mysql-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: mysql 5 | labels: 6 | k8s-app: mysql 7 | spec: 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: 13 | k8s-app: mysql 14 | spec: 15 | containers: 16 | - image: mysql:5.7 17 | name: mysql 18 | env: 19 | - name: MYSQL_ROOT_PASSWORD 20 | value: changeme 21 | ports: 22 | - containerPort: 3306 23 | name: mysql 24 | volumeMounts: 25 | - name: mysql-persistent-storage 26 | mountPath: /var/lib/mysql 27 | volumes: 28 | - name: mysql-persistent-storage 29 | persistentVolumeClaim: 30 | claimName: mysql-pvc 31 | -------------------------------------------------------------------------------- /rook/pool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: ceph.rook.io/v1beta1 2 | kind: Pool 3 | metadata: 4 | name: replicapool 5 | namespace: rook-ceph 6 | spec: 7 | # The failure domain will spread the replicas of the data across different failure zones 8 | failureDomain: osd 9 | # The root of the crush hierarchy that will be used for the pool. If not set, will use "default". 10 | crushRoot: default 11 | # For a pool based on raw copies, specify the number of copies. A size of 1 indicates no redundancy. 12 | replicated: 13 | size: 1 14 | # For an erasure-coded pool, comment out the replicated size above and uncomment the following settings. 15 | # Make sure you have enough OSDs to support the replica size or sum of the erasure coding and data chunks. 
16 | #erasureCoded: 17 | # dataChunks: 2 18 | # codingChunks: 1 19 | -------------------------------------------------------------------------------- /redis/redis-storage-pv.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: redis-pv-1 5 | labels: 6 | type: local 7 | spec: 8 | capacity: 9 | storage: 5Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | hostPath: 13 | path: "/media/data/redis" 14 | 15 | --- 16 | kind: PersistentVolume 17 | apiVersion: v1 18 | metadata: 19 | name: redis-pv-2 20 | labels: 21 | type: local 22 | spec: 23 | capacity: 24 | storage: 5Gi 25 | accessModes: 26 | - ReadWriteOnce 27 | hostPath: 28 | path: "/media/data/redis" 29 | 30 | --- 31 | kind: PersistentVolume 32 | apiVersion: v1 33 | metadata: 34 | name: redis-pv-3 35 | labels: 36 | type: local 37 | spec: 38 | capacity: 39 | storage: 5Gi 40 | accessModes: 41 | - ReadWriteOnce 42 | hostPath: 43 | path: "/media/data/redis" 44 | -------------------------------------------------------------------------------- /rook/nginx-fs-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | k8s-app: nginx-fs 6 | name: nginx-fs 7 | namespace: default 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | k8s-app: nginx-fs 13 | template: 14 | metadata: 15 | labels: 16 | k8s-app: nginx-fs 17 | name: nginx-fs 18 | spec: 19 | containers: 20 | - image: nginx 21 | name: nginx 22 | volumeMounts: 23 | - name: k8s-root 24 | mountPath: /tmp/fs 25 | nodeSelector: 26 | app: "yes" 27 | volumes: 28 | - name: k8s-root 29 | flexVolume: 30 | driver: ceph.rook.io/rook 31 | fsType: ceph 32 | options: 33 | fsName: k8s-fs 34 | clusterNamespace: rook-ceph 35 | path: / 36 | -------------------------------------------------------------------------------- /minio/minio-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: minio 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | k8s-app: minio 10 | spec: 11 | containers: 12 | - name: minio 13 | volumeMounts: 14 | - name: minio-store 15 | mountPath: "/data" 16 | image: minio/minio:RELEASE.2018-11-17T01-23-48Z 17 | args: 18 | - server 19 | - /data 20 | env: 21 | - name: MINIO_ACCESS_KEY 22 | value: "minio" 23 | - name: MINIO_SECRET_KEY 24 | value: "minio123" 25 | ports: 26 | - containerPort: 9000 27 | volumes: 28 | - name: minio-store 29 | flexVolume: 30 | driver: ceph.rook.io/rook 31 | fsType: ceph 32 | options: 33 | fsName: k8s-fs 34 | clusterNamespace: rook-ceph 35 | path: /minio 36 | -------------------------------------------------------------------------------- /ghost/ghost-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: ghost 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | k8s-app: ghost 10 | spec: 11 | containers: 12 | - name: ghost 13 | volumeMounts: 14 | - name: ghost-volume 15 | mountPath: "/var/lib/ghost/content" 16 | image: ghost:2 17 | env: 18 | - name: database__client 19 | value: "mysql" 20 | - name: database__connection__host 21 | value: "mysql" 22 | - name: database__connection__user 23 | value: "root" 24 | - name: database__connection__database 25 | value: "ghost" 26 | - name: database__connection__password 27 | value: 
"changeme" 28 | ports: 29 | - containerPort: 2368 30 | volumes: 31 | - name: ghost-volume 32 | flexVolume: 33 | driver: ceph.rook.io/rook 34 | fsType: ceph 35 | options: 36 | fsName: k8s-fs 37 | clusterNamespace: rook-ceph 38 | path: /ghost 39 | -------------------------------------------------------------------------------- /docker/download-images.py: -------------------------------------------------------------------------------- 1 | import urllib.request 2 | import sys 3 | 4 | if len(sys.argv) == 1: 5 | print("Downloads saved docker images.") 6 | print("Usage: python3 [HOST]") 7 | sys.exit(1) 8 | 9 | host = sys.argv[1] 10 | 11 | docker_images = [ 12 | "gcr.io_kubernetes-helm_tiller+v2.11.0.docker", 13 | "ghost+2.docker", 14 | "k8s.gcr.io_coredns+1.2.2.docker", 15 | "k8s.gcr.io_etcd+3.2.24.docker", 16 | "k8s.gcr.io_kube-apiserver+v1.12.2.docker", 17 | "k8s.gcr.io_kube-controller-manager+v1.12.2.docker", 18 | "k8s.gcr.io_kube-proxy+v1.12.2.docker", 19 | "k8s.gcr.io_kubernetes-dashboard-amd64+v1.10.0.docker", 20 | "k8s.gcr.io_kube-scheduler+v1.12.2.docker", 21 | "k8s.gcr.io_pause+3.1.docker", 22 | "metallb_controller+v0.7.3.docker", 23 | "metallb_speaker+v0.7.3.docker", 24 | "minio_minio+RELEASE.2018-11-17T01-23-48Z.docker", 25 | "mysql+5.7.docker", 26 | "nginx+latest.docker", 27 | "quay.io_calico_cni+v3.3.1.docker", 28 | "quay.io_calico_node+v3.3.1.docker", 29 | "redis+4.0.11-alpine.docker", 30 | "rook_ceph+v0.8.3.docker", 31 | ] 32 | 33 | for docker_image in docker_images: 34 | url = "http://{}/{}".format(host, docker_image) 35 | 36 | print("Downloading {}".format(url)) 37 | urllib.request.urlretrieve(url, docker_image) 38 | -------------------------------------------------------------------------------- /Prepare-hands-on.md: -------------------------------------------------------------------------------- 1 | # 실습 환경 구축 2 | 3 | ## Programs 4 | 5 | ### USB 6 | 7 | `USB/programs` 디렉토리에 Virtualbox, Vagrant 파일이 저장되어 있습니다. 8 | 9 | 10 | 11 | ### Web 12 | 13 | 강의장에서 알려준 IP 로 접속하면 `/programs` 디렉토리에서 원하는 운영체제에 맞는 파일을 받을 수 있습니다. 14 | 15 | 16 | 17 | ## APT proxy 18 | 19 | 강의장에서 알려준 IP 와 Port (3241) 를 이용해 APT proxy 설정을 추가하여 apt 를 이용해 프로그램을 설치할 때 캐시된 파일을 이용할 수 있습니다. 20 | 21 | ### VM 에 APT proxy 설정 추가 22 | 23 | `/etc/apt/apt.conf.d/00proxy` 파일을 추가하여 apt 에서 이용할 Proxy 설정을 넣어줍니다. 24 | 25 | ```bash 26 | # cat < /etc/apt/apt.conf.d/00proxy 27 | Acquire::http { Proxy "http://[IP]:3142"; }; 28 | END 29 | ``` 30 | 31 | 32 | 33 | ## Vagrant Box 34 | 35 | ### USB 36 | 37 | `USB/ubuntu-VAGRANTSLASH-bionic64` 디렉토리를 `~/.vagrant.d/boxes` 안으로 복사합니다. 38 | 39 | 40 | 41 | ## Web 42 | 43 | 강의장에서 알려준 IP 로 접속해 `/box/ubuntu-VAGRANTSLASH-bionic64.tgz` 파일을 받아서 압축을 풀고, `~/.vagrant.d/boxes` 안으로 복사합니다. 44 | 45 | 46 | 47 | ## Docker Images 48 | 49 | Vagrant 를 이용해 VM 을 실행하면, `/vagrant` 디렉토리를 통해 `kubernetes-201811-meetup` 디렉토리에 접근할 수 있습니다. Docker 이미지를 받은 이후에 작업은 VM 내부에서 진행합니다. 50 | 51 | 52 | 53 | ## USB 54 | 55 | `USB/docker-images` 안에 있는 파일을 `kubernetes-201811-meetup/docker` 디렉토리에 모두 복사합니다. 56 | 57 | 58 | 59 | ## Web 60 | 61 | 강의장에서 알려준 IP 를 `download-images.py` 프로그램 인자로 넘겨주면 필요한 도커 이미지를 모두 다운 받습니다. 62 | 63 | ```bash 64 | /vagrant/docker$ python3 download_images.py [IP] 65 | ``` 66 | 67 | 68 | 69 | ## Load docker Images 70 | 71 | USB 혹은 Web 을 통해 이미지를 받은 이후에 VM 에서 아래 프로그램을 실행하여 Docker 이미지를 등록합니다. 
72 | 73 | ```bash 74 | /vagrant/docker$ python3 load_docker_images.py 75 | ``` 76 | 77 | -------------------------------------------------------------------------------- /rook/toolbox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rook-ceph-tools 5 | namespace: rook-ceph 6 | labels: 7 | app: rook-ceph-tools 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: rook-ceph-tools 13 | template: 14 | metadata: 15 | labels: 16 | app: rook-ceph-tools 17 | spec: 18 | dnsPolicy: ClusterFirstWithHostNet 19 | containers: 20 | - name: rook-ceph-tools 21 | image: rook/ceph:master 22 | command: ["/tini"] 23 | args: ["-g", "--", "/usr/local/bin/toolbox.sh"] 24 | imagePullPolicy: IfNotPresent 25 | env: 26 | - name: ROOK_ADMIN_SECRET 27 | valueFrom: 28 | secretKeyRef: 29 | name: rook-ceph-mon 30 | key: admin-secret 31 | securityContext: 32 | privileged: true 33 | volumeMounts: 34 | - mountPath: /dev 35 | name: dev 36 | - mountPath: /sys/bus 37 | name: sysbus 38 | - mountPath: /lib/modules 39 | name: libmodules 40 | - name: mon-endpoint-volume 41 | mountPath: /etc/rook 42 | # if hostNetwork: false, the "rbd map" command hangs, see https://github.com/rook/rook/issues/2021 43 | hostNetwork: true 44 | volumes: 45 | - name: dev 46 | hostPath: 47 | path: /dev 48 | - name: sysbus 49 | hostPath: 50 | path: /sys/bus 51 | - name: libmodules 52 | hostPath: 53 | path: /lib/modules 54 | - name: mon-endpoint-volume 55 | configMap: 56 | name: rook-ceph-mon-endpoints 57 | items: 58 | - key: data 59 | path: mon-endpoints 60 | -------------------------------------------------------------------------------- /cni/rbac-kdd.yaml: -------------------------------------------------------------------------------- 1 | # Calico Version v3.3.1 2 | # https://docs.projectcalico.org/v3.3/releases#v3.3.1 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1beta1 5 | metadata: 6 | name: calico-node 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | - namespaces 11 | - serviceaccounts 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - apiGroups: [""] 17 | resources: 18 | - pods/status 19 | verbs: 20 | - patch 21 | - apiGroups: [""] 22 | resources: 23 | - pods 24 | verbs: 25 | - get 26 | - list 27 | - watch 28 | - apiGroups: [""] 29 | resources: 30 | - services 31 | verbs: 32 | - get 33 | - apiGroups: [""] 34 | resources: 35 | - endpoints 36 | verbs: 37 | - get 38 | - apiGroups: [""] 39 | resources: 40 | - nodes 41 | verbs: 42 | - get 43 | - list 44 | - update 45 | - watch 46 | - apiGroups: ["extensions"] 47 | resources: 48 | - networkpolicies 49 | verbs: 50 | - get 51 | - list 52 | - watch 53 | - apiGroups: ["networking.k8s.io"] 54 | resources: 55 | - networkpolicies 56 | verbs: 57 | - watch 58 | - list 59 | - apiGroups: ["crd.projectcalico.org"] 60 | resources: 61 | - globalfelixconfigs 62 | - felixconfigurations 63 | - bgppeers 64 | - globalbgpconfigs 65 | - bgpconfigurations 66 | - ippools 67 | - globalnetworkpolicies 68 | - globalnetworksets 69 | - networkpolicies 70 | - clusterinformations 71 | - hostendpoints 72 | verbs: 73 | - create 74 | - get 75 | - list 76 | - update 77 | - watch 78 | 79 | --- 80 | 81 | apiVersion: rbac.authorization.k8s.io/v1beta1 82 | kind: ClusterRoleBinding 83 | metadata: 84 | name: calico-node 85 | roleRef: 86 | apiGroup: rbac.authorization.k8s.io 87 | kind: ClusterRole 88 | name: calico-node 89 | subjects: 90 | - kind: ServiceAccount 91 | name: calico-node 92 | 
namespace: kube-system 93 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure("2") do |config| 5 | config.vm.box = "ubuntu/bionic64" 6 | config.vm.box_check_update = false 7 | node_subnet = "10.254.1" 8 | 9 | (1..3).each do |i| 10 | config.vm.define "k8s-#{i}" do |node| 11 | node.vm.hostname = "k8s-#{i}" 12 | node.vm.network "private_network", ip: "#{node_subnet}.#{i + 1}" 13 | 14 | attached_disk_a = "disk-k8s-#{i}-a.vdi" 15 | attached_disk_b = "disk-k8s-#{i}-b.vdi" 16 | 17 | node.vm.provider "virtualbox" do |vb| 18 | vb.name = "k8s-#{i}" 19 | vb.gui = false 20 | 21 | vb.cpus = 2 22 | vb.memory = "4096" 23 | 24 | unless File.exists?(attached_disk_a) 25 | vb.customize [ 26 | 'createhd', '--filename', attached_disk_a, 27 | '--variant', 'Fixed', 28 | '--size', 10 * 1024] 29 | end 30 | 31 | unless File.exists?(attached_disk_b) 32 | vb.customize [ 33 | 'createhd', '--filename', attached_disk_b, 34 | '--variant', 'Fixed', 35 | '--size', 10 * 1024] 36 | end 37 | 38 | vb.customize [ 39 | 'storageattach', :id, '--storagectl', 'SCSI', 40 | '--port', 2, '--device', 0, '--type', 'hdd', 41 | '--medium', attached_disk_a] 42 | 43 | vb.customize [ 44 | 'storageattach', :id, '--storagectl', 'SCSI', 45 | '--port', 3, '--device', 0, '--type', 'hdd', 46 | '--medium', attached_disk_b] 47 | end 48 | 49 | node.vm.provision "bootstrap", type: "shell", inline: <<-SHELL 50 | sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 51 | sudo cat <<EOF >/etc/apt/sources.list.d/kubernetes.list 52 | deb http://apt.kubernetes.io/ kubernetes-xenial main 53 | EOF 54 | sudo apt update 55 | sudo apt install -y docker.io kubelet kubeadm kubectl ntp nfs-kernel-server 56 | sudo usermod -aG docker vagrant 57 | sudo systemctl enable docker.service 58 | sudo sed -i '/k8s/d' /etc/hosts 59 | sudo echo "#{node_subnet}.#{i + 1} k8s-#{i}" | sudo tee -a /etc/hosts 60 | 61 | sudo mkfs.ext4 /dev/sdc 62 | sudo mkdir /media/data 63 | SHELL 64 | 65 | node.vm.provision "shell", run: "always", 66 | inline: "sudo mount /dev/sdc /media/data" 67 | end 68 | end 69 | end 70 | -------------------------------------------------------------------------------- /rook/cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: rook-ceph 5 | --- 6 | apiVersion: v1 7 | kind: ServiceAccount 8 | metadata: 9 | name: rook-ceph-cluster 10 | namespace: rook-ceph 11 | --- 12 | kind: Role 13 | apiVersion: rbac.authorization.k8s.io/v1beta1 14 | metadata: 15 | name: rook-ceph-cluster 16 | namespace: rook-ceph 17 | rules: 18 | - apiGroups: [""] 19 | resources: ["configmaps"] 20 | verbs: [ "get", "list", "watch", "create", "update", "delete" ] 21 | --- 22 | # Allow the operator to create resources in this cluster's namespace 23 | kind: RoleBinding 24 | apiVersion: rbac.authorization.k8s.io/v1beta1 25 | metadata: 26 | name: rook-ceph-cluster-mgmt 27 | namespace: rook-ceph 28 | roleRef: 29 | apiGroup: rbac.authorization.k8s.io 30 | kind: ClusterRole 31 | name: rook-ceph-cluster-mgmt 32 | subjects: 33 | - kind: ServiceAccount 34 | name: rook-ceph-system 35 | namespace: rook-ceph-system 36 | --- 37 | # Allow the pods in this namespace to work with configmaps 38 | kind: RoleBinding 39 | apiVersion: rbac.authorization.k8s.io/v1beta1 40 | metadata: 41 |
name: rook-ceph-cluster 42 | namespace: rook-ceph 43 | roleRef: 44 | apiGroup: rbac.authorization.k8s.io 45 | kind: Role 46 | name: rook-ceph-cluster 47 | subjects: 48 | - kind: ServiceAccount 49 | name: rook-ceph-cluster 50 | namespace: rook-ceph 51 | --- 52 | apiVersion: ceph.rook.io/v1beta1 53 | kind: Cluster 54 | metadata: 55 | name: rook-ceph 56 | namespace: rook-ceph 57 | spec: 58 | cephVersion: 59 | # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw). 60 | # v12 is luminous, v13 is mimic, and v14 is nautilus. 61 | # RECOMMENDATION: In production, use a specific version tag instead of the general v13 flag, which pulls the latest release and could result in different 62 | # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/. 63 | image: ceph/ceph:v13 64 | # Whether to allow unsupported versions of Ceph. Currently only luminous and mimic are supported. 65 | # After nautilus is released, Rook will be updated to support nautilus. 66 | # Do not set to true in production. 67 | allowUnsupported: false 68 | # The path on the host where configuration files will be persisted. If not specified, a kubernetes emptyDir will be created (not recommended). 69 | # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster. 70 | # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment. 71 | dataDirHostPath: /var/lib/rook 72 | # The service account under which to run the daemon pods in this cluster if the default account is not sufficient (OSDs) 73 | serviceAccount: rook-ceph-cluster 74 | # set the amount of mons to be started 75 | mon: 76 | count: 2 77 | allowMultiplePerNode: true 78 | # enable the ceph dashboard for viewing cluster status 79 | dashboard: 80 | enabled: true 81 | # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy) 82 | # urlPrefix: /ceph-dashboard 83 | network: 84 | # toggle to use hostNetwork 85 | hostNetwork: false 86 | # To control where various services will be scheduled by kubernetes, use the placement configuration sections below. 87 | # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and 88 | # tolerate taints with a key of 'storage-node'. 
89 | # placement: 90 | # all: 91 | # nodeAffinity: 92 | # requiredDuringSchedulingIgnoredDuringExecution: 93 | # nodeSelectorTerms: 94 | # - matchExpressions: 95 | # - key: role 96 | # operator: In 97 | # values: 98 | # - storage-node 99 | # podAffinity: 100 | # podAntiAffinity: 101 | # tolerations: 102 | # - key: storage-node 103 | # operator: Exists 104 | # The above placement information can also be specified for mon, osd, and mgr components 105 | # mon: 106 | # osd: 107 | # mgr: 108 | resources: 109 | # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory 110 | # mgr: 111 | # limits: 112 | # cpu: "500m" 113 | # memory: "1024Mi" 114 | # requests: 115 | # cpu: "500m" 116 | # memory: "1024Mi" 117 | # The above example requests/limits can also be added to the mon and osd components 118 | # mon: 119 | # osd: 120 | storage: 121 | useAllNodes: false 122 | useAllDevices: false 123 | deviceFilter: 124 | location: 125 | config: 126 | storeType: "bluestore" 127 | databaseSizeMB: "1024" 128 | journalSizeMB: "1024" 129 | nodes: 130 | - name: "k8s-1" 131 | devices: 132 | - name: "sdd" 133 | - name: "k8s-2" 134 | devices: 135 | - name: "sdd" 136 | - name: "k8s-3" 137 | devices: 138 | - name: "sdd" 139 | -------------------------------------------------------------------------------- /kubernetes/kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2017 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # ------------------- Dashboard Secret ------------------- # 16 | 17 | apiVersion: v1 18 | kind: Secret 19 | metadata: 20 | labels: 21 | k8s-app: kubernetes-dashboard 22 | name: kubernetes-dashboard-certs 23 | namespace: kube-system 24 | type: Opaque 25 | 26 | --- 27 | # ------------------- Dashboard Service Account ------------------- # 28 | 29 | apiVersion: v1 30 | kind: ServiceAccount 31 | metadata: 32 | labels: 33 | k8s-app: kubernetes-dashboard 34 | name: kubernetes-dashboard 35 | namespace: kube-system 36 | 37 | --- 38 | # ------------------- Dashboard Role & Role Binding ------------------- # 39 | 40 | kind: Role 41 | apiVersion: rbac.authorization.k8s.io/v1 42 | metadata: 43 | name: kubernetes-dashboard-minimal 44 | namespace: kube-system 45 | rules: 46 | # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret. 47 | - apiGroups: [""] 48 | resources: ["secrets"] 49 | verbs: ["create"] 50 | # Allow Dashboard to create 'kubernetes-dashboard-settings' config map. 51 | - apiGroups: [""] 52 | resources: ["configmaps"] 53 | verbs: ["create"] 54 | # Allow Dashboard to get, update and delete Dashboard exclusive secrets. 55 | - apiGroups: [""] 56 | resources: ["secrets"] 57 | resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"] 58 | verbs: ["get", "update", "delete"] 59 | # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. 
60 | - apiGroups: [""] 61 | resources: ["configmaps"] 62 | resourceNames: ["kubernetes-dashboard-settings"] 63 | verbs: ["get", "update"] 64 | # Allow Dashboard to get metrics from heapster. 65 | - apiGroups: [""] 66 | resources: ["services"] 67 | resourceNames: ["heapster"] 68 | verbs: ["proxy"] 69 | - apiGroups: [""] 70 | resources: ["services/proxy"] 71 | resourceNames: ["heapster", "http:heapster:", "https:heapster:"] 72 | verbs: ["get"] 73 | 74 | --- 75 | apiVersion: rbac.authorization.k8s.io/v1 76 | kind: RoleBinding 77 | metadata: 78 | name: kubernetes-dashboard-minimal 79 | namespace: kube-system 80 | roleRef: 81 | apiGroup: rbac.authorization.k8s.io 82 | kind: Role 83 | name: kubernetes-dashboard-minimal 84 | subjects: 85 | - kind: ServiceAccount 86 | name: kubernetes-dashboard 87 | namespace: kube-system 88 | 89 | --- 90 | # ------------------- Dashboard Deployment ------------------- # 91 | 92 | kind: Deployment 93 | apiVersion: apps/v1beta2 94 | metadata: 95 | labels: 96 | k8s-app: kubernetes-dashboard 97 | name: kubernetes-dashboard 98 | namespace: kube-system 99 | spec: 100 | replicas: 1 101 | revisionHistoryLimit: 10 102 | selector: 103 | matchLabels: 104 | k8s-app: kubernetes-dashboard 105 | template: 106 | metadata: 107 | labels: 108 | k8s-app: kubernetes-dashboard 109 | spec: 110 | containers: 111 | - name: kubernetes-dashboard 112 | image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.0 113 | ports: 114 | - containerPort: 8443 115 | protocol: TCP 116 | args: 117 | - --auto-generate-certificates 118 | # Uncomment the following line to manually specify Kubernetes API server Host 119 | # If not specified, Dashboard will attempt to auto discover the API server and connect 120 | # to it. Uncomment only if the default does not work. 121 | # - --apiserver-host=http://my-address:port 122 | volumeMounts: 123 | - name: kubernetes-dashboard-certs 124 | mountPath: /certs 125 | # Create on-disk volume to store exec logs 126 | - mountPath: /tmp 127 | name: tmp-volume 128 | livenessProbe: 129 | httpGet: 130 | scheme: HTTPS 131 | path: / 132 | port: 8443 133 | initialDelaySeconds: 30 134 | timeoutSeconds: 30 135 | volumes: 136 | - name: kubernetes-dashboard-certs 137 | secret: 138 | secretName: kubernetes-dashboard-certs 139 | - name: tmp-volume 140 | emptyDir: {} 141 | serviceAccountName: kubernetes-dashboard 142 | # Comment the following tolerations if Dashboard must not be deployed on master 143 | tolerations: 144 | - key: node-role.kubernetes.io/master 145 | effect: NoSchedule 146 | 147 | --- 148 | # ------------------- Dashboard Service ------------------- # 149 | 150 | kind: Service 151 | apiVersion: v1 152 | metadata: 153 | labels: 154 | k8s-app: kubernetes-dashboard 155 | name: kubernetes-dashboard 156 | namespace: kube-system 157 | spec: 158 | ports: 159 | - port: 443 160 | targetPort: 8443 161 | selector: 162 | k8s-app: kubernetes-dashboard 163 | -------------------------------------------------------------------------------- /metallb/metallb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: metallb-system 5 | labels: 6 | app: metallb 7 | --- 8 | 9 | apiVersion: v1 10 | kind: ServiceAccount 11 | metadata: 12 | namespace: metallb-system 13 | name: controller 14 | labels: 15 | app: metallb 16 | --- 17 | apiVersion: v1 18 | kind: ServiceAccount 19 | metadata: 20 | namespace: metallb-system 21 | name: speaker 22 | labels: 23 | app: metallb 24 | 25 | --- 26 | apiVersion: 
rbac.authorization.k8s.io/v1 27 | kind: ClusterRole 28 | metadata: 29 | name: metallb-system:controller 30 | labels: 31 | app: metallb 32 | rules: 33 | - apiGroups: [""] 34 | resources: ["services"] 35 | verbs: ["get", "list", "watch", "update"] 36 | - apiGroups: [""] 37 | resources: ["services/status"] 38 | verbs: ["update"] 39 | - apiGroups: [""] 40 | resources: ["events"] 41 | verbs: ["create", "patch"] 42 | --- 43 | apiVersion: rbac.authorization.k8s.io/v1 44 | kind: ClusterRole 45 | metadata: 46 | name: metallb-system:speaker 47 | labels: 48 | app: metallb 49 | rules: 50 | - apiGroups: [""] 51 | resources: ["services", "endpoints", "nodes"] 52 | verbs: ["get", "list", "watch"] 53 | --- 54 | apiVersion: rbac.authorization.k8s.io/v1 55 | kind: Role 56 | metadata: 57 | namespace: metallb-system 58 | name: config-watcher 59 | labels: 60 | app: metallb 61 | rules: 62 | - apiGroups: [""] 63 | resources: ["configmaps"] 64 | verbs: ["get", "list", "watch"] 65 | - apiGroups: [""] 66 | resources: ["events"] 67 | verbs: ["create"] 68 | --- 69 | 70 | ## Role bindings 71 | apiVersion: rbac.authorization.k8s.io/v1 72 | kind: ClusterRoleBinding 73 | metadata: 74 | name: metallb-system:controller 75 | labels: 76 | app: metallb 77 | subjects: 78 | - kind: ServiceAccount 79 | name: controller 80 | namespace: metallb-system 81 | roleRef: 82 | apiGroup: rbac.authorization.k8s.io 83 | kind: ClusterRole 84 | name: metallb-system:controller 85 | --- 86 | apiVersion: rbac.authorization.k8s.io/v1 87 | kind: ClusterRoleBinding 88 | metadata: 89 | name: metallb-system:speaker 90 | labels: 91 | app: metallb 92 | subjects: 93 | - kind: ServiceAccount 94 | name: speaker 95 | namespace: metallb-system 96 | roleRef: 97 | apiGroup: rbac.authorization.k8s.io 98 | kind: ClusterRole 99 | name: metallb-system:speaker 100 | --- 101 | apiVersion: rbac.authorization.k8s.io/v1 102 | kind: RoleBinding 103 | metadata: 104 | namespace: metallb-system 105 | name: config-watcher 106 | labels: 107 | app: metallb 108 | subjects: 109 | - kind: ServiceAccount 110 | name: controller 111 | - kind: ServiceAccount 112 | name: speaker 113 | roleRef: 114 | apiGroup: rbac.authorization.k8s.io 115 | kind: Role 116 | name: config-watcher 117 | --- 118 | apiVersion: apps/v1beta2 119 | kind: DaemonSet 120 | metadata: 121 | namespace: metallb-system 122 | name: speaker 123 | labels: 124 | app: metallb 125 | component: speaker 126 | spec: 127 | selector: 128 | matchLabels: 129 | app: metallb 130 | component: speaker 131 | template: 132 | metadata: 133 | labels: 134 | app: metallb 135 | component: speaker 136 | annotations: 137 | prometheus.io/scrape: "true" 138 | prometheus.io/port: "7472" 139 | spec: 140 | serviceAccountName: speaker 141 | terminationGracePeriodSeconds: 0 142 | hostNetwork: true 143 | containers: 144 | - name: speaker 145 | image: metallb/speaker:v0.7.3 146 | imagePullPolicy: IfNotPresent 147 | args: 148 | - --port=7472 149 | - --config=config 150 | env: 151 | - name: METALLB_NODE_NAME 152 | valueFrom: 153 | fieldRef: 154 | fieldPath: spec.nodeName 155 | ports: 156 | - name: monitoring 157 | containerPort: 7472 158 | resources: 159 | limits: 160 | cpu: 100m 161 | memory: 100Mi 162 | 163 | securityContext: 164 | allowPrivilegeEscalation: false 165 | readOnlyRootFilesystem: true 166 | capabilities: 167 | drop: 168 | - all 169 | add: 170 | - net_raw 171 | 172 | --- 173 | apiVersion: apps/v1beta2 174 | kind: Deployment 175 | metadata: 176 | namespace: metallb-system 177 | name: controller 178 | labels: 179 | app: metallb 180 | 
component: controller 181 | spec: 182 | revisionHistoryLimit: 3 183 | selector: 184 | matchLabels: 185 | app: metallb 186 | component: controller 187 | template: 188 | metadata: 189 | labels: 190 | app: metallb 191 | component: controller 192 | annotations: 193 | prometheus.io/scrape: "true" 194 | prometheus.io/port: "7472" 195 | spec: 196 | serviceAccountName: controller 197 | terminationGracePeriodSeconds: 0 198 | securityContext: 199 | runAsNonRoot: true 200 | runAsUser: 65534 # nobody 201 | containers: 202 | - name: controller 203 | image: metallb/controller:v0.7.3 204 | imagePullPolicy: IfNotPresent 205 | args: 206 | - --port=7472 207 | - --config=config 208 | ports: 209 | - name: monitoring 210 | containerPort: 7472 211 | resources: 212 | limits: 213 | cpu: 100m 214 | memory: 100Mi 215 | 216 | securityContext: 217 | allowPrivilegeEscalation: false 218 | capabilities: 219 | drop: 220 | - all 221 | readOnlyRootFilesystem: true 222 | 223 | --- 224 | -------------------------------------------------------------------------------- /helm/get_helm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright The Helm Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # The install script is based off of the MIT-licensed script from glide, 18 | # the package manager for Go: https://github.com/Masterminds/glide.sh/blob/master/get 19 | 20 | PROJECT_NAME="helm" 21 | TILLER_NAME="tiller" 22 | 23 | : ${USE_SUDO:="true"} 24 | : ${HELM_INSTALL_DIR:="/usr/local/bin"} 25 | 26 | # initArch discovers the architecture for this system. 27 | initArch() { 28 | ARCH=$(uname -m) 29 | case $ARCH in 30 | armv5*) ARCH="armv5";; 31 | armv6*) ARCH="armv6";; 32 | armv7*) ARCH="armv7";; 33 | aarch64) ARCH="arm64";; 34 | x86) ARCH="386";; 35 | x86_64) ARCH="amd64";; 36 | i686) ARCH="386";; 37 | i386) ARCH="386";; 38 | esac 39 | } 40 | 41 | # initOS discovers the operating system for this system. 42 | initOS() { 43 | OS=$(echo `uname`|tr '[:upper:]' '[:lower:]') 44 | 45 | case "$OS" in 46 | # Minimalist GNU for Windows 47 | mingw*) OS='windows';; 48 | esac 49 | } 50 | 51 | # runs the given command as root (detects if we are root already) 52 | runAsRoot() { 53 | local CMD="$*" 54 | 55 | if [ $EUID -ne 0 -a $USE_SUDO = "true" ]; then 56 | CMD="sudo $CMD" 57 | fi 58 | 59 | $CMD 60 | } 61 | 62 | # verifySupported checks that the os/arch combination is supported for 63 | # binary builds. 64 | verifySupported() { 65 | local supported="darwin-386\ndarwin-amd64\nlinux-386\nlinux-amd64\nlinux-arm\nlinux-arm64\nlinux-ppc64le\nwindows-386\nwindows-amd64" 66 | if ! echo "${supported}" | grep -q "${OS}-${ARCH}"; then 67 | echo "No prebuilt binary for ${OS}-${ARCH}." 68 | echo "To build from source, go to https://github.com/helm/helm" 69 | exit 1 70 | fi 71 | 72 | if ! type "curl" > /dev/null && ! 
type "wget" > /dev/null; then 73 | echo "Either curl or wget is required" 74 | exit 1 75 | fi 76 | } 77 | 78 | # checkDesiredVersion checks if the desired version is available. 79 | checkDesiredVersion() { 80 | # Use the GitHub releases webpage for the project to find the desired version for this project. 81 | local release_url="https://github.com/helm/helm/releases/${DESIRED_VERSION:-latest}" 82 | if type "curl" > /dev/null; then 83 | TAG=$(curl -SsL $release_url | awk '/\/tag\//' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}') 84 | elif type "wget" > /dev/null; then 85 | TAG=$(wget -q -O - $release_url | awk '/\/tag\//' | grep -v no-underline | head -n 1 | cut -d '"' -f 2 | awk '{n=split($NF,a,"/");print a[n]}' | awk 'a !~ $0{print}; {a=$0}') 86 | fi 87 | if [ "x$TAG" == "x" ]; then 88 | echo "Cannot determine ${DESIRED_VERSION} tag." 89 | exit 1 90 | fi 91 | } 92 | 93 | # checkHelmInstalledVersion checks which version of helm is installed and 94 | # if it needs to be changed. 95 | checkHelmInstalledVersion() { 96 | if [[ -f "${HELM_INSTALL_DIR}/${PROJECT_NAME}" ]]; then 97 | local version=$(helm version -c | grep '^Client' | cut -d'"' -f2) 98 | if [[ "$version" == "$TAG" ]]; then 99 | echo "Helm ${version} is already ${DESIRED_VERSION:-latest}" 100 | return 0 101 | else 102 | echo "Helm ${TAG} is available. Changing from version ${version}." 103 | return 1 104 | fi 105 | else 106 | return 1 107 | fi 108 | } 109 | 110 | # downloadFile downloads the latest binary package and also the checksum 111 | # for that binary. 112 | downloadFile() { 113 | HELM_DIST="helm-$TAG-$OS-$ARCH.tar.gz" 114 | DOWNLOAD_URL="https://kubernetes-helm.storage.googleapis.com/$HELM_DIST" 115 | CHECKSUM_URL="$DOWNLOAD_URL.sha256" 116 | HELM_TMP_ROOT="$(mktemp -dt helm-installer-XXXXXX)" 117 | HELM_TMP_FILE="$HELM_TMP_ROOT/$HELM_DIST" 118 | HELM_SUM_FILE="$HELM_TMP_ROOT/$HELM_DIST.sha256" 119 | echo "Downloading $DOWNLOAD_URL" 120 | if type "curl" > /dev/null; then 121 | curl -SsL "$CHECKSUM_URL" -o "$HELM_SUM_FILE" 122 | elif type "wget" > /dev/null; then 123 | wget -q -O "$HELM_SUM_FILE" "$CHECKSUM_URL" 124 | fi 125 | if type "curl" > /dev/null; then 126 | curl -SsL "$DOWNLOAD_URL" -o "$HELM_TMP_FILE" 127 | elif type "wget" > /dev/null; then 128 | wget -q -O "$HELM_TMP_FILE" "$DOWNLOAD_URL" 129 | fi 130 | } 131 | 132 | # installFile verifies the SHA256 for the file, then unpacks and 133 | # installs it. 134 | installFile() { 135 | HELM_TMP="$HELM_TMP_ROOT/$PROJECT_NAME" 136 | local sum=$(openssl sha1 -sha256 ${HELM_TMP_FILE} | awk '{print $2}') 137 | local expected_sum=$(cat ${HELM_SUM_FILE}) 138 | if [ "$sum" != "$expected_sum" ]; then 139 | echo "SHA sum of ${HELM_TMP_FILE} does not match. Aborting." 
140 | exit 1 141 | fi 142 | 143 | mkdir -p "$HELM_TMP" 144 | tar xf "$HELM_TMP_FILE" -C "$HELM_TMP" 145 | HELM_TMP_BIN="$HELM_TMP/$OS-$ARCH/$PROJECT_NAME" 146 | TILLER_TMP_BIN="$HELM_TMP/$OS-$ARCH/$TILLER_NAME" 147 | echo "Preparing to install $PROJECT_NAME and $TILLER_NAME into ${HELM_INSTALL_DIR}" 148 | runAsRoot cp "$HELM_TMP_BIN" "$HELM_INSTALL_DIR" 149 | echo "$PROJECT_NAME installed into $HELM_INSTALL_DIR/$PROJECT_NAME" 150 | if [ -x "$TILLER_TMP_BIN" ]; then 151 | runAsRoot cp "$TILLER_TMP_BIN" "$HELM_INSTALL_DIR" 152 | echo "$TILLER_NAME installed into $HELM_INSTALL_DIR/$TILLER_NAME" 153 | else 154 | echo "info: $TILLER_NAME binary was not found in this release; skipping $TILLER_NAME installation" 155 | fi 156 | } 157 | 158 | # fail_trap is executed if an error occurs. 159 | fail_trap() { 160 | result=$? 161 | if [ "$result" != "0" ]; then 162 | if [[ -n "$INPUT_ARGUMENTS" ]]; then 163 | echo "Failed to install $PROJECT_NAME with the arguments provided: $INPUT_ARGUMENTS" 164 | help 165 | else 166 | echo "Failed to install $PROJECT_NAME" 167 | fi 168 | echo -e "\tFor support, go to https://github.com/helm/helm." 169 | fi 170 | cleanup 171 | exit $result 172 | } 173 | 174 | # testVersion tests the installed client to make sure it is working. 175 | testVersion() { 176 | set +e 177 | HELM="$(which $PROJECT_NAME)" 178 | if [ "$?" = "1" ]; then 179 | echo "$PROJECT_NAME not found. Is $HELM_INSTALL_DIR on your "'$PATH?' 180 | exit 1 181 | fi 182 | set -e 183 | echo "Run '$PROJECT_NAME init' to configure $PROJECT_NAME." 184 | } 185 | 186 | # help provides possible cli installation arguments 187 | help () { 188 | echo "Accepted cli arguments are:" 189 | echo -e "\t[--help|-h ] ->> prints this help" 190 | echo -e "\t[--version|-v <desired_version>] . When not defined it defaults to latest" 191 | echo -e "\te.g. --version v2.4.0 or -v latest" 192 | echo -e "\t[--no-sudo] ->> install without sudo" 193 | } 194 | 195 | # cleanup temporary files to avoid https://github.com/helm/helm/issues/2977 196 | cleanup() { 197 | if [[ -d "${HELM_TMP_ROOT:-}" ]]; then 198 | rm -rf "$HELM_TMP_ROOT" 199 | fi 200 | } 201 | 202 | # Execution 203 | 204 | #Stop execution on any error 205 | trap "fail_trap" EXIT 206 | set -e 207 | 208 | # Parsing input arguments (if any) 209 | export INPUT_ARGUMENTS="${@}" 210 | set -u 211 | while [[ $# -gt 0 ]]; do 212 | case $1 in 213 | '--version'|-v) 214 | shift 215 | if [[ $# -ne 0 ]]; then 216 | export DESIRED_VERSION="${1}" 217 | else 218 | echo -e "Please provide the desired version. e.g. --version v2.4.0 or -v latest" 219 | exit 0 220 | fi 221 | ;; 222 | '--no-sudo') 223 | USE_SUDO="false" 224 | ;; 225 | '--help'|-h) 226 | help 227 | exit 0 228 | ;; 229 | *) exit 1 230 | ;; 231 | esac 232 | shift 233 | done 234 | set +u 235 | 236 | initArch 237 | initOS 238 | verifySupported 239 | checkDesiredVersion 240 | if !
checkHelmInstalledVersion; then 241 | downloadFile 242 | installFile 243 | fi 244 | testVersion 245 | cleanup 246 | -------------------------------------------------------------------------------- /rook/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: rook-ceph-system 5 | --- 6 | apiVersion: apiextensions.k8s.io/v1beta1 7 | kind: CustomResourceDefinition 8 | metadata: 9 | name: clusters.ceph.rook.io 10 | spec: 11 | group: ceph.rook.io 12 | names: 13 | kind: Cluster 14 | listKind: ClusterList 15 | plural: clusters 16 | singular: cluster 17 | shortNames: 18 | - rcc 19 | scope: Namespaced 20 | version: v1beta1 21 | validation: 22 | openAPIV3Schema: 23 | properties: 24 | spec: 25 | properties: 26 | cephVersion: 27 | properties: 28 | allowUnsupported: 29 | type: boolean 30 | image: 31 | type: string 32 | name: 33 | pattern: ^(luminous|mimic|nautilus)$ 34 | type: string 35 | dashboard: 36 | properties: 37 | enabled: 38 | type: boolean 39 | urlPrefix: 40 | type: string 41 | dataDirHostPath: 42 | pattern: ^/(\S+) 43 | type: string 44 | mon: 45 | properties: 46 | allowMultiplePerNode: 47 | type: boolean 48 | count: 49 | maximum: 9 50 | minimum: 1 51 | type: integer 52 | required: 53 | - count 54 | network: 55 | properties: 56 | hostNetwork: 57 | type: boolean 58 | storage: 59 | properties: 60 | nodes: 61 | items: {} 62 | type: array 63 | useAllDevices: {} 64 | useAllNodes: 65 | type: boolean 66 | required: 67 | - mon 68 | --- 69 | apiVersion: apiextensions.k8s.io/v1beta1 70 | kind: CustomResourceDefinition 71 | metadata: 72 | name: filesystems.ceph.rook.io 73 | spec: 74 | group: ceph.rook.io 75 | names: 76 | kind: Filesystem 77 | listKind: FilesystemList 78 | plural: filesystems 79 | singular: filesystem 80 | shortNames: 81 | - rcfs 82 | scope: Namespaced 83 | version: v1beta1 84 | --- 85 | apiVersion: apiextensions.k8s.io/v1beta1 86 | kind: CustomResourceDefinition 87 | metadata: 88 | name: objectstores.ceph.rook.io 89 | spec: 90 | group: ceph.rook.io 91 | names: 92 | kind: ObjectStore 93 | listKind: ObjectStoreList 94 | plural: objectstores 95 | singular: objectstore 96 | shortNames: 97 | - rco 98 | scope: Namespaced 99 | version: v1beta1 100 | --- 101 | apiVersion: apiextensions.k8s.io/v1beta1 102 | kind: CustomResourceDefinition 103 | metadata: 104 | name: pools.ceph.rook.io 105 | spec: 106 | group: ceph.rook.io 107 | names: 108 | kind: Pool 109 | listKind: PoolList 110 | plural: pools 111 | singular: pool 112 | shortNames: 113 | - rcp 114 | scope: Namespaced 115 | version: v1beta1 116 | --- 117 | apiVersion: apiextensions.k8s.io/v1beta1 118 | kind: CustomResourceDefinition 119 | metadata: 120 | name: volumes.rook.io 121 | spec: 122 | group: rook.io 123 | names: 124 | kind: Volume 125 | listKind: VolumeList 126 | plural: volumes 127 | singular: volume 128 | shortNames: 129 | - rv 130 | scope: Namespaced 131 | version: v1alpha2 132 | --- 133 | # The cluster role for managing all the cluster-specific resources in a namespace 134 | apiVersion: rbac.authorization.k8s.io/v1beta1 135 | kind: ClusterRole 136 | metadata: 137 | name: rook-ceph-cluster-mgmt 138 | labels: 139 | operator: rook 140 | storage-backend: ceph 141 | rules: 142 | - apiGroups: 143 | - "" 144 | resources: 145 | - secrets 146 | - pods 147 | - pods/log 148 | - services 149 | - configmaps 150 | verbs: 151 | - get 152 | - list 153 | - watch 154 | - patch 155 | - create 156 | - update 157 | - delete 158 | - apiGroups: 159 | - 
extensions 160 | resources: 161 | - deployments 162 | - daemonsets 163 | - replicasets 164 | verbs: 165 | - get 166 | - list 167 | - watch 168 | - create 169 | - update 170 | - delete 171 | --- 172 | # The role for the operator to manage resources in the system namespace 173 | apiVersion: rbac.authorization.k8s.io/v1beta1 174 | kind: Role 175 | metadata: 176 | name: rook-ceph-system 177 | namespace: rook-ceph-system 178 | labels: 179 | operator: rook 180 | storage-backend: ceph 181 | rules: 182 | - apiGroups: 183 | - "" 184 | resources: 185 | - pods 186 | - configmaps 187 | verbs: 188 | - get 189 | - list 190 | - watch 191 | - patch 192 | - create 193 | - update 194 | - delete 195 | - apiGroups: 196 | - extensions 197 | resources: 198 | - daemonsets 199 | verbs: 200 | - get 201 | - list 202 | - watch 203 | - create 204 | - update 205 | - delete 206 | --- 207 | # The cluster role for managing the Rook CRDs 208 | apiVersion: rbac.authorization.k8s.io/v1beta1 209 | kind: ClusterRole 210 | metadata: 211 | name: rook-ceph-global 212 | labels: 213 | operator: rook 214 | storage-backend: ceph 215 | rules: 216 | - apiGroups: 217 | - "" 218 | resources: 219 | # Pod access is needed for fencing 220 | - pods 221 | # Node access is needed for determining nodes where mons should run 222 | - nodes 223 | - nodes/proxy 224 | verbs: 225 | - get 226 | - list 227 | - watch 228 | - apiGroups: 229 | - "" 230 | resources: 231 | - events 232 | # PVs and PVCs are managed by the Rook provisioner 233 | - persistentvolumes 234 | - persistentvolumeclaims 235 | verbs: 236 | - get 237 | - list 238 | - watch 239 | - patch 240 | - create 241 | - update 242 | - delete 243 | - apiGroups: 244 | - storage.k8s.io 245 | resources: 246 | - storageclasses 247 | verbs: 248 | - get 249 | - list 250 | - watch 251 | - apiGroups: 252 | - batch 253 | resources: 254 | - jobs 255 | verbs: 256 | - get 257 | - list 258 | - watch 259 | - create 260 | - update 261 | - delete 262 | - apiGroups: 263 | - ceph.rook.io 264 | resources: 265 | - "*" 266 | verbs: 267 | - "*" 268 | - apiGroups: 269 | - rook.io 270 | resources: 271 | - "*" 272 | verbs: 273 | - "*" 274 | --- 275 | # The rook system service account used by the operator, agent, and discovery pods 276 | apiVersion: v1 277 | kind: ServiceAccount 278 | metadata: 279 | name: rook-ceph-system 280 | namespace: rook-ceph-system 281 | labels: 282 | operator: rook 283 | storage-backend: ceph 284 | --- 285 | # Grant the operator, agent, and discovery agents access to resources in the rook-ceph-system namespace 286 | kind: RoleBinding 287 | apiVersion: rbac.authorization.k8s.io/v1beta1 288 | metadata: 289 | name: rook-ceph-system 290 | namespace: rook-ceph-system 291 | labels: 292 | operator: rook 293 | storage-backend: ceph 294 | roleRef: 295 | apiGroup: rbac.authorization.k8s.io 296 | kind: Role 297 | name: rook-ceph-system 298 | subjects: 299 | - kind: ServiceAccount 300 | name: rook-ceph-system 301 | namespace: rook-ceph-system 302 | --- 303 | # Grant the rook system daemons cluster-wide access to manage the Rook CRDs, PVCs, and storage classes 304 | kind: ClusterRoleBinding 305 | apiVersion: rbac.authorization.k8s.io/v1beta1 306 | metadata: 307 | name: rook-ceph-global 308 | namespace: rook-ceph-system 309 | labels: 310 | operator: rook 311 | storage-backend: ceph 312 | roleRef: 313 | apiGroup: rbac.authorization.k8s.io 314 | kind: ClusterRole 315 | name: rook-ceph-global 316 | subjects: 317 | - kind: ServiceAccount 318 | name: rook-ceph-system 319 | namespace: rook-ceph-system 320 | --- 321 
| # The deployment for the rook operator 322 | apiVersion: apps/v1beta1 323 | kind: Deployment 324 | metadata: 325 | name: rook-ceph-operator 326 | namespace: rook-ceph-system 327 | labels: 328 | operator: rook 329 | storage-backend: ceph 330 | spec: 331 | replicas: 1 332 | template: 333 | metadata: 334 | labels: 335 | app: rook-ceph-operator 336 | spec: 337 | serviceAccountName: rook-ceph-system 338 | containers: 339 | - name: rook-ceph-operator 340 | image: rook/ceph:v0.8.3 341 | args: ["ceph", "operator"] 342 | volumeMounts: 343 | - mountPath: /var/lib/rook 344 | name: rook-config 345 | - mountPath: /etc/ceph 346 | name: default-config-dir 347 | env: 348 | # To disable RBAC, uncomment the following: 349 | # - name: RBAC_ENABLED 350 | # value: "false" 351 | # Rook Agent toleration. Will tolerate all taints with all keys. 352 | # Choose between NoSchedule, PreferNoSchedule and NoExecute: 353 | # - name: AGENT_TOLERATION 354 | # value: "NoSchedule" 355 | # (Optional) Rook Agent toleration key. Set this to the key of the taint you want to tolerate 356 | # - name: AGENT_TOLERATION_KEY 357 | # value: "" 358 | # Set the path where the Rook agent can find the flex volumes 359 | # - name: FLEXVOLUME_DIR_PATH 360 | # value: "" 361 | # Set the path where kernel modules can be found 362 | # - name: LIB_MODULES_DIR_PATH 363 | # value: "" 364 | # Mount any extra directories into the agent container 365 | # - name: AGENT_MOUNTS 366 | # value: "somemount=/host/path:/container/path,someothermount=/host/path2:/container/path2" 367 | # Rook Discover toleration. Will tolerate all taints with all keys. 368 | # Choose between NoSchedule, PreferNoSchedule and NoExecute: 369 | # - name: DISCOVER_TOLERATION 370 | # value: "NoSchedule" 371 | # (Optional) Rook Discover toleration key. Set this to the key of the taint you want to tolerate 372 | # - name: DISCOVER_TOLERATION_KEY 373 | # value: "" 374 | # Allow rook to create multiple file systems. Note: This is considered 375 | # an experimental feature in Ceph as described at 376 | # http://docs.ceph.com/docs/master/cephfs/experimental-features/#multiple-filesystems-within-a-ceph-cluster 377 | # which might cause mons to crash as seen in https://github.com/rook/rook/issues/1027 378 | - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS 379 | value: "true" 380 | # The logging level for the operator: INFO | DEBUG 381 | - name: ROOK_LOG_LEVEL 382 | value: "INFO" 383 | # The interval to check if every mon is in the quorum. 384 | - name: ROOK_MON_HEALTHCHECK_INTERVAL 385 | value: "45s" 386 | # The duration to wait before trying to failover or remove/replace the 387 | # current mon with a new mon (useful for compensating flapping network). 388 | - name: ROOK_MON_OUT_TIMEOUT 389 | value: "300s" 390 | # The duration between discovering devices in the rook-discover daemonset. 391 | - name: ROOK_DISCOVER_DEVICES_INTERVAL 392 | value: "60m" 393 | # Whether to start pods as privileged that mount a host path, which includes the Ceph mon and osd pods. 394 | # This is necessary to workaround the anyuid issues when running on OpenShift. 
395 | # For more details see https://github.com/rook/rook/issues/1314#issuecomment-355799641 396 | - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED 397 | value: "true" 398 | # The name of the node to pass with the downward API 399 | - name: NODE_NAME 400 | valueFrom: 401 | fieldRef: 402 | fieldPath: spec.nodeName 403 | # The pod name to pass with the downward API 404 | - name: POD_NAME 405 | valueFrom: 406 | fieldRef: 407 | fieldPath: metadata.name 408 | # The pod namespace to pass with the downward API 409 | - name: POD_NAMESPACE 410 | valueFrom: 411 | fieldRef: 412 | fieldPath: metadata.namespace 413 | volumes: 414 | - name: rook-config 415 | emptyDir: {} 416 | - name: default-config-dir 417 | emptyDir: {} 418 | -------------------------------------------------------------------------------- /cni/calico.yaml: -------------------------------------------------------------------------------- 1 | # Calico Version v3.3.1 2 | # https://docs.projectcalico.org/v3.3/releases#v3.3.1 3 | # This manifest includes the following component versions: 4 | # calico/node:v3.3.1 5 | # calico/cni:v3.3.1 6 | 7 | # This ConfigMap is used to configure a self-hosted Calico installation. 8 | kind: ConfigMap 9 | apiVersion: v1 10 | metadata: 11 | name: calico-config 12 | namespace: kube-system 13 | data: 14 | # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas 15 | # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is 16 | # essential. 17 | typha_service_name: "none" 18 | # Configure the Calico backend to use. 19 | calico_backend: "bird" 20 | 21 | # Configure the MTU to use 22 | veth_mtu: "1440" 23 | 24 | # The CNI network configuration to install on each node. The special 25 | # values in this config will be automatically populated. 26 | cni_network_config: |- 27 | { 28 | "name": "k8s-pod-network", 29 | "cniVersion": "0.3.0", 30 | "plugins": [ 31 | { 32 | "type": "calico", 33 | "log_level": "info", 34 | "datastore_type": "kubernetes", 35 | "nodename": "__KUBERNETES_NODE_NAME__", 36 | "mtu": __CNI_MTU__, 37 | "ipam": { 38 | "type": "host-local", 39 | "subnet": "usePodCidr" 40 | }, 41 | "policy": { 42 | "type": "k8s" 43 | }, 44 | "kubernetes": { 45 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 46 | } 47 | }, 48 | { 49 | "type": "portmap", 50 | "snat": true, 51 | "capabilities": {"portMappings": true} 52 | } 53 | ] 54 | } 55 | 56 | --- 57 | 58 | 59 | # This manifest creates a Service, which will be backed by Calico's Typha daemon. 60 | # Typha sits in between Felix and the API server, reducing Calico's load on the API server. 61 | 62 | apiVersion: v1 63 | kind: Service 64 | metadata: 65 | name: calico-typha 66 | namespace: kube-system 67 | labels: 68 | k8s-app: calico-typha 69 | spec: 70 | ports: 71 | - port: 5473 72 | protocol: TCP 73 | targetPort: calico-typha 74 | name: calico-typha 75 | selector: 76 | k8s-app: calico-typha 77 | 78 | --- 79 | 80 | # This manifest creates a Deployment of Typha to back the above service. 81 | 82 | apiVersion: apps/v1beta1 83 | kind: Deployment 84 | metadata: 85 | name: calico-typha 86 | namespace: kube-system 87 | labels: 88 | k8s-app: calico-typha 89 | spec: 90 | # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the 91 | # typha_service_name variable in the calico-config ConfigMap above. 92 | # 93 | # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential 94 | # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. 
In 95 | # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. 96 | replicas: 0 97 | revisionHistoryLimit: 2 98 | template: 99 | metadata: 100 | labels: 101 | k8s-app: calico-typha 102 | annotations: 103 | # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical 104 | # add-on, ensuring it gets priority scheduling and that its resources are reserved 105 | # if it ever gets evicted. 106 | scheduler.alpha.kubernetes.io/critical-pod: '' 107 | cluster-autoscaler.kubernetes.io/safe-to-evict: 'true' 108 | spec: 109 | nodeSelector: 110 | beta.kubernetes.io/os: linux 111 | hostNetwork: true 112 | tolerations: 113 | # Mark the pod as a critical add-on for rescheduling. 114 | - key: CriticalAddonsOnly 115 | operator: Exists 116 | # Since Calico can't network a pod until Typha is up, we need to run Typha itself 117 | # as a host-networked pod. 118 | serviceAccountName: calico-node 119 | containers: 120 | - image: quay.io/calico/typha:v3.3.1 121 | name: calico-typha 122 | ports: 123 | - containerPort: 5473 124 | name: calico-typha 125 | protocol: TCP 126 | env: 127 | # Enable "info" logging by default. Can be set to "debug" to increase verbosity. 128 | - name: TYPHA_LOGSEVERITYSCREEN 129 | value: "info" 130 | # Disable logging to file and syslog since those don't make sense in Kubernetes. 131 | - name: TYPHA_LOGFILEPATH 132 | value: "none" 133 | - name: TYPHA_LOGSEVERITYSYS 134 | value: "none" 135 | # Monitor the Kubernetes API to find the number of running instances and rebalance 136 | # connections. 137 | - name: TYPHA_CONNECTIONREBALANCINGMODE 138 | value: "kubernetes" 139 | - name: TYPHA_DATASTORETYPE 140 | value: "kubernetes" 141 | - name: TYPHA_HEALTHENABLED 142 | value: "true" 143 | # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, 144 | # this opens a port on the host, which may need to be secured. 145 | #- name: TYPHA_PROMETHEUSMETRICSENABLED 146 | # value: "true" 147 | #- name: TYPHA_PROMETHEUSMETRICSPORT 148 | # value: "9093" 149 | livenessProbe: 150 | exec: 151 | command: 152 | - calico-typha 153 | - check 154 | - liveness 155 | periodSeconds: 30 156 | initialDelaySeconds: 30 157 | readinessProbe: 158 | exec: 159 | command: 160 | - calico-typha 161 | - check 162 | - readiness 163 | periodSeconds: 10 164 | 165 | --- 166 | 167 | # This manifest creates a Pod Disruption Budget for Typha to allow K8s Cluster Autoscaler to evict 168 | 169 | apiVersion: policy/v1beta1 170 | kind: PodDisruptionBudget 171 | metadata: 172 | name: calico-typha 173 | namespace: kube-system 174 | labels: 175 | k8s-app: calico-typha 176 | spec: 177 | maxUnavailable: 1 178 | selector: 179 | matchLabels: 180 | k8s-app: calico-typha 181 | 182 | --- 183 | 184 | # This manifest installs the calico/node container, as well 185 | # as the Calico CNI plugins and network config on 186 | # each master and worker node in a Kubernetes cluster. 
187 | kind: DaemonSet 188 | apiVersion: extensions/v1beta1 189 | metadata: 190 | name: calico-node 191 | namespace: kube-system 192 | labels: 193 | k8s-app: calico-node 194 | spec: 195 | selector: 196 | matchLabels: 197 | k8s-app: calico-node 198 | updateStrategy: 199 | type: RollingUpdate 200 | rollingUpdate: 201 | maxUnavailable: 1 202 | template: 203 | metadata: 204 | labels: 205 | k8s-app: calico-node 206 | annotations: 207 | # This, along with the CriticalAddonsOnly toleration below, 208 | # marks the pod as a critical add-on, ensuring it gets 209 | # priority scheduling and that its resources are reserved 210 | # if it ever gets evicted. 211 | scheduler.alpha.kubernetes.io/critical-pod: '' 212 | spec: 213 | nodeSelector: 214 | beta.kubernetes.io/os: linux 215 | hostNetwork: true 216 | tolerations: 217 | # Make sure calico-node gets scheduled on all nodes. 218 | - effect: NoSchedule 219 | operator: Exists 220 | # Mark the pod as a critical add-on for rescheduling. 221 | - key: CriticalAddonsOnly 222 | operator: Exists 223 | - effect: NoExecute 224 | operator: Exists 225 | serviceAccountName: calico-node 226 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 227 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 228 | terminationGracePeriodSeconds: 0 229 | containers: 230 | # Runs calico/node container on each Kubernetes node. This 231 | # container programs network policy and routes on each 232 | # host. 233 | - name: calico-node 234 | image: quay.io/calico/node:v3.3.1 235 | env: 236 | # Use Kubernetes API as the backing datastore. 237 | - name: DATASTORE_TYPE 238 | value: "kubernetes" 239 | # Typha support: controlled by the ConfigMap. 240 | - name: FELIX_TYPHAK8SSERVICENAME 241 | valueFrom: 242 | configMapKeyRef: 243 | name: calico-config 244 | key: typha_service_name 245 | # Wait for the datastore. 246 | - name: WAIT_FOR_DATASTORE 247 | value: "true" 248 | # Set based on the k8s node name. 249 | - name: NODENAME 250 | valueFrom: 251 | fieldRef: 252 | fieldPath: spec.nodeName 253 | # Choose the backend to use. 254 | - name: CALICO_NETWORKING_BACKEND 255 | valueFrom: 256 | configMapKeyRef: 257 | name: calico-config 258 | key: calico_backend 259 | # Cluster type to identify the deployment type 260 | - name: CLUSTER_TYPE 261 | value: "k8s,bgp" 262 | # Auto-detect the BGP IP address. 263 | - name: IP 264 | value: "autodetect" 265 | # Enable IPIP 266 | - name: CALICO_IPV4POOL_IPIP 267 | value: "Always" 268 | # Set MTU for tunnel device used if ipip is enabled 269 | - name: FELIX_IPINIPMTU 270 | valueFrom: 271 | configMapKeyRef: 272 | name: calico-config 273 | key: veth_mtu 274 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 275 | # chosen from this range. Changing this value after installation will have 276 | # no effect. This should fall within `--cluster-cidr`. 277 | - name: CALICO_IPV4POOL_CIDR 278 | value: "192.168.0.0/16" 279 | # Disable file logging so `kubectl logs` works. 280 | - name: CALICO_DISABLE_FILE_LOGGING 281 | value: "true" 282 | # Set Felix endpoint to host default action to ACCEPT. 283 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 284 | value: "ACCEPT" 285 | # Disable IPv6 on Kubernetes. 
286 | - name: FELIX_IPV6SUPPORT 287 | value: "false" 288 | # Set Felix logging to "info" 289 | - name: FELIX_LOGSEVERITYSCREEN 290 | value: "info" 291 | - name: FELIX_HEALTHENABLED 292 | value: "true" 293 | securityContext: 294 | privileged: true 295 | resources: 296 | requests: 297 | cpu: 250m 298 | livenessProbe: 299 | httpGet: 300 | path: /liveness 301 | port: 9099 302 | host: localhost 303 | periodSeconds: 10 304 | initialDelaySeconds: 10 305 | failureThreshold: 6 306 | readinessProbe: 307 | exec: 308 | command: 309 | - /bin/calico-node 310 | - -bird-ready 311 | - -felix-ready 312 | periodSeconds: 10 313 | volumeMounts: 314 | - mountPath: /lib/modules 315 | name: lib-modules 316 | readOnly: true 317 | - mountPath: /run/xtables.lock 318 | name: xtables-lock 319 | readOnly: false 320 | - mountPath: /var/run/calico 321 | name: var-run-calico 322 | readOnly: false 323 | - mountPath: /var/lib/calico 324 | name: var-lib-calico 325 | readOnly: false 326 | # This container installs the Calico CNI binaries 327 | # and CNI network config file on each node. 328 | - name: install-cni 329 | image: quay.io/calico/cni:v3.3.1 330 | command: ["/install-cni.sh"] 331 | env: 332 | # Name of the CNI config file to create. 333 | - name: CNI_CONF_NAME 334 | value: "10-calico.conflist" 335 | # Set the hostname based on the k8s node name. 336 | - name: KUBERNETES_NODE_NAME 337 | valueFrom: 338 | fieldRef: 339 | fieldPath: spec.nodeName 340 | # The CNI network config to install on each node. 341 | - name: CNI_NETWORK_CONFIG 342 | valueFrom: 343 | configMapKeyRef: 344 | name: calico-config 345 | key: cni_network_config 346 | # CNI MTU Config variable 347 | - name: CNI_MTU 348 | valueFrom: 349 | configMapKeyRef: 350 | name: calico-config 351 | key: veth_mtu 352 | volumeMounts: 353 | - mountPath: /host/opt/cni/bin 354 | name: cni-bin-dir 355 | - mountPath: /host/etc/cni/net.d 356 | name: cni-net-dir 357 | volumes: 358 | # Used by calico/node. 359 | - name: lib-modules 360 | hostPath: 361 | path: /lib/modules 362 | - name: var-run-calico 363 | hostPath: 364 | path: /var/run/calico 365 | - name: var-lib-calico 366 | hostPath: 367 | path: /var/lib/calico 368 | - name: xtables-lock 369 | hostPath: 370 | path: /run/xtables.lock 371 | type: FileOrCreate 372 | # Used to install CNI. 373 | - name: cni-bin-dir 374 | hostPath: 375 | path: /opt/cni/bin 376 | - name: cni-net-dir 377 | hostPath: 378 | path: /etc/cni/net.d 379 | --- 380 | 381 | apiVersion: v1 382 | kind: ServiceAccount 383 | metadata: 384 | name: calico-node 385 | namespace: kube-system 386 | 387 | --- 388 | 389 | # Create all the CustomResourceDefinitions needed for 390 | # Calico policy and networking mode. 
391 | 392 | apiVersion: apiextensions.k8s.io/v1beta1 393 | kind: CustomResourceDefinition 394 | metadata: 395 | name: felixconfigurations.crd.projectcalico.org 396 | spec: 397 | scope: Cluster 398 | group: crd.projectcalico.org 399 | version: v1 400 | names: 401 | kind: FelixConfiguration 402 | plural: felixconfigurations 403 | singular: felixconfiguration 404 | --- 405 | 406 | apiVersion: apiextensions.k8s.io/v1beta1 407 | kind: CustomResourceDefinition 408 | metadata: 409 | name: bgppeers.crd.projectcalico.org 410 | spec: 411 | scope: Cluster 412 | group: crd.projectcalico.org 413 | version: v1 414 | names: 415 | kind: BGPPeer 416 | plural: bgppeers 417 | singular: bgppeer 418 | 419 | --- 420 | 421 | apiVersion: apiextensions.k8s.io/v1beta1 422 | kind: CustomResourceDefinition 423 | metadata: 424 | name: bgpconfigurations.crd.projectcalico.org 425 | spec: 426 | scope: Cluster 427 | group: crd.projectcalico.org 428 | version: v1 429 | names: 430 | kind: BGPConfiguration 431 | plural: bgpconfigurations 432 | singular: bgpconfiguration 433 | 434 | --- 435 | 436 | apiVersion: apiextensions.k8s.io/v1beta1 437 | kind: CustomResourceDefinition 438 | metadata: 439 | name: ippools.crd.projectcalico.org 440 | spec: 441 | scope: Cluster 442 | group: crd.projectcalico.org 443 | version: v1 444 | names: 445 | kind: IPPool 446 | plural: ippools 447 | singular: ippool 448 | 449 | --- 450 | 451 | apiVersion: apiextensions.k8s.io/v1beta1 452 | kind: CustomResourceDefinition 453 | metadata: 454 | name: hostendpoints.crd.projectcalico.org 455 | spec: 456 | scope: Cluster 457 | group: crd.projectcalico.org 458 | version: v1 459 | names: 460 | kind: HostEndpoint 461 | plural: hostendpoints 462 | singular: hostendpoint 463 | 464 | --- 465 | 466 | apiVersion: apiextensions.k8s.io/v1beta1 467 | kind: CustomResourceDefinition 468 | metadata: 469 | name: clusterinformations.crd.projectcalico.org 470 | spec: 471 | scope: Cluster 472 | group: crd.projectcalico.org 473 | version: v1 474 | names: 475 | kind: ClusterInformation 476 | plural: clusterinformations 477 | singular: clusterinformation 478 | 479 | --- 480 | 481 | apiVersion: apiextensions.k8s.io/v1beta1 482 | kind: CustomResourceDefinition 483 | metadata: 484 | name: globalnetworkpolicies.crd.projectcalico.org 485 | spec: 486 | scope: Cluster 487 | group: crd.projectcalico.org 488 | version: v1 489 | names: 490 | kind: GlobalNetworkPolicy 491 | plural: globalnetworkpolicies 492 | singular: globalnetworkpolicy 493 | 494 | --- 495 | 496 | apiVersion: apiextensions.k8s.io/v1beta1 497 | kind: CustomResourceDefinition 498 | metadata: 499 | name: globalnetworksets.crd.projectcalico.org 500 | spec: 501 | scope: Cluster 502 | group: crd.projectcalico.org 503 | version: v1 504 | names: 505 | kind: GlobalNetworkSet 506 | plural: globalnetworksets 507 | singular: globalnetworkset 508 | 509 | --- 510 | 511 | apiVersion: apiextensions.k8s.io/v1beta1 512 | kind: CustomResourceDefinition 513 | metadata: 514 | name: networkpolicies.crd.projectcalico.org 515 | spec: 516 | scope: Namespaced 517 | group: crd.projectcalico.org 518 | version: v1 519 | names: 520 | kind: NetworkPolicy 521 | plural: networkpolicies 522 | singular: networkpolicy 523 | 524 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Hands-on with VM 2 | 3 | VM 환경 (Virtualbox) 에서 Kubernetes 클러스터를 직접 구축하는 실습을 진행합니다. 
4 | 5 | * Kubernetes 를 바로 설치해볼 수 있도록 Vagrant 를 이용해 VM 을 실행하고 클러스터 구성 6 | * Helm, Ceph, Rook, Metallb 등을 활용해서 어플리케이션 배포 7 | 8 | 9 | 10 | ## Used packages in this hands-on 11 | 12 | * Kubernetes (http://kubernetes.io) 13 | * Docker (https://www.docker.com/) 14 | * Helm (https://helm.sh) 15 | * Rook (https://rook.io) 16 | * Ceph (https://ceph.com) 17 | * Metallb (https://metallb.universe.tf) 18 | 19 | 20 | 21 | # Setup Environments 22 | 23 | * Host OS: Ubuntu 16.04 24 | * Guest OS: Ubuntu 16.04 (ubuntu/xenial64) / 18.04 (ubuntu/bionic64) 25 | * Automation Tool: Vagrant 26 | 27 | 28 | 29 | ## Install Virtualbox 30 | 31 | 사용하는 운영체제에 맞는 패키지를 받아 설치합니다. 32 | 33 | * https://www.virtualbox.org/wiki/Downloads 34 | 35 | ```bash 36 | sudo apt install virtualbox 37 | ``` 38 | 39 | 40 | 41 | ## Install Vagrant 42 | 43 | VM 을 생성하면서 기본적인 초기화를 진행할 때 사용할 Vagrant 프로그램을 설치합니다. 44 | 45 | * https://www.vagrantup.com/downloads.html 46 | 47 | ```bash 48 | sudo dpkg -i vagrant_2.2.1_x86_64.deb 49 | ``` 50 | 51 | 52 | 53 | ## Download Vagrant box image 54 | 55 | Vagrant 를 이용해 VM 을 생성할 때 사용할 Box 파일을 미리 받아 디스크에 저장해둡니다. 56 | 57 | Ubuntu 16.04 혹은 18.04 이미지를 이용합니다. 58 | 59 | ```bash 60 | vagrant box add ubuntu/bionic64 61 | ``` 62 | 63 | 64 | 65 | ## Download worksheet 66 | 67 | github 저장소에 실습을 진행하면서 사용할 파일을 디렉토리별로 구분하여 저장해두었습니다. 68 | 69 | ```bash 70 | git clone https://github.com/chanshik/kubernetes-201811-meetup.git 71 | cd kubernetes-201811-meetup 72 | kubernetes-201811-meetup$ 73 | ``` 74 | 75 | 76 | 77 | ## VM Networks 78 | 79 | VM 에 할당한 IP 와 역할은 다음과 같습니다. 80 | 81 | | Node | IP | Role | 82 | | ----- | ---------- | ------ | 83 | | k8s-1 | 10.254.1.2 | Master | 84 | | k8s-2 | 10.254.1.3 | Worker | 85 | | k8s-3 | 10.254.1.4 | Worker | 86 | 87 | 88 | 89 | ## Start VMs 90 | 91 | 미리 작성해둔 **Vagrantfile** 을 이용해 VM 3대를 시작합니다. 사용하는 장비 사양에 맞도록 CPU, 메모리, 추가 디스크 공간을 지정합니다. 92 | 93 | 실습에 필요한 환경을 구축하기 위해 세 대의 VM 이 실행되며, 각 VM 은 시스템 디스크 외에 두 개의 디스크를 더 가지고 있습니다. **/dev/sdc** 디스크는 배포하는 어플리케이션이 노드에 종속적인 디스크를 사용할 경우에 할당되는 공간이며, **/dev/sdd** 디스크는 **Ceph** 클러스터에서 활용하게 됩니다.
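뒤에 나오는 `vagrant up` 으로 VM 을 모두 시작한 다음, 추가 디스크가 의도한 대로 연결되었는지 먼저 확인해볼 수 있습니다. 아래는 호스트에서 실행하는 간단한 확인용 예시이며, 장치 이름이 **sdc**, **sdd** 로 잡힌다는 가정 하에 작성한 것입니다.

```bash
# 각 노드에서 시스템 디스크(sda) 외에 추가 디스크 2개(sdc, sdd)가 보이는지 확인합니다.
for node in k8s-1 k8s-2 k8s-3; do
  vagrant ssh $node -c "lsblk -d -o NAME,SIZE,TYPE"
done
```

실습에서 사용하는 **Vagrantfile** 전체 내용은 다음과 같습니다.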
94 | 95 | ```ruby 96 | # -*- mode: ruby -*- 97 | # vi: set ft=ruby : 98 | 99 | Vagrant.configure("2") do |config| 100 | config.vm.box = "ubuntu/bionic64" 101 | config.vm.box_check_update = false 102 | node_subnet = "10.254.1" 103 | 104 | (1..3).each do |i| 105 | config.vm.define "k8s-#{i}" do |node| 106 | node.vm.hostname = "k8s-#{i}" 107 | node.vm.network "private_network", ip: "#{node_subnet}.#{i + 1}" 108 | 109 | attached_disk_a = "disk-k8s-#{i}-a.vdi" 110 | attached_disk_b = "disk-k8s-#{i}-b.vdi" 111 | 112 | node.vm.provider "virtualbox" do |vb| 113 | vb.name = "k8s-#{i}" 114 | vb.gui = false 115 | 116 | vb.cpus = 2 117 | vb.memory = "4096" 118 | 119 | unless File.exists?(attached_disk_a) 120 | vb.customize [ 121 | 'createhd', '--filename', attached_disk_a, 122 | '--variant', 'Fixed', 123 | '--size', 10 * 1024] 124 | end 125 | 126 | unless File.exists?(attached_disk_b) 127 | vb.customize [ 128 | 'createhd', '--filename', attached_disk_b, 129 | '--variant', 'Fixed', 130 | '--size', 10 * 1024] 131 | end 132 | 133 | vb.customize [ 134 | 'storageattach', :id, '--storagectl', 'SCSI', 135 | '--port', 2, '--device', 0, '--type', 'hdd', 136 | '--medium', attached_disk_a] 137 | 138 | vb.customize [ 139 | 'storageattach', :id, '--storagectl', 'SCSI', 140 | '--port', 3, '--device', 0, '--type', 'hdd', 141 | '--medium', attached_disk_b] 142 | end 143 | 144 | node.vm.provision "bootstrap", type: "shell", inline: <<-SHELL 145 | sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 146 | sudo bash -c 'cat </etc/apt/sources.list.d/kubernetes.list 147 | deb http://apt.kubernetes.io/ kubernetes-xenial main 148 | EOF' 149 | sudo apt update 150 | sudo apt install -y docker.io kubelet kubeadm kubectl ntp nfs-kernel-server 151 | sudo systemctl enable docker.service 152 | sudo usermod -aG docker vagrant 153 | 154 | sudo sed -i '/k8s/d' /etc/hosts 155 | sudo echo "#{node_subnet}.#{i + 1} k8s-#{i}" | sudo tee -a /etc/hosts 156 | 157 | sudo mkfs.ext4 /dev/sdc 158 | sudo mkdir /media/data 159 | SHELL 160 | 161 | node.vm.provision "shell", run: "always", 162 | inline: "sudo mount /dev/sdc /media/data" 163 | end 164 | end 165 | end 166 | ``` 167 | 168 | 앞에서 작성한 **Vagrantfile** 을 이용해 VM 을 생성합니다. 169 | 170 | ```bash 171 | vagrant up 172 | ``` 173 | 174 | VM 생성이 모두 끝난 다음에 ssh 를 실행하여 원하는 노드에 접속합니다. 175 | 176 | ```bash 177 | vagrant ssh k8s-1 178 | ``` 179 | 180 | 181 | 182 | ## Slow network environment 183 | 184 | 네트워크 속도가 느린 곳에서는 VM 을 생성하면서 패키지를 설치하는 방식보다, VM 을 모두 시작한 이후에 터미널로 접속해서 필요한 작업을 진행합니다. 185 | 186 | ```bash 187 | vagrant up --no-provision 188 | ``` 189 | 190 | VM 을 실행한 이후에 각 VM 에 접속해서 초기 작업을 진행합니다. 191 | 192 | ```bash 193 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 194 | 195 | sudo bash -c 'cat </etc/apt/sources.list.d/kubernetes.list 196 | deb http://apt.kubernetes.io/ kubernetes-xenial main 197 | EOF' 198 | 199 | sudo apt update 200 | sudo apt install -y docker.io kubelet kubeadm kubectl ntp nfs-kernel-server 201 | sudo usermod -aG docker vagrant 202 | 203 | sudo sed -i '/k8s/d' /etc/hosts 204 | sudo echo "10.254.1.2 k8s-1" | sudo tee -a /etc/hosts 205 | ``` 206 | 207 | 초기화 단계를 진행할 때 `sudo echo "10.254.1.2 k8s-1" | sudo tee -a /etc/hosts` 명령은 각 VM 에 접속해서 노드 이름과 IP 를 개별로 지정해서 실행합니다. 208 | 209 | 210 | 211 | ## Format data disk 212 | 213 | VM 에 추가한 2개 디스크 중에 하나를 **ext4** 형식으로 포맷해서 준비해둡니다. Vagrant 를 이용해 VM 을 생성할 때, 기본적인 작업이 이루어지도록 추가해두었습니다. 
만약에 **Provision** 단계를 건너뛰었다면, 각 노드에 접속하여 디스크 초기화 작업을 진행해 **Persistent Volume** 으로 사용할 디스크를 준비해두어야 합니다. 214 | 215 | ```bash 216 | sudo mkfs.ext4 /dev/sdc 217 | 218 | mke2fs 1.44.1 (24-Mar-2018) 219 | Found a dos partition table in /dev/sdc 220 | Proceed anyway? (y,N) y 221 | Creating filesystem with 2621440 4k blocks and 655360 inodes 222 | Filesystem UUID: dfac8c39-ef7c-43dc-8594-b81467306723 223 | Superblock backups stored on blocks: 224 | 32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632 225 | 226 | Allocating group tables: done 227 | Writing inode tables: done 228 | Creating journal (16384 blocks): done 229 | Writing superblocks and filesystem accounting information: done 230 | 231 | ``` 232 | 233 | ```bash 234 | sudo mkdir /media/data 235 | sudo mount /dev/sdc /media/data 236 | ``` 237 | 238 | ```bash 239 | df -h 240 | 241 | Filesystem Size Used Avail Use% Mounted on 242 | udev 2.0G 0 2.0G 0% /dev 243 | tmpfs 395M 1.1M 394M 1% /run 244 | /dev/sda1 9.7G 3.1G 6.7G 32% / 245 | tmpfs 2.0G 0 2.0G 0% /dev/shm 246 | tmpfs 5.0M 0 5.0M 0% /run/lock 247 | tmpfs 2.0G 0 2.0G 0% /sys/fs/cgroup 248 | vagrant 267G 84G 183G 32% /vagrant 249 | tmpfs 395M 0 395M 0% /run/user/1000 250 | /dev/sdc 9.8G 37M 9.3G 1% /media/data 251 | ``` 252 | 253 | 254 | 255 | # Setup Kubernetes Cluster 256 | 257 | ## Select pod network add-on 258 | 259 | Kubernetes 에서 사용할 CNI (Container Network Interface) 선택하고 **kubeadm** 을 이용해 초기화 할 때 같이 지정합니다. 실습에서는 **Calico** CNI 를 사용합니다. 260 | 261 | * kubeadm 은 CNI 기반 네트워크만 지원 262 | * Calico CIDR: 192.168.0.0/16 263 | * https://kubernetes.io/docs/concepts/cluster-administration/addons/ 264 | 265 | 266 | 267 | ## Initialize master node 268 | 269 | Master node 에서 **kubeadm init** 명령을 실행하여 클러스터 초기화 작업을 시작합니다. 270 | 271 | ```bash 272 | sudo swapoff -a 273 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16 --apiserver-advertise-address=10.254.1.2 274 | 275 | [init] using Kubernetes version: v1.12.2 276 | [preflight] running pre-flight checks 277 | [WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service' 278 | [preflight/images] Pulling images required for setting up a Kubernetes cluster 279 | [preflight/images] This might take a minute or two, depending on the speed of your internet connection 280 | [preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull' 281 | [kubelet] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" 282 | [kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" 283 | [preflight] Activating the kubelet service 284 | ... 285 | [bootstraptoken] using token: s9qd0j.beetbemlhmmx1etd 286 | [bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials 287 | [bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token 288 | [bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster 289 | [bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace 290 | [addons] Applied essential addon: CoreDNS 291 | [addons] Applied essential addon: kube-proxy 292 | 293 | Your Kubernetes master has initialized successfully! 
294 | 295 | To start using your cluster, you need to run the following as a regular user: 296 | 297 | mkdir -p $HOME/.kube 298 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 299 | sudo chown $(id -u):$(id -g) $HOME/.kube/config 300 | 301 | You should now deploy a pod network to the cluster. 302 | Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: 303 | https://kubernetes.io/docs/concepts/cluster-administration/addons/ 304 | 305 | You can now join any number of machines by running the following on each node 306 | as root: 307 | 308 | kubeadm join 10.254.1.2:6443 --token dzjclo.a8d0kjwcc64r7kvs --discovery-token-ca-cert-hash sha256:ce7c94f7863dbc1ad8d32028cb5388e4ea47a12959317d035b722e2a4fb3e5f3 309 | ``` 310 | 311 | 312 | 313 | ## Add nodes 314 | 315 | Master node 초기화 이후에는 추가하려는 노드에서 **kubeadm join** 명령을 실행합니다. 316 | 317 | **@k8s-2** 318 | 319 | ```bash 320 | sudo swapoff -a 321 | sudo kubeadm join 10.254.1.2:6443 --token s9qd0j.beetbemlhmmx1etd --discovery-token-ca-cert-hash sha256:573bf08c800f2c9736d9b1b8a66421777dcd9e8991a2b9e0d7612c248bcdcdc5 322 | ``` 323 | 324 | **@k8s-3** 325 | 326 | ```bash 327 | sudo swapoff -a 328 | sudo kubeadm join 10.254.1.2:6443 --token s9qd0j.beetbemlhmmx1etd --discovery-token-ca-cert-hash sha256:573bf08c800f2c9736d9b1b8a66421777dcd9e8991a2b9e0d7612c248bcdcdc5 329 | ``` 330 | 331 | 332 | 333 | ## Show kubernetes nodes 334 | 335 | 위 과정을 거쳐 생성한 Kubernetes 에 접근하려면 /etc/kubernetes/admin.conf 파일이 필요합니다. 홈 디렉토리에 복사하고 소유자를 변경한 이후에 **KUBECONFIG** 환경변수에 위치를 지정합니다. 336 | 337 | ```bash 338 | sudo cp /etc/kubernetes/admin.conf ./k8s-admin.conf 339 | sudo chown vagrant:vagrant k8s-admin.conf 340 | export KUBECONFIG=/home/vagrant/k8s-admin.conf 341 | echo "export KUBECONFIG=/home/vagrant/k8s-admin.conf" >> .bashrc 342 | kubectl get nodes 343 | 344 | NAME STATUS ROLES AGE VERSION 345 | k8s-1 NotReady master 8m48s v1.12.2 346 | k8s-2 NotReady 2m31s v1.12.2 347 | k8s-3 NotReady 2m28s v1.12.2 348 | ``` 349 | 350 | 351 | 352 | ## Install CNI 353 | 354 | **kubectl get nodes** 명령 결과를 보면 **STATUS** 가 현재 **NotReady** 입니다. 초기화 단계에서 선택한 CNI 를 설치해야 실제로 사용 가능한 상태가 됩니다. 355 | 356 | **Calico** CNI 를 사용하기 위해 **kubectl** 명령어를 이용해 설치합니다. 
357 | 358 | ```bash 359 | kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml 360 | 361 | clusterrole.rbac.authorization.k8s.io/calico-node created 362 | clusterrolebinding.rbac.authorization.k8s.io/calico-node created 363 | ``` 364 | 365 | ```bash 366 | kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml 367 | 368 | configmap/calico-config created 369 | service/calico-typha created 370 | deployment.apps/calico-typha created 371 | poddisruptionbudget.policy/calico-typha created 372 | daemonset.extensions/calico-node created 373 | serviceaccount/calico-node created 374 | customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created 375 | customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created 376 | customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created 377 | customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created 378 | customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created 379 | customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created 380 | customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created 381 | customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created 382 | customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created 383 | ``` 384 | 385 | ```bash 386 | kubectl get nodes 387 | NAME STATUS ROLES AGE VERSION 388 | k8s-1 Ready master 25m v1.12.2 389 | k8s-2 Ready 19m v1.12.2 390 | k8s-3 Ready 19m v1.12.2 391 | ``` 392 | 393 | 394 | 395 | ## Master isolation 396 | 397 | Kubernetes 기본 설정은 Master 역할을 하는 노드에 다른 컨테이너를 배포하지 않도록 되어있습니다. 실습을 진행할 때는 Master 노드도 사용하기 위해 설정을 변경합니다. 398 | 399 | ```bash 400 | kubectl taint nodes --all node-role.kubernetes.io/master- 401 | 402 | node/k8s-1 untainted 403 | taint "node-role.kubernetes.io/master:" not found 404 | taint "node-role.kubernetes.io/master:" not found 405 | ``` 406 | 407 | 408 | 409 | ## Install dashboard 410 | 411 | Kubernetes 를 편하게 사용하기 위해 Dashboard 를 설치합니다. 412 | 413 | ```bash 414 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml 415 | 416 | secret/kubernetes-dashboard-certs created 417 | serviceaccount/kubernetes-dashboard created 418 | role.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created 419 | rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard-minimal created 420 | deployment.apps/kubernetes-dashboard created 421 | service/kubernetes-dashboard created 422 | ``` 423 | 424 | Dashboard 에서 사용할 계정을 생성하는데, 여기에서는 관리자 권한을 준 **admin-user** 를 생성하여 접속하는데 이용합니다.
425 | 426 | **kubernetes/dashboard-service-account.yaml** 427 | 428 | ```yaml 429 | apiVersion: v1 430 | kind: ServiceAccount 431 | metadata: 432 | name: admin-user 433 | namespace: kube-system 434 | ``` 435 | 436 | **kubernetes/dashboard-clusterrolebinding.yaml** 437 | 438 | ```yaml 439 | apiVersion: rbac.authorization.k8s.io/v1beta1 440 | kind: ClusterRoleBinding 441 | metadata: 442 | name: admin-user 443 | roleRef: 444 | apiGroup: rbac.authorization.k8s.io 445 | kind: ClusterRole 446 | name: cluster-admin 447 | subjects: 448 | - kind: ServiceAccount 449 | name: admin-user 450 | namespace: kube-system 451 | ``` 452 | 453 | 위 두 파일을 이용하여 Dashboard 에 접속할 때 사용할 계정을 생성합니다. 454 | 455 | ```bash 456 | kubectl create -f kubernetes/dashboard-service-account.yaml 457 | serviceaccount/admin-user created 458 | kubectl create -f kubernetes/dashboard-clusterrolebinding.yaml 459 | clusterrolebinding.rbac.authorization.k8s.io/admin-user created 460 | ``` 461 | 462 | 설치한 Dashboard 상태를 확인합니다. 463 | 464 | ```bash 465 | kubectl get svc -n kube-system 466 | 467 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 468 | calico-typha ClusterIP 10.100.9.93 5473/TCP 2m53s 469 | kube-dns ClusterIP 10.96.0.10 53/UDP,53/TCP 24m 470 | kubernetes-dashboard ClusterIP 10.105.107.14 443/TCP 119s 471 | ``` 472 | 473 | 외부에서 접속하기 위해 **Dashboard Service Type** 을 **NodePort** 로 변경합니다. 474 | 475 | ```bash 476 | kubectl edit svc -n kube-system kubernetes-dashboard 477 | ``` 478 | 479 | vi 에디터 화면에서 **nodePort** 를 추가하고 **type** 에 **NodePort** 를 지정합니다. 480 | 481 | ```yaml 482 | spec: 483 | clusterIP: 10.105.107.14 484 | ports: 485 | - port: 443 486 | protocol: TCP 487 | targetPort: 8443 488 | nodePort: 30000 489 | selector: 490 | k8s-app: kubernetes-dashboard 491 | sessionAffinity: None 492 | type: NodePort 493 | ``` 494 | 495 | ```bash 496 | $ kubectl get svc -n kube-system kubernetes-dashboard 497 | 498 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 499 | kubernetes-dashboard NodePort 10.105.107.14 443:30000/TCP 3m54s 500 | ``` 501 | 502 | 웹 브라우져를 통해 Dashboard 에 접속합니다. 503 | 504 | ![dashboard-intro](./assets/dashboard-intro.png) 505 | 506 | 507 | 508 | ## Get dashboard bearer token 509 | 510 | Dashboard 에 접속하기 위해 관리자 Token 을 가져옵니다. 511 | 512 | ```bash 513 | kubectl get secret -n kube-system 514 | 515 | NAME TYPE DATA AGE 516 | admin-user-token-9m6zn kubernetes.io/service-account-token 3 115s 517 | attachdetach-controller-token-htnpk kubernetes.io/service-account-token 3 5m38s 518 | bootstrap-signer-token-6ztxm kubernetes.io/service-account-token 3 5m52s 519 | bootstrap-token-11h5df bootstrap.kubernetes.io/token 7 5m52s 520 | calico-node-token-2kxw5 kubernetes.io/service-account-token 3 2m43s 521 | certificate-controller-token-6lvgq kubernetes.io/service-account-token 3 5m52s 522 | ... 
523 | ``` 524 | 525 | ```bash 526 | kubectl describe secret admin-user-token-9m6zn -n kube-system 527 | 528 | Name: admin-user-token-9m6zn 529 | Namespace: kube-system 530 | Labels: 531 | Annotations: kubernetes.io/service-account.name: admin-user 532 | kubernetes.io/service-account.uid: 407a5a06-ed68-11e8-a94d-02c44c503abe 533 | 534 | Type: kubernetes.io/service-account-token 535 | 536 | Data 537 | ==== 538 | namespace: 11 bytes 539 | token: eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLTltNnpuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiI0MDdhNWEwNi1lZDY4LTExZTgtYTk0ZC0wMmM0NGM1MDNhYmUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.dhPeoOsMCwmvwNFWFPE6Gn16afd0CpY22uOzNliEgYyALoZndU-j2r62gm3W697UzfatWg5Ezj7m52mq3wKkhr1tHZeEUXHBjmRulOh_sbtJJKBOACGDl9yhWSbhb8F5NMfWhqBnpFwKws9uL3mapiN5Pks8z4yky-pZf3SMpFNtvo_FtoynNbnxo_kalOhvMeqNrpZrJZBGCCCFR9Z9uDu3kaDqsVrfNrMZE0Yx6Rk8TIma9_gibSr57va8XSLFa35P31UwFTHiafVFyOSyvp9ZHkVw2Me-V_SYYQmfjZjjBXr8QZSeEjp8mTJMD5R_NInkl37DtVCG6uf8xUuzjw 540 | ca.crt: 1025 bytes 541 | ``` 542 | 543 | 마지막 token: 밑에 있는 문자열을 이용해 Dashboard 에 접속할 수 있습니다. 544 | 545 | 546 | 547 | ![dashboard-overview](./assets/dashboard-overview.png) 548 | 549 | 550 | 551 | # Isolate resources 552 | 553 | ## Use label 554 | 555 | Kubernetes 에서 어플리케이션을 실행할 때 특정 노드에서만 실행되도록 할 수 있습니다. Managed Kubernetes 에서는 이렇게 사용할 이유가 없겠지만, 내부에서 클러스터를 구축해 사용한다면 유용하게 사용할 수 있습니다. 556 | 557 | 노드에 부여되어 있는 Label 을 확인해봅니다. 558 | 559 | ```bash 560 | kubectl get nodes --show-labels 561 | 562 | NAME STATUS ROLES AGE VERSION LABELS 563 | k8s-1 Ready master 80m v1.12.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-1,node-role.kubernetes.io/master= 564 | k8s-2 Ready 80m v1.12.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-2 565 | k8s-3 Ready 79m v1.12.2 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=k8s-3 566 | ``` 567 | 568 | **beta.kubernetes.io/arch**, **beta.kubernetes.io/os**, **kubernetes.io/hostname** Label 들은 클러스터에 참여하고 있는 노드들에 부여한 기본적인 속성입니다. 특정한 노드에만 어플리케이션이 동작하도록 설정하려면, **kubernetes.io/hostname** Label 을 이용합니다. 569 | 570 | 앞으로는 일반적인 목적을 가진 어플리케이션을 실행하는데 사용할 Label 로 **app** 를 사용하겠습니다. 571 | 572 | ```bash 573 | kubectl label nodes k8s-1 k8s-2 k8s-3 app=yes 574 | 575 | node/k8s-1 labeled 576 | node/k8s-2 labeled 577 | node/k8s-3 labeled 578 | ``` 579 | 580 | 581 | 582 | ### Deploy nginx with label 583 | 584 | nginx 어플리케이션을 Kubernetes 에서 배포하면서 실행할 노드를 label 을 이용해 지정해보겠습니다. 
585 | 586 | **nginx/nginx-deploy.yaml** 587 | 588 | ```yaml 589 | apiVersion: apps/v1 590 | kind: Deployment 591 | metadata: 592 | labels: 593 | k8s-app: nginx 594 | name: nginx 595 | namespace: default 596 | spec: 597 | replicas: 1 598 | selector: 599 | matchLabels: 600 | k8s-app: nginx 601 | template: 602 | metadata: 603 | labels: 604 | k8s-app: nginx 605 | name: nginx 606 | spec: 607 | containers: 608 | - image: nginx 609 | name: nginx 610 | nodeSelector: 611 | app: "yes" 612 | ``` 613 | 614 | **nginx/nginx-svc.yaml** 615 | 616 | ```yaml 617 | apiVersion: v1 618 | kind: Service 619 | metadata: 620 | labels: 621 | k8s-app: nginx 622 | name: nginx-svc 623 | namespace: default 624 | spec: 625 | ports: 626 | - port: 80 627 | protocol: TCP 628 | targetPort: 80 629 | nodePort: 31000 630 | selector: 631 | k8s-app: nginx 632 | type: NodePort 633 | ``` 634 | 635 | Deploy 와 Service 생성 파일을 이용해 어플리케이션을 배포합니다. 636 | 637 | ```bash 638 | kubectl create -f nginx/nginx-deploy.yaml 639 | 640 | deployment.apps/nginx created 641 | ``` 642 | 643 | ```bash 644 | kubectl get deploy 645 | 646 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 647 | nginx 1 1 1 0 12s 648 | ``` 649 | 650 | ```bash 651 | kubectl create -f nginx/nginx-svc.yaml 652 | 653 | service/nginx-svc created 654 | ``` 655 | 656 | ```bash 657 | kubectl get svc 658 | 659 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 660 | kubernetes ClusterIP 10.96.0.1 443/TCP 117m 661 | nginx-svc NodePort 10.109.129.149 80:31000/TCP 3s 662 | ``` 663 | 664 | Service 에서 **NodePort** 로 지정한 31000 번으로 접속하여 nginx 서비스가 배포된 것을 확인할 수 있습니다. 665 | 666 | ![nginx-web](./assets/nginx-web.png) 667 | 668 | 669 | 670 | ## Use namespace 671 | 672 | Kubernetes 에서 생성한 모든 객체는 기본적으로 **default** namespace 에 속하게 됩니다. 사용자 접근 제어 혹은 자원 관리를 namespace 단위로 하는 것이 권장합니다. 여기에서는 어플리케이션 단위로 namespace 를 생성해 사용해보도록 하겠습니다. 673 | 674 | ```bash 675 | kubectl create namespace redis 676 | 677 | namespace/redis created 678 | ``` 679 | 680 | ```bash 681 | kubectl get namespace 682 | NAME STATUS AGE 683 | default Active 98m 684 | kube-public Active 98m 685 | kube-system Active 98m 686 | redis Active 7s 687 | ``` 688 | 689 | 690 | 691 | # Setup Helm 692 | 693 | Helm 은 Kubernetes Package Manager 로서 어플리케이션을 구성하는 여러 컴포넌트를 패키지로 묶어서 쉽게 배포하고 관리할 수 있게 도움을 줍니다. 694 | 695 | ## Install helm 696 | 697 | ```bash 698 | curl https://raw.githubusercontent.com/helm/helm/master/scripts/get | bash 699 | 700 | % Total % Received % Xferd Average Speed Time Time Time Current 701 | Dload Upload Total Spent Left Speed 702 | 100 7236 100 7236 0 0 7452 0 --:--:-- --:--:-- --:--:-- 7444 703 | Downloading https://kubernetes-helm.storage.googleapis.com/helm-v2.11.0-linux-amd64.tar.gz 704 | Preparing to install helm and tiller into /usr/local/bin 705 | helm installed into /usr/local/bin/helm 706 | tiller installed into /usr/local/bin/tiller 707 | Run 'helm init' to configure helm. 708 | ``` 709 | 710 | 711 | 712 | ## Create service account 713 | 714 | helm 에서 사용할 Service Account 를 생성합니다. 
715 | 716 | **helm/rbac-config.yaml** 717 | 718 | ```yaml 719 | --- 720 | apiVersion: v1 721 | kind: ServiceAccount 722 | metadata: 723 | name: tiller 724 | namespace: kube-system 725 | 726 | --- 727 | apiVersion: rbac.authorization.k8s.io/v1 728 | kind: ClusterRoleBinding 729 | metadata: 730 | name: tiller 731 | roleRef: 732 | apiGroup: rbac.authorization.k8s.io 733 | kind: ClusterRole 734 | name: cluster-admin 735 | subjects: 736 | - kind: ServiceAccount 737 | name: tiller 738 | namespace: kube-system 739 | ``` 740 | 741 | ```bash 742 | kubectl apply -f helm/rbac-config.yaml 743 | 744 | serviceaccount/tiller created 745 | clusterrolebinding.rbac.authorization.k8s.io/tiller created 746 | ``` 747 | 748 | 749 | 750 | ## Update repository 751 | 752 | 설치한 helm 을 초기화하고 **stable** 패키지 리스트를 가져옵니다. 753 | 754 | ```bash 755 | helm init --service-account tiller --node-selectors "app"="yes" 756 | 757 | Creating /home/vagrant/.helm 758 | Creating /home/vagrant/.helm/repository 759 | Creating /home/vagrant/.helm/repository/cache 760 | Creating /home/vagrant/.helm/repository/local 761 | Creating /home/vagrant/.helm/plugins 762 | Creating /home/vagrant/.helm/starters 763 | Creating /home/vagrant/.helm/cache/archive 764 | Creating /home/vagrant/.helm/repository/repositories.yaml 765 | Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com 766 | Adding local repo with URL: http://127.0.0.1:8879/charts 767 | $HELM_HOME has been configured at /home/vagrant/.helm. 768 | 769 | Tiller (the Helm server-side component) has been installed into your Kubernetes Cluster. 770 | 771 | Please note: by default, Tiller is deployed with an insecure 'allow unauthenticated users' policy. 772 | To prevent this, run `helm init` with the --tiller-tls-verify flag. 773 | For more information on securing your installation see: https://docs.helm.sh/using_helm/#securing-your-helm-installation 774 | Happy Helming! 775 | ``` 776 | 777 | ```bash 778 | helm repo update 779 | 780 | Hang tight while we grab the latest from your chart repositories... 781 | ...Skip local chart repository 782 | ...Successfully got an update from the "stable" chart repository 783 | Update Complete. ⎈ Happy Helming!⎈ 784 | ``` 785 | 786 | 787 | 788 | # Setup Metallb 789 | 790 | ## NodePort, LoadBalancer 791 | 792 | Kubernetes 에서 외부에서 접속할 수 있도록 서비스를 열어두는 방법에는 두 가지가 있습니다. NodePort 는 30000 ~ 32767 (기본 범위) 포트를 내부 서비스와 연결하여 외부에서 접속을 가능하도록 허용해줍니다. LoadBalancer 는 서비스에 접근할 수 있는 특정 IP 를 할당하여 외부에서 할당된 IP 를 통해 내부 서비스에 접근할 수 있습니다. Cloud Platform 에서 Kubernetes 클러스터를 생성하거나 Managed Kubernetes 를 이용할 경우에는 각 Cloud Platform 에서 제공하는 LoadBalancer 를 활용할 수 있습니다. 793 | 794 | 795 | 796 | ## Metallb 797 | 798 | Kubernetes 를 Bare metal 클러스터로 구축한 경우에는 Metallb 를 이용해 LoadBalancer 로 사용할 수 있습니다. 799 | 800 | Metallb 에서는 Layer 2 mode 와 BGP mode 를 통해 서비스 IP 를 부여하는 방법을 제공하는데, 여기에서는 Layer 2 mode 를 이용하여 외부 IP 를 부여하도록 하겠습니다. 
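아래 설치와 설정을 마치고 나면, `type: LoadBalancer` 로 선언한 Service 는 지정해둔 대역에서 외부 IP 를 하나 할당받습니다. 다음은 동작 방식을 보여주기 위한 간단한 예시이며, `echo-lb` 라는 Service 이름과 `k8s-app: echo` selector 는 설명을 위해 임의로 정한 값입니다.

```bash
# 가정: 아래 단계에서 설정할 10.254.1.150-250 대역이 MetalLB address pool 로 등록된 상태
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: echo-lb
spec:
  type: LoadBalancer
  ports:
  - port: 80
    targetPort: 8080
  selector:
    k8s-app: echo
EOF

# EXTERNAL-IP 컬럼에 address pool 에서 할당된 주소가 표시됩니다.
kubectl get svc echo-lb
```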
801 | 802 | 803 | 804 | ## Install Metallb 805 | 806 | ```bash 807 | kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.7.3/manifests/metallb.yaml 808 | 809 | namespace/metallb-system created 810 | serviceaccount/controller created 811 | serviceaccount/speaker created 812 | clusterrole.rbac.authorization.k8s.io/metallb-system:controller created 813 | clusterrole.rbac.authorization.k8s.io/metallb-system:speaker created 814 | role.rbac.authorization.k8s.io/config-watcher created 815 | clusterrolebinding.rbac.authorization.k8s.io/metallb-system:controller created 816 | clusterrolebinding.rbac.authorization.k8s.io/metallb-system:speaker created 817 | rolebinding.rbac.authorization.k8s.io/config-watcher created 818 | daemonset.apps/speaker created 819 | deployment.apps/controller created 820 | ``` 821 | 822 | 823 | 824 | ## Configure LoadBalancer IP range 825 | 826 | LoadBalancer 로 사용할 IP 대역을 설정 파일 안에 기술하여 지정할 수 있습니다. 여기에서는 10.254.1.150 ~ 10.254.1.250 을 외부에서 접속할 때 사용할 IP 대역으로 할당하였습니다. 827 | 828 | **metallb/layer2-config.yaml** 829 | 830 | ```yaml 831 | apiVersion: v1 832 | kind: ConfigMap 833 | metadata: 834 | namespace: metallb-system 835 | name: config 836 | data: 837 | config: | 838 | address-pools: 839 | - name: default 840 | protocol: layer2 841 | addresses: 842 | - 10.254.1.150-10.254.1.250 843 | ``` 844 | 845 | ```bash 846 | kubectl create -f metallb/layer2-config.yaml 847 | 848 | configmap/config created 849 | ``` 850 | 851 | 852 | 853 | ## Access Dashboard using LoadBalancer 854 | 855 | 앞에서는 Dashboard 를 NodePort 로 외부에 개방했는데, LoadBalancer 를 이용해 미리 지정한 외부에서 접속 가능하도록 변경해봅니다. 856 | 857 | ```bash 858 | kubectl edit svc kubernetes-dashboard -n kube-system 859 | ``` 860 | 861 | ```yaml 862 | ... 863 | spec: 864 | clusterIP: 10.101.69.172 865 | externalTrafficPolicy: Cluster 866 | ports: 867 | - nodePort: 30000 868 | port: 443 869 | protocol: TCP 870 | targetPort: 8443 871 | selector: 872 | k8s-app: kubernetes-dashboard 873 | sessionAffinity: None 874 | type: LoadBalancer 875 | ... 876 | ``` 877 | 878 | IP 가 제대로 할당되었는지 확인합니다. 879 | 880 | ```bash 881 | kubectl get svc -n kube-system 882 | 883 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 884 | kubernetes-dashboard LoadBalancer 10.101.69.172 10.254.1.150 443:30000/TCP 14h 885 | ``` 886 | 887 | ![dashboard-lb](./assets/dashboard-lb.png) 888 | 889 | 890 | 891 | # Setup Redis with Helm 892 | 893 | ## Create storage directory 894 | 895 | Redis 에서 사용할 디렉토리를 배포할 노드에 미리 생성해두고 권한을 조정합니다. 896 | 897 | **@k8s-1** 898 | 899 | ```bash 900 | sudo mkdir /media/data/redis 901 | sudo chmod 777 /media/data/redis 902 | ``` 903 | 904 | **@k8s-2** 905 | 906 | ```bash 907 | sudo mkdir /media/data/redis 908 | sudo chmod 777 /media/data/redis 909 | ``` 910 | 911 | **@k8s-3** 912 | 913 | ```bash 914 | sudo mkdir /media/data/redis 915 | sudo chmod 777 /media/data/redis 916 | ``` 917 | 918 | 919 | 920 | ## Deploy redis-ha 921 | 922 | Redis 를 위해 Namespace 를 생성하고 **helm** 을 이용해 서비스를 배포합니다. 실행할 Redis 컨테이너는 **app=yes** label 을 가지고 있는 노드에 배포되며, **PersistentVolume** 으로 5Gi 공간을 요청합니다. 
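`--set` 으로 조정할 수 있는 항목은 차트가 제공하는 기본 값을 먼저 살펴보면 확인할 수 있습니다. 아래는 확인용 예시이며, 출력되는 항목은 차트 버전에 따라 다를 수 있습니다.

```bash
# stable/redis-ha 차트에서 설정 가능한 값(nodeSelector, persistentVolume.size 등)을 확인합니다.
helm inspect values stable/redis-ha
```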
923 | 924 | ```bash 925 | kubectl create namespace redis 926 | 927 | namespace/redis created 928 | ``` 929 | 930 | ```bash 931 | helm install --set "nodeSelector.app"="yes","persistentVolume.size"="5Gi" -n redis-k8s --namespace redis stable/redis-ha 932 | 933 | NAME: redis-k8s 934 | LAST DEPLOYED: Sun Nov 18 03:11:14 2018 935 | NAMESPACE: redis 936 | STATUS: DEPLOYED 937 | 938 | RESOURCES: 939 | ==> v1/Pod(related) 940 | NAME READY STATUS RESTARTS AGE 941 | redis-k8s-redis-ha-server-0 0/2 Pending 0 0s 942 | 943 | ==> v1/ConfigMap 944 | 945 | NAME AGE 946 | redis-k8s-redis-ha-configmap 0s 947 | 948 | ==> v1/Service 949 | redis-k8s-redis-ha 0s 950 | 951 | ==> v1/StatefulSet 952 | redis-k8s-redis-ha-server 0s 953 | 954 | 955 | NOTES: 956 | Redis cluster can be accessed via port 6379 on the following DNS name from within your cluster: 957 | redis-k8s-redis-ha.redis.svc.cluster.local 958 | 959 | To connect to your Redis server: 960 | 1. Run a Redis pod that you can use as a client: 961 | 962 | kubectl exec -it redis-k8s-redis-ha-server-0 sh -n redis 963 | 964 | 2. Connect using the Redis CLI: 965 | 966 | redis-cli -h redis-k8s-redis-ha.redis.svc.cluster.local 967 | ``` 968 | 969 | 서비스를 배포하는 데 필요한 PersistentVolume 을 생성하지 않았기 때문에, **Pending** 상태에 머물러 있습니다. 970 | 971 | ```bash 972 | kubectl get pods -n redis 973 | 974 | NAME READY STATUS RESTARTS AGE 975 | redis-k8s-redis-ha-server-0 0/2 Pending 0 6m59s 976 | ``` 977 | 978 | 노드에 데이터를 저장할 공간을 미리 초기화를 진행했던 별도 디스크로 지정합니다. 979 | 980 | **redis/redis-storage-pv.yaml** 981 | 982 | ```yaml 983 | kind: PersistentVolume 984 | apiVersion: v1 985 | metadata: 986 | name: redis-pv-1 987 | labels: 988 | type: local 989 | spec: 990 | capacity: 991 | storage: 5Gi 992 | accessModes: 993 | - ReadWriteOnce 994 | hostPath: 995 | path: "/media/data/redis" 996 | ``` 997 | 998 | redis-ha 에서 기본적으로 실행시키는 컨테이너는 3 입니다. 그러므로 세 개의 PersistentVolume 을 생성합니다. 999 | 1000 | ```bash 1001 | kubectl create -f redis/redis-storage-pv.yaml 1002 | 1003 | persistentvolume/redis-pv-1 created 1004 | persistentvolume/redis-pv-2 created 1005 | persistentvolume/redis-pv-3 created 1006 | ``` 1007 | 1008 | PersistentVolume 을 생성하면 PersistentVolumeClaim 과 연결하여 Pod 을 생성하기 시작합니다. 1009 | 1010 | ```bash 1011 | kubectl get pods -n redis 1012 | 1013 | NAME READY STATUS RESTARTS AGE 1014 | redis-k8s-redis-ha-server-0 2/2 Running 0 12m 1015 | redis-k8s-redis-ha-server-1 2/2 Running 0 10m 1016 | redis-k8s-redis-ha-server-2 2/2 Running 0 10m 1017 | ``` 1018 | 1019 | Pod 배포가 마무리되면 외부에서 Redis 에 접속할 수 있도록 설정합니다. 1020 | 1021 | **redis/redis-svc.yaml** 1022 | 1023 | ```yaml 1024 | apiVersion: v1 1025 | kind: Service 1026 | metadata: 1027 | labels: 1028 | app: redis-ha 1029 | chart: redis-ha-3.0.1 1030 | heritage: Tiller 1031 | release: redis-k8s 1032 | name: redis-k8s-redis-ha-svc 1033 | namespace: redis 1034 | spec: 1035 | ports: 1036 | - name: server 1037 | port: 6379 1038 | protocol: TCP 1039 | targetPort: redis 1040 | - name: sentinel 1041 | port: 26379 1042 | protocol: TCP 1043 | targetPort: sentinel 1044 | selector: 1045 | app: redis-ha 1046 | release: redis-k8s 1047 | type: LoadBalancer 1048 | ``` 1049 | 1050 | Redis 에 LoadBalancer 를 설정한 후에 외부에서 접속하여 값을 기록하고 읽어봅니다. 
1051 | 1052 | ```bash 1053 | kubectl create -f redis/redis-svc.yaml 1054 | 1055 | service/redis-k8s-redis-ha-svc created 1056 | ``` 1057 | 1058 | ```bash 1059 | kubectl get svc -n redis 1060 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 1061 | redis-k8s-redis-ha ClusterIP None 6379/TCP,26379/TCP 11h 1062 | redis-k8s-redis-ha-svc LoadBalancer 10.110.125.91 10.254.1.151 6379:32696/TCP,26379:30121/TCP 10s 1063 | ``` 1064 | 1065 | ```bash 1066 | redis-cli -h 10.254.1.151 1067 | 1068 | 10.254.1.151:6379> SET cluster.name "kubernetes" 1069 | OK 1070 | 10.254.1.151:6379> GET cluster.name 1071 | "kubernetes" 1072 | ``` 1073 | 1074 | 1075 | 1076 | # Setup Ceph with Rook 1077 | 1078 | ## Ceph and Rook 1079 | 1080 | Kubernetes 위에서 동작하는 어플리케이션이 저장 공간을 필요로 할 경우에는 Persistent Volume 을 생성하여 연결해주어야 합니다. 여기에서는 **Ceph** 분산 파일 시스템을 이용하여 실행된 노드에 관계없이 원하는 저장 공간을 생성하고 연결하는 데 활용합니다. 1081 | 1082 | Ceph 클러스터를 직접 구축하고 설정하는 것은 쉽지 않은 일이지만, **Rook** 을 이용해 상대적으로 쉽고 편리하게 구축할 수 있습니다. 아래는 Rook 프로젝트 홈페이지에서 가져온 Rook 소개글입니다. 1083 | 1084 | > Rook is an open source **cloud-native storage orchestrator**, 1085 | > providing the platform, framework, and support for a diverse set of 1086 | > storage solutions to natively integrate with cloud-native environments. 1087 | 1088 | Rook 을 이용해 클러스터를 생성할 때 사용하는 설정 파일은 https://github.com/rook/rook/tree/release-0.8/cluster/examples/kubernetes/ceph 경로에 있는 것을 사용합니다. 1089 | 1090 | 1091 | 1092 | ## Initialize Ceph Cluster 1093 | 1094 | VM 노드 3대가 가지고 있는 디스크 중에 아무런 작업을 하지 않은 디스크(**/dev/sdd**)를 Ceph 에 할당하여 클러스터를 구성합니다. Ceph 에서 사용하는 파일 시스템 중에 **BlueStore** 는 직접 파티션을 생성하고 관리하기 때문에, 노드에 장착되어 있는 빈 디스크를 직접 지정합니다. 미리 작성해둔 Vagrantfile 에서는 **/dev/sdd** 장치에 아무런 파티션 작업도 하지 않은 디스크를 연결해두었습니다. 1095 | 1096 | **Rook Operator** 를 통해 Ceph 클러스터를 생성할 때 필요한 몇 가지 설정을 rook/operator.yaml 파일에 기록합니다. 1097 | 1098 | **rook/operator.yaml** 1099 | 1100 | ```yaml 1101 | ... 1102 | spec: 1103 | serviceAccountName: rook-ceph-system 1104 | containers: 1105 | - name: rook-ceph-operator 1106 | image: rook/ceph:v0.8.3 1107 | ... 1108 | - name: ROOK_ALLOW_MULTIPLE_FILESYSTEMS 1109 | value: "true" 1110 | ... 1111 | - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED 1112 | value: "true" 1113 | ... 1114 | ``` 1115 | 1116 | **rook/ceph** 컨테이너 버전을 현재 Stable 상태인 **v0.8.3** 으로 지정합니다. 그리고 Shared File System 을 두 개 이상 만들어서 사용하려면 **ROOK_ALLOW_MULTIPLE_FILESYSTEMS** 옵션을 "true" 로 지정합니다. 1117 | 1118 | **BlueStore** 를 사용하려면 컨테이너에서 직접 파일 시스템을 생성할 수 있어야 하기 때문에 **ROOK_HOSTPATH_REQUIRES_PRIVILEGED** 옵션에 **"true"** 를 지정합니다. 1119 | 1120 | **operator.yaml** 파일을 수정하고 **Rook operator** 를 배포합니다. 
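클러스터를 배포하기 전에, Ceph 에 할당할 **/dev/sdd** 디스크가 각 노드에서 파티션 없이 비어 있는지 확인해두면 좋습니다. **BlueStore** 는 빈 디스크를 직접 사용하기 때문에, 이미 파티션이나 파일 시스템이 있는 디스크는 OSD 로 사용되지 않을 수 있습니다. 아래는 호스트에서 실행하는 확인용 예시입니다.

```bash
# 각 노드의 /dev/sdd 에 파티션이 없는지 확인합니다.
for node in k8s-1 k8s-2 k8s-3; do
  vagrant ssh $node -c "lsblk /dev/sdd"
done
```

확인이 끝났으면 위에서 수정한 **operator.yaml** 을 이용해 **Rook operator** 를 배포합니다.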
1121 | 1122 | ```bash 1123 | kubectl create -f rook/operator.yaml 1124 | 1125 | namespace/rook-ceph-system created 1126 | customresourcedefinition.apiextensions.k8s.io/clusters.ceph.rook.io created 1127 | customresourcedefinition.apiextensions.k8s.io/filesystems.ceph.rook.io created 1128 | customresourcedefinition.apiextensions.k8s.io/objectstores.ceph.rook.io created 1129 | customresourcedefinition.apiextensions.k8s.io/pools.ceph.rook.io created 1130 | customresourcedefinition.apiextensions.k8s.io/volumes.rook.io created 1131 | clusterrole.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created 1132 | role.rbac.authorization.k8s.io/rook-ceph-system created 1133 | clusterrole.rbac.authorization.k8s.io/rook-ceph-global created 1134 | serviceaccount/rook-ceph-system created 1135 | rolebinding.rbac.authorization.k8s.io/rook-ceph-system created 1136 | clusterrolebinding.rbac.authorization.k8s.io/rook-ceph-global created 1137 | deployment.apps/rook-ceph-operator created 1138 | ``` 1139 | 1140 | **cluster.yaml** 파일에 Ceph 에서 사용할 디스크 장치명을 나열합니다. 1141 | 1142 | **rook/cluster.yaml** 1143 | 1144 | ```yaml 1145 | ... 1146 | storage: 1147 | useAllNodes: false 1148 | useAllDevices: false 1149 | deviceFilter: 1150 | location: 1151 | config: 1152 | storeType: bluestore 1153 | databaseSizeMB: "1024" 1154 | journalSizeMB: "1024" 1155 | nodes: 1156 | - name: "k8s-1" 1157 | devices: 1158 | - name: "sdd" 1159 | - name: "k8s-2" 1160 | devices: 1161 | - name: "sdd" 1162 | - name: "k8s-3" 1163 | devices: 1164 | - name: "sdd" 1165 | ``` 1166 | 1167 | 각 노드에서 사용할 디스크 장치명을 추가한 후 Ceph 클러스터를 생성합니다. 1168 | 1169 | ```bash 1170 | kubectl create -f rook/cluster.yaml 1171 | 1172 | namespace/rook-ceph created 1173 | serviceaccount/rook-ceph-cluster created 1174 | role.rbac.authorization.k8s.io/rook-ceph-cluster created 1175 | rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster-mgmt created 1176 | rolebinding.rbac.authorization.k8s.io/rook-ceph-cluster created 1177 | cluster.ceph.rook.io/rook-ceph created 1178 | ``` 1179 | 1180 | 만약에 Rook 으로 Ceph 클러스터를 한번 생성한 이후에 삭제하고 다시 생성하려면 **/var/lib/rook/osd-\***, **/var/lib/rook/mon-\*** 디렉토리를 모두 지운 이후에 위 명령을 다시 실행합니다. 1181 | 1182 | 디스크 파티션 작업을 마무리하고 모두 완료되면 **rook-ceph** namespace 에서 배포된 Ceph 구성요소를 확인할 수 있습니다. 1183 | 1184 | ```bash 1185 | kubectl get deploy -n rook-ceph 1186 | 1187 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 1188 | rook-ceph-mgr-a 1 1 1 1 112s 1189 | rook-ceph-mon-a 1 1 1 1 2m31s 1190 | rook-ceph-mon-b 1 1 1 1 2m15s 1191 | rook-ceph-mon-c 1 1 1 1 2m5s 1192 | rook-ceph-osd-0 1 1 1 1 95s 1193 | rook-ceph-osd-1 1 1 1 1 94s 1194 | rook-ceph-osd-2 1 1 1 1 93s 1195 | ``` 1196 | 1197 | Ceph Dashboard 를 외부에서 접속할 수 있게 해주는 Service 객체를 생성합니다. 1198 | 1199 | **rook/dashboard-external-http.yaml** 1200 | 1201 | ```yaml 1202 | apiVersion: v1 1203 | kind: Service 1204 | metadata: 1205 | name: rook-ceph-mgr-dashboard-external-http 1206 | namespace: rook-ceph 1207 | labels: 1208 | app: rook-ceph-mgr 1209 | rook_cluster: rook-ceph 1210 | spec: 1211 | ports: 1212 | - name: dashboard 1213 | port: 7000 1214 | protocol: TCP 1215 | targetPort: 7000 1216 | selector: 1217 | app: rook-ceph-mgr 1218 | rook_cluster: rook-ceph 1219 | sessionAffinity: None 1220 | type: LoadBalancer 1221 | ``` 1222 | 1223 | LoadBalancer 를 이용해 외부 서비스용 IP 를 Dashboard 에 부여합니다. 1224 | 1225 | ```bash 1226 | kubectl create -f rook/dashboard-external-http.yaml 1227 | 1228 | service/rook-ceph-mgr-dashboard-external-http created 1229 | ``` 1230 | 1231 | 생성한 Service 객체를 확인합니다. 
1232 | 1233 | ```bash 1234 | kubectl get svc -n rook-ceph 1235 | 1236 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 1237 | rook-ceph-mgr ClusterIP 10.107.101.123 9283/TCP 17m 1238 | rook-ceph-mgr-dashboard ClusterIP 10.111.254.202 7000/TCP 17m 1239 | rook-ceph-mgr-dashboard-external-http LoadBalancer 10.111.106.222 10.254.1.152 7000:32346/TCP 11m 1240 | rook-ceph-mon-a ClusterIP 10.110.197.249 6790/TCP 18m 1241 | rook-ceph-mon-b ClusterIP 10.96.137.141 6790/TCP 18m 1242 | rook-ceph-mon-c ClusterIP 10.107.126.92 6790/TCP 17m 1243 | ``` 1244 | 1245 | 1246 | 1247 | ## Create Shared File System 1248 | 1249 | Kubernetes 에 배포할 어플리케이션이 사용할 파일 시스템을 생성합니다. Shared File System 은 여러 Pod 에서 동시에 접근이 가능합니다. 1250 | 1251 | **rook/filesystem.yaml** 1252 | 1253 | ```yaml 1254 | apiVersion: ceph.rook.io/v1beta1 1255 | kind: Filesystem 1256 | metadata: 1257 | name: k8s-fs 1258 | namespace: rook-ceph 1259 | spec: 1260 | metadataPool: 1261 | replicated: 1262 | size: 2 1263 | dataPools: 1264 | - failureDomain: osd 1265 | replicated: 1266 | size: 2 1267 | metadataServer: 1268 | activeCount: 1 1269 | activeStandby: true 1270 | placement: 1271 | resources: 1272 | ``` 1273 | 1274 | k8s-fs 이름을 가진 File System 을 생성합니다. 1275 | 1276 | ```bash 1277 | kubectl create -f rook/filesystem.yaml 1278 | 1279 | filesystem.ceph.rook.io/k8s-fs created 1280 | ``` 1281 | 1282 | 생성한 File System 을 Ceph Dashboard 화면에서 확인할 수 있습니다. 1283 | 1284 | ![ceph-dashboard-fs](./assets/ceph-dashboard-fs.png) 1285 | 1286 | 1287 | 1288 | ## Create Block Storage 1289 | 1290 | Block storage 를 사용하기 위해 **StorageClass** 를 등록합니다. StorageClass 는 Kubernetes 가 Rook 을 통해 PersistentVolume 을 생성할 때 사용합니다. 1291 | 1292 | **rook/storageclass.yaml** 1293 | 1294 | ```yaml 1295 | apiVersion: ceph.rook.io/v1beta1 1296 | kind: Pool 1297 | metadata: 1298 | name: replicapool 1299 | namespace: rook-ceph 1300 | spec: 1301 | replicated: 1302 | size: 2 1303 | --- 1304 | apiVersion: storage.k8s.io/v1 1305 | kind: StorageClass 1306 | metadata: 1307 | name: rook-ceph-block 1308 | provisioner: ceph.rook.io/block 1309 | parameters: 1310 | pool: replicapool 1311 | clusterNamespace: rook-ceph 1312 | fstype: xfs 1313 | ``` 1314 | 1315 | 위에서 지정한 Replicapool 은 두 개의 복제본을 유지합니다. 1316 | 1317 | ```bash 1318 | kubectl create -f rook/storageclass.yaml 1319 | 1320 | pool.ceph.rook.io/replicapool created 1321 | storageclass.storage.k8s.io/rook-ceph-block created 1322 | ``` 1323 | 1324 | Ceph Dashboard 에서 생성한 ReplicaPool 을 확인할 수 있습니다. 1325 | 1326 | ![ceph-replicapool](./assets/ceph-replicapool.png) 1327 | 1328 | 1329 | 1330 | # Deploy Application with Ceph 1331 | 1332 | ## Deploy Minio 1333 | 1334 | Minio 어플리케이션을 Shared File System 과 함께 배포해보겠습니다. 1335 | 1336 | 배포하기 전에 File System 안에 사용할 디렉토리를 먼저 만드는 것이 필요합니다. 여기에서는 간단하게 nginx 컨테이너 내부 /tmp/fs 디렉토리에 Share File System 을 붙인 후에 디렉토리를 생성합니다. 1337 | 1338 | ```bash 1339 | kubectl create -f rook/nginx-fs-deploy.yaml 1340 | 1341 | deployment.apps/nginx-fs created 1342 | ``` 1343 | 1344 | 실행된 Pod 이름을 확인합니다. 1345 | 1346 | ```bash 1347 | kubectl get pod 1348 | 1349 | NAME READY STATUS RESTARTS AGE 1350 | nginx-fs-5bfc8dbf5f-5ggz8 1/1 Running 0 77s 1351 | ``` 1352 | 1353 | **kubectl exec** 명령을 이용해 앞에서 실행한 Pod 에 접속합니다. 
**minio/minio-deploy.yaml**

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: minio
spec:
  template:
    metadata:
      labels:
        k8s-app: minio
    spec:
      containers:
      - name: minio
        volumeMounts:
        - name: minio-store
          mountPath: "/data"
        image: minio/minio:RELEASE.2018-11-17T01-23-48Z
        args:
        - server
        - /data
        env:
        - name: MINIO_ACCESS_KEY
          value: "minio"
        - name: MINIO_SECRET_KEY
          value: "minio123"
        ports:
        - containerPort: 9000
      volumes:
      - name: minio-store
        flexVolume:
          driver: ceph.rook.io/rook
          fsType: ceph
          options:
            fsName: k8s-fs
            clusterNamespace: rook-ceph
            path: /minio
```

Deploy Minio to the cluster.

```bash
kubectl create -f minio/minio-deploy.yaml

deployment.extensions/minio created
```

```bash
kubectl create -f minio/minio-svc.yaml

service/minio-svc created
```

Let's store a file in the Minio storage we just deployed.

![minio](./assets/minio.png)

## Deploy MySQL

Let's deploy the MySQL application with block storage.

First, create a PersistentVolumeClaim that refers to the StorageClass created earlier.

**mysql/mysql-pvc.yaml**

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pvc
  labels:
    k8s-app: mysql
spec:
  storageClassName: rook-ceph-block
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
```

```bash
kubectl create -f mysql/mysql-pvc.yaml

persistentvolumeclaim/mysql-pvc created
```

When deploying MySQL, attach the mysql-pvc claim created above to the container.

**mysql/mysql-deploy.yaml**

```yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: mysql
  labels:
    k8s-app: mysql
spec:
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        k8s-app: mysql
    spec:
      containers:
      - image: mysql:5.7
        name: mysql
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: changeme
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pvc
```

```bash
kubectl create -f mysql/mysql-deploy.yaml

deployment.apps/mysql created
```

```bash
kubectl create -f mysql/mysql-svc.yaml

service/mysql created
```

Connect to the MySQL server you created and check that it is working properly.
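Before connecting, it can be useful to confirm that mysql-pvc was actually bound to a volume provisioned through the rook-ceph-block StorageClass. A minimal check; the volume name and age in your cluster will differ:

```bash
# STATUS should be Bound and STORAGECLASS should be rook-ceph-block.
kubectl get pvc mysql-pvc
```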
```bash
kubectl get svc

NAME         TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)          AGE
kubernetes   ClusterIP      10.96.0.1        <none>         443/TCP          4h21m
minio-svc    LoadBalancer   10.101.22.31     10.254.1.153   9000:32719/TCP   3m46s
mysql        LoadBalancer   10.99.254.138    10.254.1.154   3306:31821/TCP   9s
nginx-svc    NodePort       10.101.189.208   <none>         80:31000/TCP     45m
```

Check the external IP address and connect with the mysql client.

```bash
mysql -uroot -p -h 10.254.1.154

Enter password:
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 3
Server version: 5.7.24 MySQL Community Server (GPL)

Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> show databases;
+--------------------+
| Database           |
+--------------------+
| information_schema |
| mysql              |
| performance_schema |
| sys                |
+--------------------+
4 rows in set (0.00 sec)
```

## Deploy Ghost

Create the directory that the Ghost application will use on the shared file system in advance.

```bash
kubectl exec -it nginx-fs-5bfc8dbf5f-5ggz8 /bin/bash

root@nginx-fs-5bfc8dbf5f-5ggz8:/# cd /tmp/fs
root@nginx-fs-5bfc8dbf5f-5ggz8:/tmp/fs# mkdir ghost
root@nginx-fs-5bfc8dbf5f-5ggz8:/tmp/fs# ls -al
total 4
drwxr-xr-x 1 root root    2 Nov 18 15:35 .
drwxrwxrwt 1 root root 4096 Nov 18 15:05 ..
drwxr-xr-x 1 root root    0 Nov 18 15:35 ghost
drwxr-xr-x 1 root root    2 Nov 18 15:15 minio
```

Connect to MySQL and create the database Ghost will use.

```bash
mysql -uroot -p -h 10.254.1.154

Enter password:
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 4
Server version: 5.7.24 MySQL Community Server (GPL)

Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> create database ghost;
Query OK, 1 row affected (0.02 sec)
```

After the PersistentVolume and the database have been created, deploy the Ghost application.
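The manifest below hands Ghost the MySQL root account to keep the exercise simple. If you would rather give Ghost its own account, a dedicated user can be created first; a sketch only, where ghostuser and ghost-password are placeholders and database__connection__user / database__connection__password in the deployment would need to be changed to match:

```bash
# Placeholder credentials; adjust to your environment before using them in the manifest.
mysql -uroot -p -h 10.254.1.154 \
  -e "CREATE USER 'ghostuser'@'%' IDENTIFIED BY 'ghost-password';
      GRANT ALL PRIVILEGES ON ghost.* TO 'ghostuser'@'%';
      FLUSH PRIVILEGES;"
```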
**ghost/ghost-deploy.yaml**

```yaml
apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: ghost
spec:
  template:
    metadata:
      labels:
        k8s-app: ghost
    spec:
      containers:
      - name: ghost
        volumeMounts:
        - name: ghost-volume
          mountPath: "/var/lib/ghost/content"
        image: ghost:2
        env:
        - name: database__client
          value: "mysql"
        - name: database__connection__host
          value: "mysql"
        - name: database__connection__user
          value: "root"
        - name: database__connection__database
          value: "ghost"
        - name: database__connection__password
          value: "changeme"
        ports:
        - containerPort: 2368
      volumes:
      - name: ghost-volume
        flexVolume:
          driver: ceph.rook.io/rook
          fsType: ceph
          options:
            fsName: k8s-fs
            clusterNamespace: rook-ceph
            path: /ghost
```

Deploy the Ghost application.

```bash
kubectl create -f ghost/ghost-deploy.yaml

deployment.apps/ghost created
```

Create a Service so that it can be reached from outside.

```bash
kubectl create -f ghost/ghost-svc.yaml

service/ghost-svc created
```

Confirm that the Ghost application has been deployed.

```bash
kubectl get deploy ghost

NAME    DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
ghost   1         1         1            1           6m9s
```

After the deployment completes, open the LoadBalancer IP in a browser to check it.

![ghost](./assets/ghost.png)

Check that the Ghost tables were created correctly in MySQL.

```bash
mysql -uroot -p -h 10.254.1.154

Enter password:
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 350
Server version: 5.7.24 MySQL Community Server (GPL)

Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> use ghost;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A

Database changed
mysql> show tables;
+------------------------+
| Tables_in_ghost        |
+------------------------+
| accesstokens           |
| api_keys               |
| app_fields             |
| app_settings           |
| apps                   |
| brute                  |
| client_trusted_domains |
| clients                |
| integrations           |
| invites                |
| migrations             |
| migrations_lock        |
| mobiledoc_revisions    |
| permissions            |
| permissions_apps       |
| permissions_roles      |
| permissions_users      |
| posts                  |
| posts_authors          |
| posts_tags             |
| refreshtokens          |
| roles                  |
| roles_users            |
| sessions               |
| settings               |
| subscribers            |
| tags                   |
| users                  |
| webhooks               |
+------------------------+
29 rows in set (0.00 sec)
```
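When you are done experimenting, the demo workloads can be removed with the same manifests; a sketch of the teardown, which leaves the Rook cluster itself and the data on the Ceph disks in place:

```bash
# Remove the demo applications in roughly the reverse order of creation.
kubectl delete -f ghost/ghost-svc.yaml -f ghost/ghost-deploy.yaml
kubectl delete -f mysql/mysql-svc.yaml -f mysql/mysql-deploy.yaml -f mysql/mysql-pvc.yaml
kubectl delete -f minio/minio-svc.yaml -f minio/minio-deploy.yaml
kubectl delete -f rook/nginx-fs-deploy.yaml
```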
# Summary

When you build and run a **Kubernetes** cluster in an IDC or on a local network, you can put together a **Cloud Native** environment with tools such as **Rook**, **Ceph**, and **Metallb**. With **Rook** and **Ceph** you can overcome the limitation of having to create and use **Persistent Volumes** that are tied to a specific node. Finally, **Metallb** helps you expose the services deployed in the Kubernetes cluster to the outside world.

If anything in this hands-on material is missing or could be improved, please let me know by email. Thank you.

임찬식 (chanshik@gmail.com)