├── .gitignore ├── 2.study ├── demo1 │ ├── password.txt │ ├── local-volume.yaml │ ├── wordpress-autoscaling.yaml │ ├── wordpress.yaml │ ├── mysql.yaml │ └── README.txt ├── 20.namespace │ └── 1.namespace.yaml ├── 21.role │ ├── 3.serviceaccount-user.yaml │ ├── 1.serviceaccount-admin.yaml │ ├── 6.team-namespace.yaml │ ├── 7.team-serviceaccount.yaml │ ├── 2.admin-rbac.yaml │ ├── kubeconfig.conf │ ├── 4.user-rbac.yaml │ ├── 5.user-rbac2.yaml │ ├── 8.team-role.yaml │ └── README.txt ├── demo2 │ ├── mongo-express-configmap.yaml │ ├── mongo-service.yaml │ ├── mongo-secret.yaml │ ├── mongo-express-service.yaml │ ├── mongo.yaml │ ├── mongo-express.yaml │ └── README.txt ├── 22.network_policy │ ├── README │ ├── 2.pod_tomcat.yaml │ ├── 1.pod_mysql.yaml │ ├── 3.networkPolicy.yaml │ └── 4.networkPolicy2.yaml ├── 12.secret │ ├── 1.user-pass-basic.yaml │ ├── 2.user-pass-opaque.yaml │ ├── 3.user-pass-ssh.yaml │ ├── 4.load-secret1.yaml │ ├── 5.load-secret2.yaml │ └── README.txt ├── 06.statefulset │ ├── README.txt │ └── 1.mysql_sts.yaml ├── 01.pod │ ├── 1.nginx_pod.yaml │ ├── 4.mysql_svc.yaml │ ├── 2.tomcat_pod.yaml │ ├── 5.nginx_svc.yaml │ ├── 3.mysql_pod.yaml │ ├── 6.nginx_tomcat.yaml │ └── README.txt ├── 10.volume │ ├── README.txt │ ├── 2.pvc.yaml │ ├── 3.deployment.yaml │ └── 1.pv.yaml ├── demo3 │ ├── myapp-service.yaml │ ├── myapp-red.yaml │ ├── myapp-green.yaml │ ├── myapp-orange.yaml │ └── README.txt ├── 03.svc │ ├── 3.ext_svc.yaml │ ├── 1.mysql_svc.yaml │ ├── 2.tomcat_svc.yaml │ └── README.txt ├── 11.configmap │ ├── 1.configmap.yaml │ ├── README.txt │ ├── 3.deploy_envFrom.yaml │ ├── 2.deploy_env.yaml │ └── 4.deploy_volume.yaml ├── 08.job │ ├── 1.job.yaml │ ├── 2.job_fail.yaml │ ├── 3.cronjob.yaml │ └── README.txt ├── 04.deployment │ ├── README.txt │ ├── 2.expressapp.yaml │ └── 1.nginx_deploy.yaml ├── 02.replicaset │ ├── 2.replica.yaml │ ├── 1.pods.yaml │ └── README.txt ├── 05.ingress │ ├── README.txt │ ├── 2.ingress-dashboard.yaml │ └── 1.ingress.yaml ├── 07.daemonset │ ├── 
README.txt │ ├── 1.fluentd_ds.yaml │ └── 2.fluentd_ds_update.yaml └── 30.helm │ └── README.md ├── 10.devel ├── neo4j │ ├── neo4j-sa.yaml │ ├── neo4j-role.yaml │ ├── README.txt │ └── get_all.py └── k8s │ ├── node │ ├── client.js │ ├── package.json │ └── package-lock.json │ ├── python │ ├── client.py │ ├── client2.py │ └── client3.py │ └── README.txt ├── 3.real-egs ├── metallb │ ├── namespace.yaml │ ├── metallb_myconf.yaml │ └── metallb.yaml ├── prometheus │ ├── volumeF.yaml │ └── README ├── metrics │ ├── README │ └── components.yaml └── networking │ ├── README │ └── kube-flannel.yml ├── 1.setup └── setup │ └── README.txt └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /2.study/demo1/password.txt: -------------------------------------------------------------------------------- 1 | my-password -------------------------------------------------------------------------------- /2.study/20.namespace/1.namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: my-space 5 | -------------------------------------------------------------------------------- /10.devel/neo4j/neo4j-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: neo4j-sa 5 | namespace: default 6 | 7 | -------------------------------------------------------------------------------- /3.real-egs/metallb/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: metallb-system 5 | labels: 6 | app: metallb 7 | -------------------------------------------------------------------------------- /2.study/21.role/3.serviceaccount-user.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: readonly-user 5 | namespace: default 6 | -------------------------------------------------------------------------------- /2.study/21.role/1.serviceaccount-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: lovehyun-admin 5 | namespace: kube-system 6 | -------------------------------------------------------------------------------- /2.study/demo2/mongo-express-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mongodb-configmap 5 | data: 6 | database_url: mongodb-svc 7 | -------------------------------------------------------------------------------- /2.study/22.network_policy/README: -------------------------------------------------------------------------------- 1 | # 각각 pod 실행하고... 
mysql 에서 tomcat 접속 시도 2 | kubectl get pods -o wide 3 | 4 | kubectl exec -it mysql-pod -- /bin/bash 5 | curl 10.244.2.5:8080 6 | 7 | -------------------------------------------------------------------------------- /2.study/21.role/6.team-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: team-a 5 | --- 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: team-b 10 | -------------------------------------------------------------------------------- /2.study/12.secret/1.user-pass-basic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: my-secret-basic 5 | type: kubernetes.io/basic-auth 6 | stringData: 7 | username: user 8 | password: pass 9 | -------------------------------------------------------------------------------- /2.study/06.statefulset/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/workloads/controllers/statefulset/ 2 | 3 | # MySQL 스테이트풀셋 생성 4 | kubectl apply -f mysql_sts.yaml 5 | 6 | 중단 및 재시동 시 pod의 이름이 동일하게 생성 됨 7 | 8 | -------------------------------------------------------------------------------- /2.study/01.pod/1.nginx_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-nginx-pod 5 | 6 | spec: 7 | containers: 8 | - name: my-nginx-cont 9 | image: nginx 10 | ports: 11 | - containerPort: 80 12 | -------------------------------------------------------------------------------- /2.study/demo2/mongo-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongodb-svc 5 | spec: 6 | selector: 7 | app: mongodb 8 | ports: 9 | - protocol: TCP 10 | port: 27017 11 | 
targetPort: 27017 12 | -------------------------------------------------------------------------------- /2.study/12.secret/2.user-pass-opaque.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: my-secret-opaque 5 | type: Opaque # 기본값: (key,value) 형식으로 임의의 데이터 설정 6 | data: 7 | username: dXNlcg== # base64 8 | password: cGFzcw== 9 | -------------------------------------------------------------------------------- /2.study/10.volume/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/storage/volumes/ 2 | 3 | # 볼륨 생성 (1Gi 스토리지 1개) 4 | kubectl apply -f 1.pv.yaml 5 | 6 | # 볼륨 요청 (2Gi 요청시 실패 (Pending), 100Mi 요청시에도 1Gi 할당) 7 | kubectl apply -f 2.pvc.yaml 8 | 9 | -------------------------------------------------------------------------------- /2.study/12.secret/3.user-pass-ssh.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: my-secret-ssh 5 | type: kubernetes.io/ssh-auth 6 | data: 7 | # 본 예시를 위해 축약된 데이터임 8 | ssh-privatekey: | 9 | MIIEpQIBAAKCAQEAulqb/Y ... 
10 | -------------------------------------------------------------------------------- /3.real-egs/prometheus/volumeF.yaml: -------------------------------------------------------------------------------- 1 | alertmanager: 2 | persistentVolume: 3 | enabled: false 4 | server: 5 | persistentVolume: 6 | enabled: false 7 | pushgateway: 8 | persistentVolume: 9 | enabled: false 10 | 11 | -------------------------------------------------------------------------------- /2.study/demo3/myapp-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: myapp-svc 5 | spec: 6 | type: NodePort 7 | ports: 8 | - port: 80 9 | targetPort: 5000 10 | nodePort: 30100 11 | selector: 12 | app: my-apps 13 | -------------------------------------------------------------------------------- /2.study/21.role/7.team-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: sa-team-a 5 | namespace: team-a 6 | --- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | name: sa-team-b 11 | namespace: team-b 12 | -------------------------------------------------------------------------------- /2.study/03.svc/3.ext_svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-googledns-svc # 내부 도메인 명 (my-googledns-svc or my-googledns-svc..svc.cluster.local) 5 | spec: 6 | type: ExternalName 7 | externalName: dns.google # 외부 도메인 (8.8.8.8) 8 | -------------------------------------------------------------------------------- /2.study/demo2/mongo-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mongodb-secret 5 | type: Opaque 6 | data: 7 | root-username: cm9vdA== # base64 encoding (echo -n 'root' | 
base64) 8 | root-password: cGFzcw== # base64 encoding (echo -n 'pass' | base64) 9 | -------------------------------------------------------------------------------- /10.devel/k8s/node/client.js: -------------------------------------------------------------------------------- 1 | const k8s = require('@kubernetes/client-node'); 2 | 3 | const kc = new k8s.KubeConfig(); 4 | kc.loadFromDefault(); 5 | 6 | const k8sApi = kc.makeApiClient(k8s.CoreV1Api); 7 | 8 | k8sApi.listNamespacedPod('default').then((res) => { 9 | console.log(res.body); 10 | }); 11 | -------------------------------------------------------------------------------- /2.study/22.network_policy/2.pod_tomcat.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: tomcat-pod 5 | labels: 6 | app: tomcat 7 | network: my-policy 8 | spec: 9 | containers: 10 | - name: tomcat-container 11 | image: tomcat:8 12 | ports: 13 | - containerPort: 8080 14 | 15 | -------------------------------------------------------------------------------- /2.study/01.pod/4.mysql_svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql-svc 5 | labels: 6 | app: mysql 7 | spec: 8 | type: ClusterIP 9 | ports: 10 | - name: my-clusterip-mysql 11 | port: 3306 12 | targetPort: 3306 13 | selector: 14 | app: mysql # 앞서 만든 pod 의 (key,value) pair 15 | -------------------------------------------------------------------------------- /2.study/11.configmap/1.configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: my-configmap 5 | data: # data 하위에 전달할 (Key,Value) 를 명시 6 | MY_VERSION: "MY_VERSION_1.0.0" # Key: MY_VERSION, Value: MY_VERSION_1.0.0 7 | MY_CONFIG: "MY_CONFIG_123" # Key: MY_CONFIG, Value: MY_CONFIG_123 8 | 
-------------------------------------------------------------------------------- /2.study/demo1/local-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: local-volume 5 | labels: 6 | type: local 7 | spec: 8 | capacity: 9 | storage: 1Gi 10 | accessModes: 11 | - ReadWriteOnce 12 | hostPath: 13 | path: /tmp/local-volume 14 | persistentVolumeReclaimPolicy: Recycle 15 | -------------------------------------------------------------------------------- /2.study/21.role/2.admin-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: lovehyun-admin 5 | subjects: 6 | - kind: ServiceAccount 7 | name: lovehyun-admin 8 | namespace: kube-system 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: ClusterRole 12 | name: cluster-admin 13 | -------------------------------------------------------------------------------- /2.study/22.network_policy/1.pod_mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql-pod 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - name: mysql-container 10 | image: mysql:5.6 11 | env: 12 | - name: MYSQL_ROOT_PASSWORD 13 | value: (******) 14 | ports: 15 | - containerPort: 3306 16 | -------------------------------------------------------------------------------- /2.study/22.network_policy/3.networkPolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: test-network-policy 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | network: my-policy 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - from: 13 | - ipBlock: 14 | cidr: 10.244.0.0/24 15 | 16 | 
-------------------------------------------------------------------------------- /2.study/08.job/1.job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: my-job 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: busybox 10 | image: busybox 11 | command: ["sh", "-c", "sleep 5; exit 0"] 12 | restartPolicy: Never 13 | completions: 10 # 총 실행 횟수 14 | parallelism: 2 # 동시 실행 횟수 15 | -------------------------------------------------------------------------------- /2.study/12.secret/4.load-secret1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secret-test-pod1 5 | spec: 6 | containers: 7 | - name: test-container 8 | image: k8s.gcr.io/busybox 9 | command: [ "/bin/sh", "-c", "env" ] 10 | envFrom: 11 | - secretRef: 12 | name: my-secret-opaque 13 | restartPolicy: Never 14 | -------------------------------------------------------------------------------- /2.study/demo1/wordpress-autoscaling.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: wordpress-hpa 5 | namespace: default 6 | spec: 7 | maxReplicas: 10 8 | minReplicas: 1 9 | scaleTargetRef: 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | name: wordpress 13 | targetCPUUtilizationPercentage: 80 14 | -------------------------------------------------------------------------------- /2.study/demo2/mongo-express-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongo-express-svc 5 | spec: 6 | selector: 7 | app: mongo-express 8 | type: LoadBalancer # Minikube 속에서는 결국 NodePort 와 동일함 9 | ports: 10 | - protocol: TCP 11 | port: 8081 12 | targetPort: 8081 13 | nodePort: 30000 # NodePort : 30000 ~ 32767 
14 | -------------------------------------------------------------------------------- /10.devel/k8s/node/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "node", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "client.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [], 10 | "author": "", 11 | "license": "ISC", 12 | "dependencies": { 13 | "@kubernetes/client-node": "^0.15.0" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /10.devel/k8s/python/client.py: -------------------------------------------------------------------------------- 1 | # pip install kubernetes 2 | from kubernetes import client, config 3 | 4 | config.load_kube_config() 5 | 6 | v1 = client.CoreV1Api() 7 | 8 | print("Listing pods with their IPs:") 9 | ret = v1.list_pod_for_all_namespaces(watch=False) 10 | for i in ret.items: 11 | print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) 12 | -------------------------------------------------------------------------------- /2.study/03.svc/1.mysql_svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql-svc 5 | labels: 6 | name: mysql-svc 7 | spec: 8 | type: ClusterIP 9 | ports: 10 | - name: "mysql-service-port" 11 | port: 4406 # Service 자신의 포트 12 | targetPort: 3306 # pod 내 컨테이너 포트 13 | selector: # 뒷단의 pod 와 연계 14 | app: mysql 15 | -------------------------------------------------------------------------------- /2.study/22.network_policy/4.networkPolicy2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: network-policy2 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | network: my-policy 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - 
from: 13 | - podSelector: 14 | matchLabels: 15 | app: mysql-allow 16 | 17 | -------------------------------------------------------------------------------- /2.study/01.pod/2.tomcat_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-tomcat-pod 5 | labels: 6 | app: tomcat8 # 원하는 형태의 (key,value) pair, 추후 svc 에서 참조 7 | spec: 8 | containers: 9 | - name: my-tomcat-cont 10 | image: tomcat:jdk8-openjdk 11 | ports: 12 | - containerPort: 8080 13 | # nodeSelector: 14 | # kubernetes.io/hostname: k8s-worker1 15 | -------------------------------------------------------------------------------- /2.study/03.svc/2.tomcat_svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: tomcat-svc 5 | labels: 6 | name: tomcat8 7 | spec: 8 | type: NodePort 9 | ports: 10 | - name: my-nodeport-tomcat 11 | port: 8888 # 서비스 자신의 포트 12 | targetPort: 8080 # pod 내 컨테이너 포트 13 | nodePort: 30001 # 외부(노드) 포트 - 생략시 자동 할당 14 | selector: 15 | app: tomcat8 16 | -------------------------------------------------------------------------------- /2.study/08.job/2.job_fail.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: my-job 5 | spec: 6 | backoffLimit: 3 # 실패 시 재시작 횟수 제한 7 | template: 8 | spec: 9 | containers: 10 | - name: busybox 11 | image: busybox 12 | command: ["sh", "-c", "sleep 5; exit 1"] 13 | restartPolicy: Never 14 | completions: 10 # 총 실행 횟수 15 | parallelism: 2 # 동시 실행 횟수 16 | -------------------------------------------------------------------------------- /2.study/04.deployment/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/workloads/controllers/deployment/ 2 | 3 | # nginx 의 deployment 4 | kubectl apply -f 
1.nginx_deploy.yaml 5 | 6 | nginx 배포 및 서비스 연결 7 | 8 | curl 192.168.49.2:xxxxx 9 | 10 | 11 | # express-app 의 deployment 12 | kubectl apply -f 2.expressapp.yaml 13 | 14 | lovehyun/express-app 컨테이너의 배포 및 서비스 연결 15 | 16 | curl 192.168.49.2:xxxxx 17 | 18 | -------------------------------------------------------------------------------- /3.real-egs/metrics/README: -------------------------------------------------------------------------------- 1 | # Metrics 서버 2 | https://github.com/kubernetes-sigs/metrics-server 3 | wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.7/components.yaml 4 | 5 | # 위 components.yaml 파일 내에 인증서 관련 부분 수정 6 | args 필드 아래 7 | - certs-dir=/tmp 8 | - secure-port=4443 9 | 10 | - --kubelet-insecure-tls 11 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 12 | 13 | -------------------------------------------------------------------------------- /3.real-egs/metallb/metallb_myconf.yaml: -------------------------------------------------------------------------------- 1 | # kubectl apply -f https://raw.githubusercontent.com/google/metallb/v0.8.3/manifests/metallb.yaml 2 | 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | namespace: metallb-system 7 | name: config 8 | data: 9 | config: | 10 | address-pools: 11 | - name: my-ip-space 12 | protocol: layer2 13 | addresses: 14 | - 192.168.56.90-192.168.56.99 15 | 16 | -------------------------------------------------------------------------------- /2.study/02.replicaset/2.replica.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: tomcat 5 | 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | tier: tomcat 11 | 12 | template: 13 | metadata: 14 | labels: 15 | tier: tomcat 16 | spec: 17 | containers: 18 | - name: tomcat8 19 | image: tomcat 20 | ports: 21 | - containerPort: 8080 22 | 
-------------------------------------------------------------------------------- /2.study/05.ingress/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/services-networking/ingress/ 2 | 3 | # Minikube 에서 ingress 활성화 (기본은 disable 되어 있음) 4 | minikube addons enable ingress 5 | 6 | 7 | # kubernetes-dashboard 를 기반으로 ingress 예제 만들기 8 | kubectl apply -f ingress-eg1.yaml 9 | 10 | 호스트 내에 해당 도메인이 없음으로, 추가 설정 11 | kubectl get ingress -n kubernetes-dashboard 12 | 13 | sudo vim /etc/hosts 14 | 15 | 192.168.xx.x my-dashboard.com 16 | 17 | -------------------------------------------------------------------------------- /2.study/05.ingress/2.ingress-dashboard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: dashboard-ingress 5 | namespace: kubernetes-dashboard 6 | spec: 7 | rules: 8 | - host: my-dashboard.com 9 | http: 10 | paths: 11 | - path: "/" 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: kubernetes-dashboard 16 | port: 17 | number: 80 18 | -------------------------------------------------------------------------------- /2.study/05.ingress/1.ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: myapp-ingress 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | spec: 8 | rules: 9 | - host: myapp.com 10 | http: 11 | paths: 12 | - path: / 13 | pathType: Prefix 14 | backend: 15 | service: 16 | name: myapp-internal-service 17 | port: 18 | number: 8080 19 | -------------------------------------------------------------------------------- /2.study/02.replicaset/1.pods.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod1 5 | labels: 6 | 
tier: tomcat 7 | spec: 8 | containers: 9 | - name: tomcat1 10 | image: tomcat 11 | ports: 12 | - containerPort: 8080 13 | --- 14 | apiVersion: v1 15 | kind: Pod 16 | metadata: 17 | name: pod2 18 | labels: 19 | tier: tomcat 20 | spec: 21 | containers: 22 | - name: tomcat2 23 | image: tomcat 24 | ports: 25 | - containerPort: 8080 26 | -------------------------------------------------------------------------------- /10.devel/k8s/python/client2.py: -------------------------------------------------------------------------------- 1 | # kubectl proxy --port=8888 2 | from kubernetes import client 3 | 4 | configuration = client.Configuration() 5 | configuration.host = "127.0.0.1:8888" 6 | api_client = client.ApiClient(configuration) 7 | 8 | v1 = client.CoreV1Api(api_client) 9 | 10 | print("Listing pods with their IPs:") 11 | ret = v1.list_pod_for_all_namespaces(watch=False) 12 | for i in ret.items: 13 | print("%s\t%s\t%s" % (i.status.pod_ip, i.metadata.namespace, i.metadata.name)) 14 | -------------------------------------------------------------------------------- /10.devel/k8s/python/client3.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client, config, watch 2 | 3 | # Configs can be set in Configuration class directly or using helper utility 4 | config.load_kube_config() 5 | 6 | v1 = client.CoreV1Api() 7 | 8 | count = 10 9 | w = watch.Watch() 10 | for event in w.stream(v1.list_namespace, _request_timeout=60): 11 | print("Event: %s %s" % (event['type'], event['object'].metadata.name)) 12 | count -= 1 13 | if not count: 14 | w.stop() 15 | 16 | print("Ended.") 17 | -------------------------------------------------------------------------------- /2.study/08.job/3.cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: my-cronjob 5 | spec: 6 | schedule: "*/1 * * * *" # every minute 7 | jobTemplate: 8 | spec: 
9 | template: 10 | spec: 11 | containers: 12 | - name: hello 13 | image: busybox 14 | args: 15 | - /bin/sh 16 | - -c 17 | - echo `date +"[%Y-%m-%d %H:%M:%S]"` Hello from CronJob 18 | restartPolicy: OnFailure 19 | -------------------------------------------------------------------------------- /2.study/11.configmap/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/configuration/configmap/ 2 | 3 | # configmap 설정 후, 변수로 가져오는 예시와 4 | deployment.yaml 5 | 6 | 7 | # configmap 설정 후, 파일로 생성하는 예시 8 | deployment2.yaml 9 | 10 | 11 | # 템플릿 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | name: db-config 16 | namespace: default 17 | data: 18 | DB_URL: localhost 19 | DB_USER: myuser 20 | DB_PASS: mypass 21 | DEBUG_INFO: debug 22 | 23 | kubectl describe configmap db-config 24 | -------------------------------------------------------------------------------- /2.study/08.job/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/workloads/controllers/job/ 2 | # https://kubernetes.io/ko/docs/concepts/workloads/controllers/cron-jobs/ 3 | 4 | # 동시 잡 수행 5 | kubectl apply -f job.yaml 6 | 7 | kubectl get jobs 8 | 9 | kubectl describe job my-job 10 | 11 | 12 | 13 | # 반복 잡 수행 14 | kubectl apply -f cronjob.yaml 15 | 16 | kubectl get cronjobs 17 | 18 | kubectl describe cronjob my-cronjob 19 | 20 | kubectl get pods 21 | 22 | kubectl logs my-cronjob-xxxxx 23 | 24 | -------------------------------------------------------------------------------- /2.study/demo3/myapp-red.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp-red 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: my-apps 10 | template: 11 | metadata: 12 | labels: 13 | app: my-apps 14 | spec: 15 | containers: 16 | - name: 
app-orange 17 | image: lovehyun/flask-app:1.2 18 | ports: 19 | - containerPort: 5000 20 | env: 21 | - name: APP_COLOR 22 | value: red 23 | -------------------------------------------------------------------------------- /2.study/demo3/myapp-green.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp-green 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: my-apps 10 | template: 11 | metadata: 12 | labels: 13 | app: my-apps 14 | spec: 15 | containers: 16 | - name: app-green 17 | image: lovehyun/flask-app:1.2 18 | ports: 19 | - containerPort: 5000 20 | env: 21 | - name: APP_COLOR 22 | value: green 23 | -------------------------------------------------------------------------------- /2.study/demo3/myapp-orange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: myapp-orange 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: my-apps 10 | template: 11 | metadata: 12 | labels: 13 | app: my-apps 14 | spec: 15 | containers: 16 | - name: app-orange 17 | image: lovehyun/flask-app:1.2 18 | ports: 19 | - containerPort: 5000 20 | env: 21 | - name: APP_COLOR 22 | value: orange 23 | -------------------------------------------------------------------------------- /2.study/12.secret/5.load-secret2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secret-test-pod2 5 | spec: 6 | containers: 7 | - name: mycontainer 8 | image: redis 9 | env: 10 | - name: SECRET_USERNAME 11 | valueFrom: 12 | secretKeyRef: 13 | name: my-secret-opaque 14 | key: username 15 | - name: SECRET_PASSWORD 16 | valueFrom: 17 | secretKeyRef: 18 | name: my-secret-opaque 19 | key: password 20 | restartPolicy: Never 21 | 
-------------------------------------------------------------------------------- /2.study/01.pod/5.nginx_svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-nginx-pod 5 | labels: 6 | app: nginx 7 | 8 | spec: 9 | containers: 10 | - name: my-nginx-cont 11 | image: nginx 12 | ports: 13 | - containerPort: 80 14 | --- 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: my-nginx-svc 19 | labels: 20 | app: nginx 21 | 22 | spec: 23 | type: NodePort 24 | ports: 25 | - name: my-nginx-nodeport 26 | port: 8000 27 | targetPort: 80 28 | selector: 29 | app: nginx # pod 의 label 명을 통해서 select 함 30 | -------------------------------------------------------------------------------- /3.real-egs/networking/README: -------------------------------------------------------------------------------- 1 | # Flannel 설치 2 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 3 | 4 | # Flannel 삭제 5 | kubectl delete -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml 6 | 7 | 8 | # 삭제 후 적용되려면 모든 노드의 docker 를 재시작 9 | service docker restart 10 | 11 | 12 | # 다른 네트워킹 설치 13 | # Canal = Flannel + Calico 14 | 15 | curl https://docs.projectcalico.org/manifests/canal.yaml -O 16 | 17 | kubectl apply -f canal.yaml 18 | 19 | 20 | # 설치 확인 21 | kubectl get all -n kube-system 22 | 23 | -------------------------------------------------------------------------------- /2.study/11.configmap/3.deploy_envFrom.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-deployment 5 | labels: 6 | app: my-tomcat 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: my-pod 12 | template: 13 | metadata: 14 | labels: 15 | app: my-pod 16 | spec: 17 | containers: 18 | - name: my-tomcat 19 | image: tomcat 20 | ports: 21 
| - containerPort: 8080 22 | envFrom: # envFrom: configmap 전체 내용을 로딩함 23 | - configMapRef: 24 | name: my-configmap 25 | -------------------------------------------------------------------------------- /2.study/21.role/kubeconfig.conf: -------------------------------------------------------------------------------- 1 | # 서비스 계정의 토큰 인증방식으로 로그인 할 파일 2 | apiVersion: v1 3 | clusters: 4 | - cluster: 5 | insecure-skip-tls-verify: true 6 | certificate-authority-data: 7 | server: # K8s 클러스터 접속 API 주소 - https://192.168.49.2:8443 8 | name: minikube # 또는 local_vm_k8s-master 9 | 10 | contexts: 11 | - context: 12 | cluster: minikube # local_vm_k8s-master 13 | user: lovehyun-admin # 내가 만든 계정 14 | name: lovehyun-admin@minikube 15 | 16 | current-context: lovehyun-admin@minikube 17 | kind: Config 18 | preferences: {} 19 | users: 20 | - name: lovehyun-admin 21 | user: 22 | token: # 여기에 실제 토큰 입력할것 23 | -------------------------------------------------------------------------------- /2.study/21.role/4.user-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: readonly-role 5 | namespace: default 6 | rules: 7 | - apiGroups: [""] # "" = Default (Core API Group) 8 | resources: ["pods"] 9 | verbs: ["get", "list"] 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: RoleBinding 13 | metadata: 14 | name: readonly-user-readonly-role-binding 15 | namespace: default 16 | subjects: 17 | - kind: ServiceAccount 18 | name: readonly-user 19 | apiGroup: "" # Core API Group 20 | roleRef: 21 | kind: Role 22 | name: readonly-role 23 | apiGroup: rbac.authorization.k8s.io 24 | -------------------------------------------------------------------------------- /10.devel/neo4j/neo4j-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: neo4j-sa-role 5 
| 6 | rules: 7 | - apiGroups: ["", "extensions", "apps"] 8 | resources: ["services", "pods", "replicasets", "deployments", "daemonsets"] 9 | verbs: ["get", "list"] 10 | 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRoleBinding 14 | metadata: 15 | name: neo4j-sa-rolebinding 16 | subjects: 17 | - kind: ServiceAccount 18 | name: neo4j-sa 19 | namespace: default 20 | roleRef: 21 | kind: ClusterRole 22 | name: neo4j-sa-role 23 | apiGroup: rbac.authorization.k8s.io 24 | 25 | -------------------------------------------------------------------------------- /2.study/04.deployment/2.expressapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: express-app 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: myapp 9 | replicas: 3 10 | template: 11 | metadata: 12 | labels: 13 | app: myapp 14 | spec: 15 | containers: 16 | - name: express-app 17 | image: lovehyun/express-app:latest 18 | ports: 19 | - containerPort: 8000 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: express-svc 25 | spec: 26 | type: LoadBalancer 27 | selector: 28 | app: myapp 29 | ports: 30 | - port: 8000 31 | name: http 32 | -------------------------------------------------------------------------------- /2.study/04.deployment/1.nginx_deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deploy 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx # 아래 생성될 파드의 레이블 9 | replicas: 2 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:latest 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: nginx-svc 25 | spec: 26 | type: LoadBalancer 27 | selector: 28 | app: nginx # 위 생성된 파드의 레이블 29 | ports: 30 | - port: 80 31 | 
name: http 32 | -------------------------------------------------------------------------------- /2.study/01.pod/3.mysql_pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-mysql-pod 5 | labels: 6 | app: mysql 7 | spec: 8 | containers: 9 | - name: my-mysql-cont 10 | image: mysql:5.6 11 | ports: 12 | - containerPort: 3306 13 | env: 14 | - name: MYSQL_ALLOW_EMPTY_PASSWORD 15 | value: "1" 16 | # nodeSelector: 17 | # kubernetes.io/hostname: k8s-worker2 18 | 19 | # --- 20 | # apiVersion: v1 21 | # kind: Service 22 | # metadata: 23 | # name: mysql-svc 24 | # spec: 25 | # type: ClusterIP # NodePort 26 | # ports: 27 | # - name: mysql-port 28 | # port: 3306 29 | # targetPort: 3306 30 | # # nodePort: 30001 31 | # selector: 32 | # app: mysql 33 | -------------------------------------------------------------------------------- /2.study/01.pod/6.nginx_tomcat.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: my-nginx-tomcat-pods 5 | namespace: default 6 | labels: 7 | app: my-app 8 | spec: 9 | containers: 10 | - name: my-nginx 11 | image: nginx:1.18 12 | ports: 13 | - containerPort: 80 14 | - name: my-tomcat 15 | image: tomcat:jdk8-openjdk 16 | ports: 17 | - containerPort: 8080 18 | # --- 19 | # apiVersion: v1 20 | # kind: Service 21 | # metadata: 22 | # name: my-nginx-svc 23 | # spec: 24 | # type: NodePort 25 | # ports: 26 | # - name: web 27 | # port: 8000 28 | # targetPort: 80 29 | # - name: tomcat 30 | # port: 8001 31 | # targetPort: 8080 32 | # selector: 33 | # app: my-app 34 | -------------------------------------------------------------------------------- /2.study/10.volume/2.pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc-hostpath 5 | spec: 6 | accessModes: 7 | - 
ReadWriteOnce 8 | volumeMode: Filesystem 9 | resources: # PV 자원 중 어느정도의 자원을 사용할 것인지 명시 10 | requests: 11 | storage: 1Gi # storage 설정이 1GB로 되어있는데 상위 PV에서 5GB가 할당되어 있음으로 12 | # pvc 에서는 pv storage 용량 이하로 설정해야함 13 | # 만약 더 큰용량을 설정하면 STATUS가 Pending 상태로 남게되고 생성이 안됨. 14 | storageClassName: my-storage 15 | selector: # 예제 설명을 위해 추가. PVC는 PV를 selector 를 통해 Label 로 확인할 수도 있음 16 | matchLabels: # selector 를 사용하지 않고 storageClassName도 사용하지 않을 시 PVC에서 설정한 스펙의 PV가 있는지 자동으로 탐지하여 PV와 연결됨 17 | storage: pv-test 18 | -------------------------------------------------------------------------------- /2.study/21.role/5.user-rbac2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: readonly-role 5 | namespace: default 6 | rules: 7 | - apiGroups: [""] # "" = Default (Core API Group - v1) 8 | # resources: ["pods", "services"] 9 | resources: ["*"] 10 | verbs: ["get", "list"] 11 | - apiGroups: ["apps"] # "apps" = Apps API Group (apps/v1) 12 | resources: ["*"] 13 | verbs: ["get", "list"] 14 | 15 | --- 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | kind: RoleBinding 18 | metadata: 19 | name: readonly-rolebind 20 | namespace: default 21 | subjects: 22 | - kind: ServiceAccount 23 | name: readonly-user 24 | roleRef: 25 | kind: Role 26 | name: readonly-role 27 | apiGroup: rbac.authorization.k8s.io 28 | -------------------------------------------------------------------------------- /2.study/07.daemonset/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/workloads/controllers/daemonset/ 2 | 3 | # FluentD-Elasticsearch 데몬셋 생성 4 | kubectl apply -f fluentd_ds.yaml 5 | 6 | 또는 외부에서 직접 적용 시 (위 코드와 동일 내용) 7 | kubectl apply -f https://k8s.io/examples/controllers/daemonset.yaml 8 | 9 | * 적용되는 네임스페이스 확인 - 시스템 서비스임으로 kube-system 에 적용 10 | 11 | 12 | 13 | # 데몬셋의 업데이트 전략 확인 14 | kubectl get
ds/fluentd-elasticsearch -o go-template='{{.spec.updateStrategy.type}}{{"\n"}}' -n kube-system 15 | => RollingUpdate 16 | 17 | 18 | # 데몬셋의 업데이트 진행 (v2.5.2 -> v2.6.0) 19 | kubectl set image ds/fluentd-elasticsearch fluentd-elasticsearch=quay.io/fluentd_elasticsearch/fluentd:v2.6.0 -n kube-system 20 | 21 | 22 | 23 | # 삭제 및 클린업 24 | kubectl delete ds fluentd-elasticsearch -n kube-system 25 | 26 | -------------------------------------------------------------------------------- /10.devel/k8s/README.txt: -------------------------------------------------------------------------------- 1 | # Kubernetes Clients 2 | https://github.com/kubernetes-client 3 | 4 | https://github.com/kubernetes-client/python 5 | https://github.com/kubernetes-client/javascript 6 | 7 | 8 | # 기본 개발 환경 꾸미기 9 | ## Python 환경 10 | sudo apt install python3-venv 11 | python3 -m venv ~/.venv/ 12 | 13 | ## JavaScript 환경 14 | sudo apt install node npm 15 | 16 | 17 | # 라이브러리 설치 18 | ## Python 환경 19 | pip install kubernetes 20 | 21 | ## JavaScript 환경 22 | npm install @kubernetes/client-node 23 | 24 | 25 | # Documents 26 | ## Common 27 | https://kubernetes.io/docs/reference/kubernetes-api/ 28 | https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/ 29 | 30 | ## Python-client 31 | https://kubernetes.readthedocs.io/en/latest/ 32 | https://github.com/kubernetes-client/python/tree/master/kubernetes/docs 33 | 34 | ## Javascript-client 35 | -------------------------------------------------------------------------------- /2.study/03.svc/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/services-networking/service/ 2 | 3 | # NodePort 는 Pod가 있는 WorkerNode 의 IP 주소로 접속함 4 | # 노드 포트의 번호는 30000 ~ 32767 사이로만 사용 가능하며, 하나의 포트로 하나의 서비스만 가능.
5 | 6 | # ClusterIP 사용해서 내부 pods 간에 통신 7 | kubectl apply -f 1.mysql_svc.yaml 8 | 9 | 10 | # NodePort 사용해서 tomcat 접속 11 | kubectl apply -f 2.tomcat_svc.yaml 12 | 13 | 14 | # ExternalName 사용해서 클러스터 내부에서 외부에 접속 15 | kubectl apply -f 3.ext_svc.yaml 16 | 17 | 18 | kubectl run -it busybox --rm --image=busybox sh 19 | > ping my-googledns-svc 20 | > ping my-googledns-svc.default.svc.cluster.local 21 | 22 | 23 | ExternalName 필드에 예전 kubedns 에서는 IP 주소도 설정 가능하였으나, 24 | coredns 에서는 해당 필드를 모두 string 으로 인지하여, 더이상 IP 주소 사용할 수 없음. 25 | 26 | 위 예에서는 외부에 존재하는 DNS를(google dns) 사용해서 잘 와닿지 않을 수 있는데, 27 | 외부 공식 DNS가 없는 Cloud 내부의 사설 IP 의 URL 등을 생각하면 됨. 28 | 29 | 예) Cloud 내에 구축한 내 RDS 주소라던지... 30 | 31 | -------------------------------------------------------------------------------- /2.study/02.replicaset/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/workloads/controllers/replicaset/ 2 | 3 | # replicaset 4 | label 을 pod에 설정하고, selector 를 통해서 분산처리 할 리소스 레이블과 연결함 5 | 6 | 7 | # tomcat pod 수동실행 테스트 (이미지 잘 가져와서 배포되는지 테스트용) 8 | kubectl apply -f 1.pods.yaml 9 | 10 | 11 | # tomcat pod 삭제 12 | kubectl delete -f 1.pods.yaml 13 | 14 | 15 | # 또는 수동으로 pod 삭제 16 | kubectl delete pods tomcat 17 | 18 | 19 | # replicaset 20 | kubectl apply -f 2.replica.yaml 21 | 22 | 23 | # replicaset 조회 24 | kubectl get rs 25 | 26 | 27 | # 묶여서 제어권 누가 하는지 확인 28 | kubectl describe pods pod1 29 | 30 | 31 | # 수동으로 갯수 늘리기 32 | kubectl scale rs/tomcat --replicas=5 33 | 34 | 35 | # replicaset auto healing 테스트 36 | kubectl delete pods/tomcat-xxxxx 37 | 38 | 39 | # 수동으로 모두 삭제하기 40 | kubectl delete rs/tomcat 41 | 42 | 43 | # 레이블을 통해 해당 파드 조회 44 | kubectl get pods -l tier=tomcat 45 | kubectl get pods --selector tier=tomcat 46 | -------------------------------------------------------------------------------- /2.study/10.volume/3.deployment.yaml: -------------------------------------------------------------------------------- 1 |
apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-deployment 5 | labels: 6 | app: my-tomcat 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: my-pod 12 | template: 13 | metadata: 14 | labels: 15 | app: my-pod 16 | spec: 17 | containers: 18 | - name: my-tomcat 19 | image: tomcat 20 | ports: 21 | - containerPort: 8080 22 | volumeMounts: # Volume object를 Pod에 적용 23 | - mountPath: /VolumeTest # Mount 경로 설정 24 | name: my-volume # volume name에 해당하는 volume을 pod 에 적용 25 | volumes: # volume object 를 생성 26 | - name: my-volume # volume object 이름을 my-volume으로 설정 27 | persistentVolumeClaim: # PVC를 설정 28 | claimName: pvc-hostpath # PVC이름이 pvc-hostpath 인 volume object를 생성 29 | -------------------------------------------------------------------------------- /2.study/demo2/mongo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mongodb-deploy 5 | labels: 6 | app: mongodb 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mongodb 12 | template: 13 | metadata: 14 | labels: 15 | app: mongodb 16 | spec: 17 | containers: 18 | - name: mongodb 19 | image: mongo # dockerhub 에서 필요한 포트/환경변수 참조 20 | ports: 21 | - containerPort: 27017 22 | env: 23 | # - name: MONGO_INITDB_ROOT_USERNAME 24 | # value: 25 | # - name: MONGO_INITDB_ROOT_PASSWORD 26 | # value: 27 | - name: MONGO_INITDB_ROOT_USERNAME 28 | valueFrom: 29 | secretKeyRef: 30 | name: mongodb-secret 31 | key: root-username 32 | - name: MONGO_INITDB_ROOT_PASSWORD 33 | valueFrom: 34 | secretKeyRef: 35 | name: mongodb-secret 36 | key: root-password 37 | -------------------------------------------------------------------------------- /2.study/11.configmap/2.deploy_env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-deployment 5 | labels: 6 | app: my-tomcat 7 | spec: 8 | 
replicas: 1 9 | selector: 10 | matchLabels: 11 | app: my-pod 12 | template: 13 | metadata: 14 | labels: 15 | app: my-pod 16 | spec: 17 | containers: 18 | - name: my-tomcat 19 | image: tomcat 20 | ports: 21 | - containerPort: 8080 22 | env: # env: 항목을 추가 해서 configmap 설정을 적용함 23 | - name: VERSION_TEST # - name: 해당 Pod 에 적용될 환경변수 이름 24 | valueFrom: # valueFrom: name에서 명시한 환경변수에 적용할 값을 설정함 25 | configMapKeyRef: # configMapKeyRef: 불러올 configMap과 key를 선택 26 | name: my-configmap # name: 항목에 configMap 파일의 metadata: name 으로 선택 27 | key: MY_VERSION # key: 항목에 configMap 에 설정되어있는 data의 key 이름 선택 28 | - name: CONFIG_TEST 29 | valueFrom: 30 | configMapKeyRef: 31 | name: my-configmap 32 | key: MY_CONFIG 33 | -------------------------------------------------------------------------------- /2.study/demo2/mongo-express.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mongo-express 5 | labels: 6 | app: mongo-express 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: mongo-express 12 | template: 13 | metadata: 14 | labels: 15 | app: mongo-express 16 | spec: 17 | containers: 18 | - name: mongo-express 19 | image: mongo-express # 마찬가지로 dockerhub 통해서 각종 포트/환경변수 확인 20 | ports: 21 | - containerPort: 8081 22 | env: 23 | - name: ME_CONFIG_MONGODB_ADMINUSERNAME 24 | valueFrom: 25 | secretKeyRef: 26 | name: mongodb-secret 27 | key: root-username 28 | - name: ME_CONFIG_MONGODB_ADMINPASSWORD 29 | valueFrom: 30 | secretKeyRef: 31 | name: mongodb-secret 32 | key: root-password 33 | # - name: ME_CONFIG_MONGODB_SERVER 34 | # value: 35 | - name: ME_CONFIG_MONGODB_SERVER 36 | valueFrom: 37 | configMapKeyRef: 38 | name: mongodb-configmap 39 | key: database_url 40 | -------------------------------------------------------------------------------- /2.study/21.role/8.team-role.yaml: -------------------------------------------------------------------------------- 1 | # 
https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles 2 | 3 | # Custom Role Binding 4 | # - ClusterRole : cluster-admin, admin, edit, view 5 | # - Binding : RoleBinding, ClusterRoleBinding 6 | kind: Role 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | metadata: 9 | name: custom-role 10 | namespace: team-a 11 | rules: 12 | - apiGroups: [""] # "" indicates the core API group 13 | resources: ["pods"] 14 | verbs: ["get", "watch", "list"] 15 | --- 16 | kind: RoleBinding 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | metadata: 19 | name: custom-rolebinding 20 | namespace: team-a 21 | subjects: 22 | - kind: ServiceAccount 23 | name: sa-team-a 24 | namespace: team-a 25 | roleRef: 26 | kind: Role 27 | name: custom-role 28 | apiGroup: rbac.authorization.k8s.io 29 | --- 30 | # Use default role to bind 31 | kind: RoleBinding 32 | apiVersion: rbac.authorization.k8s.io/v1 33 | metadata: 34 | name: default-rolebinding 35 | namespace: team-b 36 | subjects: 37 | - kind: ServiceAccount 38 | name: sa-team-b 39 | namespace: team-b 40 | roleRef: 41 | kind: ClusterRole 42 | name: view 43 | apiGroup: rbac.authorization.k8s.io 44 | -------------------------------------------------------------------------------- /2.study/demo1/wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: wordpress 12 | strategy: 13 | type: Recreate 14 | template: 15 | metadata: 16 | labels: 17 | app: wordpress 18 | spec: 19 | containers: 20 | - image: wordpress:4.8-apache 21 | name: wordpress 22 | env: 23 | - name: WORDPRESS_DB_HOST 24 | value: mysql 25 | # - name: WORDPRESS_DB_NAME 26 | # value: wp 27 | # - name: WORDPRESS_DB_USER 28 | # value: wp 29 | - name: WORDPRESS_DB_PASSWORD 30 | valueFrom: 31 | secretKeyRef: 32 | name: mysql-pass 33 | key: password.txt 34 | 
ports: 35 | - containerPort: 80 36 | name: wordpress 37 | resources: 38 | requests: 39 | cpu: 25m 40 | limits: 41 | cpu: 50m 42 | --- 43 | apiVersion: v1 44 | kind: Service 45 | metadata: 46 | name: wordpress 47 | labels: 48 | app: wordpress 49 | spec: 50 | type: NodePort 51 | ports: 52 | - port: 80 53 | targetPort: 80 54 | nodePort: 30100 55 | selector: 56 | app: wordpress 57 | -------------------------------------------------------------------------------- /2.study/11.configmap/4.deploy_volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: my-deployment 5 | labels: 6 | app: my-tomcat 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: my-pod 12 | template: 13 | metadata: 14 | labels: 15 | app: my-pod 16 | spec: 17 | containers: 18 | - name: my-tomcat 19 | image: tomcat 20 | ports: 21 | - containerPort: 8080 22 | volumeMounts: # pod에 mount할 volume을 설정 23 | - name: my-volume # name이 my-volume인 volume을 mount 24 | mountPath: /tmp/my-config # mount할 경로 설정 25 | volumes: # volume 설정 26 | - name: my-volume # volume name은 위와 일치하도록 my-volume으로 함 27 | configMap: # volume에 ConfigMap 내용을 적용함 28 | name: my-configmap # name: 항목에 configMap 파일의 metadata:name 으로 선택 29 | # items: # configMap 의 data 항목 중에 가져올 key를 선정함 (여기서부터는 Optional - 추가 안하면 ALL) 30 | # - key: MY_VERSION # ConfigMap에 포함되어있는 Key 31 | # path: filename1 # Volume에 저장되는 파일이름 32 | # - key: MY_CONFIG # ConfigMap에 포함되어있는 Key 33 | # path: filename2 # Volume에 저장되는 파일이름 34 | -------------------------------------------------------------------------------- /1.setup/setup/README.txt: -------------------------------------------------------------------------------- 1 | 1. 개요 2 | 3 | Kubernetes 에 노드 추가하기 (초기 설치가 아닌) 4 | 5 | 최초 kubeadm init 시 표시된 6 | 7 | kubeadm join --token --discovery-token-ca-cert-hash sha256: 8 | 9 | 명령을 이용하면 token이 만료 되었다는 에러와 함께 Worker 노드가 Master 노드에 붙지 않는 문제가 있음. 
10 | 11 | 이유는 최초 노드 조인을 위해 토큰을 발행 하면 아래와 같이 EXPIRES 기간이 있기 때문이다. 12 | 13 | $ kubeadm token list 14 | TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS 15 | <토큰값> 2019-11-15T12:08:22+09:00 authentication,signing The default bootstrap token generated by 'kubeadm init'. system:bootstrappers:kubeadm:default-node-token 16 | 17 | 18 | 물론 토큰값이 존재 한다면 이 값을 이용하면 되고 19 | 20 | kubeadm token list 를 수행해서 아무런 값이 없을 경우엔 노드를 붙일 수 없다. 21 | 22 | 23 | $ kubeadm token list 24 | TOKEN TTL EXPIRES USAGES DESCRIPTION EXTRA GROUPS 25 | 26 | 2. Token 생성(확인) 27 | $ kubeadm token list 28 | $ kubeadm token create 29 | 30 | 3. Hash 확인 31 | openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //' 32 | 33 | 4. Join 34 | kubeadm join --token --discovery-token-ca-cert-hash sha256: 35 | -------------------------------------------------------------------------------- /2.study/06.statefulset/1.mysql_sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: mysql-sts 5 | spec: 6 | serviceName: mysql-svc # 연결할 서비스 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: mysql-sts 11 | template: 12 | metadata: 13 | labels: 14 | app: mysql-sts 15 | spec: 16 | containers: 17 | - name: mysql 18 | image: mysql:5.7 19 | env: 20 | - name: MYSQL_ROOT_PASSWORD 21 | value: password 22 | ports: 23 | - containerPort: 3306 24 | name: mysql 25 | volumeMounts: 26 | - name: my-pvc 27 | mountPath: /var/lib/mysql 28 | subPath: data 29 | livenessProbe: # 동작 헬스 체크 30 | exec: 31 | command: ["mysqladmin", "-p$MYSQL_ROOT_PASSWORD", "ping"] 32 | initialDelaySeconds: 60 33 | timeoutSeconds: 10 34 | volumeClaimTemplates: 35 | - metadata: 36 | name: my-pvc 37 | spec: 38 | accessModes: ["ReadWriteOnce"] 39 | storageClassName: standard # Minikube/GKE 기본 2Gi 용량 40 | resources: 41 | requests: 42 | storage: 1Gi 43 | --- 44 | apiVersion: v1 45 | 
kind: Service 46 | metadata: 47 | name: mysql-svc 48 | labels: 49 | app: mysql-sts 50 | spec: 51 | ports: 52 | - port: 3306 53 | name: mysql 54 | clusterIP: None # headless 모드 55 | selector: 56 | app: mysql-sts # statefulset 과 연결을 위한 레이블 57 | -------------------------------------------------------------------------------- /2.study/21.role/README.txt: -------------------------------------------------------------------------------- 1 | # 참고 : https://kubernetes.io/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/ 2 | # 참고 : https://kubernetes.io/ko/docs/reference/access-authn-authz/authorization/ 3 | 4 | 5 | # 현재 계정 확인 6 | kubectl config view 7 | cat ~/.kube/config 8 | 9 | 10 | 11 | # 서비스 계정 생성 12 | kubectl create -f 1.serviceaccount-admin.yaml 13 | kubectl describe secret lovehyun-admin-token-xxxx -n kube-system 14 | 15 | # 관리자 권한 부여 16 | kubectl create -f 2.admin-rbac.yaml 17 | 18 | # 사용자 설정 구성 (별도 파일로 - 원본 파일 망가트리지 않도록 임시로 새 파일 생성) 19 | kubeconfig.conf 20 | 21 | kubectl get pods --all-namespaces --kubeconfig kubeconfig.conf 22 | 23 | 24 | 25 | # Pod 읽기 전용 사용자 만들기 26 | kubectl create -f 3.serviceaccount-user.yaml 27 | 28 | kubectl create -f 4.user-rbac.yaml 29 | 30 | kubectl get secret 31 | kubectl describe secret readonly-user-token-qsx66 32 | 33 | kubectl config set-credentials readonly-user --token=xxxxxxxxxx 34 | 35 | 36 | # 컨텍스트 전환 37 | kubectl config get-contexts 38 | kubectl config set-context readonly-user-context --cluster=minikube --user=readonly-user 39 | kubectl config get-contexts 40 | 41 | kubectl config use-context readonly-user-context 42 | 43 | 44 | # 삭제 45 | kubectl config delete-context readonly-user-context 46 | kubectl config delete-user readonly-user 47 | 48 | kubectl delete -f 4.user-rbac.yaml 49 | kubectl delete -f 3.serviceaccount-user.yaml 50 | kubectl delete -f 2.admin-rbac.yaml 51 | kubectl delete -f 1.serviceaccount-admin.yaml 52 | --------------------------------------------------------------------------------
/2.study/demo1/mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mysql-vol-claim 5 | labels: 6 | app: mysql 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: mysql 18 | labels: 19 | app: mysql 20 | spec: 21 | replicas: 1 22 | selector: 23 | matchLabels: 24 | app: mysql 25 | strategy: 26 | type: Recreate 27 | template: 28 | metadata: 29 | labels: 30 | app: mysql 31 | spec: 32 | containers: 33 | - image: mysql:5.6 34 | name: mysql 35 | env: 36 | - name: MYSQL_ROOT_PASSWORD 37 | valueFrom: 38 | secretKeyRef: 39 | name: mysql-pass 40 | key: password.txt 41 | ports: 42 | - containerPort: 3306 43 | name: mysql 44 | resources: 45 | requests: 46 | cpu: 25m 47 | limits: 48 | cpu: 50m 49 | volumeMounts: 50 | - name: mysql-local-storage 51 | mountPath: /var/lib/mysql 52 | volumes: 53 | - name: mysql-local-storage 54 | persistentVolumeClaim: 55 | claimName: mysql-vol-claim 56 | --- 57 | apiVersion: v1 58 | kind: Service 59 | metadata: 60 | name: mysql 61 | labels: 62 | app: mysql 63 | spec: 64 | ports: 65 | - port: 3306 66 | selector: 67 | app: mysql 68 | -------------------------------------------------------------------------------- /2.study/07.daemonset/1.fluentd_ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd-elasticsearch 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd-logging 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: fluentd-elasticsearch 12 | template: 13 | metadata: 14 | labels: 15 | name: fluentd-elasticsearch 16 | spec: 17 | tolerations: 18 | # 이 톨러레이션(toleration)은 데몬셋이 컨트롤 플레인 노드에서 실행될 수 있도록 만든다. 19 | # 컨트롤 플레인 노드가 이 파드를 실행해서는 안 되는 경우, 이 톨러레이션을 제거한다. 
20 | - key: node-role.kubernetes.io/control-plane 21 | operator: Exists 22 | effect: NoSchedule 23 | - key: node-role.kubernetes.io/master 24 | operator: Exists 25 | effect: NoSchedule 26 | containers: 27 | - name: fluentd-elasticsearch 28 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 29 | resources: 30 | limits: 31 | memory: 200Mi 32 | requests: 33 | cpu: 100m 34 | memory: 200Mi 35 | volumeMounts: 36 | - name: varlog 37 | mountPath: /var/log 38 | - name: varlibdockercontainers 39 | mountPath: /var/lib/docker/containers 40 | readOnly: true 41 | terminationGracePeriodSeconds: 30 42 | volumes: 43 | - name: varlog 44 | hostPath: 45 | path: /var/log 46 | - name: varlibdockercontainers 47 | hostPath: 48 | path: /var/lib/docker/containers 49 | -------------------------------------------------------------------------------- /2.study/demo3/README.txt: -------------------------------------------------------------------------------- 1 | # green/orange/red 컨테이너 앱 생성 2 | kubectl apply -f myapp-green.yaml 3 | kubectl apply -f myapp-orange.yaml 4 | kubectl apply -f myapp-red.yaml 5 | 6 | # 연결하기 위한 서비스 생성 (web 서버 별도로 필요 없음) 7 | kubectl apply -f myapp-service.yaml 8 | 9 | # 연결 확인 10 | minikube service myapp-svc --url 11 | > http://192.168.49.2:30100 12 | 13 | 14 | curl http://192.168.49.2:30100 15 | 16 | 17 | Hello Flask 18 | 19 |
20 |

Hello, World from shpark!

21 |
22 | 23 | 24 | 25 | curl http://192.168.49.2:30100 26 | 27 | 28 | Hello Flask 29 | 30 |
31 |

Hello, World from shpark!

32 |
33 | 34 | 35 | 36 | # 연결 확인 (VSCODE) 37 | 포트포워딩 추가 : 포트 (192.168.49.2:30100) 추가 후 localhost:30100 으로 확인 38 | 39 | 40 | # 서비스 및 로그 확인 41 | kubectl get pods 42 | kubectl logs -f deploy/myapp-green 43 | kubectl logs -f deploy/myapp-orange --all-containers=true --since=1m 44 | 45 | 46 | # 서비스 업그레이드 (아래가 최신버전) 47 | kubectl set image deploy/myapp-green app-green=lovehyun/flask-app:1.3 48 | 49 | kubectl rollout history deploy myapp-green 50 | kubectl rollout history deploy myapp-green --revision=2 51 | kubectl rollout history deploy myapp-green --revision=3 52 | 53 | kubectl rollout undo deploy myapp-green 54 | kubectl rollout history deploy myapp-green --revision=3 55 | kubectl rollout history deploy myapp-green --revision=4 56 | -------------------------------------------------------------------------------- /2.study/07.daemonset/2.fluentd_ds_update.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd-elasticsearch 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd-logging 8 | spec: 9 | selector: 10 | matchLabels: 11 | name: fluentd-elasticsearch 12 | updateStrategy: 13 | type: RollingUpdate 14 | rollingUpdate: 15 | maxUnavailable: 1 16 | template: 17 | metadata: 18 | labels: 19 | name: fluentd-elasticsearch 20 | spec: 21 | tolerations: 22 | # 이 톨러레이션(toleration)은 데몬셋이 컨트롤 플레인 노드에서 실행될 수 있도록 만든다. 23 | # 컨트롤 플레인 노드가 이 파드를 실행해서는 안 되는 경우, 이 톨러레이션을 제거한다. 
24 | - key: node-role.kubernetes.io/control-plane 25 | operator: Exists 26 | effect: NoSchedule 27 | - key: node-role.kubernetes.io/master 28 | operator: Exists 29 | effect: NoSchedule 30 | containers: 31 | - name: fluentd-elasticsearch 32 | image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 33 | resources: 34 | limits: 35 | memory: 200Mi 36 | requests: 37 | cpu: 100m 38 | memory: 200Mi 39 | volumeMounts: 40 | - name: varlog 41 | mountPath: /var/log 42 | - name: varlibdockercontainers 43 | mountPath: /var/lib/docker/containers 44 | readOnly: true 45 | terminationGracePeriodSeconds: 30 46 | volumes: 47 | - name: varlog 48 | hostPath: 49 | path: /var/log 50 | - name: varlibdockercontainers 51 | hostPath: 52 | path: /var/lib/docker/containers 53 | -------------------------------------------------------------------------------- /2.study/demo1/README.txt: -------------------------------------------------------------------------------- 1 | # WordPress / MySQL 연결 및 배포 2 | 3 | # HPA 실습을 위해서는 metric-service 활성화 필요함 4 | minikube addons enable metrics-server 5 | 6 | 7 | # Wordpress on K8s 8 | # -> WordPress 9 | # User -> Service -> WordPress -> MySQL 10 | # -> WordPress 11 | 12 | # 사용자 계정 만들기 13 | echo -n "my-password" > ./password.txt 14 | 15 | kubectl create secret generic mysql-pass --from-file=./password.txt 16 | 17 | kubectl get secret 18 | 19 | kubectl describe secret mysql-pass 20 | 21 | 22 | # 볼륨 만들기 (DB용) 23 | kubectl apply -f local-volume.yaml 24 | 25 | kubectl get pv 26 | 27 | 28 | # MySQL 생성 29 | kubectl apply -f mysql.yaml 30 | 31 | kubectl get pods 32 | 33 | 34 | # MySQL 생성 후 계정 생성 (필요 시) - wordpress 버전에 따라 구버전 필요, 신버전 불필요 35 | kubectl exec -it mysql-xxxxx -- bash 36 | 37 | mysql -u root -p 38 | 39 | show databases; 40 | 41 | create database wp CHARACTER SET utf8; 42 | grant all privileges on wp.* to wp@'%' identified by 'my-password'; 43 | flush privileges; 44 | exit 45 | 46 | exit 47 | 48 | 49 | # WordPress 생성 50 | kubectl apply -f wordpress.yaml 51 | 52 | 53 
| # 접속 시도 54 | minikube service wordpress --url 55 | 56 | 57 | # 모니터링 대시보드 58 | (생략) 59 | 60 | 61 | # 스케일링 62 | kubectl scale deployment wordpress --replicas 3 63 | 64 | 65 | # 오토 스케일링 66 | kubectl apply -f wordpress-autoscaling.yaml 67 | 68 | kubectl get hpa 69 | 70 | while true; do curl localhost:30100; done 71 | 72 | kubectl get hpa 73 | 74 | kubectl get pods 75 | 76 | 77 | # 모두 삭제 및 클린업 78 | kubectl delete -f wordpress.yaml 79 | kubectl delete -f mysql.yaml 80 | kubectl delete -f local-volume.yaml 81 | kubectl delete secret mysql-pass 82 | 83 | -------------------------------------------------------------------------------- /2.study/demo2/README.txt: -------------------------------------------------------------------------------- 1 | # 시크릿 생성 2 | kubectl apply -f mongo-secret.yaml 3 | kubectl get secret 4 | 5 | 6 | # mongodb 에서 시크릿 참조하도록 변경 7 | 8 | spec: 9 | containers: 10 | - name: mongodb 11 | image: mongo # dockerhub 에서 필요한 포트/환경변수 참조 12 | ports: 13 | - containerPort: 27017 14 | env: 15 | - name: MONGO_INITDB_ROOT_USERNAME 16 | value: 17 | - name: MONGO_INITDB_ROOT_PASSWORD 18 | value: 19 | 20 | => 위에는 plaintext 넣어도 되는데 비추 21 | 그래서 secret 참조하도록 변경 22 | 23 | 24 | # mongodb 생성 25 | kubectl apply -f mongo.yaml 26 | 27 | kubectl get deploy 28 | kubectl get pods 29 | 30 | kubectl describe pod mongo-xxxx 31 | 32 | 33 | # service 랑 deployment 랑 --- 통해 한 파일에 작성 가능함 34 | # 일단 이 예제에서는 따로 분리해서 진행하였음 35 | 36 | kubectl apply -f mongo-service.yaml 37 | 38 | kubectl get svc 39 | 40 | kubectl describe svc mongodb-svc 41 | Endpoints: 172.17.0.8:27017 42 | 43 | 44 | kubectl get all -l app=mongodb 45 | 46 | 47 | # mongo-express 웹 프런트 설치 48 | 49 | # - name: ME_CONFIG_MONGODB_SERVER 50 | # value: 51 | - name: ME_CONFIG_MONGODB_SERVER 52 | valueFrom: 53 | configMapRef: 54 | name: mongodb-configmap 55 | key: database_url 56 | 57 | => 위에는 plaintext 넣어도 되는데 비추 58 | 그래서 configmap 참조하도록 변경 59 | 60 | kubectl apply -f mongo-express-configmap.yaml 61 | 62 | kubectl apply -f 
mongo-express.yaml 63 | 64 | 65 | # mongo-express-service 적용 66 | 67 | kubectl apply -f mongo-express-service.yaml 68 | 69 | 70 | # external ip 가져오기 71 | 72 | minikube service mongo-express-svc --url 73 | 74 | 또는 원격 vscode 환경이라면... 75 | 76 | kubectl port-forward svc/mongo-express-svc 8081:8081 77 | 78 | 79 | # mongodb 테스트하기 80 | 81 | SOMA <- Create Database 82 | 83 | -------------------------------------------------------------------------------- /2.study/10.volume/1.pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume # PV 를 생성함을 명시 3 | metadata: 4 | name: pv-hostpath 5 | labels: 6 | storage: pv-test 7 | spec: 8 | capacity: # PV 설정 9 | storage: 1Gi # storage : 용량을 지정 (Mi: 메가, Gi : 기가, Ti: 테라) 10 | accessModes: # accessModes : volume의 읽기/쓰기에 관한 옵션을 지정, volume은 한번에 하나의 accessModes만 설정 가능 11 | - ReadWriteOnce # - ReadWriteOnce : 하나의 node에만 mount 가능, volume 읽기/쓰기 가능 12 | # - ReadOnlyOnce : 다수의 node에 mount 가능, 여러개의 node에서 동시에 volume 읽기 가능, 쓰기 못함 13 | # - ReadWriteMany : 다수의 node에 mount 가능, 여러개의 node에서 동시에 volume 읽기,쓰기 가능 14 | # 해당 옵션은 volume 종류에 따라 설정할 수 있고/없음이 결정됨 15 | volumeMode: Filesystem # volumeMode : Kubernetes 1.8버전에 알파 기능으로 추가된 옵션. 16 | # - filesystem : default 옵션으로 volume을 일반 파일시스템형식으로 붙여서 사용하게 합 17 | # - raw : valume을 RAW 파일시스템형식으로 붙여서 사용하게 함 18 | # - block : Filesysetm이 없는 Block 장치와 연결될 때는 Block으로 설정 19 | storageClassName: my-storage 20 | # storageClassName : 아래에서 설명할 Dynamic Provisioning 방식에 사용하는 옵션(예제에서는 설명을 하기 위해 추가함). 
21 | # storage의 Name을 명시함 특정 StorageClass 가진 PV는 22 | # 그 스토리지 클래스에 맞는 PVC하고만 연결됨 23 | # PV에 storageClassName이 없으면 storageClassName이 없는 PVC에만 연결 24 | persistentVolumeReclaimPolicy: Delete 25 | # persistentVolumeReclaimPolicy: PV 생명주기 중 Reclaim에 해당 26 | # - Delete : 볼륨 사용이 종료되면 실제 디스크내용도 삭제, 스토리지를 할당 받은 경우 할당받은 공간도 해제 27 | # - Recycle: 볼륨 사용이 종료되면 실제 디스크내용도 삭제, 스토리지를 할당 받은 경우 할당받은 공간은 유지 28 | # - Retain : 볼륨 사용이 중지되도 유지함, PVC를 삭제해도 PV유지, 실제 디스크내용은 지워지지 않음 29 | # (아래의 PV와 PVC의 LifeCycle 항목에서 자세히 설명함) 30 | hostPath: # hostPath: PV Type 을 설정하는 부분 hostname은 노드에 저장되는 실제 저장 공간 설정하는 방법. 31 | path: /tmp/volumeK8s # 해당 예제에서는 hostPath로 생성(로컬 디스크 사용) 32 | # hostpath이외에 상위 1.1 volume의 종류에 명시되어있는 33 | # 다양한 종류의 저장공간을 설정해서 사용할 수 있음 34 | -------------------------------------------------------------------------------- /2.study/01.pod/README.txt: -------------------------------------------------------------------------------- 1 | # single container pod 2 | # create vs apply 차이점 이해하기 3 | kubectl create -f 1.nginx_pod.yaml 4 | kubectl apply -f 1.nginx_pod.yaml 5 | 6 | 7 | # delete 8 | kubectl delete -f 1.nginx_pod.yaml 9 | 10 | 11 | # 생성한 pod 에 접속하기 12 | kubectl apply -f 2.tomcat_pod.yaml 13 | 14 | kubectl exec -it my-nginx-pod -- /bin/bash 15 | kubectl exec -it my-tomcat-pod -- /bin/bash 16 | 17 | 18 | # 환경변수 추가하기 19 | kubectl apply -f 3.mysql_pod.yaml 20 | 21 | 22 | # 서비스 추가하고 연동하기 23 | kubectl apply -f 4.mysql_svc.yaml 24 | 25 | 26 | # 파드와 서비스 생성하기 및 서비스 연결 27 | kubectl apply -f 5.nginx_svc.yaml 28 | 29 | kubectl get svc 30 | 31 | curl 192.168.49.2:3XXXX 32 | 33 | 34 | # multiple container pod 35 | kubectl apply -f 6.nginx_tomcat.yaml 36 | 37 | kubectl exec -it my-nginx-tomcat-pods -c my-nginx -- /bin/bash 38 | kubectl exec -it my-nginx-tomcat-pods -c my-tomcat -- /bin/bash 39 | 40 | > exit 41 | > ctrl + p, ctrl + q 42 | 43 | 44 | # delete multiple objects 45 | kubectl delete pod,service foo bar 46 | 47 | 48 | # delete multiple objects by label 49 | kubectl delete pod,services -l 
kubectl apply -f 1.user-pass-basic.yaml
| spec: 32 | containers: 33 | - name: my-app 34 | image: my-app:latest 35 | ports: 36 | - containerPort: 5000 37 | env: 38 | - name: SECRET_USERNAME 39 | valueFrom: 40 | secretKeyRef: 41 | name: my-secret 42 | key: username 43 | - name: SECRET_PASSWORD 44 | valueFrom: 45 | secretKeyRef: 46 | name: my-secret 47 | key: password 48 | 49 | 50 | 51 | # 프라이빗 레지스트리에서 이미지 풀링 52 | kubectl create secret docker-registry dockersecret --docker-username=USERNAME --docker-password=PASSWORD --docker-email=EMAIL --docker-server=https://my-private.repo/v1/ 53 | 54 | 55 | # 시크릿 사용 (Deployment 등) 56 | apiVersion: apps/v1 57 | kind: Deployment 58 | (중략) 59 | spec: 60 | template: 61 | spec: 62 | containers: 63 | - name: my-app 64 | image: my-app:latest 65 | ports: 66 | - containerPort: 5000 67 | imagePullSecrets: 68 | - name: dockersecret 69 | 70 | 71 | 72 | # 시크릿 데이터 저장 용량 73 | 최대 1MB, etcd에 비 암호화 상태의 text로 저장 됨 74 | 75 | 76 | 77 | # 그 외 커맨드 라인 활용... 78 | 79 | ## 도커 레지스크리 시크릿 80 | kubectl create secret docker-registry secret-tiger-docker \ 81 | --docker-username=tiger \ 82 | --docker-password=pass113 \ 83 | --docker-email=tiger@acme.com 84 | 85 | ## TLS 시크릿 86 | kubectl create secret tls my-tls-secret \ 87 | --cert=path/to/cert/file \ 88 | --key=path/to/key/file 89 | 90 | ## SSH 키 시크릿 91 | kubectl create secret generic ssh-key-secret --from-file=ssh-privatekey=/path/to/.ssh/id_rsa --from-file=ssh-publickey=/path/to/.ssh/id_rsa.pub 92 | 93 | -------------------------------------------------------------------------------- /2.study/30.helm/README.md: -------------------------------------------------------------------------------- 1 | # Helm Charts 2 | ## Helm 이란? 3 | - 쿠버네티스 패키지 메니저 4 | - 공식 사이트 : https://helm.sh/ (한국어 : https://helm.sh/ko/) 5 | - 패키지 리포 : https://artifacthub.io/ 6 | 7 | ## Helm Charts 란? 
# 설치 후 비밀번호 알아오기
MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace default my-mysql -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo)

# 접속하기 #1 (호스트에서)
MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
kubectl port-forward svc/my-mysql 3306
mysql -h ${MYSQL_HOST} -P ${MYSQL_PORT} -u root -p ${MYSQL_ROOT_PASSWORD}
추가해서 override 하기
http://localhost:7474 to connect with Neo4j Browser 32 | 33 | 34 | # 호스트에 Neo4j 설치 35 | sudo apt update 36 | sudo apt install apt-transport-https ca-certificates curl software-properties-common 37 | curl -fsSL https://debian.neo4j.com/neotechnology.gpg.key | sudo apt-key add - 38 | sudo add-apt-repository "deb https://debian.neo4j.com stable 4.1" 39 | sudo apt install neo4j 40 | sudo systemctl enable neo4j.service 41 | sudo systemctl status neo4j.service 42 | 43 | 44 | # Neo4j 설정 45 | cypher-shell 46 | 47 | cypher-shell prompt 48 | username: neo4j 49 | password: ***** <-- neo4j 50 | Password change required 51 | new password: ******************** 52 | Connected to Neo4j 4.1.0 at neo4j://localhost:7687 as user neo4j. 53 | Type :help for a list of available commands or :exit to exit the shell. 54 | Note that Cypher queries must end with a semicolon. 55 | neo4j@neo4j> 56 | 57 | 58 | # Neo4j Remote접속 설정 59 | sudo nano /etc/neo4j/neo4j.conf 60 | 61 | . . . 62 | #***************************************************************** 63 | # Network connector configuration 64 | #***************************************************************** 65 | 66 | # With default configuration Neo4j only accepts local connections. 67 | # To accept non-local connections, uncomment this line: 68 | dbms.default_listen_address=0.0.0.0 69 | . . . 
70 | 71 | 72 | cypher-shell -a 'neo4j://your_hostname:7687' 73 | 74 | neo4j@neo4j> CREATE (:Shark {name: 'Great White'}); 75 | 76 | 77 | neo4j@neo4j> CREATE 78 | neo4j@neo4j> (:Shark {name: 'Hammerhead'})-[:FRIEND]-> 79 | neo4j@neo4j> (:Shark {name: 'Sammy'})-[:FRIEND]-> 80 | neo4j@neo4j> (:Shark {name: 'Megalodon'}); 81 | 82 | neo4j@neo4j> MATCH (a:Shark),(b:Shark) 83 | neo4j@neo4j> WHERE a.name = 'Sammy' AND b.name = 'Megalodon' 84 | neo4j@neo4j> CREATE (a)-[r:ORDER { name: 'Lamniformes' }]->(b) 85 | neo4j@neo4j> RETURN type(r), r.name; 86 | 87 | Output 88 | +-------------------------+ 89 | | type(r) | r.name | 90 | +-------------------------+ 91 | | "ORDER" | "Lamniformes" | 92 | +-------------------------+ 93 | 94 | neo4j@neo4j> MATCH (a:Shark),(b:Shark) 95 | neo4j@neo4j> WHERE a.name = 'Sammy' AND b.name = 'Hammerhead' 96 | neo4j@neo4j> CREATE (a)-[r:SUPERORDER { name: 'Selachimorpha'}]->(b) 97 | neo4j@neo4j> RETURN type(r), r.name; 98 | 99 | Output 100 | +--------------------------------+ 101 | | type(r) | r.name | 102 | +--------------------------------+ 103 | | "SUPERORDER" | "Selachimorpha" | 104 | +--------------------------------+ 105 | 106 | neo4j@neo4j> MATCH (a)-[r]->(b) 107 | neo4j@neo4j> RETURN a.name,r,b.name 108 | neo4j@neo4j> ORDER BY r; 109 | 110 | Output 111 | +---------------------------------------------------------------------+ 112 | | a.name | r | b.name | 113 | +---------------------------------------------------------------------+ 114 | | "Hammerhead" | [:FRIEND] | "Sammy" | 115 | | "Sammy" | [:FRIEND] | "Megalodon" | 116 | | "Sammy" | [:ORDER {name: "Lamniformes"}] | "Megalodon" | 117 | | "Sammy" | [:SUPERORDER {name: "Selachimorpha"}] | "Hammerhead" | 118 | +---------------------------------------------------------------------+ 119 | 120 | 121 | -------------------------------------------------------------------------------- /3.real-egs/metrics/components.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:aggregated-metrics-reader 6 | labels: 7 | rbac.authorization.k8s.io/aggregate-to-view: "true" 8 | rbac.authorization.k8s.io/aggregate-to-edit: "true" 9 | rbac.authorization.k8s.io/aggregate-to-admin: "true" 10 | rules: 11 | - apiGroups: ["metrics.k8s.io"] 12 | resources: ["pods", "nodes"] 13 | verbs: ["get", "list", "watch"] 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: metrics-server:system:auth-delegator 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: system:auth-delegator 23 | subjects: 24 | - kind: ServiceAccount 25 | name: metrics-server 26 | namespace: kube-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1 29 | kind: RoleBinding 30 | metadata: 31 | name: metrics-server-auth-reader 32 | namespace: kube-system 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: Role 36 | name: extension-apiserver-authentication-reader 37 | subjects: 38 | - kind: ServiceAccount 39 | name: metrics-server 40 | namespace: kube-system 41 | --- 42 | apiVersion: apiregistration.k8s.io/v1beta1 43 | kind: APIService 44 | metadata: 45 | name: v1beta1.metrics.k8s.io 46 | spec: 47 | service: 48 | name: metrics-server 49 | namespace: kube-system 50 | group: metrics.k8s.io 51 | version: v1beta1 52 | insecureSkipTLSVerify: true 53 | groupPriorityMinimum: 100 54 | versionPriority: 100 55 | --- 56 | apiVersion: v1 57 | kind: ServiceAccount 58 | metadata: 59 | name: metrics-server 60 | namespace: kube-system 61 | --- 62 | apiVersion: apps/v1 63 | kind: Deployment 64 | metadata: 65 | name: metrics-server 66 | namespace: kube-system 67 | labels: 68 | k8s-app: metrics-server 69 | spec: 70 | selector: 71 | matchLabels: 72 | k8s-app: metrics-server 73 | template: 74 | metadata: 75 | 
name: metrics-server 76 | labels: 77 | k8s-app: metrics-server 78 | spec: 79 | serviceAccountName: metrics-server 80 | volumes: 81 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 82 | - name: tmp-dir 83 | emptyDir: {} 84 | containers: 85 | - name: metrics-server 86 | image: k8s.gcr.io/metrics-server/metrics-server:v0.3.7 87 | imagePullPolicy: IfNotPresent 88 | args: 89 | - --cert-dir=/tmp 90 | - --secure-port=4443 91 | - --kubelet-insecure-tls 92 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 93 | ports: 94 | - name: main-port 95 | containerPort: 4443 96 | protocol: TCP 97 | securityContext: 98 | readOnlyRootFilesystem: true 99 | runAsNonRoot: true 100 | runAsUser: 1000 101 | volumeMounts: 102 | - name: tmp-dir 103 | mountPath: /tmp 104 | nodeSelector: 105 | kubernetes.io/os: linux 106 | --- 107 | apiVersion: v1 108 | kind: Service 109 | metadata: 110 | name: metrics-server 111 | namespace: kube-system 112 | labels: 113 | kubernetes.io/name: "Metrics-server" 114 | kubernetes.io/cluster-service: "true" 115 | spec: 116 | selector: 117 | k8s-app: metrics-server 118 | ports: 119 | - port: 443 120 | protocol: TCP 121 | targetPort: main-port 122 | --- 123 | apiVersion: rbac.authorization.k8s.io/v1 124 | kind: ClusterRole 125 | metadata: 126 | name: system:metrics-server 127 | rules: 128 | - apiGroups: 129 | - "" 130 | resources: 131 | - pods 132 | - nodes 133 | - nodes/stats 134 | - namespaces 135 | - configmaps 136 | verbs: 137 | - get 138 | - list 139 | - watch 140 | --- 141 | apiVersion: rbac.authorization.k8s.io/v1 142 | kind: ClusterRoleBinding 143 | metadata: 144 | name: system:metrics-server 145 | roleRef: 146 | apiGroup: rbac.authorization.k8s.io 147 | kind: ClusterRole 148 | name: system:metrics-server 149 | subjects: 150 | - kind: ServiceAccount 151 | name: metrics-server 152 | namespace: kube-system 153 | -------------------------------------------------------------------------------- 
# pip install neo4j
# pip install kubernetes
#
# Walks the Kubernetes API (through `kubectl proxy`) and mirrors the
# service -> deployment -> replicaset -> pod topology into a Neo4j graph.
from kubernetes import client, config
import json
import logging
import os
from neo4j import GraphDatabase
from neo4j.exceptions import ServiceUnavailable


class App:
    """Thin Neo4j wrapper: idempotently creates labelled nodes and relations."""

    # kind -> Cypher MERGE statement. A lookup table replaces the old
    # if/elif chain and fixes the misspelled ":replicaret" label (callers
    # pass 'replicaset', so the node label now matches the kind name).
    _MERGE_QUERIES = {
        "service": "MERGE (:service { name: $node_name }) ",
        "deployment": "MERGE (:deployment { name: $node_name }) ",
        "replicaset": "MERGE (:replicaset { name: $node_name }) ",
        "pod": "MERGE (:pod { name: $node_name }) ",
    }

    def __init__(self, uri, user, password):
        self.driver = GraphDatabase.driver(uri, auth=(user, password))

    def close(self):
        # Don't forget to close the driver connection when you are finished with it
        self.driver.close()

    def create_node(self, object_name, node_name):
        """MERGE a node labelled `object_name` with property name=`node_name`."""
        with self.driver.session() as session:
            session.write_transaction(self._create_node, object_name, node_name)

    @staticmethod
    def _create_node(tx, object_name, node_name):
        # Unknown kinds are silently ignored; previously they raised
        # NameError because `query` was never bound before `tx.run`.
        query = App._MERGE_QUERIES.get(object_name)
        if query is not None:
            tx.run(query, object_name=object_name, node_name=node_name)

    def create_relation(self, first_node, second_node):
        """MERGE a (a)-[:relation_to]->(b) edge between two named nodes."""
        with self.driver.session() as session:
            session.write_transaction(self._create_relation, first_node, second_node)

    @staticmethod
    def _create_relation(tx, first_node, second_node):
        query = (
            "MATCH (a), (b) "
            "WHERE a.name = $first_node and b.name = $second_node "
            "MERGE (a)-[:relation_to]->(b) "
        )
        tx.run(query, first_node=first_node, second_node=second_node)


def main(app):
    # Bearer token for the API server, obtained e.g. via:
    #   kubectl describe secret neo4j-sa-token-xxxxx
    # The old code referenced `aToken` while its assignment was commented
    # out, so the script always crashed with NameError. Read it from the
    # environment instead; empty token is fine for an unauthenticated proxy.
    aToken = os.environ.get("K8S_TOKEN", "")

    aConfiguration = client.Configuration()
    # K8s
    # aConfiguration.host = "https://127.0.0.1:6443"
    # MiniKube with proxy
    aConfiguration.host = "http://127.0.0.1:8080"

    aConfiguration.verify_ssl = False
    aConfiguration.api_key = {"authorization": "Bearer " + aToken}
    aApiClient = client.ApiClient(aConfiguration)

    v1 = client.CoreV1Api(aApiClient)
    v2 = client.AppsV1Api(aApiClient)
    v4 = v1.list_service_for_all_namespaces(watch=False)
    v5 = v2.list_deployment_for_all_namespaces(watch=False)
    v6 = v2.list_replica_set_for_all_namespaces(watch=False)

    # Index each object by every "key=value" selector entry so pods can be
    # matched back to their owners by label below.
    service_dict = {}
    deployment_dict = {}
    replica_set_dict = {}

    for i in v4.items:
        selector = i.spec.selector
        if selector is None:
            continue
        for key, value in selector.items():
            service_dict['%s=%s' % (key, value)] = i
    for i in v5.items:
        selector = i.spec.selector.match_labels
        if selector is None:
            continue
        for key, value in selector.items():
            deployment_dict['%s=%s' % (key, value)] = i
    for i in v6.items:
        selector = i.spec.selector.match_labels
        if selector is None:
            continue
        for key, value in selector.items():
            replica_set_dict['%s=%s' % (key, value)] = i

    v7 = v1.list_namespaced_pod("default")

    for i in v7.items:
        # NOTE: only the pod's first label is used for matching (same
        # behavior as before — the loop breaks after one entry).
        labels = ""
        for key, value in i.metadata.labels.items():
            labels = '%s=%s' % (key, value)
            break
        app.create_node('pod', i.metadata.name)
        if labels in service_dict:
            app.create_node('service', service_dict[labels].metadata.name)
            app.create_relation(service_dict[labels].metadata.name, i.metadata.name)
        # Guard on the replicaset dict as well: a deployment whose
        # replicaset selector did not match used to raise KeyError here.
        if labels in deployment_dict and labels in replica_set_dict:
            app.create_node('deployment', deployment_dict[labels].metadata.name)
            app.create_node('replicaset', replica_set_dict[labels].metadata.name)
            app.create_relation(deployment_dict[labels].metadata.name, replica_set_dict[labels].metadata.name)
            app.create_relation(replica_set_dict[labels].metadata.name, i.metadata.name)


if __name__ == '__main__':
    scheme = "neo4j"
    host_name = "127.0.0.1"
    port = 7687
    url = "{scheme}://{host_name}:{port}".format(scheme=scheme, host_name=host_name, port=port)
    user = "neo4j"
    password = ""
    app = App(url, user, password)
    main(app)
    app.close()
kubectl create clusterrolebinding kubelet-api-test --clusterrole=system:kubelet-api-admin --serviceaccount=default:kubelet-api-test
차트 목록 출력 104 | $ helm search repo stable 105 | 106 | 4. 차트 업데이트 107 | $ helm repo update 108 | 109 | 5. 프로메테우스 설치 110 | $ helm install monitor stable/prometheus 111 | 112 | 6. 확인 113 | $ kubectl get pod 114 | 115 | 몇개는 Pending 상태... k8s클러스터에 StorageClass가 정의되어 있지 않기 때문.. 그래서 일단 pv 를 false 로 emptydir 사용하도록 116 | 117 | 6-1. yaml 로 해결 118 | $ helm inspect values stable/prometheus 119 | 120 | persistentVolume: 121 | enabled: true 122 | 123 | 되어 있는 부분 확인하고... 124 | 125 | vim volumeF.yaml 로 새로 하나 만들기 126 | alertmanager: 127 | persistentVolume: 128 | enabled: false 129 | server: 130 | persistentVolume: 131 | enabled: false 132 | pushgateway: 133 | persistentVolume: 134 | enabled: false 135 | 136 | $ helm upgrade -f volumeF.yaml monitor stable/prometheus 137 | 138 | 6-2. 커맨드로 해결... 139 | 140 | helm install monitor stable/prometheus --set alertmanager.persistentVolume.enabled=false --set server.persistentVolume.enabled=false --set pushgateway.persistentVolume.enabled=false 141 | 142 | 7. 웹으로 접속 143 | $ kubectl get svc 144 | 145 | 근데 monitor-prometheus-server 가 clusterip 라서 접속할곳이 없으니... 이걸 수정해서 nodeport 로 변경 146 | 147 | $ kubectl edit svc monitor-prometheus-server (맨 아래쪽 쯤 spec.type 을 변경) 148 | 149 | $ kubectl get svc 150 | 다시 해보면 NodePort 로 바껴서 포트 포워딩 된 포트가 보임 151 | 이제 ip:port(31557)/graph 로 접속 152 | 153 | 154 | 155 | ================================== 156 | 157 | grafana 설치하기 158 | 159 | https://dev.to/reoring/deploy-prometheus-grafana-to-kubernetes-by-helm-3-1485 160 | 161 | 1. 리포 추가 (상동) 162 | $ helm repo add stable https://kubernetes-charts.storage.googleapis.com 163 | 164 | 2. Prom-Operator 설치 165 | $ helm install my-prometheus-operator stable/prometheus-operator 166 | 167 | 3. Show Pods 168 | $ kubectl --namespace default get pods -l "release=my-prometheus-operator" 169 | 170 | 4. 
Show Grafana UI 171 | $ kubectl port-forward $(kubectl get pods --selector=app=grafana --output=jsonpath="{.items..metadata.name}") 3000 172 | 173 | 끝나면 브라우저에서 열기 http://localhost:3000/ 174 | 계정 정보는 admin/prom-operator 175 | 176 | 177 | -------------------------------------------------------------------------------- /3.real-egs/networking/kube-flannel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: psp.flannel.unprivileged 6 | annotations: 7 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default 8 | seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default 9 | apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default 10 | apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default 11 | spec: 12 | privileged: false 13 | volumes: 14 | - configMap 15 | - secret 16 | - emptyDir 17 | - hostPath 18 | allowedHostPaths: 19 | - pathPrefix: "/etc/cni/net.d" 20 | - pathPrefix: "/etc/kube-flannel" 21 | - pathPrefix: "/run/flannel" 22 | readOnlyRootFilesystem: false 23 | # Users and groups 24 | runAsUser: 25 | rule: RunAsAny 26 | supplementalGroups: 27 | rule: RunAsAny 28 | fsGroup: 29 | rule: RunAsAny 30 | # Privilege Escalation 31 | allowPrivilegeEscalation: false 32 | defaultAllowPrivilegeEscalation: false 33 | # Capabilities 34 | allowedCapabilities: ['NET_ADMIN', 'NET_RAW'] 35 | defaultAddCapabilities: [] 36 | requiredDropCapabilities: [] 37 | # Host namespaces 38 | hostPID: false 39 | hostIPC: false 40 | hostNetwork: true 41 | hostPorts: 42 | - min: 0 43 | max: 65535 44 | # SELinux 45 | seLinux: 46 | # SELinux is unused in CaaSP 47 | rule: 'RunAsAny' 48 | --- 49 | kind: ClusterRole 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | metadata: 52 | name: flannel 53 | rules: 54 | - apiGroups: ['extensions'] 55 | resources: ['podsecuritypolicies'] 56 | verbs: ['use'] 57 | resourceNames: 
['psp.flannel.unprivileged'] 58 | - apiGroups: 59 | - "" 60 | resources: 61 | - pods 62 | verbs: 63 | - get 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - nodes 68 | verbs: 69 | - list 70 | - watch 71 | - apiGroups: 72 | - "" 73 | resources: 74 | - nodes/status 75 | verbs: 76 | - patch 77 | --- 78 | kind: ClusterRoleBinding 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | metadata: 81 | name: flannel 82 | roleRef: 83 | apiGroup: rbac.authorization.k8s.io 84 | kind: ClusterRole 85 | name: flannel 86 | subjects: 87 | - kind: ServiceAccount 88 | name: flannel 89 | namespace: kube-system 90 | --- 91 | apiVersion: v1 92 | kind: ServiceAccount 93 | metadata: 94 | name: flannel 95 | namespace: kube-system 96 | --- 97 | kind: ConfigMap 98 | apiVersion: v1 99 | metadata: 100 | name: kube-flannel-cfg 101 | namespace: kube-system 102 | labels: 103 | tier: node 104 | app: flannel 105 | data: 106 | cni-conf.json: | 107 | { 108 | "name": "cbr0", 109 | "cniVersion": "0.3.1", 110 | "plugins": [ 111 | { 112 | "type": "flannel", 113 | "delegate": { 114 | "hairpinMode": true, 115 | "isDefaultGateway": true 116 | } 117 | }, 118 | { 119 | "type": "portmap", 120 | "capabilities": { 121 | "portMappings": true 122 | } 123 | } 124 | ] 125 | } 126 | net-conf.json: | 127 | { 128 | "Network": "10.244.0.0/16", 129 | "Backend": { 130 | "Type": "vxlan" 131 | } 132 | } 133 | --- 134 | apiVersion: apps/v1 135 | kind: DaemonSet 136 | metadata: 137 | name: kube-flannel-ds 138 | namespace: kube-system 139 | labels: 140 | tier: node 141 | app: flannel 142 | spec: 143 | selector: 144 | matchLabels: 145 | app: flannel 146 | template: 147 | metadata: 148 | labels: 149 | tier: node 150 | app: flannel 151 | spec: 152 | affinity: 153 | nodeAffinity: 154 | requiredDuringSchedulingIgnoredDuringExecution: 155 | nodeSelectorTerms: 156 | - matchExpressions: 157 | - key: kubernetes.io/os 158 | operator: In 159 | values: 160 | - linux 161 | hostNetwork: true 162 | priorityClassName: system-node-critical 163 
| tolerations: 164 | - operator: Exists 165 | effect: NoSchedule 166 | serviceAccountName: flannel 167 | initContainers: 168 | - name: install-cni 169 | image: quay.io/coreos/flannel:v0.13.0 170 | command: 171 | - cp 172 | args: 173 | - -f 174 | - /etc/kube-flannel/cni-conf.json 175 | - /etc/cni/net.d/10-flannel.conflist 176 | volumeMounts: 177 | - name: cni 178 | mountPath: /etc/cni/net.d 179 | - name: flannel-cfg 180 | mountPath: /etc/kube-flannel/ 181 | containers: 182 | - name: kube-flannel 183 | image: quay.io/coreos/flannel:v0.13.0 184 | command: 185 | - /opt/bin/flanneld 186 | args: 187 | - --ip-masq 188 | - --kube-subnet-mgr 189 | resources: 190 | requests: 191 | cpu: "100m" 192 | memory: "50Mi" 193 | limits: 194 | cpu: "100m" 195 | memory: "50Mi" 196 | securityContext: 197 | privileged: false 198 | capabilities: 199 | add: ["NET_ADMIN", "NET_RAW"] 200 | env: 201 | - name: POD_NAME 202 | valueFrom: 203 | fieldRef: 204 | fieldPath: metadata.name 205 | - name: POD_NAMESPACE 206 | valueFrom: 207 | fieldRef: 208 | fieldPath: metadata.namespace 209 | volumeMounts: 210 | - name: run 211 | mountPath: /run/flannel 212 | - name: flannel-cfg 213 | mountPath: /etc/kube-flannel/ 214 | volumes: 215 | - name: run 216 | hostPath: 217 | path: /run/flannel 218 | - name: cni 219 | hostPath: 220 | path: /etc/cni/net.d 221 | - name: flannel-cfg 222 | configMap: 223 | name: kube-flannel-cfg 224 | -------------------------------------------------------------------------------- /3.real-egs/metallb/metallb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodSecurityPolicy 3 | metadata: 4 | labels: 5 | app: metallb 6 | name: controller 7 | namespace: metallb-system 8 | spec: 9 | allowPrivilegeEscalation: false 10 | allowedCapabilities: [] 11 | allowedHostPaths: [] 12 | defaultAddCapabilities: [] 13 | defaultAllowPrivilegeEscalation: false 14 | fsGroup: 15 | ranges: 16 | - max: 65535 17 | min: 1 18 | 
rule: MustRunAs 19 | hostIPC: false 20 | hostNetwork: false 21 | hostPID: false 22 | privileged: false 23 | readOnlyRootFilesystem: true 24 | requiredDropCapabilities: 25 | - ALL 26 | runAsUser: 27 | ranges: 28 | - max: 65535 29 | min: 1 30 | rule: MustRunAs 31 | seLinux: 32 | rule: RunAsAny 33 | supplementalGroups: 34 | ranges: 35 | - max: 65535 36 | min: 1 37 | rule: MustRunAs 38 | volumes: 39 | - configMap 40 | - secret 41 | - emptyDir 42 | --- 43 | apiVersion: policy/v1beta1 44 | kind: PodSecurityPolicy 45 | metadata: 46 | labels: 47 | app: metallb 48 | name: speaker 49 | namespace: metallb-system 50 | spec: 51 | allowPrivilegeEscalation: false 52 | allowedCapabilities: 53 | - NET_ADMIN 54 | - NET_RAW 55 | - SYS_ADMIN 56 | allowedHostPaths: [] 57 | defaultAddCapabilities: [] 58 | defaultAllowPrivilegeEscalation: false 59 | fsGroup: 60 | rule: RunAsAny 61 | hostIPC: false 62 | hostNetwork: true 63 | hostPID: false 64 | hostPorts: 65 | - max: 7472 66 | min: 7472 67 | privileged: true 68 | readOnlyRootFilesystem: true 69 | requiredDropCapabilities: 70 | - ALL 71 | runAsUser: 72 | rule: RunAsAny 73 | seLinux: 74 | rule: RunAsAny 75 | supplementalGroups: 76 | rule: RunAsAny 77 | volumes: 78 | - configMap 79 | - secret 80 | - emptyDir 81 | --- 82 | apiVersion: v1 83 | kind: ServiceAccount 84 | metadata: 85 | labels: 86 | app: metallb 87 | name: controller 88 | namespace: metallb-system 89 | --- 90 | apiVersion: v1 91 | kind: ServiceAccount 92 | metadata: 93 | labels: 94 | app: metallb 95 | name: speaker 96 | namespace: metallb-system 97 | --- 98 | apiVersion: rbac.authorization.k8s.io/v1 99 | kind: ClusterRole 100 | metadata: 101 | labels: 102 | app: metallb 103 | name: metallb-system:controller 104 | rules: 105 | - apiGroups: 106 | - '' 107 | resources: 108 | - services 109 | verbs: 110 | - get 111 | - list 112 | - watch 113 | - update 114 | - apiGroups: 115 | - '' 116 | resources: 117 | - services/status 118 | verbs: 119 | - update 120 | - apiGroups: 121 | - '' 122 
| resources: 123 | - events 124 | verbs: 125 | - create 126 | - patch 127 | - apiGroups: 128 | - policy 129 | resourceNames: 130 | - controller 131 | resources: 132 | - podsecuritypolicies 133 | verbs: 134 | - use 135 | --- 136 | apiVersion: rbac.authorization.k8s.io/v1 137 | kind: ClusterRole 138 | metadata: 139 | labels: 140 | app: metallb 141 | name: metallb-system:speaker 142 | rules: 143 | - apiGroups: 144 | - '' 145 | resources: 146 | - services 147 | - endpoints 148 | - nodes 149 | verbs: 150 | - get 151 | - list 152 | - watch 153 | - apiGroups: 154 | - '' 155 | resources: 156 | - events 157 | verbs: 158 | - create 159 | - patch 160 | - apiGroups: 161 | - policy 162 | resourceNames: 163 | - speaker 164 | resources: 165 | - podsecuritypolicies 166 | verbs: 167 | - use 168 | --- 169 | apiVersion: rbac.authorization.k8s.io/v1 170 | kind: Role 171 | metadata: 172 | labels: 173 | app: metallb 174 | name: config-watcher 175 | namespace: metallb-system 176 | rules: 177 | - apiGroups: 178 | - '' 179 | resources: 180 | - configmaps 181 | verbs: 182 | - get 183 | - list 184 | - watch 185 | --- 186 | apiVersion: rbac.authorization.k8s.io/v1 187 | kind: Role 188 | metadata: 189 | labels: 190 | app: metallb 191 | name: pod-lister 192 | namespace: metallb-system 193 | rules: 194 | - apiGroups: 195 | - '' 196 | resources: 197 | - pods 198 | verbs: 199 | - list 200 | --- 201 | apiVersion: rbac.authorization.k8s.io/v1 202 | kind: ClusterRoleBinding 203 | metadata: 204 | labels: 205 | app: metallb 206 | name: metallb-system:controller 207 | roleRef: 208 | apiGroup: rbac.authorization.k8s.io 209 | kind: ClusterRole 210 | name: metallb-system:controller 211 | subjects: 212 | - kind: ServiceAccount 213 | name: controller 214 | namespace: metallb-system 215 | --- 216 | apiVersion: rbac.authorization.k8s.io/v1 217 | kind: ClusterRoleBinding 218 | metadata: 219 | labels: 220 | app: metallb 221 | name: metallb-system:speaker 222 | roleRef: 223 | apiGroup: rbac.authorization.k8s.io 
224 | kind: ClusterRole 225 | name: metallb-system:speaker 226 | subjects: 227 | - kind: ServiceAccount 228 | name: speaker 229 | namespace: metallb-system 230 | --- 231 | apiVersion: rbac.authorization.k8s.io/v1 232 | kind: RoleBinding 233 | metadata: 234 | labels: 235 | app: metallb 236 | name: config-watcher 237 | namespace: metallb-system 238 | roleRef: 239 | apiGroup: rbac.authorization.k8s.io 240 | kind: Role 241 | name: config-watcher 242 | subjects: 243 | - kind: ServiceAccount 244 | name: controller 245 | - kind: ServiceAccount 246 | name: speaker 247 | --- 248 | apiVersion: rbac.authorization.k8s.io/v1 249 | kind: RoleBinding 250 | metadata: 251 | labels: 252 | app: metallb 253 | name: pod-lister 254 | namespace: metallb-system 255 | roleRef: 256 | apiGroup: rbac.authorization.k8s.io 257 | kind: Role 258 | name: pod-lister 259 | subjects: 260 | - kind: ServiceAccount 261 | name: speaker 262 | --- 263 | apiVersion: apps/v1 264 | kind: DaemonSet 265 | metadata: 266 | labels: 267 | app: metallb 268 | component: speaker 269 | name: speaker 270 | namespace: metallb-system 271 | spec: 272 | selector: 273 | matchLabels: 274 | app: metallb 275 | component: speaker 276 | template: 277 | metadata: 278 | annotations: 279 | prometheus.io/port: '7472' 280 | prometheus.io/scrape: 'true' 281 | labels: 282 | app: metallb 283 | component: speaker 284 | spec: 285 | containers: 286 | - args: 287 | - --port=7472 288 | - --config=config 289 | env: 290 | - name: METALLB_NODE_NAME 291 | valueFrom: 292 | fieldRef: 293 | fieldPath: spec.nodeName 294 | - name: METALLB_HOST 295 | valueFrom: 296 | fieldRef: 297 | fieldPath: status.hostIP 298 | - name: METALLB_ML_BIND_ADDR 299 | valueFrom: 300 | fieldRef: 301 | fieldPath: status.podIP 302 | - name: METALLB_ML_LABELS 303 | value: "app=metallb,component=speaker" 304 | - name: METALLB_ML_NAMESPACE 305 | valueFrom: 306 | fieldRef: 307 | fieldPath: metadata.namespace 308 | - name: METALLB_ML_SECRET_KEY 309 | valueFrom: 310 | secretKeyRef: 
311 | name: memberlist 312 | key: secretkey 313 | image: metallb/speaker:v0.9.3 314 | imagePullPolicy: Always 315 | name: speaker 316 | ports: 317 | - containerPort: 7472 318 | name: monitoring 319 | resources: 320 | limits: 321 | cpu: 100m 322 | memory: 100Mi 323 | securityContext: 324 | allowPrivilegeEscalation: false 325 | capabilities: 326 | add: 327 | - NET_ADMIN 328 | - NET_RAW 329 | - SYS_ADMIN 330 | drop: 331 | - ALL 332 | readOnlyRootFilesystem: true 333 | hostNetwork: true 334 | nodeSelector: 335 | beta.kubernetes.io/os: linux 336 | serviceAccountName: speaker 337 | terminationGracePeriodSeconds: 2 338 | tolerations: 339 | - effect: NoSchedule 340 | key: node-role.kubernetes.io/master 341 | --- 342 | apiVersion: apps/v1 343 | kind: Deployment 344 | metadata: 345 | labels: 346 | app: metallb 347 | component: controller 348 | name: controller 349 | namespace: metallb-system 350 | spec: 351 | revisionHistoryLimit: 3 352 | selector: 353 | matchLabels: 354 | app: metallb 355 | component: controller 356 | template: 357 | metadata: 358 | annotations: 359 | prometheus.io/port: '7472' 360 | prometheus.io/scrape: 'true' 361 | labels: 362 | app: metallb 363 | component: controller 364 | spec: 365 | containers: 366 | - args: 367 | - --port=7472 368 | - --config=config 369 | image: metallb/controller:v0.9.3 370 | imagePullPolicy: Always 371 | name: controller 372 | ports: 373 | - containerPort: 7472 374 | name: monitoring 375 | resources: 376 | limits: 377 | cpu: 100m 378 | memory: 100Mi 379 | securityContext: 380 | allowPrivilegeEscalation: false 381 | capabilities: 382 | drop: 383 | - all 384 | readOnlyRootFilesystem: true 385 | nodeSelector: 386 | beta.kubernetes.io/os: linux 387 | securityContext: 388 | runAsNonRoot: true 389 | runAsUser: 65534 390 | serviceAccountName: controller 391 | terminationGracePeriodSeconds: 0 392 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # 개요 2 | ## 쿠버네티스란? 3 | - (생략) 수업 강의자료 참조 4 | - 또는 공식 홈페이지 참조 5 | - https://kubernetes.io/ko/docs/home/ 6 | 7 | ## 쿠버네티스 유형 8 | - Kubernetes (K8s) Original 9 | - Minikube : Single node K8s cluster inside a VM (or docker or host) 10 | - MicroK8s : Low-ops, light-weight, minimal production K8s (single/multi nodes) 11 | 12 | 13 | # 쿠버네티스 설치 14 | ## K8s original 15 | - (생략) 16 | 17 | ## Minikube 18 | - 공식가이드 19 | - https://v1-18.docs.kubernetes.io/ko/docs/tasks/tools/install-minikube/ 20 | - 수동설치 (바이너리) 21 | - 주의 : PC환경에 따라 아키텍처 다른 것 받아야 함 22 | - x86-64 기준 23 | ```bash 24 | 1.18.0 특정 버전 25 | > curl -Lo minikube https://storage.googleapis.com/minikube/releases/v1.18.0/minikube-linux-amd64 26 | 27 | 최신 버전 (v1.22.0 등) 28 | > curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube 29 | 30 | > sudo mkdir -p /usr/local/bin/ 31 | > sudo install minikube /usr/local/bin/ 32 | ``` 33 | - MAC 기준 34 | ```bash 35 | > curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 && chmod +x minikube 36 | ``` 37 | - 실행 38 | - 시작 (도커 설치 시, 기본 드라이버 = docker, 그 외 none, qemu, ssh 등) 39 | - ` minikube start --driver= ` 40 | - 고급 옵션 41 | - 메모리 증설 후 시작 42 | ```bash 43 | minikube stop 44 | minikube start --cpus 2 --memory 4096 45 | (또는 시작 전 설정파일 생성) minikube config set memory 4096 46 | ``` 47 | - 멀티 노트로 시작 48 | ```bash 49 | minikube start --nodes 2 -p multinode 50 | minikube status -p multinode 51 | (-p 옵션은 프로파일 신규 생성으로 옵셔널 필드임) 52 | ``` 53 | - 다른 사용자 계정으로부터 현재 노드에 접속 54 | ```bash 55 | minikube kubectl -- config view --flatten > minikube-config 56 | # 클라이언트 노드 57 | mkdir -p /home/$USER/.kube 58 | cp minikube-config /home/$USER/.kube/config 59 | ``` 60 | - 상태 61 | - ` minikube status ` 62 | - 중지(종료) 63 | - ` minikube stop ` 64 | - 삭제 65 | - ` minikube delete ` 66 | - 내부 설정 확인 (minikube 컨테이너) 67 | ```bash 68 | > 
minikube kubectl get nodes 69 | > docker ps 70 | > minikube ssh 71 | > docker ps 72 | ``` 73 | - 설치환경 참고사항 74 | - 리눅스에 설치 시 : 호스트와 동일한 레벨 75 | - MAC에 설치 시 : HyperKit 아래 FreeBSD 에 설치 76 | - 윈도우에 설치 시 : HyperV 아래 Ubuntu 에 설치 (WSL의 경우) - 또는 VirtualBox 아래 우분투 77 | 78 | ### 확장팩 설치 79 | - Addons 를 통한 추가 기능 활성화 80 | - 목록 확인 : ` minikube addons list ` 81 | - 추가 활성화1 : ` minikube addons enable dashboard ` 82 | - 추가 활성화2 : ` minikube addons enable metrics-server ` 83 | - 비활성화 : ` minikube addons disable metrics-server ` 84 | - 대시보드 접속 : ` minikube dashboard --url ` 85 | 86 | ## Microk8s 87 | - 공식가이드 88 | - https://microk8s.io/docs 89 | 90 | 91 | # 쿠버네티스 명령어 92 | ## kubectl 설치 93 | ### 우부투 snap 방식 94 | - 설치 명령어 95 | - ` snap install kubectl ` 96 | 97 | ### apt 패키지를 통한 설치 방식 98 | - 공식 사이트 99 | - https://kubernetes.io/ko/docs/tasks/tools/install-kubectl-linux/ 100 | - 패키지 관리자를 통한 설치 101 | 1. apt 패키지 색인을 업데이트하고 쿠버네티스 apt 리포지터리를 사용하는 데 필요한 패키지들을 설치한다. 102 | - ` sudo apt-get update ` 103 | - ` sudo apt-get install -y apt-transport-https ca-certificates curl ` 104 | 2. 구글 클라우드 공개 사이닝 키를 다운로드한다. 105 | - ` sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg ` 106 | 3. 쿠버네티스 apt 리포지터리를 추가한다. 107 | - ` echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list ` 108 | 4. 새 리포지터리의 apt 패키지 색인을 업데이트하고 kubectl을 설치한다. 
109 | - ` sudo apt-get update ` 110 | - ` sudo apt-get install -y kubectl ` 111 | 112 | ### 수동설치 (바이너리) 방식 113 | - 설치 명령어 (최신버전) 114 | - ```bash 115 | > curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 116 | > curl -LO "https://dl.k8s.io/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl.sha256" 117 | > echo "$(<kubectl.sha256)  kubectl" | sha256sum --check 118 | > sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 119 | ``` 120 | - 설치 명령어 (특정버전 - OLD) 121 | - ```bash 122 | > curl -LO https://dl.k8s.io/release/v1.20.0/bin/linux/amd64/kubectl 123 | > curl -LO https://dl.k8s.io/release/v1.22.0/bin/linux/amd64/kubectl 124 | > curl -LO https://dl.k8s.io/release/v1.26.0/bin/linux/amd64/kubectl 125 | > sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 126 | ``` 127 | 128 | ## 설치관련 각종 이슈 129 | - 쿠버네티스 (및 minikube) 는 지속적으로 버전이 올라가고 있으며, 다양한 이슈들이 해결되고는 있음. 매우 빠르게 변화하는 소프트웨어 중 하나임. 130 | - 따라서, 버전에 따른 다양한 동작 차이가 발생 할 수 있으며, 커널 버전 (리눅스/우분투) 등과도 미스매치가 있으면 이슈가 발생 할 수 있으며, minikube 버전과 kubectl 버전이 너무 차이가 많이 있을 경우에도 각종 문제가 발생 할 수 있음. (따라서 실습 환경을 해당 시점에서 잘 맞추어야 함) 131 | - kubectl get pods -A 통해서 모든 서비스 데몬 정상인지 확인 (crashLoopBackOff 없어야 함) 132 | - 예) Minikube v1.18 과 Ubuntu 16.04 는 큰 이슈가 없었으나, Ubuntu 20.04 에서는 kernel 버전이 올라가며 nf_conntrack_max 설정 관련 이슈가 있음. 133 | - 임시 방편으로는 sudo sysctl net/netfilter/nf_conntrack_max=131072 로 설정해두고 minikube 를 실행하여 해결 (부팅시 자동 적용은 /etc/sysctl.conf), 134 | - 또는 kubectl edit configmap kube-proxy --namespace=kube-system 를 통해 maxPerCore 를 null -> 0 으로 변경해서 해결, 135 | - 또는 최신 버전의 minikube 로 업그레이드 해야 한다.
136 | 137 | ## kubectl 사용법 (명령어) 138 | - https://kubernetes.io/ko/docs/reference/kubectl/cheatsheet/ 139 | 140 | ### kubectl 명령어 (기초) 141 | - 명령어(command) 유형 142 | - 정보 조회 : ` kubectl get ` 143 | - 생성 : ` kubectl create ` 144 | - 생성(pod) : ` kubectl run ` 145 | - 생성(service) : ` kubectl expose ` 146 | - 로그 : ` kubectl logs ` 147 | - 삭제 : ` kubectl delete ` 148 | - 컨트롤러 유형 149 | - 파드(pod) : ` pods = po ` 150 | - ` kubectl get pods ` 151 | - ` kubectl get po ` 152 | - 리플리카셋(replicaset) : ` replicasets = rs ` 153 | - ` kubectl get replicasets ` 154 | - ` kubectl get rs ` 155 | - 디플로이먼트(deployment) : ` deployment = deploy ` 156 | - ` kubectl get deployments ` 157 | - ` kubectl get deploy ` 158 | - 서비스 유형 159 | - 서비스(service) : ` service = svc ` 160 | - ` kubectl get services ` 161 | - ` kubectl get svc ` 162 | - 네임스페이스 163 | - 네임스페이스(namespace) : ` namespace = ns ` 164 | - 다수의 서비스 조회 165 | - ` kubectl get deploy,rc,pods ` 166 | 167 | ### kubectl 명령어 (일반) 168 | - 컨트롤러 유형 169 | - 스테이트풀셋(statefulset) : ` statefulset = sts ` 170 | - 데몬셋(daemonset) : ` daemonset = ds ` 171 | - 잡(job) : ` job ` 172 | - 크론잡(cronjob) : ` cronjob ` 173 | - 서비스 유형 174 | - 엔드포인트(endpoint) : ` endpoint = ep ` 175 | - 인그레스(ingress) : ` ingress ` 176 | - 볼륨 유형 177 | - 볼륨(persistentvolume) : ` persistentvolume = pv ` 178 | - 볼륨요청(persistentvolumeclaim) : `persistentvolumeclaim = pvc ` 179 | - 스토리지(storageclass) : ` storageclass ` 180 | - 설정 181 | - 설정(configmap) : ` configmap = cm ` 182 | - 시크릿(secret) : ` secret ` 183 | 184 | ### kubectl 디버깅 185 | - 이벤트 상태 로그 : ` kubectl get events ` 186 | 187 | ### 쿠버네티스 환경 기본 확인 188 | - 클러스터 확인 189 | - ` kubectl cluster-info ` 190 | - 설정 확인 (멀티 클러스터 사용 시) 191 | - ` kubectl config view ` 192 | - https://kubernetes.io/ko/docs/concepts/configuration/organize-cluster-access-kubeconfig/ 193 | 194 | ## 쿠버네티스 활용/운영 195 | - https://kubernetes.io/docs/tutorials/hello-minikube/ 196 | 197 | ### 헬로월드 198 | - 기본 서버 배포 (맥북 M1에서는 해당 컨테이너 이미지가 실행되지 않음 - 이미지 빌드 아키텍처 차이) 199 | - `
kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.10 ` 200 | - ` kubectl expose deployment hello-minikube --type=NodePort --port=8080 ` 201 | 202 | - 배포된 내용 확인 203 | - 기본 배포 컨테이너 확인 204 | - ` kubectl get pod ` 205 | - 컨테이너 내에서 직접 접속 확인 206 | - ` kubectl exec hello-minikube-xxxxxxxx -- curl localhost:8080 ` 207 | - 서비스 확인 208 | - ` kubectl get svc ` 209 | - 기본 배포 컨테이너 전체 확인 210 | - ` kubectl get all ` 211 | 212 | - 서비스 접속을 위한 다양한 인터페이스 213 | 1. 서비스 접속하기 (맥북에서는 특히 유용함 - 노드의 IP가 외부에 노출되지 않음으로) 214 | - ` minikube service hello-minikube --url ` 215 | 2. 호스트 포트 포워딩 (pod) 216 | - ` kubectl port-forward hello-minikube-64b64df8c9-4rpfp 8080:8080 ` 217 | 3. 호스트 포트 포워딩 (svc) 218 | - ` kubectl port-forward svc/hello-minikube 8080:8080 ` 219 | 220 | - 배포한 서비스 모두 삭제 221 | - ` kubectl delete deploy,svc hello-minikube ` 222 | 223 | ### 헬로 노드JS #1 224 | - 컨테이너 pod 형태로 배포 및 서비스 확인 225 | ```bash 226 | > kubectl run nodejs --image=lovehyun/express-app:1.0 --port=8000 227 | pod/nodejs created 228 | 229 | > kubectl get pods 230 | NAME READY STATUS RESTARTS AGE 231 | nodejs 1/1 Running 0 24s 232 | 233 | > kubectl logs nodejs 234 | Express is ready at localhost:8000 235 | 236 | > kubectl exec nodejs -- curl 127.0.0.1:8000 --silent 237 | Hello Express 238 | 239 | > kubectl expose pod/nodejs --type=NodePort --name nodejs-svc 240 | service/nodejs-svc exposed 241 | 242 | > kubectl get svc 243 | nodejs-svcc NodePort 10.103.4.59 8080:32681/TCP 4s 244 | 245 | > minikube service nodejs-svc --url 246 | http://192.168.49.2:32681 247 | 248 | > curl 192.168.49.2:32681 249 | Hello Express 250 | 251 | > VSCode 사용 시 포트포워딩 추가 (192.168.49.2:32681 <- localhost:32681) 후 웹브라우저에서 확인 252 | ``` 253 | 254 | ### 헬로 노드JS #2 255 | - 컨테이너 deployment 형태로 배포 및 서비스 확인 256 | ```bash 257 | > kubectl create deployment nodejs --image=lovehyun/express-app:1.1 --port=8000 258 | deployment.apps/nodejs created 259 | 260 | > kubectl get deployments 261 | NAME READY UP-TO-DATE AVAILABLE AGE 262 | nodejs 
1/1 1 1 7s 263 | 264 | > kubectl get replicasets 265 | NAME DESIRED CURRENT READY AGE 266 | nodejs-775cf96dc5 1 1 1 59s 267 | 268 | > kubectl get pods 269 | NAME READY STATUS RESTARTS AGE 270 | nodejs-775cf96dc5-6qp5k 1/1 Running 0 2m30s 271 | 272 | > kubectl expose deployment/nodejs --type=NodePort --name nodejs-svc 273 | service/nodejs-svc exposed 274 | 275 | > kubectl get svc 276 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 277 | nodejs-svc NodePort 10.97.135.189 8080:30518/TCP 4s 278 | 279 | > kubectl get nodes -o wide 280 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME 281 | minikube Ready control-plane,master 5d v1.21.2 192.168.49.2 Ubuntu 20.04.2 LTS 5.11.0-25-generic docker://20.10.7 282 | 283 | > minikube service nodejs-svc --url 284 | http://192.168.49.2:30518 285 | 286 | > curl 192.168.49.2:30518 287 |

Welcome to nodejs-775cf96dc5-6qp5k

288 | 289 | > kubectl scale deployment/nodejs --replicas=2 290 | deployment.apps/nodejs scaled 291 | 292 | > kubectl get replicasets 293 | NAME DESIRED CURRENT READY AGE 294 | nodejs-775cf96dc5 2 2 2 4m9s 295 | 296 | > kubectl get pods 297 | NAME READY STATUS RESTARTS AGE 298 | nodejs-775cf96dc5-6qp5k 1/1 Running 0 4m13s 299 | nodejs-775cf96dc5-f6nwl 1/1 Running 0 14s 300 | 301 | > curl 192.168.49.2:30518 302 |

Welcome to nodejs-775cf96dc5-6qp5k

303 | 304 | > curl 192.168.49.2:30518 305 |

Welcome to nodejs-775cf96dc5-f6nwl

306 | 307 | > VSCode 사용 시 포트포워딩 추가 (192.168.49.2:30518 <- localhost:30518) 후 웹브라우저에서 확인 308 | ``` 309 | 310 | - 확인사항 (self-healing) 311 | ```bash 312 | > kubectl get po -w (watch) 313 | > kubectl delete po nodejs-xxxx 314 | 315 | > kubectl get rs -w (watch) 316 | > kubectl delete rs nodejs-xxxx 317 | ``` 318 | 319 | ### 헬로 노드JS #3 - 리소스 업데이트 (롤아웃/롤백) 320 | - 버전 변경 배포 (1.1 -> 1.2) 321 | ```bash 322 | > kubectl describe deployment/nodejs | grep Image 323 | 324 | > kubectl set image deployment/nodejs express-app=lovehyun/express-app:1.2 325 | 326 | # 현 리비전을 포함한 디플로이먼트 이력 출력 327 | > kubectl rollout history deployment/nodejs 328 | deployment.apps/nodejs 329 | REVISION CHANGE-CAUSE 330 | 1 331 | 2 332 | 333 | # 이전 디플로이먼트로 롤백 (1.2 -> 1.1) 334 | > kubectl rollout undo deployment/nodejs 335 | deployment.apps/nodejs rolled back 336 | 337 | > kubectl rollout history deployment/nodejs 338 | deployment.apps/nodejs 339 | REVISION CHANGE-CAUSE 340 | 2 341 | 3 342 | 343 | # 특정 리비전으로 롤백 (1.1 -> 1.2) 344 | > kubectl rollout undo deployment/nodejs --to-revision=2 345 | deployment.apps/nodejs rolled back 346 | 347 | > kubectl rollout history deployment/nodejs 348 | deployment.apps/nodejs 349 | REVISION CHANGE-CAUSE 350 | 3 351 | 4 kubectl delete deployment/nodejs 355 | ``` 356 | 357 | ### 헬로 노드JS #4 - 확장(오토스케일링) 358 | - HPA (HorizontalPodAutoscaler) 사용해서 확장 359 | metric-server 동작하지 않을 경우 (kubectl top pods) 360 | ```bash 361 | > minikube start --extra-config=kubelet.housekeeping-interval=10s 362 | ``` 363 | 364 | ```bash 365 | > minikube addons enable metrics-server 366 | 367 | > kubectl -n kube-system rollout status deployment metrics-server 368 | deployment "metrics-server" successfully rolled out 369 | 370 | > kubectl create deployment nodejs --image=lovehyun/express-app:latest --port=8000 371 | deployment.apps/nodejs created 372 | 373 | > kubectl expose deployment/nodejs --type=NodePort --name nodejs-svc 374 | service/nodejs-svc exposed 375 | 376 | > kubectl get svc 377 | NAME TYPE 
CLUSTER-IP EXTERNAL-IP PORT(S) AGE 378 | nodejs-svc NodePort 10.97.135.189 8080:30518/TCP 4s 379 | 380 | > kubectl top pods 381 | NAME CPU(cores) MEMORY(bytes) 382 | nodejs-66c754554d-bc7vz 0m 1Mi 383 | 384 | > kubectl autoscale deployment/nodejs --cpu-percent=70 --min=1 --max=5 385 | horizontalpodautoscaler.autoscaling/nodejs autoscaled 386 | 387 | > kubectl get hpa 388 | NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE 389 | nodejs Deployment/nodejs /70% 1 5 0 7s 390 | 391 | > kubectl top pods 392 | error: Metrics not available for pod default/nodejs-66c754554d-bc7vz, age: 2m12.737349993s 393 | 394 | # 1 core = 1000m, 2 core = 2000m, recommand <100m 395 | > kubectl set resources deployment/nodejs --requests=cpu=50m --limits=cpu=50m,memory=64Mi 396 | deployment.apps/nodejs resource requirements updated 397 | 398 | # Jobs 1~3 399 | > while true; do curl 192.168.49.2:31410 --silent >/dev/null; done & 400 | # 확인 및 중지 401 | > jobs 402 | > kill %1 %2 %3 403 | 404 | > kubectl get hpa -w 405 | NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE 406 | nodejs Deployment/nodejs 0%/70% 1 10 1 19h 407 | nodejs Deployment/nodejs 14%/70% 1 10 1 19h 408 | nodejs Deployment/nodejs 85%/70% 1 10 1 19h 409 | nodejs Deployment/nodejs 92%/70% 1 10 1 19h 410 | nodejs Deployment/nodejs 61%/70% 1 10 2 19h 411 | nodejs Deployment/nodejs 45%/70% 1 10 2 19h 412 | 413 | # Clean-up (모두 삭제) 414 | > kubectl delete hpa nodejs 415 | > kubectl delete deploy/nodejs 416 | > kubectl delete svc/nodejs-svc 417 | ``` 418 | 419 | ## 메뉴얼 (한글) 420 | - https://kubernetes.io/ko/docs/home/ 421 | 422 | ## kubectl 설정 및 환경변수 423 | ### kubeconfig 환경 변수 424 | - kubeconfig 란? 
425 | - ` $HOME/.kube/config ` 파일을 통해 클러스터, 인증, 컨텍스트 정보 확인 426 | - 클러스터 내 사용 가능한 자원의 목록 427 | - ` kubectl api-resources ` 428 | - 접속 가능한 컨텍스트 정보 확인 429 | - ` kubectl config get-contexts ` 430 | - ```bash 431 | CURRENT NAME CLUSTER AUTHINFO NAMESPACE 432 | * minikube minikube minikube default 433 | ``` 434 | - 원하는 컨텍스트로 변경 435 | - ` kubectl config use-context minikube ` 436 | - ` kubectl config current-context ` 437 | - ` kubectl config set-context minikube --namespace=my-namespace ` 438 | - or (버전에따라) `kubectl config set-context --current --namespace=my-namespace ` 439 | - 현재 컨텍스트 보기 440 | - ` kubectl config view ` 441 | - K8s 클라우드 서비스 접속하기 442 | - Amazon EKS (AWS K8s 서비스) 접속하기 : https://docs.aws.amazon.com/ko_kr/eks/latest/userguide/create-kubeconfig.html 443 | - Google GKE (GCP K8s 서비스) 접속하기 : https://cloud.google.com/kubernetes-engine/docs/how-to/cluster-access-for-kubectl 444 | - Azure AKS (Azure K8s 서비스) 접속하기 : https://docs.microsoft.com/ko-kr/azure/aks/control-kubeconfig-access 445 | 446 | - 명령어 실행 시 한시적으로 다른 컨텍스트에 명령어 요청 447 | - ` kubectl --kubeconfig= get pods ` 448 | - bash 쉘 자동완성 449 | - ` echo 'source <(kubectl completion bash)' >> ~/.bashrc ` 450 | - 클러스터 노드들의 IP 주소 확인 451 | - ` kubectl get nodes -o wide ` 452 | - ` kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type=="InternalIP") | .address' ` 453 | 454 | ### kubectl 설정파일 (yaml 포맷) 455 | - https://kubernetes.io/ko/docs/concepts/overview/working-with-objects/kubernetes-objects/ 456 | - YAML 의 자료형 457 | - 주석 : # 458 | - 여러파일 구분자 : --- 459 | - 기본 템플릿 460 | - ```bash 461 | apiVersion: v1 462 | kind: Pod 463 | metadata: 464 | spec: 465 | ``` 466 | - Scalars(strings/numbers) 467 | - ```bash 468 | Name: shpark 469 | Year: 2021 470 | ``` 471 | - Sequence(arrays/lists) 472 | - ```bash 473 | MySpecs: 474 | - item1 475 | - item2 476 | - item3 477 | ``` 478 | - Mappings(hashes/dictionaries) 479 | - ```bash 480 | Score: 481 | Math: 100 482 | Eng: 90 483 | ``` 484 | 485 | ### kubectl 명령어
디버깅 486 | - 로그레벨 변경을 통한 상세 로그 확인 487 | - ` kubectl get pods --v=7 ` 488 | - v=3 : 변경 사항에 대한 확장 정보 489 | - v=4 : 디버그 수준 상세화 490 | - v=5 : 트레이스 수준 상세화 491 | - v=6 : 요청한 리소스 표시 492 | - v=7 : HTTP 요청 헤더를 표시 493 | - v=8 : HTTP 요청 내용을 표시 494 | - v=9 : HTTP 요청 내용을 생략 없이 모두 표시 495 | 496 | 497 | # 강의자료 소개 498 | ## 실습 499 | - examples 디렉토리의, 아래 개별 디렉토리 내의 README.txt 참고 500 | 501 | ### 01. pod 502 | - 파드 생성 (실제로는 이렇게 배포하지 않음(개념 학습용), 실제로는 deployment 를 사용함) 503 | - ` README.txt ` 참고 504 | 505 | ### 02. replicaset 506 | - 리플리카셋 생성 (상동) 507 | 508 | ### 03. svc 509 | - 서비스 컨트롤러 510 | - imperative 명렁어 (deploy/rs/pod 등 다양하게 expose 가능) 511 | - ```bash 512 | kubectl expose deployment nginx-app --type=NodePort 513 | kubectl get service 514 | kubectl describe service nginx-app 515 | 516 | curl localhost:31000 517 | ``` 518 | - 서비스 포트 타입 519 | - ClusterIP : 기본 타입이며 클러스터 내에서만 사용 가능 (외부 접속 불가) 520 | - NodePort : 모든 노드의 지정된 포트 할당 (외부에서 클러스터 안으로 접속 가능) 521 | - LoadBalancer : 퍼블릭 클라우드 또는 로드밸런서 장비가 있는 경우 사용 가능 (External-IP 로 표시) 522 | - ExternalName : 클러스터 안에서 외부로 접근할 때 사용 (도메인 주소로 응답) 523 | 524 | ### 04. 
deployment 525 | - 디플로이먼트 컨트롤러 526 | - imperative 명령어 527 | - ```bash 528 | kubectl run nginx-app --image nginx --port=80 529 | kubectl get pods 530 | kubectl get deployments 531 | 532 | kubectl scale deploy nginx-app --replicas=3 533 | kubectl get pods 534 | kubectl get deployments 535 | 536 | kubectl edit deployments nginx-app 537 | 538 | kubectl delete pod nginx-app-xxxxxxxx 539 | kubectl delete deployments nginx-app 540 | 541 | kubectl set image deploy/nginx-app nginx-app=nginx:1.11 542 | 543 | kubectl rollout history deploy nginx-app 544 | kubectl rollout history deploy nginx-app --revision=3 545 | kubectl rollout undo deploy nginx-app 546 | kubectl rollout undo deploy nginx-app --to-revision=3 547 | kubectl rollout pause deploy/nginx-app 548 | kubectl rollout resume deploy/nginx-app 549 | ``` 550 | - declarative 명령어 551 | - ```bash 552 | kubectl create -f 1.nginx-deployment.yml 553 | or 554 | kubectl apply -f 1.nginx-deployment.yml 555 | ``` 556 | 557 | ### 05. ingress 558 | - 인그레스 서비스 559 | 560 | ### 06. statefulset 561 | - 상태관리 서비스 562 | - 상태가 있는 파드들의 관리 (볼륨을 사용해서 특정 데이터를 저장) 563 | 564 | ### 07. daemonset 565 | - 노드별 서비스 566 | - 클러스터 내 모든 노드에 파드 배포 (예, 모니터링 / 로그 수집 등) 567 | 568 | ### 08. job 569 | - job 및 cronjob 570 | - 다중 작업 실행 또는 정해진 날짜/시간에 정기적으로 수행하는 파드들의 생성 571 | 572 | ### 10. volume 573 | - 볼륨 574 | 575 | ### 11. configmap 576 | - 컨피그맵 577 | 578 | ### 12. secret 579 | - 시크릿 580 | 581 | ### 20. namespace 582 | - 작업공간 관리 583 | *보안에 관심이 있는 학생들은 20.namespace 부터 시작, 그렇지 않으면 1.pod 부터 시작* 584 | 585 | ### 21. role 586 | - 사용자 계정 확인 587 | - ` cat ~/.kube/config ` 588 | - 서비스 계정 확인 589 | - ` kubectl get serviceaccount ` 590 | - ` kubectl get serviceaccount default -o yaml ` 591 | - ` kubectl describe secret default-token-xxxxx ` 592 | 593 | ### 22. network_policy 594 | - 네트워크 595 | 596 | ### 30.
helm 597 | - 헬름 차트를 통한 쿠버네티스 패키지 설치 598 | 599 | ### demo1 600 | - wordpress 배포 예제 601 | 602 | ### demo2 603 | - mongodb 배포 예제 604 | -------------------------------------------------------------------------------- /10.devel/k8s/node/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "node", 3 | "version": "1.0.0", 4 | "lockfileVersion": 1, 5 | "requires": true, 6 | "dependencies": { 7 | "@kubernetes/client-node": { 8 | "version": "0.15.0", 9 | "resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.15.0.tgz", 10 | "integrity": "sha512-AnEcsWWadl5IWOzzvO/gWpTnJb1d1CzA/rbV/qK1c0fD1SOxTDPj6jFllyQ9icGDfCgNw3TafZftmuepm6z9JA==", 11 | "requires": { 12 | "@types/js-yaml": "^3.12.1", 13 | "@types/node": "^10.12.0", 14 | "@types/request": "^2.47.1", 15 | "@types/stream-buffers": "^3.0.3", 16 | "@types/tar": "^4.0.3", 17 | "@types/underscore": "^1.8.9", 18 | "@types/ws": "^6.0.1", 19 | "byline": "^5.0.0", 20 | "execa": "5.0.0", 21 | "isomorphic-ws": "^4.0.1", 22 | "js-yaml": "^3.13.1", 23 | "jsonpath-plus": "^0.19.0", 24 | "openid-client": "^4.1.1", 25 | "request": "^2.88.0", 26 | "rfc4648": "^1.3.0", 27 | "shelljs": "^0.8.4", 28 | "stream-buffers": "^3.0.2", 29 | "tar": "^6.0.2", 30 | "tmp-promise": "^3.0.2", 31 | "tslib": "^1.9.3", 32 | "underscore": "^1.9.1", 33 | "ws": "^7.3.1" 34 | } 35 | }, 36 | "@panva/asn1.js": { 37 | "version": "1.0.0", 38 | "resolved": "https://registry.npmjs.org/@panva/asn1.js/-/asn1.js-1.0.0.tgz", 39 | "integrity": "sha512-UdkG3mLEqXgnlKsWanWcgb6dOjUzJ+XC5f+aWw30qrtjxeNUSfKX1cd5FBzOaXQumoe9nIqeZUvrRJS03HCCtw==" 40 | }, 41 | "@sindresorhus/is": { 42 | "version": "4.0.1", 43 | "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.0.1.tgz", 44 | "integrity": "sha512-Qm9hBEBu18wt1PO2flE7LPb30BHMQt1eQgbV76YntdNk73XZGpn3izvGTYxbGgzXKgbCjiia0uxTd3aTNQrY/g==" 45 | }, 46 | "@szmarczak/http-timer": { 47 | "version": "4.0.6", 48 | "resolved": 
"https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.6.tgz", 49 | "integrity": "sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==", 50 | "requires": { 51 | "defer-to-connect": "^2.0.0" 52 | } 53 | }, 54 | "@types/cacheable-request": { 55 | "version": "6.0.2", 56 | "resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.2.tgz", 57 | "integrity": "sha512-B3xVo+dlKM6nnKTcmm5ZtY/OL8bOAOd2Olee9M1zft65ox50OzjEHW91sDiU9j6cvW8Ejg1/Qkf4xd2kugApUA==", 58 | "requires": { 59 | "@types/http-cache-semantics": "*", 60 | "@types/keyv": "*", 61 | "@types/node": "*", 62 | "@types/responselike": "*" 63 | } 64 | }, 65 | "@types/caseless": { 66 | "version": "0.12.2", 67 | "resolved": "https://registry.npmjs.org/@types/caseless/-/caseless-0.12.2.tgz", 68 | "integrity": "sha512-6ckxMjBBD8URvjB6J3NcnuAn5Pkl7t3TizAg+xdlzzQGSPSmBcXf8KoIH0ua/i+tio+ZRUHEXp0HEmvaR4kt0w==" 69 | }, 70 | "@types/http-cache-semantics": { 71 | "version": "4.0.1", 72 | "resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.1.tgz", 73 | "integrity": "sha512-SZs7ekbP8CN0txVG2xVRH6EgKmEm31BOxA07vkFaETzZz1xh+cbt8BcI0slpymvwhx5dlFnQG2rTlPVQn+iRPQ==" 74 | }, 75 | "@types/js-yaml": { 76 | "version": "3.12.7", 77 | "resolved": "https://registry.npmjs.org/@types/js-yaml/-/js-yaml-3.12.7.tgz", 78 | "integrity": "sha512-S6+8JAYTE1qdsc9HMVsfY7+SgSuUU/Tp6TYTmITW0PZxiyIMvol3Gy//y69Wkhs0ti4py5qgR3uZH6uz/DNzJQ==" 79 | }, 80 | "@types/keyv": { 81 | "version": "3.1.2", 82 | "resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.2.tgz", 83 | "integrity": "sha512-/FvAK2p4jQOaJ6CGDHJTqZcUtbZe820qIeTg7o0Shg7drB4JHeL+V/dhSaly7NXx6u8eSee+r7coT+yuJEvDLg==", 84 | "requires": { 85 | "@types/node": "*" 86 | } 87 | }, 88 | "@types/minipass": { 89 | "version": "3.1.0", 90 | "resolved": "https://registry.npmjs.org/@types/minipass/-/minipass-3.1.0.tgz", 91 | "integrity": 
"sha512-b2yPKwCrB8x9SB65kcCistMoe3wrYnxxt5rJSZ1kprw0uOXvhuKi9kTQ746Y+Pbqoh+9C0N4zt0ztmTnG9yg7A==", 92 | "requires": { 93 | "@types/node": "*" 94 | } 95 | }, 96 | "@types/node": { 97 | "version": "10.17.60", 98 | "resolved": "https://registry.npmjs.org/@types/node/-/node-10.17.60.tgz", 99 | "integrity": "sha512-F0KIgDJfy2nA3zMLmWGKxcH2ZVEtCZXHHdOQs2gSaQ27+lNeEfGxzkIw90aXswATX7AZ33tahPbzy6KAfUreVw==" 100 | }, 101 | "@types/request": { 102 | "version": "2.48.7", 103 | "resolved": "https://registry.npmjs.org/@types/request/-/request-2.48.7.tgz", 104 | "integrity": "sha512-GWP9AZW7foLd4YQxyFZDBepl0lPsWLMEXDZUjQ/c1gqVPDPECrRZyEzuhJdnPWioFCq3Tv0qoGpMD6U+ygd4ZA==", 105 | "requires": { 106 | "@types/caseless": "*", 107 | "@types/node": "*", 108 | "@types/tough-cookie": "*", 109 | "form-data": "^2.5.0" 110 | } 111 | }, 112 | "@types/responselike": { 113 | "version": "1.0.0", 114 | "resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.0.tgz", 115 | "integrity": "sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA==", 116 | "requires": { 117 | "@types/node": "*" 118 | } 119 | }, 120 | "@types/stream-buffers": { 121 | "version": "3.0.4", 122 | "resolved": "https://registry.npmjs.org/@types/stream-buffers/-/stream-buffers-3.0.4.tgz", 123 | "integrity": "sha512-qU/K1tb2yUdhXkLIATzsIPwbtX6BpZk0l3dPW6xqWyhfzzM1ECaQ/8faEnu3CNraLiQ9LHyQQPBGp7N9Fbs25w==", 124 | "requires": { 125 | "@types/node": "*" 126 | } 127 | }, 128 | "@types/tar": { 129 | "version": "4.0.5", 130 | "resolved": "https://registry.npmjs.org/@types/tar/-/tar-4.0.5.tgz", 131 | "integrity": "sha512-cgwPhNEabHaZcYIy5xeMtux2EmYBitfqEceBUi2t5+ETy4dW6kswt6WX4+HqLeiiKOo42EXbGiDmVJ2x+vi37Q==", 132 | "requires": { 133 | "@types/minipass": "*", 134 | "@types/node": "*" 135 | } 136 | }, 137 | "@types/tough-cookie": { 138 | "version": "4.0.1", 139 | "resolved": "https://registry.npmjs.org/@types/tough-cookie/-/tough-cookie-4.0.1.tgz", 140 | "integrity": 
"sha512-Y0K95ThC3esLEYD6ZuqNek29lNX2EM1qxV8y2FTLUB0ff5wWrk7az+mLrnNFUnaXcgKye22+sFBRXOgpPILZNg==" 141 | }, 142 | "@types/underscore": { 143 | "version": "1.11.3", 144 | "resolved": "https://registry.npmjs.org/@types/underscore/-/underscore-1.11.3.tgz", 145 | "integrity": "sha512-Fl1TX1dapfXyDqFg2ic9M+vlXRktcPJrc4PR7sRc7sdVrjavg/JHlbUXBt8qWWqhJrmSqg3RNAkAPRiOYw6Ahw==" 146 | }, 147 | "@types/ws": { 148 | "version": "6.0.4", 149 | "resolved": "https://registry.npmjs.org/@types/ws/-/ws-6.0.4.tgz", 150 | "integrity": "sha512-PpPrX7SZW9re6+Ha8ojZG4Se8AZXgf0GK6zmfqEuCsY49LFDNXO3SByp44X3dFEqtB73lkCDAdUazhAjVPiNwg==", 151 | "requires": { 152 | "@types/node": "*" 153 | } 154 | }, 155 | "aggregate-error": { 156 | "version": "3.1.0", 157 | "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz", 158 | "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", 159 | "requires": { 160 | "clean-stack": "^2.0.0", 161 | "indent-string": "^4.0.0" 162 | } 163 | }, 164 | "ajv": { 165 | "version": "6.12.6", 166 | "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", 167 | "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", 168 | "requires": { 169 | "fast-deep-equal": "^3.1.1", 170 | "fast-json-stable-stringify": "^2.0.0", 171 | "json-schema-traverse": "^0.4.1", 172 | "uri-js": "^4.2.2" 173 | } 174 | }, 175 | "argparse": { 176 | "version": "1.0.10", 177 | "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", 178 | "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", 179 | "requires": { 180 | "sprintf-js": "~1.0.2" 181 | } 182 | }, 183 | "asn1": { 184 | "version": "0.2.4", 185 | "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", 186 | "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", 187 | 
"requires": { 188 | "safer-buffer": "~2.1.0" 189 | } 190 | }, 191 | "assert-plus": { 192 | "version": "1.0.0", 193 | "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", 194 | "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" 195 | }, 196 | "asynckit": { 197 | "version": "0.4.0", 198 | "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", 199 | "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" 200 | }, 201 | "aws-sign2": { 202 | "version": "0.7.0", 203 | "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", 204 | "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" 205 | }, 206 | "aws4": { 207 | "version": "1.11.0", 208 | "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", 209 | "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==" 210 | }, 211 | "balanced-match": { 212 | "version": "1.0.2", 213 | "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", 214 | "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" 215 | }, 216 | "bcrypt-pbkdf": { 217 | "version": "1.0.2", 218 | "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", 219 | "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", 220 | "requires": { 221 | "tweetnacl": "^0.14.3" 222 | } 223 | }, 224 | "brace-expansion": { 225 | "version": "1.1.11", 226 | "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", 227 | "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", 228 | "requires": { 229 | "balanced-match": "^1.0.0", 230 | "concat-map": "0.0.1" 231 | } 232 | }, 233 | "byline": { 234 | "version": "5.0.0", 235 | "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz", 236 | "integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=" 237 | }, 238 | "cacheable-lookup": { 239 | "version": 
"5.0.4", 240 | "resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz", 241 | "integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==" 242 | }, 243 | "cacheable-request": { 244 | "version": "7.0.2", 245 | "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.2.tgz", 246 | "integrity": "sha512-pouW8/FmiPQbuGpkXQ9BAPv/Mo5xDGANgSNXzTzJ8DrKGuXOssM4wIQRjfanNRh3Yu5cfYPvcorqbhg2KIJtew==", 247 | "requires": { 248 | "clone-response": "^1.0.2", 249 | "get-stream": "^5.1.0", 250 | "http-cache-semantics": "^4.0.0", 251 | "keyv": "^4.0.0", 252 | "lowercase-keys": "^2.0.0", 253 | "normalize-url": "^6.0.1", 254 | "responselike": "^2.0.0" 255 | }, 256 | "dependencies": { 257 | "get-stream": { 258 | "version": "5.2.0", 259 | "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", 260 | "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", 261 | "requires": { 262 | "pump": "^3.0.0" 263 | } 264 | } 265 | } 266 | }, 267 | "caseless": { 268 | "version": "0.12.0", 269 | "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", 270 | "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" 271 | }, 272 | "chownr": { 273 | "version": "2.0.0", 274 | "resolved": "https://registry.npmjs.org/chownr/-/chownr-2.0.0.tgz", 275 | "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==" 276 | }, 277 | "clean-stack": { 278 | "version": "2.2.0", 279 | "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz", 280 | "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==" 281 | }, 282 | "clone-response": { 283 | "version": "1.0.2", 284 | "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", 285 | "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", 286 | 
"requires": { 287 | "mimic-response": "^1.0.0" 288 | } 289 | }, 290 | "combined-stream": { 291 | "version": "1.0.8", 292 | "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", 293 | "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", 294 | "requires": { 295 | "delayed-stream": "~1.0.0" 296 | } 297 | }, 298 | "concat-map": { 299 | "version": "0.0.1", 300 | "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", 301 | "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" 302 | }, 303 | "core-util-is": { 304 | "version": "1.0.2", 305 | "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", 306 | "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" 307 | }, 308 | "cross-spawn": { 309 | "version": "7.0.3", 310 | "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", 311 | "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", 312 | "requires": { 313 | "path-key": "^3.1.0", 314 | "shebang-command": "^2.0.0", 315 | "which": "^2.0.1" 316 | } 317 | }, 318 | "dashdash": { 319 | "version": "1.14.1", 320 | "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", 321 | "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", 322 | "requires": { 323 | "assert-plus": "^1.0.0" 324 | } 325 | }, 326 | "decompress-response": { 327 | "version": "6.0.0", 328 | "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", 329 | "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", 330 | "requires": { 331 | "mimic-response": "^3.1.0" 332 | }, 333 | "dependencies": { 334 | "mimic-response": { 335 | "version": "3.1.0", 336 | "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", 337 | "integrity": 
"sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==" 338 | } 339 | } 340 | }, 341 | "defer-to-connect": { 342 | "version": "2.0.1", 343 | "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz", 344 | "integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==" 345 | }, 346 | "delayed-stream": { 347 | "version": "1.0.0", 348 | "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", 349 | "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" 350 | }, 351 | "ecc-jsbn": { 352 | "version": "0.1.2", 353 | "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", 354 | "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", 355 | "requires": { 356 | "jsbn": "~0.1.0", 357 | "safer-buffer": "^2.1.0" 358 | } 359 | }, 360 | "end-of-stream": { 361 | "version": "1.4.4", 362 | "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", 363 | "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", 364 | "requires": { 365 | "once": "^1.4.0" 366 | } 367 | }, 368 | "esprima": { 369 | "version": "4.0.1", 370 | "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", 371 | "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" 372 | }, 373 | "execa": { 374 | "version": "5.0.0", 375 | "resolved": "https://registry.npmjs.org/execa/-/execa-5.0.0.tgz", 376 | "integrity": "sha512-ov6w/2LCiuyO4RLYGdpFGjkcs0wMTgGE8PrkTHikeUy5iJekXyPIKUjifk5CsE0pt7sMCrMZ3YNqoCj6idQOnQ==", 377 | "requires": { 378 | "cross-spawn": "^7.0.3", 379 | "get-stream": "^6.0.0", 380 | "human-signals": "^2.1.0", 381 | "is-stream": "^2.0.0", 382 | "merge-stream": "^2.0.0", 383 | "npm-run-path": "^4.0.1", 384 | "onetime": "^5.1.2", 385 | "signal-exit": "^3.0.3", 386 | "strip-final-newline": "^2.0.0" 387 | } 388 | }, 389 | 
"extend": { 390 | "version": "3.0.2", 391 | "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", 392 | "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" 393 | }, 394 | "extsprintf": { 395 | "version": "1.3.0", 396 | "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", 397 | "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" 398 | }, 399 | "fast-deep-equal": { 400 | "version": "3.1.3", 401 | "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", 402 | "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" 403 | }, 404 | "fast-json-stable-stringify": { 405 | "version": "2.1.0", 406 | "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", 407 | "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==" 408 | }, 409 | "forever-agent": { 410 | "version": "0.6.1", 411 | "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", 412 | "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" 413 | }, 414 | "form-data": { 415 | "version": "2.5.1", 416 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.5.1.tgz", 417 | "integrity": "sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==", 418 | "requires": { 419 | "asynckit": "^0.4.0", 420 | "combined-stream": "^1.0.6", 421 | "mime-types": "^2.1.12" 422 | } 423 | }, 424 | "fs-minipass": { 425 | "version": "2.1.0", 426 | "resolved": "https://registry.npmjs.org/fs-minipass/-/fs-minipass-2.1.0.tgz", 427 | "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", 428 | "requires": { 429 | "minipass": "^3.0.0" 430 | } 431 | }, 432 | "fs.realpath": { 433 | "version": "1.0.0", 434 | "resolved": 
"https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", 435 | "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" 436 | }, 437 | "function-bind": { 438 | "version": "1.1.1", 439 | "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", 440 | "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" 441 | }, 442 | "get-stream": { 443 | "version": "6.0.1", 444 | "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz", 445 | "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==" 446 | }, 447 | "getpass": { 448 | "version": "0.1.7", 449 | "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", 450 | "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", 451 | "requires": { 452 | "assert-plus": "^1.0.0" 453 | } 454 | }, 455 | "glob": { 456 | "version": "7.1.7", 457 | "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.7.tgz", 458 | "integrity": "sha512-OvD9ENzPLbegENnYP5UUfJIirTg4+XwMWGaQfQTY0JenxNvvIKP3U3/tAQSPIu/lHxXYSZmpXlUHeqAIdKzBLQ==", 459 | "requires": { 460 | "fs.realpath": "^1.0.0", 461 | "inflight": "^1.0.4", 462 | "inherits": "2", 463 | "minimatch": "^3.0.4", 464 | "once": "^1.3.0", 465 | "path-is-absolute": "^1.0.0" 466 | } 467 | }, 468 | "got": { 469 | "version": "11.8.2", 470 | "resolved": "https://registry.npmjs.org/got/-/got-11.8.2.tgz", 471 | "integrity": "sha512-D0QywKgIe30ODs+fm8wMZiAcZjypcCodPNuMz5H9Mny7RJ+IjJ10BdmGW7OM7fHXP+O7r6ZwapQ/YQmMSvB0UQ==", 472 | "requires": { 473 | "@sindresorhus/is": "^4.0.0", 474 | "@szmarczak/http-timer": "^4.0.5", 475 | "@types/cacheable-request": "^6.0.1", 476 | "@types/responselike": "^1.0.0", 477 | "cacheable-lookup": "^5.0.3", 478 | "cacheable-request": "^7.0.1", 479 | "decompress-response": "^6.0.0", 480 | "http2-wrapper": "^1.0.0-beta.5.2", 481 | "lowercase-keys": "^2.0.0", 482 | "p-cancelable": "^2.0.0", 483 | "responselike": "^2.0.0" 484 | } 
485 | }, 486 | "har-schema": { 487 | "version": "2.0.0", 488 | "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", 489 | "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" 490 | }, 491 | "har-validator": { 492 | "version": "5.1.5", 493 | "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", 494 | "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", 495 | "requires": { 496 | "ajv": "^6.12.3", 497 | "har-schema": "^2.0.0" 498 | } 499 | }, 500 | "has": { 501 | "version": "1.0.3", 502 | "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", 503 | "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", 504 | "requires": { 505 | "function-bind": "^1.1.1" 506 | } 507 | }, 508 | "http-cache-semantics": { 509 | "version": "4.1.0", 510 | "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", 511 | "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" 512 | }, 513 | "http-signature": { 514 | "version": "1.2.0", 515 | "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", 516 | "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", 517 | "requires": { 518 | "assert-plus": "^1.0.0", 519 | "jsprim": "^1.2.2", 520 | "sshpk": "^1.7.0" 521 | } 522 | }, 523 | "http2-wrapper": { 524 | "version": "1.0.3", 525 | "resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz", 526 | "integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==", 527 | "requires": { 528 | "quick-lru": "^5.1.1", 529 | "resolve-alpn": "^1.0.0" 530 | } 531 | }, 532 | "human-signals": { 533 | "version": "2.1.0", 534 | "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz", 535 | "integrity": 
"sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==" 536 | }, 537 | "indent-string": { 538 | "version": "4.0.0", 539 | "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz", 540 | "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==" 541 | }, 542 | "inflight": { 543 | "version": "1.0.6", 544 | "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", 545 | "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", 546 | "requires": { 547 | "once": "^1.3.0", 548 | "wrappy": "1" 549 | } 550 | }, 551 | "inherits": { 552 | "version": "2.0.4", 553 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", 554 | "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" 555 | }, 556 | "interpret": { 557 | "version": "1.4.0", 558 | "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz", 559 | "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==" 560 | }, 561 | "is-core-module": { 562 | "version": "2.5.0", 563 | "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.5.0.tgz", 564 | "integrity": "sha512-TXCMSDsEHMEEZ6eCA8rwRDbLu55MRGmrctljsBX/2v1d9/GzqHOxW5c5oPSgrUt2vBFXebu9rGqckXGPWOlYpg==", 565 | "requires": { 566 | "has": "^1.0.3" 567 | } 568 | }, 569 | "is-stream": { 570 | "version": "2.0.1", 571 | "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz", 572 | "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==" 573 | }, 574 | "is-typedarray": { 575 | "version": "1.0.0", 576 | "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", 577 | "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" 578 | }, 579 | "isexe": { 580 | "version": "2.0.0", 581 | "resolved": 
"https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", 582 | "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=" 583 | }, 584 | "isomorphic-ws": { 585 | "version": "4.0.1", 586 | "resolved": "https://registry.npmjs.org/isomorphic-ws/-/isomorphic-ws-4.0.1.tgz", 587 | "integrity": "sha512-BhBvN2MBpWTaSHdWRb/bwdZJ1WaehQ2L1KngkCkfLUGF0mAWAT1sQUQacEmQ0jXkFw/czDXPNQSL5u2/Krsz1w==" 588 | }, 589 | "isstream": { 590 | "version": "0.1.2", 591 | "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", 592 | "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" 593 | }, 594 | "jose": { 595 | "version": "2.0.5", 596 | "resolved": "https://registry.npmjs.org/jose/-/jose-2.0.5.tgz", 597 | "integrity": "sha512-BAiDNeDKTMgk4tvD0BbxJ8xHEHBZgpeRZ1zGPPsitSyMgjoMWiLGYAE7H7NpP5h0lPppQajQs871E8NHUrzVPA==", 598 | "requires": { 599 | "@panva/asn1.js": "^1.0.0" 600 | } 601 | }, 602 | "js-yaml": { 603 | "version": "3.14.1", 604 | "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz", 605 | "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", 606 | "requires": { 607 | "argparse": "^1.0.7", 608 | "esprima": "^4.0.0" 609 | } 610 | }, 611 | "jsbn": { 612 | "version": "0.1.1", 613 | "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", 614 | "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=" 615 | }, 616 | "json-buffer": { 617 | "version": "3.0.1", 618 | "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", 619 | "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==" 620 | }, 621 | "json-schema": { 622 | "version": "0.2.3", 623 | "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", 624 | "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" 625 | }, 626 | "json-schema-traverse": { 627 | "version": "0.4.1", 628 | "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", 629 
| "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==" 630 | }, 631 | "json-stringify-safe": { 632 | "version": "5.0.1", 633 | "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", 634 | "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" 635 | }, 636 | "jsonpath-plus": { 637 | "version": "0.19.0", 638 | "resolved": "https://registry.npmjs.org/jsonpath-plus/-/jsonpath-plus-0.19.0.tgz", 639 | "integrity": "sha512-GSVwsrzW9LsA5lzsqe4CkuZ9wp+kxBb2GwNniaWzI2YFn5Ig42rSW8ZxVpWXaAfakXNrx5pgY5AbQq7kzX29kg==" 640 | }, 641 | "jsprim": { 642 | "version": "1.4.1", 643 | "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", 644 | "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", 645 | "requires": { 646 | "assert-plus": "1.0.0", 647 | "extsprintf": "1.3.0", 648 | "json-schema": "0.2.3", 649 | "verror": "1.10.0" 650 | } 651 | }, 652 | "keyv": { 653 | "version": "4.0.3", 654 | "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.0.3.tgz", 655 | "integrity": "sha512-zdGa2TOpSZPq5mU6iowDARnMBZgtCqJ11dJROFi6tg6kTn4nuUdU09lFyLFSaHrWqpIJ+EBq4E8/Dc0Vx5vLdA==", 656 | "requires": { 657 | "json-buffer": "3.0.1" 658 | } 659 | }, 660 | "lowercase-keys": { 661 | "version": "2.0.0", 662 | "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz", 663 | "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==" 664 | }, 665 | "lru-cache": { 666 | "version": "6.0.0", 667 | "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", 668 | "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", 669 | "requires": { 670 | "yallist": "^4.0.0" 671 | } 672 | }, 673 | "make-error": { 674 | "version": "1.3.6", 675 | "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", 676 | "integrity": 
"sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==" 677 | }, 678 | "merge-stream": { 679 | "version": "2.0.0", 680 | "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", 681 | "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==" 682 | }, 683 | "mime-db": { 684 | "version": "1.49.0", 685 | "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.49.0.tgz", 686 | "integrity": "sha512-CIc8j9URtOVApSFCQIF+VBkX1RwXp/oMMOrqdyXSBXq5RWNEsRfyj1kiRnQgmNXmHxPoFIxOroKA3zcU9P+nAA==" 687 | }, 688 | "mime-types": { 689 | "version": "2.1.32", 690 | "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.32.tgz", 691 | "integrity": "sha512-hJGaVS4G4c9TSMYh2n6SQAGrC4RnfU+daP8G7cSCmaqNjiOoUY0VHCMS42pxnQmVF1GWwFhbHWn3RIxCqTmZ9A==", 692 | "requires": { 693 | "mime-db": "1.49.0" 694 | } 695 | }, 696 | "mimic-fn": { 697 | "version": "2.1.0", 698 | "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", 699 | "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==" 700 | }, 701 | "mimic-response": { 702 | "version": "1.0.1", 703 | "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", 704 | "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" 705 | }, 706 | "minimatch": { 707 | "version": "3.0.4", 708 | "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", 709 | "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", 710 | "requires": { 711 | "brace-expansion": "^1.1.7" 712 | } 713 | }, 714 | "minipass": { 715 | "version": "3.1.3", 716 | "resolved": "https://registry.npmjs.org/minipass/-/minipass-3.1.3.tgz", 717 | "integrity": 
"sha512-Mgd2GdMVzY+x3IJ+oHnVM+KG3lA5c8tnabyJKmHSaG2kAGpudxuOf8ToDkhumF7UzME7DecbQE9uOZhNm7PuJg==", 718 | "requires": { 719 | "yallist": "^4.0.0" 720 | } 721 | }, 722 | "minizlib": { 723 | "version": "2.1.2", 724 | "resolved": "https://registry.npmjs.org/minizlib/-/minizlib-2.1.2.tgz", 725 | "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", 726 | "requires": { 727 | "minipass": "^3.0.0", 728 | "yallist": "^4.0.0" 729 | } 730 | }, 731 | "mkdirp": { 732 | "version": "1.0.4", 733 | "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-1.0.4.tgz", 734 | "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==" 735 | }, 736 | "normalize-url": { 737 | "version": "6.1.0", 738 | "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz", 739 | "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==" 740 | }, 741 | "npm-run-path": { 742 | "version": "4.0.1", 743 | "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", 744 | "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", 745 | "requires": { 746 | "path-key": "^3.0.0" 747 | } 748 | }, 749 | "oauth-sign": { 750 | "version": "0.9.0", 751 | "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", 752 | "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" 753 | }, 754 | "object-hash": { 755 | "version": "2.2.0", 756 | "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-2.2.0.tgz", 757 | "integrity": "sha512-gScRMn0bS5fH+IuwyIFgnh9zBdo4DV+6GhygmWM9HyNJSgS0hScp1f5vjtm7oIIOiT9trXrShAkLFSc2IqKNgw==" 758 | }, 759 | "oidc-token-hash": { 760 | "version": "5.0.1", 761 | "resolved": "https://registry.npmjs.org/oidc-token-hash/-/oidc-token-hash-5.0.1.tgz", 762 | 
"integrity": "sha512-EvoOtz6FIEBzE+9q253HsLCVRiK/0doEJ2HCvvqMQb3dHZrP3WlJKYtJ55CRTw4jmYomzH4wkPuCj/I3ZvpKxQ==" 763 | }, 764 | "once": { 765 | "version": "1.4.0", 766 | "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", 767 | "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", 768 | "requires": { 769 | "wrappy": "1" 770 | } 771 | }, 772 | "onetime": { 773 | "version": "5.1.2", 774 | "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", 775 | "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", 776 | "requires": { 777 | "mimic-fn": "^2.1.0" 778 | } 779 | }, 780 | "openid-client": { 781 | "version": "4.7.4", 782 | "resolved": "https://registry.npmjs.org/openid-client/-/openid-client-4.7.4.tgz", 783 | "integrity": "sha512-n+RURXYuR0bBZo9i0pn+CXZSyg5JYQ1nbwEwPQvLE7EcJt/vMZ2iIMjLehl5DvCN53XUoPVZs9KAE5r6d9fxsw==", 784 | "requires": { 785 | "aggregate-error": "^3.1.0", 786 | "got": "^11.8.0", 787 | "jose": "^2.0.5", 788 | "lru-cache": "^6.0.0", 789 | "make-error": "^1.3.6", 790 | "object-hash": "^2.0.1", 791 | "oidc-token-hash": "^5.0.1" 792 | } 793 | }, 794 | "p-cancelable": { 795 | "version": "2.1.1", 796 | "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz", 797 | "integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==" 798 | }, 799 | "path-is-absolute": { 800 | "version": "1.0.1", 801 | "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", 802 | "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" 803 | }, 804 | "path-key": { 805 | "version": "3.1.1", 806 | "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", 807 | "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==" 808 | }, 809 | "path-parse": { 810 | "version": "1.0.7", 811 | "resolved": 
"https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", 812 | "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==" 813 | }, 814 | "performance-now": { 815 | "version": "2.1.0", 816 | "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", 817 | "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" 818 | }, 819 | "psl": { 820 | "version": "1.8.0", 821 | "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", 822 | "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" 823 | }, 824 | "pump": { 825 | "version": "3.0.0", 826 | "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", 827 | "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", 828 | "requires": { 829 | "end-of-stream": "^1.1.0", 830 | "once": "^1.3.1" 831 | } 832 | }, 833 | "punycode": { 834 | "version": "2.1.1", 835 | "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", 836 | "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" 837 | }, 838 | "qs": { 839 | "version": "6.5.2", 840 | "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", 841 | "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" 842 | }, 843 | "quick-lru": { 844 | "version": "5.1.1", 845 | "resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz", 846 | "integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==" 847 | }, 848 | "rechoir": { 849 | "version": "0.6.2", 850 | "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz", 851 | "integrity": "sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q=", 852 | "requires": { 853 | "resolve": "^1.1.6" 854 | } 855 | }, 856 | "request": { 857 | "version": "2.88.2", 858 | "resolved": 
"https://registry.npmjs.org/request/-/request-2.88.2.tgz", 859 | "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", 860 | "requires": { 861 | "aws-sign2": "~0.7.0", 862 | "aws4": "^1.8.0", 863 | "caseless": "~0.12.0", 864 | "combined-stream": "~1.0.6", 865 | "extend": "~3.0.2", 866 | "forever-agent": "~0.6.1", 867 | "form-data": "~2.3.2", 868 | "har-validator": "~5.1.3", 869 | "http-signature": "~1.2.0", 870 | "is-typedarray": "~1.0.0", 871 | "isstream": "~0.1.2", 872 | "json-stringify-safe": "~5.0.1", 873 | "mime-types": "~2.1.19", 874 | "oauth-sign": "~0.9.0", 875 | "performance-now": "^2.1.0", 876 | "qs": "~6.5.2", 877 | "safe-buffer": "^5.1.2", 878 | "tough-cookie": "~2.5.0", 879 | "tunnel-agent": "^0.6.0", 880 | "uuid": "^3.3.2" 881 | }, 882 | "dependencies": { 883 | "form-data": { 884 | "version": "2.3.3", 885 | "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", 886 | "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", 887 | "requires": { 888 | "asynckit": "^0.4.0", 889 | "combined-stream": "^1.0.6", 890 | "mime-types": "^2.1.12" 891 | } 892 | } 893 | } 894 | }, 895 | "resolve": { 896 | "version": "1.20.0", 897 | "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.20.0.tgz", 898 | "integrity": "sha512-wENBPt4ySzg4ybFQW2TT1zMQucPK95HSh/nq2CFTZVOGut2+pQvSsgtda4d26YrYcr067wjbmzOG8byDPBX63A==", 899 | "requires": { 900 | "is-core-module": "^2.2.0", 901 | "path-parse": "^1.0.6" 902 | } 903 | }, 904 | "resolve-alpn": { 905 | "version": "1.2.0", 906 | "resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.0.tgz", 907 | "integrity": "sha512-e4FNQs+9cINYMO5NMFc6kOUCdohjqFPSgMuwuZAOUWqrfWsen+Yjy5qZFkV5K7VO7tFSLKcUL97olkED7sCBHA==" 908 | }, 909 | "responselike": { 910 | "version": "2.0.0", 911 | "resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.0.tgz", 912 | "integrity": 
"sha512-xH48u3FTB9VsZw7R+vvgaKeLKzT6jOogbQhEe/jewwnZgzPcnyWui2Av6JpoYZF/91uueC+lqhWqeURw5/qhCw==", 913 | "requires": { 914 | "lowercase-keys": "^2.0.0" 915 | } 916 | }, 917 | "rfc4648": { 918 | "version": "1.5.0", 919 | "resolved": "https://registry.npmjs.org/rfc4648/-/rfc4648-1.5.0.tgz", 920 | "integrity": "sha512-FA6W9lDNeX8WbMY31io1xWg+TpZCbeDKsBo0ocwACZiWnh9TUAyk9CCuBQuOPmYnwwdEQZmraQ2ZK7yJsxErBg==" 921 | }, 922 | "rimraf": { 923 | "version": "3.0.2", 924 | "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", 925 | "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", 926 | "requires": { 927 | "glob": "^7.1.3" 928 | } 929 | }, 930 | "safe-buffer": { 931 | "version": "5.2.1", 932 | "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", 933 | "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==" 934 | }, 935 | "safer-buffer": { 936 | "version": "2.1.2", 937 | "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", 938 | "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" 939 | }, 940 | "shebang-command": { 941 | "version": "2.0.0", 942 | "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", 943 | "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", 944 | "requires": { 945 | "shebang-regex": "^3.0.0" 946 | } 947 | }, 948 | "shebang-regex": { 949 | "version": "3.0.0", 950 | "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", 951 | "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==" 952 | }, 953 | "shelljs": { 954 | "version": "0.8.4", 955 | "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.4.tgz", 956 | "integrity": 
"sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ==", 957 | "requires": { 958 | "glob": "^7.0.0", 959 | "interpret": "^1.0.0", 960 | "rechoir": "^0.6.2" 961 | } 962 | }, 963 | "signal-exit": { 964 | "version": "3.0.3", 965 | "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", 966 | "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" 967 | }, 968 | "sprintf-js": { 969 | "version": "1.0.3", 970 | "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", 971 | "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" 972 | }, 973 | "sshpk": { 974 | "version": "1.16.1", 975 | "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", 976 | "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", 977 | "requires": { 978 | "asn1": "~0.2.3", 979 | "assert-plus": "^1.0.0", 980 | "bcrypt-pbkdf": "^1.0.0", 981 | "dashdash": "^1.12.0", 982 | "ecc-jsbn": "~0.1.1", 983 | "getpass": "^0.1.1", 984 | "jsbn": "~0.1.0", 985 | "safer-buffer": "^2.0.2", 986 | "tweetnacl": "~0.14.0" 987 | } 988 | }, 989 | "stream-buffers": { 990 | "version": "3.0.2", 991 | "resolved": "https://registry.npmjs.org/stream-buffers/-/stream-buffers-3.0.2.tgz", 992 | "integrity": "sha512-DQi1h8VEBA/lURbSwFtEHnSTb9s2/pwLEaFuNhXwy1Dx3Sa0lOuYT2yNUr4/j2fs8oCAMANtrZ5OrPZtyVs3MQ==" 993 | }, 994 | "strip-final-newline": { 995 | "version": "2.0.0", 996 | "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", 997 | "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==" 998 | }, 999 | "tar": { 1000 | "version": "6.1.8", 1001 | "resolved": "https://registry.npmjs.org/tar/-/tar-6.1.8.tgz", 1002 | "integrity": "sha512-sb9b0cp855NbkMJcskdSYA7b11Q8JsX4qe4pyUAfHp+Y6jBjJeek2ZVlwEfWayshEIwlIzXx0Fain3QG9JPm2A==", 1003 | "requires": { 
1004 | "chownr": "^2.0.0", 1005 | "fs-minipass": "^2.0.0", 1006 | "minipass": "^3.0.0", 1007 | "minizlib": "^2.1.1", 1008 | "mkdirp": "^1.0.3", 1009 | "yallist": "^4.0.0" 1010 | } 1011 | }, 1012 | "tmp": { 1013 | "version": "0.2.1", 1014 | "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", 1015 | "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", 1016 | "requires": { 1017 | "rimraf": "^3.0.0" 1018 | } 1019 | }, 1020 | "tmp-promise": { 1021 | "version": "3.0.2", 1022 | "resolved": "https://registry.npmjs.org/tmp-promise/-/tmp-promise-3.0.2.tgz", 1023 | "integrity": "sha512-OyCLAKU1HzBjL6Ev3gxUeraJNlbNingmi8IrHHEsYH8LTmEuhvYfqvhn2F/je+mjf4N58UmZ96OMEy1JanSCpA==", 1024 | "requires": { 1025 | "tmp": "^0.2.0" 1026 | } 1027 | }, 1028 | "tough-cookie": { 1029 | "version": "2.5.0", 1030 | "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", 1031 | "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", 1032 | "requires": { 1033 | "psl": "^1.1.28", 1034 | "punycode": "^2.1.1" 1035 | } 1036 | }, 1037 | "tslib": { 1038 | "version": "1.14.1", 1039 | "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", 1040 | "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==" 1041 | }, 1042 | "tunnel-agent": { 1043 | "version": "0.6.0", 1044 | "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", 1045 | "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", 1046 | "requires": { 1047 | "safe-buffer": "^5.0.1" 1048 | } 1049 | }, 1050 | "tweetnacl": { 1051 | "version": "0.14.5", 1052 | "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", 1053 | "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" 1054 | }, 1055 | "underscore": { 1056 | "version": "1.13.1", 1057 | "resolved": 
"https://registry.npmjs.org/underscore/-/underscore-1.13.1.tgz", 1058 | "integrity": "sha512-hzSoAVtJF+3ZtiFX0VgfFPHEDRm7Y/QPjGyNo4TVdnDTdft3tr8hEkD25a1jC+TjTuE7tkHGKkhwCgs9dgBB2g==" 1059 | }, 1060 | "uri-js": { 1061 | "version": "4.4.1", 1062 | "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", 1063 | "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", 1064 | "requires": { 1065 | "punycode": "^2.1.0" 1066 | } 1067 | }, 1068 | "uuid": { 1069 | "version": "3.4.0", 1070 | "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", 1071 | "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==" 1072 | }, 1073 | "verror": { 1074 | "version": "1.10.0", 1075 | "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", 1076 | "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", 1077 | "requires": { 1078 | "assert-plus": "^1.0.0", 1079 | "core-util-is": "1.0.2", 1080 | "extsprintf": "^1.2.0" 1081 | } 1082 | }, 1083 | "which": { 1084 | "version": "2.0.2", 1085 | "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", 1086 | "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", 1087 | "requires": { 1088 | "isexe": "^2.0.0" 1089 | } 1090 | }, 1091 | "wrappy": { 1092 | "version": "1.0.2", 1093 | "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", 1094 | "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" 1095 | }, 1096 | "ws": { 1097 | "version": "7.5.3", 1098 | "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.3.tgz", 1099 | "integrity": "sha512-kQ/dHIzuLrS6Je9+uv81ueZomEwH0qVYstcAQ4/Z93K8zeko9gtAbttJWzoC5ukqXY1PpoouV3+VSOqEAFt5wg==" 1100 | }, 1101 | "yallist": { 1102 | "version": "4.0.0", 1103 | "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", 1104 | "integrity": 
"sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" 1105 | } 1106 | } 1107 | } 1108 | --------------------------------------------------------------------------------