├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── config └── project.yml ├── docker-build.sh ├── docker-entrypoint.sh ├── json └── C_DirectNextServiceByDataJson.json ├── k8s ├── delete-container-image-from-edge-kube-bak │ ├── delete-container-image-from-edge-kube.yml │ └── role.yml └── delete-container-image-from-edge-kube │ ├── delete-container-image-from-edge-kube.yml │ └── role.yml ├── main.sh ├── requirements.txt ├── shell ├── do.sh └── exec.sh └── src ├── controllers ├── const.py ├── deployment_controller.py ├── docker_controller.py ├── docker_registry_controller.py ├── kubernetes_controller.py ├── namespace_controller.py ├── pvc_controller.py └── service_controller.py └── main.py /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | __pycache__ 3 | 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM l4t:latest 2 | 3 | # Definition of a Device & Service 4 | ENV POSITION=Runtime \ 5 | SERVICE=delete-container-image-from-edge \ 6 | AION_HOME=/var/lib/aion 7 | 8 | RUN mkdir ${AION_HOME} 9 | WORKDIR ${AION_HOME} 10 | # Setup Directories 11 | RUN mkdir -p \ 12 | $POSITION/$SERVICE 13 | WORKDIR ${AION_HOME}/$POSITION/$SERVICE/ 14 | 15 | RUN rm -rf /usr/local/lib/python3.6/dist-packages/protobuf* 16 | 17 | ADD requirements.txt . 18 | RUN pip3 install -r requirements.txt 19 | 20 | ENV PYTHONPATH ${AION_HOME}/$POSITION/$SERVICE 21 | ENV REGISTRY_USER aion 22 | 23 | ADD docker-entrypoint.sh . 24 | ADD src/ . 25 | 26 | CMD ["/bin/sh", "docker-entrypoint.sh"] 27 | # CMD ["/bin/sh", "-c", "while :; do sleep 10000; done"] 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Latona, Inc. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE.
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | apply: 2 | kubectl apply -f k8s/delete-container-image-from-edge-kube/ 3 | 4 | delete: 5 | kubectl delete -f k8s/delete-container-image-from-edge-kube/ 6 | kubectl delete deployment nginx-deployment 7 | sudo rm -rf delete_container_image_from_edge_kube/__pycache__/ 8 | 9 | exec: 10 | sh shell/exec.sh 11 | 12 | do: 13 | sh shell/do.sh 14 | 15 | redo: 16 | -kubectl delete deployment nginx-deployment 17 | -sudo rm -rf delete_container_image_from_edge_kube/__pycache__/ 18 | sleep 5 19 | sh shell/do.sh 20 | 21 | build: 22 | sh docker-build.sh -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # delete-container-image-from-edge-kube 2 | delete-container-image-from-edge-kube is a microservice that deletes a specific container image on an edge device. 3 | It is used, for example, in a container deployment system to delete a container on the target device so that a new container image can be deployed in its place. 4 | 5 | ## Runtime Environment 6 | delete-container-image-from-edge-kube is designed to run on the AION platform. 7 | Before using it, prepare the AION runtime environment as follows: 8 | ・ Kubernetes 9 | ・ AION resources 10 | 11 | ## Setup 12 | 13 | [1] Clone this microservice into any directory on appropriate storage. 14 | 15 | [2] In the directory where this microservice was cloned, run the following command to build the image. 16 | 17 | ``` 18 | sh docker-build.sh 19 | ``` 20 | -------------------------------------------------------------------------------- /config/project.yml: -------------------------------------------------------------------------------- 1 | microservices: 2 | sample-microservice: 3 | command: python3 ./python/main.py 4 | startup: yes 5 | scale: 1 6 | nextService: 7 | default: 8 | - name: delete-container-image-from-edge 9 | pattern: "n" 10 | delete-container-image-from-edge: 11 | command: python3 ./delete-container-image-from-edge 12 | scale: 1 13 | multiple: yes 14 | serviceAccount: controller-serviceaccount 15 | env: 16 | REGISTRY_USER: XXXX 17 | REGISTRY_PASSWORD: XXXX 18 | -------------------------------------------------------------------------------- /docker-build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | DATE="$(date "+%Y%m%d%H%M")" 4 | REPOSITORY_PREFIX="latonaio" 5 | SERVICE_NAME="delete-container-image-from-edge" 6 | 7 | DOCKER_BUILDKIT=1 docker build --progress=plain -t ${SERVICE_NAME}:"${DATE}" .
8 | 9 | # tagging 10 | docker tag ${SERVICE_NAME}:"${DATE}" ${SERVICE_NAME}:latest 11 | docker tag ${SERVICE_NAME}:"${DATE}" ${REPOSITORY_PREFIX}/${SERVICE_NAME}:"${DATE}" 12 | docker tag ${REPOSITORY_PREFIX}/${SERVICE_NAME}:"${DATE}" ${REPOSITORY_PREFIX}/${SERVICE_NAME}:latest 13 | 14 | if [ "$PUSH" = "push" ]; then 15 | docker push ${REPOSITORY_PREFIX}/${SERVICE_NAME}:"${DATE}" 16 | docker push ${REPOSITORY_PREFIX}/${SERVICE_NAME}:latest 17 | fi 18 | -------------------------------------------------------------------------------- /docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | python3 -u main.py 4 | /bin/sh -c "sleep 3" 5 | curl -s -X POST localhost:10001/quitquitquit 6 | -------------------------------------------------------------------------------- /json/C_DirectNextServiceByDataJson.json: -------------------------------------------------------------------------------- 1 | { 2 | "connections": { 3 | "default": { 4 | "outputDataPath": "/var/lib/aion/Data/sample-microservice_1", 5 | "metadata": { 6 | "project_name": "sample-project", 7 | "microservice_name": "nginx", 8 | "ip": "192.168.XXX.XXX", 9 | "port": "31112" 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /k8s/delete-container-image-from-edge-kube-bak/delete-container-image-from-edge-kube.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: delete-container-image-from-edge 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | role: delete-container-image-from-edge 10 | template: 11 | metadata: 12 | labels: 13 | role: delete-container-image-from-edge 14 | spec: 15 | serviceAccount: controller-serviceaccount 16 | containers: 17 | - name: delete-container-image-from-edge 18 | image: delete-container-image-from-edge:latest 19 | imagePullPolicy: IfNotPresent 20 | -------------------------------------------------------------------------------- /k8s/delete-container-image-from-edge-kube-bak/role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: clusterrolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-serviceaccount 12 | namespace: default 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: controller-serviceaccount 18 | namespace: default 19 | -------------------------------------------------------------------------------- /k8s/delete-container-image-from-edge-kube/delete-container-image-from-edge-kube.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: delete-container-image-from-edge 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | role: delete-container-image-from-edge 10 | template: 11 | metadata: 12 | labels: 13 | role: delete-container-image-from-edge 14 | spec: 15 | serviceAccount: controller-serviceaccount 16 | # hostNetwork: true 17 | containers: 18 | - name: delete-container-image-from-edge 19 | image: delete-container-image-from-edge:latest 20 | imagePullPolicy: IfNotPresent 21 | volumeMounts: 22 | - mountPath: /var/run/docker.sock 23 | name: docker-sock-volume 24 | readOnly: true
25 | # for development 26 | # - name: scripts 27 | # mountPath: /var/lib/aion/Runtime/delete-container-image-from-edge/delete_container_image_from_edge_kube 28 | volumes: 29 | - name: docker-sock-volume 30 | hostPath: 31 | path: /var/run/docker.sock 32 | # for development 33 | # - name: scripts 34 | # hostPath: 35 | # path: /home/latona/vega/KubernetesYaml/delete-container-image-from-edge-kube/delete_container_image_from_edge_kube 36 | -------------------------------------------------------------------------------- /k8s/delete-container-image-from-edge-kube/role.yml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: clusterrolebinding2 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-serviceaccount 12 | namespace: default 13 | --- 14 | apiVersion: v1 15 | kind: ServiceAccount 16 | metadata: 17 | name: controller-serviceaccount 18 | namespace: default 19 | -------------------------------------------------------------------------------- /main.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | python3 -u main.py 4 | # /bin/sh -c "sleep 3" 5 | /bin/sh -c "sleep 10000" 6 | curl -s -X POST localhost:10001/quitquitquit 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.21.0 2 | pyyaml==3.12 3 | kubernetes==11.0.0 4 | protobuf==3.11.3 5 | docker==3.7.3 6 | pycurl==7.43.0.5 7 | simplejson==3.17.2 8 | -------------------------------------------------------------------------------- /shell/do.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl exec -it $(kubectl get po | awk '{print $1}' | grep -v NAME | grep delete-container-image-from-edge) -- python3 src/main.py -------------------------------------------------------------------------------- /shell/exec.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | kubectl exec -it $(kubectl get po | awk '{print $1}' | grep -v NAME | grep delete-container-image-from-edge) -- bash -------------------------------------------------------------------------------- /src/controllers/const.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SERVICE_NAME = os.environ.get("SERVICE") 4 | KUBERNETES_PACKAGE = "kubernetes" 5 | DELETED_STATUS_INDEX = "802" 6 | SUCCEED_STATUS_INDEX = "803" 7 | FAILED_STATUS_INDEX = "980" 8 | VOLUME_TYPE_PVC = "persistentVolumeClaim" 9 | VOLUME_TYPE_HOST_PATH = "hostPath" 10 | -------------------------------------------------------------------------------- /src/controllers/deployment_controller.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from aion.logger import lprint 4 | from kubernetes import client 5 | from kubernetes.client.rest import ApiException 6 | 7 | from controllers.const import VOLUME_TYPE_PVC, VOLUME_TYPE_HOST_PATH 8 | from controllers.kubernetes_controller import KubernetesController 9 | 10 | 11 | class DeploymentController(KubernetesController): 12 | def __init__(self): 13 | super().__init__() 14 | 15 | def set_client(self): 16 | self._set_conf() 17 
| self.k8s_apps_v1 = client.AppsV1Api(client.ApiClient(self.configuration)) 18 | self.k8s_v1 = client.CoreV1Api(client.ApiClient(self.configuration)) 19 | 20 | def apply(self, name, image_name, container_port, envs, volume_mounts, volumes, service_account_name, 21 | prior_device_name, namespace): 22 | try: 23 | is_deployment = self._is(name, namespace) 24 | if is_deployment: 25 | return self._update(image_name, name, container_port, envs, volume_mounts, volumes, 26 | service_account_name, prior_device_name, namespace) 27 | 28 | return self._create(image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 29 | prior_device_name, namespace) 30 | except ApiException as e: 31 | lprint(e) 32 | raise RuntimeError(e) 33 | 34 | def _is(self, name, namespace): 35 | try: 36 | ret = self.k8s_apps_v1.list_namespaced_deployment(namespace) 37 | for item in ret.items: 38 | if item.metadata.name == name: 39 | return True 40 | return False 41 | except ApiException as e: 42 | lprint(e) 43 | raise RuntimeError(e) 44 | 45 | def _create(self, image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 46 | prior_device_name, namespace): 47 | body = self._get_body(image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 48 | prior_device_name, namespace) 49 | 50 | try: 51 | ret = self.k8s_apps_v1.create_namespaced_deployment(namespace, body) 52 | return ret 53 | except ApiException as e: 54 | lprint(e) 55 | raise RuntimeError(e) 56 | 57 | def _update(self, image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 58 | prior_device_name, namespace): 59 | body = self._get_body(image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 60 | prior_device_name, namespace) 61 | 62 | try: 63 | return self.k8s_apps_v1.replace_namespaced_deployment(name, namespace, body) 64 | except ApiException as e: 65 | lprint(e) 66 | raise RuntimeError(e) 67 | 68 | def _get_body(self, image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 69 | prior_device_name, namespace): 70 | pod_template = self._get_pod_template(image_name, name, container_port, envs, volume_mounts, volumes, 71 | service_account_name, prior_device_name) 72 | 73 | return client.V1Deployment( 74 | api_version="apps/v1", 75 | kind="Deployment", 76 | metadata=client.V1ObjectMeta(name=name, namespace=namespace), 77 | spec=client.V1DeploymentSpec( 78 | replicas=1, 79 | template=pod_template, 80 | selector={'matchLabels': {"run": name}} 81 | ) 82 | ) 83 | 84 | def is_pod_with_retry(self, name, namespace): 85 | pod_running = self._is_pod(name, namespace) 86 | if pod_running: 87 | return True 88 | 89 | for i in range(self.retry_cnt): 90 | lprint("retrying in 5 seconds...") 91 | time.sleep(5) 92 | pod_running = self._is_pod(name, namespace) 93 | if pod_running: 94 | return True 95 | 96 | return False 97 | 98 | def _is_pod(self, name, namespace): 99 | try: 100 | ret = self.k8s_v1.list_namespaced_pod(namespace) 101 | if ret is None: 102 | return False 103 | 104 | for pod in ret.items: 105 | if name not in pod.metadata.name: 106 | continue 107 | 108 | if pod.status.container_statuses is None: 109 | continue 110 | 111 | for status in pod.status.container_statuses: 112 | if status.ready: 113 | return True 114 | return False 115 | except ApiException as e: 116 | raise RuntimeError(e) 117 | 118 | def _get_pod_template(self, image_name, name, container_port, envs, volume_mounts, volumes, service_account_name, 119 
| prior_device_name): 120 | env_list = self._get_envs(envs) 121 | volume_mount_list = self._get_volume_mounts(volume_mounts) 122 | volume_list = self._get_volumes(volumes) 123 | image_pull_secret_name = prior_device_name + "-registry" 124 | 125 | if container_port != None and container_port != "": 126 | return client.V1PodTemplateSpec( 127 | metadata=client.V1ObjectMeta(labels={"run": name}), 128 | spec=client.V1PodSpec( 129 | service_account_name=service_account_name, 130 | image_pull_secrets=[client.V1LocalObjectReference( 131 | name=image_pull_secret_name 132 | )], 133 | containers=[client.V1Container( 134 | name=name, 135 | image=image_name, 136 | image_pull_policy="Always", 137 | ports=[client.V1ContainerPort(container_port=int(container_port))], 138 | env=env_list, 139 | volume_mounts=volume_mount_list 140 | )], 141 | volumes=volume_list 142 | ) 143 | ) 144 | 145 | return client.V1PodTemplateSpec( 146 | metadata=client.V1ObjectMeta(labels={"run": name}), 147 | spec=client.V1PodSpec( 148 | service_account_name=service_account_name, 149 | image_pull_secrets=[client.V1LocalObjectReference( 150 | name=image_pull_secret_name 151 | )], 152 | containers=[client.V1Container( 153 | name=name, 154 | image=image_name, 155 | image_pull_policy="Always", 156 | env=env_list, 157 | volume_mounts=volume_mount_list 158 | )], 159 | volumes=volume_list 160 | ) 161 | ) 162 | 163 | def _get_envs(self, env): 164 | envs = [] 165 | if env != None and env != "": 166 | for name, item in env.items(): 167 | envs.append(client.V1EnvVar( 168 | name=name, 169 | value=item 170 | )) 171 | 172 | return envs 173 | 174 | def _get_volume_mounts(self, volume_mounts): 175 | volume_mount_list = [] 176 | if volume_mounts != None and volume_mounts != "": 177 | for name, path in volume_mounts.items(): 178 | volume_mount_list.append(client.V1VolumeMount( 179 | name=name, 180 | mount_path=path 181 | )) 182 | 183 | return volume_mount_list 184 | 185 | def _get_volumes(self, volumes): 186 | volume_list = [] 187 | if volumes != None and volumes != "": 188 | for name, item in volumes.items(): 189 | if item.get("type") == VOLUME_TYPE_PVC: 190 | volume_list.append(client.V1Volume( 191 | name=name, 192 | persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource( 193 | claim_name=name 194 | ) 195 | )) 196 | elif item.get("type") == VOLUME_TYPE_HOST_PATH: 197 | volume_list.append(client.V1Volume( 198 | name=name, 199 | host_path=client.V1HostPathVolumeSource( 200 | path=item.get("path") 201 | ) 202 | )) 203 | 204 | return volume_list 205 | 206 | def delete(self, name, namespace): 207 | return self.k8s_apps_v1.delete_namespaced_deployment(name, namespace) 208 | -------------------------------------------------------------------------------- /src/controllers/docker_controller.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import docker 4 | from aion.logger import lprint 5 | 6 | 7 | class DockerController: 8 | def __init__(self): 9 | self.client = docker.DockerClient(base_url='unix://var/run/docker.sock') 10 | self.local_registry_addr = "localhost:31112" 11 | self.local_image_name = "" 12 | 13 | def login(self): 14 | username = os.environ.get("REGISTRY_USER") 15 | password = os.environ.get("REGISTRY_PASSWORD") 16 | self.client.login( 17 | username=username, password=password, registry=self.local_registry_addr) 18 | 19 | def tag(self, remote_image_name, name, docker_tag): 20 | image = self.client.images.get(remote_image_name) 21 | self.local_image_name = self.local_registry_addr + '/' + name + ":" + docker_tag
22 | image.tag(self.local_image_name) 23 | 24 | def push(self): 25 | try: 26 | lprint(self.local_image_name) 27 | ret = self.client.images.push(self.local_image_name) 28 | if "err" in ret: 29 | return False 30 | 31 | lprint(ret) 32 | return True 33 | except docker.errors.APIError as e: 34 | lprint(e) 35 | raise RuntimeError(e) 36 | 37 | def remove_image(self, microservice_name, ip, port, docker_tag): 38 | self.client.images.remove(f"{ip}:{port}/{microservice_name}:{docker_tag}", force=True) 39 | self.client.images.remove(f"{self.local_registry_addr}/{microservice_name}:{docker_tag}", force=True) 40 | -------------------------------------------------------------------------------- /src/controllers/docker_registry_controller.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import pycurl 4 | import simplejson 5 | 6 | SELF_IP = 'localhost' 7 | 8 | USERNAME = os.environ.get("REGISTRY_USER") 9 | PASSWORD = os.environ.get("REGISTRY_PASSWORD") 10 | 11 | 12 | class DockerRegistryController(): 13 | def __init__(self): 14 | self.local_registry_inner_addr = "kube-registry:5000" 15 | self.local_image_name = "" 16 | 17 | def remove_from_docker_registry(self, repository_name, docker_tag): 18 | ''' 19 | Delete a tag from the docker registry. 20 | 21 | @param repository_name: repository name (image name) 22 | @param docker_tag: tag name 23 | @return: response returned by the docker registry delete API 24 | ''' 25 | digest = self.get_digest(repository_name, docker_tag) 26 | 27 | if digest is None: 28 | print("[error] failed to get digest from docker registry") 29 | raise RuntimeError("failed to get digest from docker registry") 30 | 31 | print(f"[info] remove from docker registry (digest: {digest})") 32 | result = self.delete_tag_from_docker_registry(repository_name, digest) 33 | 34 | if 'errors' in result: 35 | print(f"[error] failed to remove from docker registry (error: {result['errors']})") 36 | raise RuntimeError(f"failed to remove from docker registry (error: {result['errors']})") 37 | 38 | return result 39 | 40 | def get_digest(self, repository_name, image_tag): 41 | ''' 42 | Call the docker registry API to get the content digest of an image. 43 | 44 | @param repository_name: repository name (image name) 45 | @param image_tag: tag name 46 | @return: content digest extracted from the docker registry API response 47 | ''' 48 | curl = pycurl.Curl() 49 | curl.setopt(pycurl.URL, 50 | f'https://{self.local_registry_inner_addr}/v2/{repository_name}/manifests/{image_tag}') 51 | curl.setopt(pycurl.USERPWD, '%s:%s' % (USERNAME, PASSWORD)) 52 | curl.setopt(pycurl.HTTPHEADER, ["Accept: application/vnd.docker.distribution.manifest.v2+json"]) 53 | curl.setopt(pycurl.SSL_VERIFYPEER, 0) 54 | curl.setopt(pycurl.SSL_VERIFYHOST, 0) 55 | 56 | header = ManifestHeader() 57 | curl.setopt(pycurl.HEADERFUNCTION, header.store) 58 | curl.perform() 59 | 60 | return header.get_digest() 61 | 62 | def delete_tag_from_docker_registry(self, repository_name, digest): 63 | ''' 64 | Call the docker registry API to delete the tag from the docker registry. 65 | 66 | @param repository_name: repository name (image name) 67 | @param digest: content digest of the image 68 | @return: response from the docker registry API 69 | ''' 70 | url = f'https://{self.local_registry_inner_addr}/v2/{repository_name}/manifests/{digest}' 71 | 72 | curl = pycurl.Curl() 73 | curl.setopt(pycurl.URL, url) 74 | curl.setopt(pycurl.CUSTOMREQUEST, 'DELETE') 75 | curl.setopt(pycurl.USERPWD, '%s:%s' % (USERNAME, PASSWORD)) 76 | curl.setopt(pycurl.SSL_VERIFYPEER, 0) 77 | curl.setopt(pycurl.SSL_VERIFYHOST, 0) 78 | 79 | response = simplejson.loads(curl.perform_rs()) 80 | curl.close() 81 | 82 | return response 83 | 84 | 85 | class ManifestHeader(): 86 | def __init__(self): 87 | self.headers = [] 88 | 89 | def
store(self, b): 90 | self.headers.append(b.decode('utf-8')) 91 | 92 | def get_digest(self): 93 | digest_header = next(filter(lambda h: h.startswith("Docker-Content-Digest:"), self.headers), None) 94 | # 23 -> len("Docker-Content-Digest: ") 95 | # 94 -> end of digest 96 | 97 | # e.g. sha256:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 98 | return digest_header[23:94] if digest_header is not None else None 99 | -------------------------------------------------------------------------------- /src/controllers/kubernetes_controller.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from kubernetes import client 4 | 5 | 6 | class KubernetesController(): 7 | def __init__(self): 8 | self.configuration = "" 9 | self.retry_cnt = 5 10 | 11 | def _set_conf(self): 12 | configuration = client.Configuration() 13 | configuration.verify_ssl = True 14 | configuration.host = "https://" + os.environ.get("KUBERNETES_SERVICE_HOST") 15 | configuration.api_key["authorization"] = self._get_token() 16 | configuration.api_key_prefix["authorization"] = "Bearer" 17 | configuration.ssl_ca_cert = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" 18 | self.configuration = configuration 19 | 20 | def _get_token(self): 21 | with open("/var/run/secrets/kubernetes.io/serviceaccount/token") as f: 22 | return f.read() 23 | -------------------------------------------------------------------------------- /src/controllers/namespace_controller.py: -------------------------------------------------------------------------------- 1 | from aion.logger import lprint 2 | from kubernetes import client 3 | from kubernetes.client.rest import ApiException 4 | 5 | from controllers.kubernetes_controller import KubernetesController 6 | 7 | 8 | class NamespaceController(KubernetesController): 9 | def __init__(self): 10 | super().__init__() 11 | 12 | def set_client(self): 13 | self._set_conf() 14 | self.k8s_v1 = client.CoreV1Api(client.ApiClient(self.configuration)) 15 | 16 | def apply(self, name): 17 | try: 18 | is_namespace = self._is(name) 19 | if is_namespace is False: 20 | return self._create(name) 21 | except ApiException as e: 22 | lprint(e) 23 | raise RuntimeError(e) 24 | 25 | def _is(self, name): 26 | try: 27 | ret = self.k8s_v1.list_namespace() 28 | for item in ret.items: 29 | if item.metadata.name == name: 30 | return True 31 | return False 32 | except ApiException as e: 33 | lprint(e) 34 | raise RuntimeError(e) 35 | 36 | def _create(self, name): 37 | try: 38 | ret = self.k8s_v1.create_namespace(client.V1Namespace( 39 | api_version="v1", 40 | kind="Namespace", 41 | metadata=client.V1ObjectMeta(name=name) 42 | )) 43 | return ret 44 | except ApiException as e: 45 | lprint(e) 46 | raise RuntimeError(e) 47 | -------------------------------------------------------------------------------- /src/controllers/pvc_controller.py: -------------------------------------------------------------------------------- 1 | from aion.logger import lprint 2 | from kubernetes import client 3 | from kubernetes.client.rest import ApiException 4 | from controllers.kubernetes_controller import KubernetesController 5 | 6 | 7 | class PvcController(KubernetesController): 8 | def __init__(self): 9 | super().__init__() 10 | 11 | def set_client(self): 12 | self._set_conf() 13 | self.k8s_v1 = client.CoreV1Api(client.ApiClient(self.configuration)) 14 | 15 | def apply(self, name, path, storage, namespace): 16 | if storage is None: 17 | storage = "1Gi" 18 | 19 | try: 20 | is_pvc = self._is_pvc(name, namespace) 21 | 
if is_pvc is False: 22 | self._create_pv(name, path, storage, namespace) 23 | return self._create_pvc(name, storage, namespace) 24 | 25 | # if self._delete_pvc(name, namespace): 26 | # time.sleep(5) 27 | # self._create_pv(name, path, storage, namespace) 28 | # return self._create_pvc(name, storage, namespace) 29 | except ApiException as e: 30 | lprint(e) 31 | raise RuntimeError(e) 32 | 33 | def _is_pvc(self, name, namespace): 34 | try: 35 | ret = self.k8s_v1.list_namespaced_persistent_volume_claim(namespace) 36 | for item in ret.items: 37 | if item.metadata.name == name: 38 | return True 39 | return False 40 | except ApiException as e: 41 | lprint(e) 42 | raise RuntimeError(e) 43 | 44 | def _create_pvc(self, name, storage, namespace): 45 | body = self._get_pvc_body(name, storage, namespace) 46 | 47 | try: 48 | return self.k8s_v1.create_namespaced_persistent_volume_claim(namespace, body) 49 | except ApiException as e: 50 | lprint(e) 51 | raise RuntimeError(e) 52 | 53 | def _create_pv(self, name, path, storage, namespace): 54 | body = self._get_pv_body(name, path, storage, namespace) 55 | 56 | try: 57 | return self.k8s_v1.create_persistent_volume(body) 58 | except ApiException as e: 59 | lprint(e) 60 | raise RuntimeError(e) 61 | 62 | def delete_pvc(self, name, namespace): 63 | if namespace == "default": 64 | self.k8s_v1.delete_persistent_volume(name) 65 | else: 66 | self.k8s_v1.delete_persistent_volume(name + "-" + namespace) 67 | return self.k8s_v1.delete_namespaced_persistent_volume_claim(name, namespace) 68 | 69 | def _get_pvc_body(self, name, storage, namespace): 70 | spec = self._get_spec_pvc(name, storage) 71 | 72 | return client.V1PersistentVolumeClaim( 73 | api_version="v1", 74 | kind="PersistentVolumeClaim", 75 | metadata=client.V1ObjectMeta(name=name, namespace=namespace), 76 | spec=spec 77 | ) 78 | 79 | # can not specify namespace in PV. 
so add suffix namespace with PV name 80 | def _get_pv_body(self, name, path, storage, namespace): 81 | spec = self._get_pv_spec(name, path, storage) 82 | if namespace != "default": 83 | name = name + "-" + namespace 84 | 85 | return client.V1PersistentVolume( 86 | api_version="v1", 87 | kind="PersistentVolume", 88 | metadata=client.V1ObjectMeta(name=name), 89 | spec=spec 90 | ) 91 | 92 | def _get_spec_pvc(self, name, storage): 93 | return client.V1PersistentVolumeClaimSpec( 94 | storage_class_name=name, 95 | access_modes=["ReadWriteOnce"], 96 | resources=client.V1ResourceRequirements( 97 | requests={"storage": storage} 98 | ) 99 | ) 100 | 101 | def _get_pv_spec(self, name, path, storage): 102 | return client.V1PersistentVolumeSpec( 103 | storage_class_name=name, 104 | access_modes=["ReadWriteOnce"], 105 | capacity={"storage": storage}, 106 | host_path=client.V1HostPathVolumeSource( 107 | path=path 108 | ) 109 | ) 110 | -------------------------------------------------------------------------------- /src/controllers/service_controller.py: -------------------------------------------------------------------------------- 1 | from aion.logger import lprint 2 | from kubernetes import client 3 | from kubernetes.client.rest import ApiException 4 | 5 | from controllers.kubernetes_controller import KubernetesController 6 | 7 | SERVICE_TYPE_CLUSTER_IP = "ClusterIP" 8 | SERVICE_TYPE_NODE_PORT = "NodePort" 9 | 10 | 11 | class ServiceController(KubernetesController): 12 | def __init__(self): 13 | super().__init__() 14 | 15 | def set_client(self): 16 | self._set_conf() 17 | self.k8s_v1 = client.CoreV1Api(client.ApiClient(self.configuration)) 18 | 19 | def apply(self, name, container_port, node_port, namespace): 20 | try: 21 | is_service = self._is(name, namespace) 22 | if is_service is False: 23 | return self._create(name, container_port, node_port, namespace) 24 | 25 | if self.delete(name, namespace): 26 | return self._create(name, container_port, node_port, namespace) 27 | 28 | except ApiException as e: 29 | lprint(e) 30 | raise RuntimeError(e) 31 | 32 | def _is(self, name, namespace): 33 | try: 34 | ret = self.k8s_v1.list_namespaced_service(namespace) 35 | for item in ret.items: 36 | if item.metadata.name == name: 37 | return True 38 | return False 39 | except ApiException as e: 40 | lprint(e) 41 | raise RuntimeError(e) 42 | 43 | def _create(self, name, container_port, node_port, namespace): 44 | body = self._get_body(name, container_port, node_port, namespace) 45 | 46 | try: 47 | return self.k8s_v1.create_namespaced_service(namespace, body) 48 | except ApiException as e: 49 | lprint(e) 50 | raise RuntimeError(e) 51 | 52 | def delete(self, name, namespace): 53 | return self.k8s_v1.delete_namespaced_service(name, namespace) 54 | 55 | def _get_body(self, name, container_port, node_port, namespace): 56 | spec = self._get_spec(name, container_port, node_port) 57 | 58 | return client.V1Service( 59 | api_version="v1", 60 | kind="Service", 61 | metadata=client.V1ObjectMeta(name=name, namespace=namespace), 62 | spec=spec 63 | ) 64 | 65 | def _get_spec(self, name, container_port, node_port): 66 | selector = {"run": name} 67 | service_type = SERVICE_TYPE_CLUSTER_IP 68 | if node_port: 69 | service_type = SERVICE_TYPE_NODE_PORT 70 | 71 | ports = [] 72 | if service_type == SERVICE_TYPE_CLUSTER_IP: 73 | ports = [ 74 | { 75 | "name": name, 76 | "port": int(container_port) 77 | } 78 | ] 79 | elif service_type == SERVICE_TYPE_NODE_PORT: 80 | ports = [ 81 | { 82 | "name": name, 83 | "port": int(container_port), 84 | 
"targetPort": int(container_port), 85 | "nodePort": int(node_port) 86 | } 87 | ] 88 | 89 | return client.V1ServiceSpec( 90 | selector=selector, 91 | type=service_type, 92 | ports=ports 93 | ) 94 | -------------------------------------------------------------------------------- /src/main.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import time 3 | import traceback 4 | 5 | from aion.logger import lprint, initialize_logger 6 | from aion.microservice import Options, main_decorator 7 | 8 | from controllers.const import SERVICE_NAME, KUBERNETES_PACKAGE, SUCCEED_STATUS_INDEX, FAILED_STATUS_INDEX, \ 9 | VOLUME_TYPE_PVC, DELETED_STATUS_INDEX 10 | from controllers.deployment_controller import DeploymentController 11 | from controllers.docker_controller import DockerController 12 | from controllers.docker_registry_controller import DockerRegistryController 13 | from controllers.pvc_controller import PvcController 14 | from controllers.service_controller import ServiceController 15 | 16 | METADATA_KEYS = [ 17 | "priorDeviceName", 18 | "deviceName", 19 | "projectName", 20 | "projectCommitId", 21 | "ip", 22 | "port", 23 | "microserviceName", 24 | "dockerTag" 25 | ] 26 | 27 | 28 | @main_decorator(SERVICE_NAME) 29 | def main(opt: Options): 30 | initialize_logger(SERVICE_NAME) 31 | logging.getLogger(KUBERNETES_PACKAGE).setLevel(logging.ERROR) 32 | 33 | conn = opt.get_conn() 34 | num = opt.get_number() 35 | kanban = conn.get_one_kanban(SERVICE_NAME, num) 36 | metadata = kanban.get_metadata() 37 | 38 | lprint("metadata: ", metadata) 39 | 40 | for key in METADATA_KEYS: 41 | if key not in metadata: 42 | raise RuntimeError(f"Not found '{key}' in metadadata.") 43 | 44 | deployment_name = metadata.get("microserviceName") 45 | docker_tag = metadata.get("dockerTag") 46 | remote_ip = metadata.get("ip") 47 | remote_port = metadata.get("port") 48 | container_port = metadata.get("containerPort") 49 | volumes = metadata.get("volumes") 50 | prior_device_name = metadata.get("priorDeviceName") 51 | namespace = metadata.get("projectName").lower() 52 | 53 | try: 54 | lprint("==========DELETE pvc==========") 55 | # pvc 56 | if volumes != None and volumes != "": 57 | pvc_controller = PvcController() 58 | for name, item in volumes.items(): 59 | if item.get("type") == VOLUME_TYPE_PVC: 60 | try: 61 | pvc_controller.set_client() 62 | pvc_controller.delete_pvc(name, namespace) 63 | except Exception as e: 64 | lprint(f"[info]pvc is not deleted (name: {name})") 65 | lprint(e) 66 | 67 | lprint("==========DELETE deployment==========") 68 | # deployment 69 | try: 70 | deployment_controller = DeploymentController() 71 | deployment_controller.set_client() 72 | deployment_controller.delete(deployment_name, namespace) 73 | except Exception as e: 74 | lprint(f"[info]deployment is not deleted (name: {deployment_name})") 75 | lprint(e) 76 | 77 | lprint("==========DELETE service==========") 78 | # service 79 | if container_port != None and container_port != "": 80 | try: 81 | service_controller = ServiceController() 82 | service_controller.set_client() 83 | service_controller.delete(deployment_name, namespace) 84 | except Exception as e: 85 | lprint(f"[info]service is not deleted (name: {deployment_name})") 86 | lprint(e) 87 | 88 | # デプロイ元でカンバンが滞留する問題を防ぐ 89 | time.sleep(10) 90 | 91 | metadata['status'] = DELETED_STATUS_INDEX 92 | metadata['error'] = "" 93 | 94 | conn.output_kanban( 95 | metadata=metadata, 96 | device_name=prior_device_name 97 | ) 98 | 99 | time.sleep(30) 100 | 101 | 
lprint("==========DELETE image from host==========") 102 | # remove image from host 103 | try: 104 | docker_controller = DockerController() 105 | docker_controller.remove_image( 106 | microservice_name=deployment_name, 107 | ip=remote_ip, 108 | port=remote_port, 109 | docker_tag=docker_tag 110 | ) 111 | except Exception as e: 112 | lprint(f"[info]local image is not removed (name: {deployment_name})") 113 | lprint(e) 114 | 115 | lprint("==========DELETE image from docker registry==========") 116 | try: 117 | docker_registry_controller = DockerRegistryController() 118 | image_digest = docker_registry_controller.get_digest( 119 | repository_name=deployment_name, 120 | image_tag=docker_tag 121 | ) 122 | 123 | lprint(f'digest is {image_digest}') 124 | docker_registry_controller.delete_tag_from_docker_registry( 125 | repository_name=deployment_name, 126 | digest=image_digest 127 | ) 128 | except Exception as e: 129 | lprint(f"[info]registry image is not removed (name: {deployment_name})") 130 | lprint(e) 131 | 132 | metadata['status'] = SUCCEED_STATUS_INDEX 133 | metadata['error'] = "" 134 | 135 | conn.output_kanban( 136 | metadata=metadata, 137 | device_name=prior_device_name 138 | ) 139 | 140 | return 141 | 142 | except Exception: 143 | lprint(traceback.format_exc()) 144 | metadata['status'] = FAILED_STATUS_INDEX 145 | metadata['error'] = "" 146 | conn.output_kanban( 147 | metadata=metadata, 148 | device_name=prior_device_name 149 | ) 150 | 151 | 152 | if __name__ == "__main__": 153 | main() 154 | --------------------------------------------------------------------------------