├── .deepsource.toml ├── .github └── workflows │ ├── docker-publish.yml │ └── trivy.yaml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── files ├── aautoscaler.py ├── k8s.py ├── logs.py ├── main.py └── requirements.txt └── kubernetes └── full.yaml /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "python" 5 | enabled = true 6 | 7 | [analyzers.meta] 8 | runtime_version = "3.x.x" 9 | 10 | [[analyzers]] 11 | name = "docker" 12 | enabled = true -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker build and push 2 | 3 | on: 4 | push: 5 | branches: [master] 6 | release: 7 | types: [published] 8 | 9 | env: 10 | REGISTRY: ghcr.io 11 | # github.repository as / 12 | IMAGE_NAME: ${{ github.repository }} 13 | 14 | jobs: 15 | build: 16 | 17 | runs-on: ubuntu-latest 18 | permissions: 19 | contents: read 20 | packages: write 21 | 22 | steps: 23 | - name: Checkout repository 24 | uses: actions/checkout@v2 25 | 26 | # Login against a Docker registry except on PR 27 | # https://github.com/docker/login-action 28 | - name: Log into registry ${{ env.REGISTRY }} 29 | if: github.event_name != 'pull_request' 30 | uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c 31 | with: 32 | registry: ${{ env.REGISTRY }} 33 | username: ${{ github.actor }} 34 | password: ${{ secrets.GITHUB_TOKEN }} 35 | 36 | # Extract metadata (tags, labels) for Docker 37 | # https://github.com/docker/metadata-action 38 | - name: Extract Docker metadata 39 | if: github.event_name != 'pull_request' 40 | id: meta 41 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 42 | with: 43 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 44 | 45 | # Build and push Docker image with Buildx (don't push on PR) 46 | # https://github.com/docker/build-push-action 47 | - name: Build and push Docker image 48 | if: github.event_name != 'pull_request' 49 | uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc 50 | with: 51 | context: . 52 | push: true 53 | tags: ${{ steps.meta.outputs.tags }} 54 | labels: ${{ steps.meta.outputs.labels }} 55 | -------------------------------------------------------------------------------- /.github/workflows/trivy.yaml: -------------------------------------------------------------------------------- 1 | name: Scan Docker image 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | pull_request: 7 | branches: [ master ] 8 | schedule: 9 | - cron: '00 03 * * *' 10 | 11 | jobs: 12 | build: 13 | name: Build 14 | runs-on: "ubuntu-18.04" 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v2 18 | 19 | - name: Build an image from Dockerfile 20 | run: | 21 | docker build -t docker.io/dignajar/another-autoscaler:${{ github.sha }} . 22 | 23 | - name: Run Trivy vulnerability scanner 24 | uses: aquasecurity/trivy-action@master 25 | with: 26 | image-ref: 'docker.io/dignajar/another-autoscaler:${{ github.sha }}' 27 | format: 'template' 28 | template: '@/contrib/sarif.tpl' 29 | output: 'trivy-results.sarif' 30 | severity: 'CRITICAL,HIGH' 31 | 32 | - name: Upload Trivy scan results to GitHub Security tab 33 | uses: github/codeql-action/upload-sarif@v1 34 | with: 35 | sarif_file: 'trivy-results.sarif' 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | files/__pycache__ -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9.9-alpine3.14 2 | 3 | ENV PYTHONUNBUFFERED=1 4 | 5 | COPY files/requirements.txt /tmp/requirements.txt 6 | RUN pip install -r /tmp/requirements.txt --no-cache-dir 7 | 8 | # Run as non-root 9 | ENV USER autoscaler 10 | ENV UID 10001 11 | ENV GROUP autoscaler 12 | ENV GID 10001 13 | ENV HOME /home/$USER 14 | RUN addgroup -g $GID -S $GROUP && adduser -u $UID -S $USER -G $GROUP 15 | 16 | # Copy app 17 | COPY --chown=$UID:$GID ./files/ $HOME/ 18 | 19 | USER $UID:$GID 20 | WORKDIR $HOME 21 | CMD ["python3", "-u", "main.py"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Diego Najar 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Another Autoscaler 2 | Another Autoscaler is a Kubernetes controller that automatically starts, stops, or restarts pods from a deployment at a specified time using a cron syntax. 3 | 4 | Another Autoscaler read the annotation of each deployment and performs an increase or decrease in the number of replicas. 5 | 6 | [![Docker image](https://img.shields.io/badge/Docker-image-blue.svg)](https://github.com/dignajar/another-autoscaler/pkgs/container/another-autoscaler) 7 | [![Kubernetes YAML manifests](https://img.shields.io/badge/Kubernetes-manifests-blue.svg)](https://github.com/dignajar/another-autoscaler/tree/master/kubernetes) 8 | [![codebeat badge](https://codebeat.co/badges/f57de995-ca62-49e5-b309-82ed60570324)](https://codebeat.co/projects/github-com-dignajar-another-autoscaler-master) 9 | [![release](https://img.shields.io/github/v/release/dignajar/another-autoscaler.svg)](https://github.com/dignajar/another-autoscaler/releases) 10 | [![license](https://img.shields.io/badge/license-MIT-green)](https://github.com/dignajar/another-autoscaler/blob/master/LICENSE) 11 | 12 | > The date and time must be in UTC. 13 | 14 | > The restart feature execute a rollout restart deployment. 15 | 16 | ## Use cases 17 | - Cost savings by reducing the number of replicas after working hours or weekends. 18 | - Stop GPU deployments during non-working hours. 19 | 20 | Another Autoscaler is a perfect combination with [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler). 21 | 22 | ## Installation 23 | ``` 24 | # Deploy Another Autoscaler into Kubernetes on "another" namespace 25 | kubectl apply -f https://raw.githubusercontent.com/dignajar/another-autoscaler/master/kubernetes/full.yaml 26 | 27 | # Check if Another Autoscaler is working 28 | kubectl get pods -n another 29 | ``` 30 | 31 | ## Configuration 32 | The following annotations for the deployments are valid (`metadata.annotations`). 33 | 34 | - `another-autoscaler/stop-time`: Define the date and time when the replica of the deployment will be set to 0. 35 | - `another-autoscaler/start-time` Define the date and time when the replica of the deployment will be set to 1. 36 | - `another-autoscaler/restart-time:`: Define the date and time when the rollout restart will be peformerd to a deployment. 37 | - `another-autoscaler/stop-replicas`: This is the number of replicas to set when Another Autoscaler scale down the deployment, by default is 0. 38 | - `another-autoscaler/start-replicas`: This is the number of replicas to set when Another Autoscaler scale up the deployment, by default is 1. 39 | 40 | ## Examples 41 | 42 | ### Stop pods at 6pm every day: 43 | ``` 44 | another-autoscaler/stop-time: "00 18 * * *" 45 | ``` 46 | 47 | ### Start pods at 1pm every day: 48 | ``` 49 | another-autoscaler/start-time: "00 13 * * *" 50 | ``` 51 | 52 | ### Start 3 pods at 2:30pm every day: 53 | ``` 54 | another-autoscaler/start-time: "30 14 * * *" 55 | another-autoscaler/start-replicas: "3" 56 | ``` 57 | 58 | ### Restart pods at 9:15am every day: 59 | ``` 60 | another-autoscaler/restart-time: "15 09 * * *" 61 | ``` 62 | 63 | ### Restart pods at 2:30am, only on Saturday and Sunday: 64 | ``` 65 | another-autoscaler/restart-time: "00 02 * * 0,6" 66 | ``` 67 | 68 | ### Full example, how to start pods at 2pm and stop them at 3pm every day 69 | The following example start `5` replicas in total at `2pm` and stop `4` of them at `3pm` every day, the deployment start with `0` replicas. 70 | 71 | The `start-replicas` is not incremental, the value is the number of replicas will be setup by Another Autoscaler at the defined time by `start-time`. 72 | 73 | > The date and time must be in UTC. 74 | 75 | ``` 76 | apiVersion: apps/v1 77 | kind: Deployment 78 | metadata: 79 | name: nginx 80 | labels: 81 | app: nginx 82 | annotations: 83 | another-autoscaler/start-time: "00 14 * * *" 84 | another-autoscaler/start-replicas: "5" 85 | another-autoscaler/stop-time: "00 15 * * *" 86 | another-autoscaler/stop-replicas: "1" 87 | spec: 88 | replicas: 0 89 | selector: 90 | matchLabels: 91 | app: nginx 92 | template: 93 | metadata: 94 | labels: 95 | app: nginx 96 | spec: 97 | containers: 98 | - name: nginx 99 | image: nginx:latest 100 | ports: 101 | - containerPort: 80 102 | ``` 103 | 104 | ## GitOps / FluxCD 105 | To avoid conflicts with Flux and Another Autoscaler, you can remove the field `spec.replicas` from your deployment manifest and leave Another Autoscaler to manage the number of replicas. 106 | -------------------------------------------------------------------------------- /files/aautoscaler.py: -------------------------------------------------------------------------------- 1 | import os 2 | from croniter import croniter 3 | from datetime import date, datetime, timezone, timedelta 4 | from dateutil import parser 5 | from logs import Logs 6 | from k8s import K8s 7 | 8 | class AAutoscaler: 9 | 10 | def __init__(self): 11 | self.logs = Logs(self.__class__.__name__) 12 | 13 | # K8s client object 14 | # Auth via Bearer token 15 | if 'K8S_BEARER_TOKEN' in os.environ and 'K8S_API_ENDPOINT' in os.environ: 16 | self.logs.info({'message': 'Kubernetes object via bearer token.'}) 17 | k8sAPI = os.environ.get('K8S_API_ENDPOINT') 18 | k8sToken = os.environ.get('K8S_BEARER_TOKEN') 19 | self.k8s = K8s(k8sAPI, k8sToken) 20 | # Auth via in-cluster configuration, running inside a pod 21 | elif 'KUBERNETES_SERVICE_HOST' in os.environ: 22 | self.logs.info({'message': 'Kubernetes object via in-cluster configuration.'}) 23 | self.k8s = K8s() 24 | else: 25 | self.logs.error({'message': 'Error trying to create the Kubernetes object.'}) 26 | raise Exception('Error trying to create the Kubernetes object.') 27 | 28 | def __start__(self, namespace:str, deploy:dict, currentTime:datetime): 29 | ''' 30 | Logic for start the pods 31 | ''' 32 | deployName = deploy.metadata.name 33 | deployAnnotations = deploy.metadata.annotations 34 | deployReplicas = deploy.spec.replicas 35 | 36 | startAnnotation = 'another-autoscaler/start-time' 37 | if startAnnotation in deployAnnotations: 38 | self.logs.debug({'message': 'Start time detected.', 'namespace': namespace, 'deployment': deployName}) 39 | startTime = deployAnnotations[startAnnotation] 40 | 41 | if croniter.match(startTime, currentTime): 42 | self.logs.debug({'message': 'Start time Cron expression matched.', 'namespace': namespace, 'deployment': deployName, 'startTime': str(startTime), 'currentTime': str(currentTime)}) 43 | 44 | # start-replicas 45 | startReplicas = 1 46 | startReplicasAnnotation = 'another-autoscaler/start-replicas' 47 | if startReplicasAnnotation in deployAnnotations: 48 | self.logs.debug({'message': 'Number of replicas.', 'namespace': namespace, 'deployment': deployName, 'startReplicas': deployAnnotations[startReplicasAnnotation]}) 49 | startReplicas = int(deployAnnotations[startReplicasAnnotation]) 50 | 51 | if deployReplicas != startReplicas: 52 | self.logs.info({'message': 'Deployment set to start.', 'namespace': namespace, 'deployment': deployName, 'startTime': str(startTime), 'availableReplicas': deploy.status.available_replicas, 'startReplicas': str(startReplicas)}) 53 | self.k8s.setReplicas(namespace, deployName, startReplicas) 54 | 55 | def __stop__(self, namespace:str, deploy:dict, currentTime:datetime): 56 | ''' 57 | Logic for stop the pods 58 | ''' 59 | deployName = deploy.metadata.name 60 | deployAnnotations = deploy.metadata.annotations 61 | deployReplicas = deploy.spec.replicas 62 | 63 | stopAnnotation = 'another-autoscaler/stop-time' 64 | if stopAnnotation in deployAnnotations: 65 | self.logs.debug({'message': 'Stop time detected.', 'namespace': namespace, 'deployment': deployName}) 66 | stopTime = deployAnnotations[stopAnnotation] 67 | 68 | if croniter.match(stopTime, currentTime): 69 | self.logs.debug({'message': 'Stop time Cron expression matched.', 'namespace': namespace, 'deployment': deployName, 'stopTime': str(stopTime), 'currentTime': str(currentTime)}) 70 | 71 | # stop-replicas 72 | stopReplicas = 0 73 | stopReplicasAnnotation = 'another-autoscaler/stop-replicas' 74 | if stopReplicasAnnotation in deployAnnotations: 75 | self.logs.debug({'message': 'Number of replicas.', 'namespace': namespace, 'deployment': deployName, 'stopReplicas': deployAnnotations[stopReplicasAnnotation]}) 76 | stopReplicas = int(deployAnnotations[stopReplicasAnnotation]) 77 | 78 | if deployReplicas != stopReplicas: 79 | self.logs.info({'message': 'Deployment set to stop.', 'namespace': namespace, 'deployment': deployName, 'stopTime': str(stopTime), 'availableReplicas': deploy.status.available_replicas, 'stopReplicas': str(stopReplicas)}) 80 | self.k8s.setReplicas(namespace, deployName, stopReplicas) 81 | 82 | def __restart__(self, namespace:str, deploy:dict, currentTime:datetime): 83 | ''' 84 | Logic for restart the pods 85 | ''' 86 | deployName = deploy.metadata.name 87 | deployAnnotations = deploy.metadata.annotations 88 | 89 | restartAnnotation = 'another-autoscaler/restart-time' 90 | if restartAnnotation in deployAnnotations: 91 | self.logs.debug({'message': 'Restart time detected.', 'namespace': namespace, 'deployment': deployName}) 92 | restartTime = deployAnnotations[restartAnnotation] 93 | 94 | if croniter.match(restartTime, currentTime): 95 | self.logs.debug({'message': 'Restart time Cron expression matched.', 'namespace': namespace, 'deployment': deployName, 'restartTime': str(restartTime), 'currentTime': str(currentTime)}) 96 | 97 | # Check if was already restarted 98 | try: 99 | restartedAt = parser.parse(deploy.spec.template.metadata.annotations['kubectl.kubernetes.io/restartedAt']) 100 | except: 101 | restartedAt = currentTime - timedelta(days=1) 102 | 103 | if ((currentTime - restartedAt).total_seconds() / 60.0) > 1: 104 | self.logs.info({'message': 'Deployment set to restart.', 'namespace': namespace, 'deployment': deployName, 'restartTime': str(restartTime), 'currentTime': str(currentTime)}) 105 | self.k8s.rolloutDeployment(namespace, deployName) 106 | 107 | def execute(self): 108 | # Current time in UTC format 109 | currentTime = datetime.now(tz=timezone.utc) 110 | 111 | # For each namespace 112 | self.logs.info({'message': 'Getting list of namespaces.'}) 113 | namespaces = self.k8s.getNamespaces() 114 | for namespace in namespaces: 115 | namespaceName = namespace.metadata.name 116 | 117 | # For each deployment inside the namespace 118 | deployments = self.k8s.getDeployments(namespaceName) 119 | for deploy in deployments: 120 | self.__start__(namespaceName, deploy, currentTime) 121 | self.__stop__(namespaceName, deploy, currentTime) 122 | self.__restart__(namespaceName, deploy, currentTime) 123 | -------------------------------------------------------------------------------- /files/k8s.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import pytz 3 | import urllib3 4 | from kubernetes import client, config 5 | from kubernetes.client.rest import ApiException 6 | from logs import Logs 7 | 8 | 9 | class K8s: 10 | 11 | def __init__(self, apiEndpoint:str='', token:str=''): 12 | self.logs = Logs(self.__class__.__name__) 13 | 14 | # Client via Bearer token 15 | if token: 16 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 17 | configuration = client.Configuration() 18 | configuration.api_key['authorization'] = token 19 | configuration.api_key_prefix['authorization'] = 'Bearer' 20 | configuration.verify_ssl = False 21 | configuration.host = apiEndpoint 22 | self.CoreV1Api = client.CoreV1Api(client.ApiClient(configuration)) 23 | self.AppsV1Api = client.AppsV1Api(client.ApiClient(configuration)) 24 | self.NetworkingV1Api = client.NetworkingV1Api(client.ApiClient(configuration)) 25 | # Client via in-cluster configuration, 26 | # running inside a pod with proper service account 27 | else: 28 | config.load_incluster_config() 29 | self.CoreV1Api = client.CoreV1Api() 30 | self.AppsV1Api = client.AppsV1Api() 31 | self.NetworkingV1Api = client.NetworkingV1Api() 32 | 33 | def getNamespaces(self) -> list: 34 | ''' 35 | Returns a list of namespaces. 36 | ''' 37 | try: 38 | response = self.CoreV1Api.list_namespace() 39 | return response.items 40 | except ApiException as e: 41 | print(e) 42 | self.logs.error({'message': 'Exception when calling CoreV1Api.list_namespace'}) 43 | return [] 44 | 45 | def getDeployments(self, namespace:str, labelSelector:str=False) -> list: 46 | ''' 47 | Returns all deployments from a namespace. 48 | Label selector should be an string "app=kube-web-view". 49 | ''' 50 | try: 51 | if labelSelector: 52 | response = self.AppsV1Api.list_namespaced_deployment(namespace=namespace, label_selector=labelSelector) 53 | else: 54 | response = self.AppsV1Api.list_namespaced_deployment(namespace=namespace) 55 | return response.items 56 | except ApiException as e: 57 | print(e) 58 | self.logs.error({'message': 'Exception when calling AppsV1Api.list_namespaced_deployment'}) 59 | return [] 60 | 61 | def getDeployment(self, namespace:str, deploymentName:str): 62 | ''' 63 | Returns a particular deployment. 64 | ''' 65 | try: 66 | return self.AppsV1Api.read_namespaced_deployment(namespace=namespace, name=deploymentName) 67 | except ApiException as e: 68 | print(e) 69 | self.logs.error({'message': 'Exception when calling AppsV1Api.read_namespaced_deployment'}) 70 | return [] 71 | 72 | def getPods(self, namespace:str, labelSelector:str, limit:int=1) -> list: 73 | ''' 74 | Returns a list of pods for the label selector. 75 | Label selector should be an string "app=kube-web-view". 76 | ''' 77 | try: 78 | response = self.CoreV1Api.list_namespaced_pod(namespace=namespace, label_selector=labelSelector, limit=limit) 79 | return response.items 80 | except ApiException as e: 81 | print(e) 82 | self.logs.error({'message': 'Exception when calling CoreV1Api.list_namespaced_pod'}) 83 | return [] 84 | 85 | def getPodsByDeployment(self, namespace:str, deploymentName:str, limit:int=1) -> list: 86 | ''' 87 | Returns a list of pods related to a deployment. 88 | ''' 89 | deployment = self.getDeployment(namespace, deploymentName) 90 | if not deployment: 91 | return [] 92 | 93 | matchLabels = deployment.spec.selector.match_labels 94 | labelSelector = '' 95 | for key, value in matchLabels.items(): 96 | labelSelector += key+'='+value+',' 97 | labelSelector = labelSelector[:-1] # remove the last comma from the string 98 | return self.getPods(namespace, labelSelector, limit) 99 | 100 | def deleteAllPods(self, namespace:str, labelSelector:str): 101 | ''' 102 | Delete all pods from a namespace filter by label selector. 103 | ''' 104 | deployments = self.getDeployments(namespace=namespace, labelSelector=labelSelector) 105 | if not deployments: 106 | return False 107 | 108 | deployment = deployments[0] 109 | for labelKey, labelValue in deployment.spec.selector.match_labels.items(): 110 | pods = self.getPods(namespace, labelKey+'='+labelValue) 111 | for pod in pods: 112 | self.deletePod(namespace=namespace, podName=pod.metadata.name) 113 | return True 114 | 115 | def setReplicas(self, namespace:str, deploymentName:str, replicas:int) -> bool: 116 | ''' 117 | Set the number of replicas of a deployment. 118 | ''' 119 | try: 120 | currentScale = self.AppsV1Api.read_namespaced_deployment_scale(namespace=namespace, name=deploymentName) 121 | currentScale.spec.replicas = replicas 122 | self.AppsV1Api.replace_namespaced_deployment_scale(namespace=namespace, name=deploymentName, body=currentScale) 123 | return True 124 | except ApiException as e: 125 | print(e) 126 | self.logs.error({'message': 'Exception when calling AppsV1Api.read_namespaced_deployment_scale'}) 127 | return False 128 | 129 | def getReplicas(self, namespace:str, deploymentName:str): 130 | ''' 131 | Returns the number of replicas of a deployment. 132 | ''' 133 | try: 134 | return self.AppsV1Api.read_namespaced_deployment_scale(namespace=namespace, name=deploymentName) 135 | except ApiException as e: 136 | print(e) 137 | self.logs.error({'message': 'Exception when calling AppsV1Api.read_namespaced_deployment_scale'}) 138 | return False 139 | 140 | def rolloutDeployment(self, namespace:str, deploymentName:str) -> bool: 141 | ''' 142 | Execute a rollout restart deployment. 143 | ''' 144 | deploymentManifest = self.getDeployment(namespace, deploymentName) 145 | if not deploymentManifest: 146 | return False 147 | 148 | deploymentManifest.spec.template.metadata.annotations = {"kubectl.kubernetes.io/restartedAt": datetime.datetime.utcnow().replace(tzinfo=pytz.UTC).isoformat()} 149 | try: 150 | self.AppsV1Api.replace_namespaced_deployment(namespace=namespace, name=deploymentName, body=deploymentManifest) 151 | return True 152 | except ApiException as e: 153 | print(e) 154 | self.logs.error({'message': 'Exception when calling AppsV1Api.replace_namespaced_deployment'}) 155 | return False 156 | 157 | def getIngress(self, namespace, ingressName): 158 | response = self.NetworkingV1Api.read_namespaced_ingress(namespace=namespace, name=ingressName) 159 | return response 160 | 161 | def getLogs(self, namespace, podName, containerName, tailLines): 162 | response = self.CoreV1Api.read_namespaced_pod_log(namespace=namespace, name=podName, container=containerName, tail_lines=tailLines) 163 | return response 164 | 165 | def getReplicaSet(self, namespace, labelSelector): 166 | response = self.AppsV1Api.list_namespaced_replica_set(namespace=namespace, label_selector=labelSelector) 167 | return response.items 168 | 169 | def deletePod(self, namespace, podName): 170 | response = self.CoreV1Api.delete_namespaced_pod(namespace=namespace, name=podName, body={}) 171 | return response 172 | -------------------------------------------------------------------------------- /files/logs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from datetime import datetime, timezone 4 | 5 | class Logs: 6 | 7 | def __init__(self, objectName:str): 8 | self.level = 'INFO' 9 | if "LOG_LEVEL" in os.environ: 10 | self.level = os.environ["LOG_LEVEL"] 11 | 12 | self.format = 'TEXT' 13 | if "LOG_FORMAT" in os.environ: 14 | self.format = os.environ["LOG_FORMAT"] 15 | 16 | self.objectName = objectName 17 | 18 | def __print__(self, level:str, extraFields:dict): 19 | fields = { 20 | 'date': datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S"), 21 | 'level': level, 22 | 'objectName': self.objectName 23 | } 24 | 25 | # Include extra fields custom by the user 26 | if extraFields is not None: 27 | fields.update(extraFields) 28 | 29 | if self.format == 'JSON': 30 | print(json.dumps(fields)) 31 | else: 32 | print(' - '.join(map(str, fields.values()))) 33 | 34 | def error(self, extraFields:dict=None): 35 | if self.level in ['DEBUG', 'INFO', 'WARNING', 'ERROR']: 36 | self.__print__('ERROR', extraFields) 37 | 38 | def warning(self, extraFields:dict=None): 39 | if self.level in ['DEBUG', 'INFO', 'WARNING']: 40 | self.__print__('WARNING', extraFields) 41 | 42 | def info(self, extraFields:dict=None): 43 | if self.level in ['DEBUG', 'INFO']: 44 | self.__print__('INFO', extraFields) 45 | 46 | def debug(self, extraFields:dict=None): 47 | if self.level in ['DEBUG']: 48 | self.__print__('DEBUG', extraFields) 49 | -------------------------------------------------------------------------------- /files/main.py: -------------------------------------------------------------------------------- 1 | import os 2 | import schedule 3 | import time 4 | from aautoscaler import AAutoscaler 5 | from logs import Logs 6 | 7 | checkEvery = 5 # Check annotations every 5 seconds by default 8 | if 'CHECK_EVERY' in os.environ: 9 | checkEvery = int(os.environ['CHECK_EVERY']) 10 | 11 | mode = 'daemon' 12 | if 'MODE' in os.environ: 13 | mode = os.environ['MODE'] 14 | 15 | aautoscaler = AAutoscaler() 16 | if mode == 'daemon': 17 | schedule.every(checkEvery).seconds.do(aautoscaler.execute) 18 | while True: 19 | schedule.run_pending() 20 | time.sleep(1) 21 | else: 22 | Logs(__name__).info({'message': 'Running once then exit.'}) 23 | aautoscaler.execute() 24 | -------------------------------------------------------------------------------- /files/requirements.txt: -------------------------------------------------------------------------------- 1 | kubernetes==23.3.0 2 | schedule==1.1.0 3 | croniter==1.3.4 4 | pytz==2022.1 5 | -------------------------------------------------------------------------------- /kubernetes/full.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: another 6 | 7 | --- 8 | apiVersion: v1 9 | kind: ServiceAccount 10 | metadata: 11 | name: another-autoscaler 12 | namespace: another 13 | 14 | --- 15 | kind: ClusterRole 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: another-autoscaler 19 | rules: 20 | - apiGroups: [""] 21 | resources: ["namespaces"] 22 | verbs: ["get","list"] 23 | - apiGroups: ["extensions", "apps"] 24 | resources: ["deployments", "deployments/scale", "replicasets"] 25 | verbs: ["get","list","patch","update"] 26 | 27 | --- 28 | kind: ClusterRoleBinding 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | metadata: 31 | name: another-autoscaler 32 | roleRef: 33 | kind: ClusterRole 34 | name: another-autoscaler 35 | apiGroup: rbac.authorization.k8s.io 36 | subjects: 37 | - kind: ServiceAccount 38 | name: another-autoscaler 39 | namespace: another 40 | 41 | --- 42 | apiVersion: v1 43 | kind: ConfigMap 44 | metadata: 45 | name: another-autoscaler 46 | namespace: another 47 | data: 48 | LOG_LEVEL: "DEBUG" 49 | LOG_FORMAT: "JSON" 50 | CHECK_EVERY: "5" 51 | 52 | --- 53 | apiVersion: apps/v1 54 | kind: Deployment 55 | metadata: 56 | name: another-autoscaler 57 | namespace: another 58 | labels: 59 | app: another-autoscaler 60 | spec: 61 | replicas: 1 62 | selector: 63 | matchLabels: 64 | app: another-autoscaler 65 | template: 66 | metadata: 67 | labels: 68 | app: another-autoscaler 69 | spec: 70 | serviceAccountName: another-autoscaler 71 | containers: 72 | - name: another-autoscaler 73 | image: ghcr.io/dignajar/another-autoscaler:latest # Change the tag for the latest stable version 74 | imagePullPolicy: Always 75 | envFrom: 76 | - configMapRef: 77 | name: another-autoscaler 78 | securityContext: 79 | runAsNonRoot: true 80 | allowPrivilegeEscalation: false 81 | readOnlyRootFilesystem: true 82 | runAsUser: 10001 83 | runAsGroup: 10001 84 | capabilities: 85 | drop: 86 | - ALL 87 | resources: 88 | limits: 89 | cpu: 300m 90 | memory: 256Mi 91 | requests: 92 | cpu: 100m 93 | memory: 128Mi --------------------------------------------------------------------------------