├── CronJob.yaml ├── Dockerfile ├── LICENSE ├── README.md ├── _config.yml ├── docs └── imgs │ ├── architecture.png │ ├── logo.png │ ├── sample_run.png │ ├── sample_run_1.png │ └── sample_run_3.png ├── objects ├── .gitignore ├── cluster.py ├── conf │ ├── admission-controllers │ ├── kube-apiserver │ ├── kube-controller-manager │ └── kube-scheduler ├── control_plane.py ├── crds.py ├── daemonsets.py ├── deployments.py ├── images.py ├── ingress.py ├── jobs.py ├── modules │ ├── __init__.py │ ├── __init__.pyc │ ├── get_cm.py │ ├── get_crds.py │ ├── get_deploy.py │ ├── get_ds.py │ ├── get_ingress.py │ ├── get_jobs.py │ ├── get_nodes.py │ ├── get_ns.py │ ├── get_pods.py │ ├── get_rbac.py │ ├── get_sts.py │ ├── get_svc.py │ ├── get_svc_acc.py │ ├── load_kube_config.py │ ├── logging.py │ ├── main.py │ ├── message.py │ ├── output.py │ ├── output.pyc │ └── process.py ├── namespace.py ├── networks.py ├── nodes.py ├── pods.py ├── rbac.py ├── serviceaccounts.py ├── services.py ├── statefulsets.py └── test.py └── requirements.txt /CronJob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | labels: 5 | app: k8s-cluster-checker 6 | name: k8s-cluster-checker 7 | spec: 8 | concurrencyPolicy: Replace 9 | failedJobsHistoryLimit: 1 10 | jobTemplate: 11 | metadata: 12 | creationTimestamp: null 13 | spec: 14 | activeDeadlineSeconds: 3600 15 | template: 16 | metadata: 17 | labels: 18 | app: k8s-cluster-checker 19 | spec: 20 | imagePullSecrets: 21 | - name: quayauth 22 | containers: 23 | - image: dguyhasnoname/k8s-cluster-checker:0.1.0 24 | imagePullPolicy: Always 25 | name: cluster-checker 26 | ports: 27 | - containerPort: 80 28 | resources: 29 | limits: 30 | cpu: 400m 31 | memory: 500Mi 32 | requests: 33 | cpu: 200m 34 | memory: 400Mi 35 | restartPolicy: OnFailure 36 | schedule: 0/15 * * * * 37 | startingDeadlineSeconds: 43200 38 | successfulJobsHistoryLimit: 3 39 | suspend: false 40 | --- 
41 | kind: ClusterRole 42 | apiVersion: rbac.authorization.k8s.io/v1 43 | metadata: 44 | name: cluster-checker 45 | rules: 46 | - apiGroups: ["*"] 47 | resources: ["*"] 48 | verbs: ["list", "get"] 49 | --- 50 | kind: ClusterRoleBinding 51 | apiVersion: rbac.authorization.k8s.io/v1 52 | metadata: 53 | name: cluster-checker 54 | subjects: 55 | - kind: ServiceAccount 56 | name: default 57 | namespace: monitoring 58 | roleRef: 59 | kind: ClusterRole 60 | name: cluster-checker 61 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8-slim-buster 2 | 3 | ENV PYTHONUNBUFFERED=1 4 | 5 | COPY requirements.txt / 6 | RUN pip3 install --no-cache --upgrade -r requirements.txt 7 | 8 | COPY objects/ /app 9 | WORKDIR /app 10 | 11 | ENTRYPOINT ["/bin/bash"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k8s-cluster-checker 2 | 3 | ![Docker Pulls](https://img.shields.io/docker/pulls/dguyhasnoname/k8s-cluster-checker.svg) 4 | [![docker image version](https://images.microbadger.com/badges/version/dguyhasnoname/k8s-cluster-checker.svg)](https://microbadger.com/images/dguyhasnoname/k8s-cluster-checker) 5 | [![PyPI version](https://badge.fury.io/py/kubernetes.svg)](https://badge.fury.io/py/kubernetes) 6 | ![license](https://img.shields.io/github/license/dguyhasnoname/k8s-cluster-checker) 7 | 8 | 9 | ![logo](/docs/imgs/logo.png) 10 | 11 | k8s-cluster-checker is bundle of python scripts which can be used to analyse below configurations in a kubernetes cluster: 12 | 13 | - OS version(supports flatcar OS, coreOS & Ubuntu only) 14 | - Kubernetes version 15 | - Docker version 16 | - Admission Controllers 17 | - security-context of workloads 18 | - health-probes of workloads 19 | - QoS of workloads 20 | - types of services 21 | - workload running with single 
replica 22 | - rbac analysis 23 | - stale namespaces with no workloads 24 | 25 | Once the tool is run, it generates output in 3 ways: 26 | 1. stdout on the screen to visualise the analysis right away. 27 | 2. report in `csv` files are generated for each analysis. A combined report is generated in excel file. You can use it for your own custom analysis. 28 | 3. json output is generated for each analysis which can be consumed in down-stream scripts. JSON output can also be ingested in Splunk or any other log/data aggregation tool for dashboarding. 29 | 30 | #### Compatible k8s versions: 1.14+ 31 | Running k8s-cluster-check on older k8s version 1.10.x to 1.13.x may result in missing results/exceptions. The tool does not support k8s versions previous to 1.10.x. 32 | 33 | This tool performs read-only operations on any k8s cluster. You can make a service account/kubeconfig with full read-only access to all k8s-objects and use the same to run the tool. Else, it will use the in-cluster kubeconfig when deployed in the cluster. 34 | 35 | #### k8s-cluster-checker contains below scripts: 36 | 37 | 1. [cluster.py](objects/cluster.py): gives quick details of cluster and analyses configurations as below: 38 | - cluster name 39 | - node-details 40 | - node-roles 41 | - volumes used 42 | - OS, K8s and docker version. Also checks for latest versions. 43 | - overall and namespaced workload count 44 | - analysis of admission-controllers 45 | - analysis of security configs for workloads 46 | - analysis of health-check probes 47 | - analysis of resource limits/requests of workloads and their QoS 48 | - analysis of image-pull-policy 49 | - RBAC analysis 50 | - analysis of services in the cluster. 51 | 52 | above analysis is the collective result of following scripts. 53 | 2. [nodes.py](objects/nodes.py): gives details of nodes. Finds if docker, kubernetes and docker version are latest or not. 54 | 3.
[namespace.py](objects/namespace.py): gives details of namespace objects and analyses them 55 | 4. [control_plane.py](objects/control_plane.py): analyses control-plane configuration and reports missing ones 56 | 5. [deployments.py](objects/deployments.py): gives detail for deployments in cluster and analyses them 57 | 6. [daemonsets.py](objects/daemonsets.py): gives detail for daemonsets in cluster and analyses them 58 | 7. [statefulsets.py](objects/statefulsets.py): gives detail for statefulsets in cluster and analyses them 59 | 8. [services.py](objects/services.py): gives detail for services in cluster and analyses them 60 | 9. [jobs.py](objects/jobs.py): gives detail for jobs in cluster and analyses them 61 | 10. [pods.py](objects/pods.py): gives detail for pods in cluster in all namespaces and analyses them 62 | 11. [ingress.py](objects/ingress.py): gives detail for ingress in cluster and analyses them 63 | 12. [rbac.py](objects/rbac.py): gives detail for rbac in cluster and analyses them 64 | 13. [images.py](objects/images.py): gives detail for images used by workloads in cluster and reports back if any old images found. 65 | 66 | ## Pre-requisites 67 | 68 | 1. python3 and [packages](requirements.txt) 69 | 70 | 2. `pip3` needs to be installed to get required packages. You need to install above packages with command: 71 | 72 | ``` 73 | pip3 install 74 | ``` 75 | 76 | A docker image is available on [dockerhub](https://hub.docker.com/repository/docker/dguyhasnoname/k8s-cluster-checker) with all the dependencies installed. Follow this readme for docker image instructions. 77 | 78 | 3. KUBECONFIG for the cluster needs to be exported as env. It is read by k8s-cluster-checker scripts to connect to the cluster when output is generated on stdout. 79 | 80 | ## How to run k8s-cluster-checker scripts 81 | 82 | Once above pre-requisites are installed and configured, you are ready to run k8s-cluster-checker scripts as below: 83 | 84 | 1. Change dir: `cd objects` 85 | 2.
Run scripts: 86 | 87 | ``` 88 | python3 cluster.py 89 | ``` 90 | 91 | If you want a ready-made env to run k8s-cluster-checker, please build the docker image using below command: 92 | 93 | docker build -t : . 94 | 95 | e.g. 96 | 97 | 98 | docker build -t dguyhasnoname/k8s-cluster-checker:latest . 99 | 100 | Running through docker image would be much easier than installing dependencies on your machine. The docker image being used is based on `python:3.8-slim-buster` which is a very light weight version of python in docker. 101 | 102 | Alternatively, please check [dockerhub](https://hub.docker.com/repository/docker/dguyhasnoname/k8s-cluster-checker) for latest image, if you do not want to build your own image. You can download the latest image from dockerhub as the dockerhub image build is integrated with this repo and it polls this repo for update. 103 | 104 | docker pull dguyhasnoname/k8s-cluster-checker:latest 105 | 106 | Once your image is ready, run the docker container and export KUBECONFIG inside the container. You can get the kubeconfig inside the container by mapping dir inside the container from your local machine where your KUBECONFIG file is stored: 107 | 108 | 109 | docker run -it -v :/k8sconfig dguyhasnoname/k8s-cluster-checker:latest 110 | 111 | 112 | Now you should be inside the container. Please export KUBECONFIG: 113 | 114 | 115 | export KUBECONFIG=/k8sconfig/ 116 | 117 | 118 | Now you are ready to run k8s-cluster-checker scripts: 119 | 120 | 121 | cd /app 122 | python cluster.py 123 | 124 | Flags available: 125 | 126 | - `-n` namespace. If this is not given, it will return data for all namespaces. 127 | - `-v` gives more details, this flag is valid for all scripts 128 | - `-l` gives only JSON output on stdout. This data can be forwarded to splunk for dashboarding.
129 | 130 | If you want the json data generated by this tool to be ingested in Splunk for dashboard, please use the [CronJob.yaml](/CronJob.yaml) to deploy k8s-cluster-checker in your cluster. Docker image used for this purpose runs the tool with `-l` flag which only generates JSON data on stdout. 131 | 132 | 133 | kubectl apply -f CronJob.yaml -n monitoring 134 | 135 | ## Architecture 136 | 137 | ![arch](/docs/imgs/architecture.png) 138 | 139 | ### Sample run 140 | 141 | ![sample_run](/docs/imgs/sample_run.png) 142 | ![sample_run_1](/docs/imgs/sample_run_1.png) 143 | ![sample_run_2](/docs/imgs/sample_run_3.png) 144 | 145 | ## Contributions/Issues 146 | 147 | If you find any bug, please feel free to open an issue in this repo. If you want to contribute, PRs are welcome. -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-minimal -------------------------------------------------------------------------------- /docs/imgs/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/docs/imgs/architecture.png -------------------------------------------------------------------------------- /docs/imgs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/docs/imgs/logo.png -------------------------------------------------------------------------------- /docs/imgs/sample_run.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/docs/imgs/sample_run.png
-------------------------------------------------------------------------------- /docs/imgs/sample_run_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/docs/imgs/sample_run_1.png -------------------------------------------------------------------------------- /docs/imgs/sample_run_3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/docs/imgs/sample_run_3.png -------------------------------------------------------------------------------- /objects/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | modules/__pycache__/ 3 | .DS_Store 4 | -------------------------------------------------------------------------------- /objects/cluster.py: -------------------------------------------------------------------------------- 1 | import threading as threading 2 | import sys, time, os, re 3 | import pandas as pd 4 | import xlsxwriter, glob 5 | start_time = time.time() 6 | from modules.main import ArgParse 7 | from modules.logging import Logger 8 | from modules.output import Output 9 | from modules import process as k8s 10 | from modules.load_kube_config import kubeConfig 11 | 12 | kubeConfig.load_kube_config() 13 | 14 | class Cluster: 15 | def __init__(self, cluster_name, logger): 16 | self.cluster_name = cluster_name 17 | self.logger = logger 18 | 19 | def get_cluster_name(self): 20 | from modules.get_cm import K8sConfigMap 21 | cm = K8sConfigMap.get_cm('kube-system', self.logger) 22 | for item in cm.items: 23 | if 'kubeadm-config' in item.metadata.name: 24 | if 'clusterName' in item.data['ClusterConfiguration']: 25 | self.cluster_name = re.search(r"clusterName: ([\s\S]+)controlPlaneEndpoint", \ 26 | 
item.data['ClusterConfiguration']).group(1) 27 | print (k8s.Output.BOLD + "\nCluster name: "+ \ 28 | k8s.Output.RESET + "{}".format(self.cluster_name)) 29 | return self.cluster_name 30 | else: 31 | pass 32 | 33 | # fetching nodes data from nodes.py 34 | def get_node_data(self, v, l): 35 | import nodes as node 36 | nodes = node._Nodes(self.logger) 37 | nodes.get_nodes_details(v, l) 38 | 39 | # fetching control plane data from control_plane.py 40 | def get_ctrl_plane_data(self, v, l): 41 | import control_plane as cp 42 | print ("\nControl plane details:") 43 | ctrl_plane = cp.CtrlPlane(self.logger) 44 | ctrl_plane.get_ctrl_plane_pods(l) 45 | ctrl_plane.check_ctrl_plane_pods_properties(v, l) 46 | 47 | # getting namespaced data 48 | def get_namespaced_data(self, v, l): 49 | # fetching namespaced data from namespace.py 50 | import namespace as namespace 51 | ns = namespace.Namespace(self.logger) 52 | data = ns.get_ns_data(False, '', l) 53 | 54 | # variables to store data from get_ns_data function from namespace.py 55 | cluster_pods_list, cluster_svc_list = data[1], data[2] 56 | 57 | # analysing security context from security_context function in modules/process.py 58 | data_security_context = k8s.Check.security_context('pods', cluster_pods_list, \ 59 | ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 60 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'], \ 61 | v, 'all', l, self.logger) 62 | if l: self.logger.info(data_security_context) 63 | 64 | # analysing health checks from health_probes function in modules/process.py 65 | data_health_probes = k8s.Check.health_probes('pods', cluster_pods_list, \ 66 | ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'READINESS_PROPBE', 'LIVENESS_PROBE'], \ 67 | v, 'all', l, self.logger) 68 | if l: self.logger.info(data_health_probes) 69 | 70 | # analysing limit/requests from resources function in modules/process.py 71 | data_resources = k8s.Check.resources('pods',cluster_pods_list, \ 72 | ['NAMESPACE', 'POD', 
'CONTAINER_NAME', 'LIMITS', 'REQUESTS'], v, 'all', l, self.logger) 73 | if l: self.logger.info(data_resources, self.logger) 74 | 75 | # analysing qos context from qos function in modules/process.py 76 | data_qos = k8s.Check.qos('pods', cluster_pods_list, ['NAMESPACE', 'POD', 'QoS'], \ 77 | v, 'all', l, self.logger) 78 | if l: self.logger.info(data_qos) 79 | 80 | # analysing image_pull_policy from image_pull_policy function in modules/process.py 81 | data_image_pull_policy = k8s.Check.image_pull_policy('pods', cluster_pods_list, \ 82 | ['DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE', 'IMAGE_PULL_POLICY'], \ 83 | v, 'all', l, self.logger) 84 | if l: self.logger.info(data_image_pull_policy) 85 | 86 | # analysing services from get_service function in modules/process.py 87 | data_get_service = k8s.Service.get_service('services', cluster_svc_list, \ 88 | ['NAMESPACE', 'SERVICE', 'SERVICE_TYPE', 'IP', 'SELECTOR'], \ 89 | v, 'all', l, self.logger) 90 | if l: self.logger.info(data_get_service[0]) 91 | 92 | 93 | # fetching RBAC data from rbac.py 94 | def get_rbac_details(self, v, l): 95 | import rbac as rbac 96 | print ("\nRBAC details:") 97 | rbac.call_all(v, '', l, self.logger) 98 | 99 | # fetching CRD data from crds.py 100 | def get_crd_details(self, v, l): 101 | import crds as crds 102 | print ("\nCRD details:") 103 | crds.call_all(v, '', l, self.logger) 104 | 105 | # generating combined report for the cluster 106 | def merge_reports(self): 107 | combined_report_file = './reports/combined_cluster_report.xlsx' 108 | csv_report_folder = '/reports/csv' 109 | writer = pd.ExcelWriter(combined_report_file, engine='xlsxwriter') 110 | csv_list = next(os.walk('.' 
+ csv_report_folder))[2] 111 | csv_list.sort() 112 | for host in csv_list: 113 | path = os.path.join(os.getcwd() + csv_report_folder, host) 114 | for f in glob.glob(path): 115 | df = pd.read_csv(f) 116 | df.to_excel(writer, sheet_name=os.path.basename(f)[:31]) 117 | writer.save() 118 | writer.handles = None 119 | 120 | print ("{} reports generated for cluster {}".format(len(csv_list), self.cluster_name)) 121 | print ("Combined cluster report file: {}".format(combined_report_file)) 122 | 123 | def call_all(v, l, logger): 124 | call = Cluster('', logger) 125 | call.get_cluster_name() 126 | k8s.Output.separator(k8s.Output.GREEN,u'\u2581', l) 127 | call.get_node_data(v, l) 128 | k8s.Output.separator(k8s.Output.GREEN,u'\u2581', l) 129 | call.get_ctrl_plane_data(v, l) 130 | k8s.Output.separator(k8s.Output.GREEN,u'\u2581', l) 131 | call.get_namespaced_data(v,l) 132 | k8s.Output.separator(k8s.Output.GREEN,u'\u2581', l) 133 | call.get_crd_details(v, l) 134 | k8s.Output.separator(k8s.Output.GREEN,u'\u2581', l) 135 | call.get_rbac_details(v, l) 136 | k8s.Output.separator(k8s.Output.GREEN,u'\u2581', l) 137 | call.merge_reports() 138 | 139 | def main(): 140 | args = ArgParse.arg_parse() 141 | # args is [u, verbose, ns, l, format, silent] 142 | logger = Logger.get_logger(args.format, args.silent) 143 | if args: 144 | call_all(args.verbose, args.logging, logger) 145 | k8s.Output.time_taken(start_time) 146 | 147 | if __name__ == "__main__": 148 | try: 149 | main() 150 | except KeyboardInterrupt: 151 | print(k8s.Output.RED + "[ERROR] " \ 152 | + k8s.Output.RESET + 'Interrupted from keyboard!') 153 | try: 154 | sys.exit(0) 155 | except SystemExit: 156 | os._exit(0) -------------------------------------------------------------------------------- /objects/conf/admission-controllers: -------------------------------------------------------------------------------- 1 | AlwaysAdmit, AlwaysDeny, AlwaysPullImages, CertificateApproval, CertificateSigning, CertificateSubjectRestriction, 
DefaultIngressClass, DefaultStorageClass, DefaultTolerationSeconds, DenyEscalatingExec, DenyExecOnPrivileged, EventRateLimit, ExtendedResourceToleration, ImagePolicyWebhook, LimitPodHardAntiAffinityTopology, LimitRanger, MutatingAdmissionWebhook, NamespaceAutoProvision, NamespaceExists, NamespaceLifecycle, NodeRestriction, OwnerReferencesPermissionEnforcement, PersistentVolumeClaimResize, PersistentVolumeLabel, PodNodeSelector, PodPreset, PodSecurityPolicy, PodTolerationRestriction, Priority, ResourceQuota, RuntimeClass, SecurityContextDeny, ServiceAccount, StorageObjectInUseProtection, TaintNodesByCondition, ValidatingAdmissionWebhook -------------------------------------------------------------------------------- /objects/conf/kube-apiserver: -------------------------------------------------------------------------------- 1 | --admission-control 2 | --admission-control-config-file 3 | --advertise-address 4 | --allow-privileged 5 | --anonymous-auth 6 | --apiserver-count 7 | --audit-log-maxage 8 | --audit-log-maxbackup 9 | --audit-log-maxsize 10 | --audit-log-path 11 | --authentication-token-webhook-cache-ttl 12 | --authentication-token-webhook-config-file 13 | --authorization-mode 14 | --authorization-policy-file 15 | --authorization-webhook-cache-authorized-ttl 16 | --authorization-webhook-cache-unauthorized-ttl 17 | --authorization-webhook-config-file 18 | --azure-container-registry-config 19 | --basic-auth-file 20 | --bind-address 21 | --cert-dir 22 | --client-ca-file 23 | --cloud-config 24 | --cloud-provider 25 | --contention-profiling 26 | --cors-allowed-origins 27 | --delete-collection-workers 28 | --deserialization-cache-size 29 | --enable-garbage-collector 30 | --enable-swagger-ui 31 | --etcd-cafile 32 | --etcd-certfile 33 | --etcd-keyfile 34 | --etcd-prefix 35 | --etcd-quorum-read 36 | --etcd-servers 37 | --etcd-servers-overrides 38 | --event-ttl 39 | --experimental-bootstrap-token-auth 40 | --experimental-keystone-ca-file 41 | --experimental-keystone-url 
42 | --external-hostname 43 | --feature-gates 44 | --google-json-key 45 | --insecure-allow-any-token 46 | --insecure-bind-address 47 | --insecure-port 48 | --kubelet-certificate-authority 49 | --kubelet-client-certificate 50 | --kubelet-client-key 51 | --kubelet-https 52 | --kubelet-preferred-address-types 53 | --kubelet-read-only-port 54 | --kubelet-timeout 55 | --kubernetes-service-node-port 56 | --master-service-namespace 57 | --max-connection-bytes-per-sec 58 | --max-mutating-requests-inflight 59 | --max-requests-inflight 60 | --min-request-timeout 61 | --oidc-ca-file 62 | --oidc-client-id 63 | --oidc-groups-claim 64 | --oidc-issuer-url 65 | --oidc-username-claim 66 | --profiling 67 | --repair-malformed-updates 68 | --requestheader-allowed-names 69 | --requestheader-client-ca-file 70 | --requestheader-extra-headers-prefix 71 | --requestheader-group-headers 72 | --requestheader-username-headers 73 | --runtime-config 74 | --secure-port 75 | --service-account-key-file 76 | --service-account-lookup 77 | --service-cluster-ip-range 78 | --service-node-port-range 79 | --ssh-keyfile 80 | --ssh-user 81 | --storage-backend 82 | --storage-media-type 83 | --storage-versions 84 | --target-ram-mb 85 | --tls-ca-file 86 | --tls-cert-file 87 | --tls-private-key-file 88 | --tls-sni-cert-key 89 | --token-auth-file 90 | --watch-cache 91 | --watch-cache-sizes -------------------------------------------------------------------------------- /objects/conf/kube-controller-manager: -------------------------------------------------------------------------------- 1 | --add-dir-header 2 | --allocate-node-cidrs 3 | --alsologtostderr 4 | --attach-detach-reconcile-sync-period 5 | --authentication-kubeconfig 6 | --authentication-skip-lookup 7 | --authentication-token-webhook-cache-ttl 8 | --authentication-tolerate-lookup-failure 9 | --authorization-always-allow-paths 10 | --authorization-kubeconfig 11 | --authorization-webhook-cache-authorized-ttl 12 | 
--authorization-webhook-cache-unauthorized-ttl 13 | --azure-container-registry-config 14 | --cert-dir 15 | --cidr-allocator-type 16 | --client-ca-file 17 | --cloud-config 18 | --cloud-provider 19 | --cluster-cidr 20 | --cluster-name 21 | --cluster-signing-cert-file 22 | --cluster-signing-key-file 23 | --concurrent-deployment-syncs 24 | --concurrent-endpoint-syncs 25 | --concurrent-gc-syncs 26 | --concurrent-namespace-syncs 27 | --concurrent-replicaset-syncs 28 | --concurrent-resource-quota-syncs 29 | --concurrent-service-endpoint-syncs 30 | --concurrent-service-syncs 31 | --concurrent-serviceaccount-token-syncs 32 | --concurrent-statefulset-syncs 33 | --concurrent_rc_syncs 34 | --configure-cloud-routes 35 | --contention-profiling 36 | --controller-start-interval 37 | --controllers 38 | --deployment-controller-sync-period 39 | --disable-attach-detach-reconcile-sync 40 | --enable-dynamic-provisioning 41 | --enable-garbage-collector 42 | --enable-hostpath-provisioner 43 | --enable-taint-manager 44 | --endpoint-updates-batch-period 45 | --endpointslice-updates-batch-period 46 | --experimental-cluster-signing-duration 47 | --external-cloud-volume-plugin 48 | --feature-gates 49 | --flex-volume-plugin-dir 50 | --horizontal-pod-autoscaler-cpu-initialization-period 51 | --horizontal-pod-autoscaler-downscale-stabilization 52 | --horizontal-pod-autoscaler-initial-readiness-delay 53 | --horizontal-pod-autoscaler-sync-period 54 | --horizontal-pod-autoscaler-tolerance 55 | --http2-max-streams-per-connection 56 | --kube-api-burst 57 | --kube-api-content-type 58 | --kube-api-qps 59 | --kubeconfig 60 | --large-cluster-size-threshold 61 | --leader-elect 62 | --leader-elect-lease-duration 63 | --leader-elect-renew-deadline 64 | --leader-elect-resource-lock 65 | --leader-elect-resource-name 66 | --leader-elect-resource-namespace 67 | --leader-elect-retry-period 68 | --log-backtrace-at 69 | --log-dir 70 | --log-file 71 | --log-file-max-size 72 | --log-flush-frequency 73 | --logtostderr 
74 | --master 75 | --max-endpoints-per-slice 76 | --min-resync-period 77 | --namespace-sync-period 78 | --node-cidr-mask-size 79 | --node-cidr-mask-size-ipv4 80 | --node-cidr-mask-size-ipv6 81 | --node-eviction-rate 82 | --node-monitor-grace-period 83 | --node-monitor-period 84 | --node-startup-grace-period 85 | --pod-eviction-timeout 86 | --profiling 87 | --pv-recycler-increment-timeout-nfs 88 | --pv-recycler-minimum-timeout-hostpath 89 | --pv-recycler-minimum-timeout-nfs 90 | --pv-recycler-pod-template-filepath-hostpath 91 | --pv-recycler-pod-template-filepath-nfs 92 | --pv-recycler-timeout-increment-hostpath 93 | --pvclaimbinder-sync-period 94 | --requestheader-allowed-names 95 | --requestheader-client-ca-file 96 | --requestheader-extra-headers-prefix 97 | --requestheader-group-headers 98 | --requestheader-username-headers 99 | --resource-quota-sync-period 100 | --root-ca-file 101 | --route-reconciliation-period 102 | --secondary-node-eviction-rate 103 | --secure-port 104 | --service-account-private-key-file 105 | --service-cluster-ip-range 106 | --show-hidden-metrics-for-version 107 | --skip-headers 108 | --skip-log-headers 109 | --stderrthreshold severity 110 | --terminated-pod-gc-threshold 111 | --tls-cert-file 112 | --tls-cipher-suites 113 | --tls-min-version 114 | --tls-private-key-file 115 | --tls-sni-cert-key 116 | --unhealthy-zone-threshold 117 | --use-service-account-credentials 118 | --vmodule -------------------------------------------------------------------------------- /objects/conf/kube-scheduler: -------------------------------------------------------------------------------- 1 | --address 2 | --algorithm-provider 3 | --failure-domains 4 | --feature-gates 5 | --google-json-key 6 | --hard-pod-affinity-symmetric-weight 7 | --kube-api-burst 8 | --kube-api-content-type 9 | --kube-api-qps 10 | --kubeconfig 11 | --leader-elect 12 | --leader-elect-lease-duration 13 | --leader-elect-renew-deadline 14 | --leader-elect-retry-period 15 | --master 16 | 
--policy-config-file 17 | --port 18 | --profiling 19 | --scheduler-name -------------------------------------------------------------------------------- /objects/control_plane.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client, config 2 | from kubernetes.client.rest import ApiException 3 | import time, os, re, sys 4 | start_time = time.time() 5 | from modules.main import ArgParse 6 | from modules.logging import Logger 7 | from modules import process as k8s 8 | from modules.load_kube_config import kubeConfig 9 | 10 | kubeConfig.load_kube_config() 11 | core = client.CoreV1Api() 12 | 13 | class CtrlPlane: 14 | def __init__(self, logger): 15 | self.logger = logger 16 | self.k8s_object_list = '' 17 | self.namespace = 'kube-system' 18 | self.k8s_object = 'pods' 19 | 20 | global k8s_object, k8s_object_list, namespace 21 | def check_ctrl_plane_pods(self): 22 | try: 23 | print ("\n[INFO] Fetching control plane workload data...") 24 | ctrl_plane_pods = core.list_namespaced_pod(self.namespace, \ 25 | label_selector='tier=control-plane', timeout_seconds=10) 26 | if not ctrl_plane_pods.items: 27 | print (k8s.Output.RED + "[ERROR] " + k8s.Output.RESET \ 28 | + "No control plane pods found with label 'tier=control-plane'") 29 | return 30 | return ctrl_plane_pods 31 | except ApiException as e: 32 | print("Exception when calling CoreV1Api->list_namespaced_pod: %s\n" % e) 33 | 34 | def get_ctrl_plane_pods(self, l): 35 | self.k8s_object_list = CtrlPlane.check_ctrl_plane_pods(self) 36 | if not self.k8s_object_list: return 37 | data = [] 38 | headers = ['NAMESPACE', 'PODS', 'NODE_NAME', 'QoS'] 39 | for item in self.k8s_object_list.items: 40 | data.append([item.metadata.namespace, item.metadata.name, \ 41 | item.spec.node_name]) 42 | data = k8s.Output.append_hyphen(data, '------------') 43 | data.append(["Total pods: ", len(data) - 1, '']) 44 | k8s.Output.print_table(data, headers, True, l) 45 | 46 | def 
check_ctrl_plane_security(self, v, l): 47 | headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 48 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'] 49 | k8s.Check.security_context(self.k8s_object, self.k8s_object_list, headers, \ 50 | v, self.namespace, l, self.logger) 51 | 52 | def check_ctrl_plane_pods_health_probes(self, v, l): 53 | headers = ['NAMESPACE', 'PODS', 'CONTAINER_NAME', 'READINESS_PROPBE', \ 54 | 'LIVENESS_PROBE'] 55 | k8s.Check.health_probes(self.k8s_object, self.k8s_object_list, headers, \ 56 | v, self.namespace, l, self.logger) 57 | 58 | def check_ctrl_plane_pods_resources(self, v, l): 59 | headers = ['NAMESPACE', 'PODS', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS'] 60 | k8s.Check.resources(self.k8s_object, self.k8s_object_list, headers, v, \ 61 | self.namespace, l, self.logger) 62 | 63 | # gets file name from check_ctrl_plane_pods_properties function 64 | def check_ctrl_plane_pods_properties_operation(item, filename, headers, v, l): 65 | commands = item.spec.containers[0].command 66 | data = k8s.CtrlProp.compare_properties(filename, commands) 67 | k8s.Output.print_table(data, headers, v, l) 68 | 69 | def check_ctrl_plane_pods_qos(self, v, l): 70 | headers = ['NAMESPACE', 'POD', 'QoS'] 71 | k8s.Check.qos(self.k8s_object, self.k8s_object_list, headers, v, \ 72 | self.namespace, l, self.logger) 73 | 74 | def check_ctrl_plane_pods_properties(self, v, l): 75 | if not self.k8s_object_list: return 76 | container_name_check = "" 77 | headers = ['CTRL_PLANE_COMPONENT/ARGS', ''] 78 | k8scc_dir = os.path.dirname(__file__) 79 | for item in self.k8s_object_list.items: 80 | if item.spec.containers[0].name in "kube-controller-manager" \ 81 | and item.spec.containers[0].name not in container_name_check: 82 | CtrlPlane.check_ctrl_plane_pods_properties_operation(item,\ 83 | os.path.join(k8scc_dir, 'conf/kube-controller-manager'), headers, v, l) 84 | 85 | elif item.spec.containers[0].name in "kube-apiserver" \ 86 | and 
item.spec.containers[0].name not in container_name_check: 87 | CtrlPlane.check_ctrl_plane_pods_properties_operation(item,\ 88 | os.path.join(k8scc_dir, 'conf/kube-apiserver'), headers, v, l) 89 | json_data = k8s.CtrlProp.check_admission_controllers(\ 90 | item.spec.containers[0].command, v, self.namespace, l, k8scc_dir) 91 | 92 | if l: self.logger.info(json_data) 93 | 94 | elif item.spec.containers[0].name in "kube-scheduler" \ 95 | and item.spec.containers[0].name not in container_name_check: 96 | CtrlPlane.check_ctrl_plane_pods_properties_operation(item,\ 97 | os.path.join(k8scc_dir, 'conf/kube-scheduler'), headers, v, l) 98 | k8s.CtrlProp.secure_scheduler_check(\ 99 | item.spec.containers[0].command) 100 | container_name_check = item.spec.containers[0].name 101 | 102 | def call_all(v, ns, l, logger): 103 | call = CtrlPlane(logger) 104 | call.get_ctrl_plane_pods(l) 105 | call.check_ctrl_plane_security(v, l) 106 | call.check_ctrl_plane_pods_health_probes(v, l) 107 | call.check_ctrl_plane_pods_resources(v, l) 108 | call.check_ctrl_plane_pods_qos(v, l) 109 | call.check_ctrl_plane_pods_properties(v, l) 110 | 111 | def main(): 112 | args = ArgParse.arg_parse() 113 | # args is [u, verbose, ns, l, format, silent] 114 | logger = Logger.get_logger(args.format, args.silent) 115 | if args: 116 | call_all(args.verbose, args.namespace, args.logging, logger) 117 | k8s.Output.time_taken(start_time) 118 | 119 | if __name__ == "__main__": 120 | try: 121 | main() 122 | except KeyboardInterrupt: 123 | print(k8s.Output.RED + "[ERROR] " \ 124 | + k8s.Output.RESET + 'Interrupted from keyboard!') 125 | try: 126 | sys.exit(0) 127 | except SystemExit: 128 | os._exit(0) -------------------------------------------------------------------------------- /objects/crds.py: -------------------------------------------------------------------------------- 1 | import sys, time, os, getopt, argparse, re, itertools 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules 
import process as k8s 5 | from modules.logging import Logger 6 | from modules.get_crds import K8sCRDs 7 | 8 | class _CRDs: 9 | def __init__(self, logger): 10 | self.logger = logger 11 | self.k8s_object = 'crds' 12 | self.k8s_object_list = K8sCRDs.get_crds(self.logger) 13 | 14 | def get_crds(self, v, ns, l): 15 | data, crd_group, count_crd_group_crds, headers = \ 16 | [], [], [], ['CRD_GROUP', 'CRD_COUNT', 'SCOPE'] 17 | for item in self.k8s_object_list.items: 18 | data.append([item.spec.group, item.metadata.name, item.spec.scope]) 19 | crd_group.append([item.spec.group]) 20 | 21 | data.sort() 22 | crd_group.sort() 23 | # de-duplicate crd groups 24 | crd_group = list(k for k, _ in itertools.groupby(crd_group)) 25 | 26 | # calculate count of crds per crd-group 27 | for i in crd_group: 28 | count_crd_group_crds = 0 29 | for j in data: 30 | if j[0] == i[0]: 31 | count_crd_group_crds += 1 32 | i.append(count_crd_group_crds) 33 | 34 | crd_group = k8s.Output.append_hyphen(crd_group, '---------') 35 | crd_group.append(['Total: ' + str(len(crd_group) - 1), len(data)]) 36 | 37 | k8s.Output.print_table(crd_group, headers, True, l) 38 | 39 | k8s.CRDs.check_ns_crd(self.k8s_object_list, self.k8s_object, data, \ 40 | headers, v, 'all', l, self.logger) 41 | 42 | return data 43 | 44 | def call_all(v, ns, l, logger): 45 | call = _CRDs(logger) 46 | call.get_crds(v, ns, l) 47 | 48 | def main(): 49 | args = ArgParse.arg_parse() 50 | # args is [u, verbose, ns, l, format, silent] 51 | logger = Logger.get_logger(args.format, args.silent) 52 | if args: 53 | call_all(args.verbose, args.namespace, args.logging, logger) 54 | k8s.Output.time_taken(start_time) 55 | 56 | if __name__ == "__main__": 57 | try: 58 | main() 59 | except KeyboardInterrupt: 60 | print(k8s.Output.RED + "[ERROR] " \ 61 | + k8s.Output.RESET + 'Interrupted from keyboard!') 62 | try: 63 | sys.exit(0) 64 | except SystemExit: 65 | os._exit(0) -------------------------------------------------------------------------------- 
/objects/daemonsets.py: -------------------------------------------------------------------------------- 1 | import sys, time, os, getopt 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules.logging import Logger 5 | from modules.get_ds import K8sDaemonSet 6 | from modules import process as k8s 7 | 8 | class _Daemonset: 9 | def __init__(self, namespace, logger): 10 | self.namespace = namespace 11 | self.logger = logger 12 | if not self.namespace: 13 | self.namespace = 'all' 14 | self.k8s_object_list = K8sDaemonSet.get_damemonsets(self.namespace, self.logger) 15 | self.k8s_object = 'daemonset' 16 | 17 | def check_damemonset_security(self, v, l): 18 | headers = ['NAMESPACE', 'DAEMONSET', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 19 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'] 20 | data = k8s.Check.security_context(self.k8s_object, self.k8s_object_list, \ 21 | headers, v, self.namespace, l, self.logger) 22 | if l: self.logger.info(data) 23 | 24 | def check_damemonset_health_probes(self, v, l): 25 | headers = ['NAMESPACE', 'DAEMONSET', 'CONTAINER_NAME', \ 26 | 'READINESS_PROPBE', 'LIVENESS_PROBE'] 27 | data = k8s.Check.health_probes(self.k8s_object, self.k8s_object_list, \ 28 | headers, v, self.namespace, l, self.logger) 29 | if l: self.logger.info(data) 30 | 31 | def check_damemonset_resources(self, v, l): 32 | headers = ['NAMESPACE', 'DAEMONSET', 'CONTAINER_NAME', \ 33 | 'LIMITS', 'REQUESTS'] 34 | data = k8s.Check.resources(self.k8s_object, self.k8s_object_list, \ 35 | headers, v, self.namespace, l, self.logger) 36 | if l: self.logger.info(data) 37 | 38 | def check_damemonset_tolerations_affinity_node_selector_priority(self, v, l): 39 | headers = ['NAMESPACE', 'DAEMONSET', 'NODE_SELECTOR', \ 40 | 'TOLERATIONS', 'AFFINITY', 'PRIORITY_CLASS'] 41 | data = k8s.Check.tolerations_affinity_node_selector_priority(\ 42 | self.k8s_object, self.k8s_object_list, headers, v, self.namespace, l, self.logger) 43 | if l: 
self.logger.info(data) 44 | 45 | def call_all(v, namespace, l, logger): 46 | call = _Daemonset(namespace, logger) 47 | call.check_damemonset_security(v, l) 48 | call.check_damemonset_health_probes(v, l) 49 | call.check_damemonset_resources(v, l) 50 | call.check_damemonset_tolerations_affinity_node_selector_priority(v, l) 51 | 52 | def main(): 53 | args = ArgParse.arg_parse() 54 | # args is [u, verbose, ns, l, format, silent] 55 | logger = Logger.get_logger(args.format, args.silent) 56 | if args: 57 | call_all(args.verbose, args.namespace, args.logging, logger) 58 | k8s.Output.time_taken(start_time) 59 | 60 | if __name__ == "__main__": 61 | try: 62 | main() 63 | except KeyboardInterrupt: 64 | print(k8s.Output.RED + "[ERROR] " \ 65 | + k8s.Output.RESET + 'Interrupted from keyboard!') 66 | try: 67 | sys.exit(0) 68 | except SystemExit: 69 | os._exit(0) -------------------------------------------------------------------------------- /objects/deployments.py: -------------------------------------------------------------------------------- 1 | import os, time, json, argparse, sys 2 | from time import sleep 3 | start_time = time.time() 4 | from modules.main import ArgParse 5 | from modules.logging import Logger 6 | from modules import process as k8s 7 | from modules.get_deploy import K8sDeploy 8 | 9 | class _Deployment: 10 | def __init__(self, namespace, logger): 11 | self.namespace = namespace 12 | if not self.namespace: self.namespace = 'all' 13 | self.k8s_object = 'deployments' 14 | self.logger = logger 15 | self.k8s_object_list = K8sDeploy.get_deployments(self.namespace, self.logger) 16 | 17 | def check_deployment_security(self, v, l): 18 | headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 19 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'] 20 | data = k8s.Check.security_context(self.k8s_object, self.k8s_object_list, headers, \ 21 | v, self.namespace, l, self.logger) 22 | if l: self.logger.info(data) 23 | 24 | def 
check_deployment_health_probes(self, v, l): 25 | headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', \ 26 | 'READINESS_PROPBE', 'LIVENESS_PROBE'] 27 | data = k8s.Check.health_probes(self.k8s_object, self.k8s_object_list, headers, \ 28 | v, self.namespace, l, self.logger) 29 | if l: self.logger.info(data) 30 | 31 | def check_deployment_resources(self, v, l): 32 | headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'LIMITS', \ 33 | 'REQUESTS'] 34 | data = k8s.Check.resources(self.k8s_object, self.k8s_object_list, \ 35 | headers, v, self.namespace, l, self.logger) 36 | if l: self.logger.info(data) 37 | 38 | def check_deployment_strategy(self, v, l): 39 | headers = ['DEPLOYMENT', 'STRATEGY_TYPE'] 40 | data = k8s.Check.strategy(self.k8s_object, self.k8s_object_list, headers, \ 41 | v, self.namespace, l, self.logger) 42 | if l: self.logger.info(data) 43 | 44 | def check_replica(self, v, l): 45 | headers = ['NAMESPACE', 'DEPLOYMENT', 'REPLICA_COUNT'] 46 | data = k8s.Check.replica(self.k8s_object, self.k8s_object_list, headers,\ 47 | v, self.namespace, l, self.logger) 48 | if l: self.logger.info(data) 49 | 50 | def check_deployment_tolerations_affinity_node_selector_priority(self, v, l): 51 | headers = ['NAMESPACE', 'DEPLOYMENT', 'NODE_SELECTOR', 'TOLERATIONS', \ 52 | 'AFFINITY', 'PRIORITY_CLASS'] 53 | data = k8s.Check.tolerations_affinity_node_selector_priority(self.k8s_object, \ 54 | self.k8s_object_list, headers, v, self.namespace, l, self.logger) 55 | if l: self.logger.info(data) 56 | 57 | def call_all(v, ns, l, logger): 58 | call = _Deployment(ns, logger) 59 | call.check_deployment_security(v, l) 60 | call.check_deployment_health_probes(v, l) 61 | call.check_deployment_resources(v, l) 62 | call.check_deployment_strategy(v, l) 63 | call.check_replica(v, l) 64 | call.check_deployment_tolerations_affinity_node_selector_priority(v, l) 65 | 66 | def main(): 67 | args = ArgParse.arg_parse() 68 | # args is [u, verbose, ns, l, format, silent] 69 | logger = 
Logger.get_logger(args.format, args.silent) 70 | if args: 71 | call_all(args.verbose, args.namespace, args.logging, logger) 72 | k8s.Output.time_taken(start_time) 73 | 74 | if __name__ == "__main__": 75 | try: 76 | main() 77 | except KeyboardInterrupt: 78 | print(k8s.Output.RED + "[ERROR] " \ 79 | + k8s.Output.RESET + 'Interrupted from keyboard!') 80 | try: 81 | sys.exit(0) 82 | except SystemExit: 83 | os._exit(0) -------------------------------------------------------------------------------- /objects/images.py: -------------------------------------------------------------------------------- 1 | import sys, time, os, re, getopt 2 | import requests 3 | start_time = time.time() 4 | from modules.main import GetOpts 5 | from modules import logging as logger 6 | from modules import process as k8s 7 | from modules.get_deploy import K8sDeploy 8 | 9 | class Images: 10 | global _logger, k8s_object 11 | _logger = logger.get_logger('Images') 12 | k8s_object = 'images' 13 | 14 | def __init__(self,ns): 15 | global k8s_object_list 16 | self.ns = ns 17 | if not ns: 18 | ns = 'all' 19 | k8s_object_list = K8sDeploy.get_deployments(ns) 20 | 21 | def get_images(v, ns, l): 22 | data = [] 23 | for item in k8s_object_list.items: 24 | for container in item.spec.template.spec.containers: 25 | data.append([item.metadata.namespace, item.metadata.name, container.name, container.image, \ 26 | container.image_pull_policy]) 27 | return data 28 | 29 | def list_images(v, ns, l): 30 | headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE:TAG', \ 31 | 'IMAGE_PULL_POLICY'] 32 | data = Images.get_images(v, ns, l) 33 | k8s.Output.print_table(data, headers, True, l) 34 | 35 | def get_last_updated_tag(v, ns, l): 36 | repo = [] 37 | data = Images.get_images(v, ns, l) 38 | headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE_PULL_POLICY', \ 39 | 'IMAGE:TAG', 'LATEST_TAG_AVAILABLE'] 40 | print ("\n[INFO] Checking for latest image tags...") 41 | result = [] 42 | for image in data: 43 | 
image_repo_name = image[3].rsplit(':', 1)[0] 44 | if not any(x in image_repo_name for x in ['gcr','quay','docker.io']): 45 | repo_image_url = "https://hub.docker.com/v2/repositories/{}/tags".format(image_repo_name) 46 | try: 47 | results = requests.get(repo_image_url).json()['results'] 48 | except: 49 | pass 50 | 51 | for repo in results: 52 | if not any(x in repo for x in ['dev','latest','beta','rc']): 53 | repo_name = repo['name'].rsplit('-', 1)[0] 54 | break 55 | # not feasible for google docker registry as oauth token is needed 56 | # elif 'gcr' in image_repo_name: 57 | # repo_image_url = "https://gcr.io/v2/{}/tags/list".format(image_repo_name) 58 | # results = requests.get(repo_image_url).json() 59 | # print (results) 60 | else: 61 | repo_name = u'\u2717' 62 | result.append([image[0], image[1], image[2], image[4], image[3], repo_name]) 63 | k8s.Output.print_table(result, headers, True, l) 64 | 65 | def image_recommendation(v, ns, l): 66 | config_not_defined, if_not_present, always = [], [], [] 67 | headers = ['NAMESPACE', 'DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE:TAG', \ 68 | 'IMAGE_PULL_POLICY'] 69 | data = Images.get_images(v, ns, l) 70 | for image in data: 71 | if not 'Always' in image[-1]: 72 | config_not_defined.append(image[3]) 73 | if 'IfNotPresent' in image[-1]: 74 | if_not_present.append(True) 75 | if 'Always' in image[-1]: 76 | always.append(True) 77 | print ("\n{}: {}".format('images', len(k8s_object_list.items))) 78 | data_if_not_present = k8s.Output.bar(if_not_present, data,'with image pull-policy', \ 79 | 'deployments', '"IfNotPresent"', k8s.Output.YELLOW) 80 | data_always = k8s.Output.bar(always, data,'with image pull-policy', 'deployments',\ 81 | '"Always"', k8s.Output.GREEN) 82 | data_never = k8s.Output.bar(config_not_defined, data, \ 83 | 'has not defined recommended image pull-policy', \ 84 | 'deployments', '"Always"', k8s.Output.RED) 85 | 86 | if l: 87 | # creating analysis data for logging 88 | analysis = {"container_property": 
"image_pull_policy", 89 | "total_images_count": len(data), 90 | "if_not_present_pull_policy_containers_count": data_if_not_present, 91 | "always_pull_policy_containers_count": data_always, 92 | "never_pull_policy_containers_count": data_never} 93 | json_data = k8s.Output.json_out(data, analysis, headers, k8s_object, 'image_pull_policy', ns) 94 | _logger.info(json_data) 95 | 96 | def call_all(v, ns, l): 97 | Images(ns) 98 | Images.list_images(v, ns, l) 99 | Images.image_recommendation(v, ns, l) 100 | if v: Images.get_last_updated_tag(v, ns, l) 101 | 102 | def main(): 103 | options = GetOpts.get_opts() 104 | if options[0]: 105 | usage() 106 | if options: 107 | call_all(options[1], options[2], options[3]) 108 | k8s.Output.time_taken(start_time) 109 | 110 | if __name__ == "__main__": 111 | try: 112 | main() 113 | except KeyboardInterrupt: 114 | print(k8s.Output.RED + "[ERROR] " \ 115 | + k8s.Output.RESET + 'Interrupted from keyboard!') 116 | try: 117 | sys.exit(0) 118 | except SystemExit: 119 | os._exit(0) -------------------------------------------------------------------------------- /objects/ingress.py: -------------------------------------------------------------------------------- 1 | import sys, time, os, getopt, argparse 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules import process as k8s 5 | from modules.logging import Logger 6 | from modules.get_ingress import K8sIngress 7 | from modules.get_ns import K8sNameSpace 8 | 9 | class _Ingress: 10 | def __init__(self, namespace, logger): 11 | self.namespace = namespace 12 | self.logger = logger 13 | if not self.namespace: 14 | self.namespace = 'all' 15 | self.k8s_object_list = K8sIngress.get_ingress(self.namespace, self.logger) 16 | 17 | if not len(self.k8s_object_list.items): 18 | logger.warning("No ingress found!") 19 | sys.exit() 20 | self.k8s_object = 'ingress' 21 | 22 | def ingress_count(self, v, l): 23 | data, total_ing = [], 0 24 | ns_list = K8sNameSpace.get_ns(self.logger) 
25 | headers = ['NAMESPACE', 'INGRESS'] 26 | for ns in ns_list.items: 27 | ing_count = 0 28 | for item in self.k8s_object_list.items: 29 | 30 | if item.metadata.namespace == ns.metadata.name: 31 | ing_count += 1 32 | if ing_count: data.append([ns.metadata.name, ing_count]) 33 | for i in data: 34 | total_ing = total_ing + i[1] 35 | data = k8s.Output.append_hyphen(data, '-------') 36 | data.append(["Total: " , total_ing]) 37 | k8s.Output.print_table(data, headers, True, l) 38 | 39 | def list_ingress(self, v, l): 40 | headers = ['NAMESPACE', 'INGRESS', 'RULES', 'HOST [SERVICE:PORT]'] 41 | k8s.IngCheck.list_ingress(self.k8s_object_list, self.k8s_object, \ 42 | headers, v, self.namespace, l, self.logger) 43 | 44 | def call_all(v, namespace, l, logger): 45 | call = _Ingress(namespace, logger) 46 | call.ingress_count(v, l) 47 | call.list_ingress(v, l) 48 | 49 | def main(): 50 | args = ArgParse.arg_parse() 51 | # args is [u, verbose, ns, l, format, silent] 52 | logger = Logger.get_logger(args.format, args.silent) 53 | if args: 54 | call_all(args.verbose, args.namespace, args.logging, logger) 55 | k8s.Output.time_taken(start_time) 56 | 57 | if __name__ == "__main__": 58 | try: 59 | main() 60 | except KeyboardInterrupt: 61 | print(k8s.Output.RED + "[ERROR] " \ 62 | + k8s.Output.RESET + 'Interrupted from keyboard!') 63 | try: 64 | sys.exit(0) 65 | except SystemExit: 66 | os._exit(0) -------------------------------------------------------------------------------- /objects/jobs.py: -------------------------------------------------------------------------------- 1 | import time, os, sys 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules.logging import Logger 5 | from modules import process as k8s 6 | from modules.get_jobs import K8sJobs 7 | 8 | class Jobs: 9 | def __init__(self, namespace, logger): 10 | self.namespace = namespace 11 | self.logger = logger 12 | if not self.namespace: 13 | self.namespace = 'all' 14 | self.k8s_object_list = 
K8sJobs.get_jobs(self.namespace, self.logger) 15 | try: 16 | len(self.k8s_object_list.items) 17 | except: 18 | logger.warning("No jobs found!") 19 | sys.exit() 20 | self.k8s_object = 'jobs' 21 | 22 | def list_jobs(self, v, l): 23 | data = [] 24 | headers = ['NAMESPACE', 'JOBS'] 25 | for item in self.k8s_object_list.items: 26 | data.append([item.metadata.namespace, item.metadata.name]) 27 | data = k8s.Output.append_hyphen(data, '---------') 28 | data.append(["Total: " , len(data) - 1]) 29 | k8s.Output.print_table(data, headers, True, l) 30 | 31 | def check_jobs_pod_security(self, v, l): 32 | headers = ['NAMESPACE', 'JOBS', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 33 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'] 34 | data = k8s.Check.security_context(self.k8s_object, self.k8s_object_list, headers, \ 35 | v, self.namespace, l, self.logger) 36 | if l: self.logger.info(data) 37 | 38 | def check_jobs_pod_health_probes(self, v, l): 39 | headers = ['NAMESPACE', 'JOBS', 'CONTAINER_NAME', 'READINESS_PROPBE', \ 40 | 'LIVENESS_PROBE'] 41 | data = k8s.Check.health_probes(self.k8s_object, self.k8s_object_list, headers, \ 42 | v, self.namespace, l, self.logger) 43 | if l: self.logger.info(data) 44 | 45 | def check_jobs_pod_resources(self, v, l): 46 | headers = ['NAMESPACE', 'JOBS', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS'] 47 | data = k8s.Check.resources(self.k8s_object, self.k8s_object_list, headers, \ 48 | v, self.namespace, l, self.logger) 49 | if l: self.logger.info(data) 50 | 51 | def check_jobs_pod_tolerations_affinity_node_selector_priority(self, v, l): 52 | headers = ['NAMESPACE', 'JOBS', 'NODE_SELECTOR', 'TOLERATIONS', \ 53 | 'AFFINITY', 'PRIORITY_CLASS'] 54 | data = k8s.Check.tolerations_affinity_node_selector_priority(self.k8s_object, \ 55 | self.k8s_object_list, headers, v, self.namespace, l, self.logger) 56 | if l: self.logger.info(data) 57 | 58 | def call_all(v, namespace, l, logger): 59 | call = Jobs(namespace, logger) 60 | call.list_jobs(v, l) 61 | 
call.check_jobs_pod_security(v, l) 62 | call.check_jobs_pod_health_probes(v, l) 63 | call.check_jobs_pod_resources(v, l) 64 | call.check_jobs_pod_tolerations_affinity_node_selector_priority(v, l) 65 | 66 | def main(): 67 | args = ArgParse.arg_parse() 68 | # args is [u, verbose, ns, l, format, silent] 69 | logger = Logger.get_logger(args.format, args.silent) 70 | if args: 71 | call_all(args.verbose, args.namespace, args.logging, logger) 72 | k8s.Output.time_taken(start_time) 73 | 74 | if __name__ == "__main__": 75 | try: 76 | main() 77 | except KeyboardInterrupt: 78 | print(k8s.Output.RED + "[ERROR] " \ 79 | + k8s.Output.RESET + 'Interrupted from keyboard!') 80 | try: 81 | sys.exit(0) 82 | except SystemExit: 83 | os._exit(0) -------------------------------------------------------------------------------- /objects/modules/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/objects/modules/__init__.py -------------------------------------------------------------------------------- /objects/modules/__init__.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/objects/modules/__init__.pyc -------------------------------------------------------------------------------- /objects/modules/get_cm.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | #from .load_kube_config import kubeConfig 4 | 5 | #kubeConfig.load_kube_config() 6 | core = client.CoreV1Api() 7 | 8 | class K8sConfigMap: 9 | def get_cm(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace configMaps data...".format(ns)) 13 | namespace = ns 14 | configmaps = 
core.list_namespaced_config_map(namespace, timeout_seconds=10) 15 | else: 16 | logger.info("Fetching all namespace configMaps data.") 17 | configmaps = core.list_config_map_for_all_namespaces(timeout_seconds=10) 18 | return configmaps 19 | except ApiException as e: 20 | logger.warning("Exception when calling CoreV1Api->list_namespaced_config_map: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_crds.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | crd = client.ApiextensionsV1beta1Api() 7 | 8 | class K8sCRDs: 9 | def get_crds(logger): 10 | try: 11 | logger.info ("Fetching all crds data...") 12 | crds = crd.list_custom_resource_definition(timeout_seconds=10) 13 | return crds 14 | except ApiException as e: 15 | logger.warning("Exception when calling ApiextensionsV1Api->list_custom_resource_definition: %s\n" % e) 16 | -------------------------------------------------------------------------------- /objects/modules/get_deploy.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | apps = client.AppsV1Api() 7 | 8 | class K8sDeploy: 9 | def get_deployments(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace deployments data...".format(ns)) 13 | namespace = ns 14 | deployments = apps.list_namespaced_deployment(namespace, timeout_seconds=10) 15 | else: 16 | logger.info ("Fetching all namespace deployments data...") 17 | deployments = apps.list_deployment_for_all_namespaces(timeout_seconds=10) 18 | return deployments 19 | except ApiException as e: 20 | 
logger.warning("Exception when calling AppsV1Api->list_deployment_for_all_namespaces: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_ds.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | apps = client.AppsV1Api() 7 | 8 | class K8sDaemonSet: 9 | def get_damemonsets(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace dameonsets data...".format(ns)) 13 | namespace = ns 14 | damemonsets = apps.list_namespaced_daemon_set(namespace, timeout_seconds=10) 15 | else: 16 | logger.info ("Fetching all namespace dameonsets data...") 17 | damemonsets = apps.list_daemon_set_for_all_namespaces(timeout_seconds=10) 18 | return damemonsets 19 | except ApiException as e: 20 | logger.info("Exception when calling AppsV1Api->list_namespaced_daemon_set: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_ingress.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | networking = client.NetworkingV1beta1Api() 7 | 8 | class K8sIngress: 9 | def get_ingress(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace ingress data...".format(ns)) 13 | namespace = ns 14 | ingress = networking.list_namespaced_ingress(namespace, timeout_seconds=5) 15 | else: 16 | logger.info ("Fetching all namespace ingress data...") 17 | ingress = networking.list_ingress_for_all_namespaces(timeout_seconds=5) 18 | return ingress 19 | except ApiException as e: 20 | logger.warning ("Exception when calling 
NetworkingV1beta1Api->list_namespaced_ingress: %s\n" % e) 21 | -------------------------------------------------------------------------------- /objects/modules/get_jobs.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | batch = client.BatchV1Api() 7 | 8 | class K8sJobs: 9 | def get_jobs(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace jobs data...".format(ns)) 13 | namespace = ns 14 | jobs = batch.list_namespaced_job(namespace, timeout_seconds=10) 15 | else: 16 | logger.info ("Fetching all namespace jobs data...") 17 | jobs = batch.list_job_for_all_namespaces(timeout_seconds=10) 18 | return jobs 19 | except ApiException as e: 20 | logger.info("Exception when calling BatchV1Api->list jobs: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_nodes.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | core = client.CoreV1Api() 7 | 8 | class K8sNodes: 9 | def get_nodes(logger): 10 | logger.info ("Fetching nodes data...") 11 | try: 12 | node_list = core.list_node(timeout_seconds=10) 13 | return node_list 14 | except ApiException as e: 15 | print("Exception when calling CoreV1Api->list_node: %s\n" % e) 16 | 17 | def read_nodes(node): 18 | print ("\n[INFO] Fetching node {} data...".format(node)) 19 | try: 20 | name = node 21 | node_detail = core.read_node(name) 22 | return node_detail 23 | except ApiException as e: 24 | print("Exception when calling CoreV1Api->read_node: %s\n" % e) -------------------------------------------------------------------------------- 
/objects/modules/get_ns.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | core = client.CoreV1Api() 7 | 8 | class K8sNameSpace: 9 | def get_ns(logger): 10 | logger.info ("Fetching namespaces data...") 11 | try: 12 | ns_list = core.list_namespace(timeout_seconds=10) 13 | return ns_list 14 | except ApiException as e: 15 | logger.warning("Exception when calling CoreV1Api->list_namespace: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_pods.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | core = client.CoreV1Api() 7 | 8 | class K8sPods: 9 | def get_pods(ns, logger): 10 | try: 11 | if ns == 'all': 12 | logger.info ("Fetching all namespace pods data...") 13 | pods = core.list_pod_for_all_namespaces(timeout_seconds=10) 14 | else: 15 | logger.info ("Fetching {} namespace pods data...".format(ns)) 16 | namespace = ns 17 | pods = core.list_namespaced_pod(namespace, timeout_seconds=10) 18 | return pods 19 | except ApiException as e: 20 | logger.info("Exception when calling CoreV1Api->list_pod_for_all_namespaces: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_rbac.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | rbac = client.RbacAuthorizationV1Api() 7 | 8 | class K8sClusterRole: 9 | def list_cluster_role(logger): 10 | logger.info 
("Fetching clusterRoles data...") 11 | try: 12 | cluster_roles = rbac.list_cluster_role(timeout_seconds=10) 13 | return cluster_roles 14 | except ApiException as e: 15 | logger.warning("Exception when calling RbacAuthorizationV1Api->list_cluster_role: %s\n" % e) 16 | 17 | class K8sClusterRoleBinding: 18 | def list_cluster_role_binding(logger): 19 | logger.info ("Fetching clusterRoleBindings data...") 20 | try: 21 | cluster_role_bindings = rbac.list_cluster_role_binding(timeout_seconds=10) 22 | return cluster_role_bindings 23 | except ApiException as e: 24 | logger.warning("Exception when calling RbacAuthorizationV1Api->list_cluster_role_binding: %s\n" % e) 25 | 26 | class K8sNameSpaceRole: 27 | def list_namespaced_role(ns, logger): 28 | try: 29 | if ns != 'all': 30 | logger.info ("Fetching {} namespace roles data...".format(ns)) 31 | namespace = ns 32 | roles = rbac.list_namespaced_role(namespace, timeout_seconds=10) 33 | else: 34 | logger.info ("Fetching all namespace roles data...") 35 | roles = rbac.list_role_for_all_namespaces(timeout_seconds=10) 36 | return roles 37 | except ApiException as e: 38 | logger.warning("Exception when calling RbacAuthorizationV1Api->list_namespaced_role: %s\n" % e) 39 | 40 | class K8sNameSpaceRoleBinding: 41 | def list_namespaced_role_binding(ns, logger): 42 | try: 43 | if ns != 'all': 44 | logger.info ("Fetching {} namespace rolebindings data...".format(ns)) 45 | namespace = ns 46 | role_bindings = rbac.list_namespaced_role_binding(namespace, timeout_seconds=10) 47 | else: 48 | logger.info ("Fetching all namespace roleBindings data...") 49 | role_bindings = rbac.list_role_binding_for_all_namespaces(timeout_seconds=10) 50 | return role_bindings 51 | except ApiException as e: 52 | logger.info("Exception when calling RbacAuthorizationV1Api->list_namespaced_role_binding: %s\n" % e) 53 | 54 | 55 | -------------------------------------------------------------------------------- /objects/modules/get_sts.py: 
-------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | apps = client.AppsV1Api() 7 | 8 | class K8sStatefulSet: 9 | def get_sts(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace statefulSets data...".format(ns)) 13 | namespace = ns 14 | statefulsets = apps.list_namespaced_stateful_set(namespace, timeout_seconds=10) 15 | else: 16 | logger.info ("Fetching all namespace statefulSets data...") 17 | statefulsets = apps.list_stateful_set_for_all_namespaces(timeout_seconds=10) 18 | return statefulsets 19 | except ApiException as e: 20 | logger.warning("Exception when calling AppsV1Api->list_namespaced_stateful_set: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_svc.py: -------------------------------------------------------------------------------- 1 | from kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | core = client.CoreV1Api() 7 | 8 | class K8sService: 9 | def get_svc(ns, logger): 10 | try: 11 | if ns != 'all': 12 | logger.info ("Fetching {} namespace services data...".format(ns)) 13 | namespace = ns 14 | services = core.list_namespaced_service(namespace, timeout_seconds=10) 15 | else: 16 | logger.info ("Fetching all namespace services data...") 17 | services = core.list_service_for_all_namespaces(timeout_seconds=10) 18 | return services 19 | except ApiException as e: 20 | logger.warning("Exception when calling AppsV1Api->list_namespaced_service: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/get_svc_acc.py: -------------------------------------------------------------------------------- 1 | from 
kubernetes import client 2 | from kubernetes.client.rest import ApiException 3 | from .load_kube_config import kubeConfig 4 | 5 | kubeConfig.load_kube_config() 6 | core = client.CoreV1Api() 7 | 8 | class K8sSvcAcc: 9 | def get_svc_acc(ns): 10 | try: 11 | if ns != 'all': 12 | print ("\n[INFO] Fetching {} namespace service account data...".format(ns)) 13 | namespace = ns 14 | sa = core.list_namespaced_service_account(namespace, timeout_seconds=5) 15 | else: 16 | print ("\n[INFO] Fetching all namespace service account data...") 17 | sa = core.list_service_account_for_all_namespaces(timeout_seconds=5) 18 | return sa 19 | except ApiException as e: 20 | print("Exception when calling CoreV1Api->list_service_account_for_all_namespaces: %s\n" % e) -------------------------------------------------------------------------------- /objects/modules/load_kube_config.py: -------------------------------------------------------------------------------- 1 | from kubernetes import config 2 | 3 | class kubeConfig: 4 | def load_kube_config(): 5 | try: 6 | config.load_kube_config() 7 | except: 8 | config.load_incluster_config() -------------------------------------------------------------------------------- /objects/modules/logging.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | class Logger(): 4 | def get_logger(format, silent): 5 | logger = logging.getLogger() 6 | logger.setLevel(logging.INFO) 7 | if format == 'json': 8 | formatter = logging.Formatter('{"time": "%(asctime)s", "origin": "p%(process)s %(filename)s:%(name)s:%(lineno)d", "log_level": "%(levelname)s", "log": "%(message)s"}') 9 | else: 10 | formatter = logging.Formatter("[%(levelname)s] %(asctime)s p%(process)s %(filename)s:%(name)s:%(lineno)d %(message)s") 11 | console_handler = logging.StreamHandler() 12 | 13 | if silent: 14 | console_handler.setLevel(logging.WARNING) 15 | console_handler.setFormatter(formatter) 16 | logger.addHandler(console_handler) 17 | 
else: 18 | console_handler.setLevel(logging.DEBUG) 19 | console_handler.setFormatter(formatter) 20 | logger.addHandler(console_handler) 21 | logger.propagate = False 22 | 23 | return logger -------------------------------------------------------------------------------- /objects/modules/main.py: -------------------------------------------------------------------------------- 1 | import getopt, sys 2 | 3 | class GetOpts: 4 | def get_opts(): 5 | u, verbose, ns, l, format, silent = [''] * 6 6 | try: 7 | opts, args = getopt.getopt(sys.argv[1:], "hvn:lf:s", ["help", "verbose", \ 8 | "namespace", "logging", 'format', 'silent']) 9 | except getopt.GetoptError as err: 10 | print("[ERROR] {}. ".format(err) + \ 11 | "Please run script with -h flag to see valid options.") 12 | sys.exit(0) 13 | 14 | for o, a in opts: 15 | if o in ("-h", "--help"): 16 | u = True 17 | elif o in ("-v", "--verbose"): 18 | verbose = True 19 | elif o in ("-n", "--namespace"): 20 | ns = a 21 | elif o in ("-l", "--logging"): 22 | l = True 23 | elif o in ("-f", "--format"): 24 | format = a 25 | elif o in ("-s", "--silent"): 26 | silent = True 27 | else: 28 | assert False, "unhandled option" 29 | 30 | options = [u, verbose, ns, l, format, silent] 31 | return options 32 | 33 | import argparse 34 | 35 | class ArgParse: 36 | def arg_parse(): 37 | p = argparse.ArgumentParser(description='k8s-cluster-cheker is a tool to anlayse configurations of a k8s cluster: ' 38 | ' OS version(supports flatcar OS, coreOS & Ubuntu only)' 39 | ', Kubernetes version' 40 | ', Docker version' 41 | ', Admission Controllers' 42 | ', security-context of workloads' 43 | ', health-probes of workloads' 44 | ', QoS of workloads' 45 | ', types of services' 46 | ', workload running with single replica' 47 | ', rbac analysis' 48 | ', stale namespaces with no workloads') 49 | p.add_argument('-v', '--verbose', action='store_true', help='verbose mode. 
Use this flag to get namespaced pod level config details.') 50 | p.add_argument('-n', '--namespace', help='pass kubeconfig of the cluster. If not passed, picks KUBECONFIG from env') 51 | p.add_argument('-l', '--logging', help='Use this flag to generate logs in json format') 52 | p.add_argument('-f', '--format', help='Use this flag to generate output in given format. csv|json. Default is table format.') 53 | p.add_argument('-s', '--silent', help='Use this flag to silence the logging. Get only proccessed output.') 54 | p.add_argument('-b', '--body', nargs = 1, help="JSON file to be processed", type=argparse.FileType('r')) 55 | p.add_argument('--loglevel', default='INFO', help='sets logging level. default is INFO') 56 | 57 | args = p.parse_args() 58 | return args -------------------------------------------------------------------------------- /objects/modules/message.py: -------------------------------------------------------------------------------- 1 | print ("Fetching data " + u'\u2638' + u'\u2638' + u'\u2638') -------------------------------------------------------------------------------- /objects/modules/output.py: -------------------------------------------------------------------------------- 1 | from columnar import columnar 2 | from click import style 3 | from packaging import version 4 | import os, re, time, requests, json, csv 5 | 6 | class Output: 7 | RED = '\033[31m' 8 | GREEN = '\033[32m' 9 | YELLOW = '\033[33m' 10 | CYAN = '\033[36m' 11 | RESET = '\033[0m' 12 | BOLD = '\033[1;30m' 13 | # u'\u2717' means values is None or not defined 14 | # u'\u2714' means value is defined 15 | 16 | global patterns 17 | patterns = [(u'\u2714', lambda text: style(text, fg='green')), \ 18 | ('True', lambda text: style(text, fg='green')), \ 19 | ('False', lambda text: style(text, fg='yellow'))] 20 | 21 | def time_taken(start_time): 22 | print(Output.GREEN + "\nTotal time taken: " + Output.RESET + \ 23 | "{}s".format(round((time.time() - start_time), 2))) 24 | 25 | # prints 
separator line between output 26 | def separator(color, char, l): 27 | if l: return 28 | columns, rows = os.get_terminal_size(0) 29 | for i in range(columns): 30 | print (color + char, end="" + Output.RESET) 31 | print ("\n") 32 | 33 | # function to append hyphen to print total of any object 34 | def append_hyphen(data, hyphen): 35 | temp_bar = [] 36 | [temp_bar.append(hyphen) for x in range(len(data[0]))] 37 | data.append(temp_bar) 38 | 39 | return data 40 | 41 | # remove unicode characters for reporting 42 | def remove_unicode(x): 43 | a = ['No' if v in [u'\u2717', None] else v for v in x] 44 | b = ['Yes' if v in [u'\u2714'] else v for v in a] 45 | return b 46 | 47 | # define filename for csv_out and json_out functions 48 | def filename(directory, ns, k8s_object, config, extension): 49 | if 'all' in ns: 50 | filename = directory + "all_ns_" + k8s_object + "_" + \ 51 | config + "_" + extension 52 | else: 53 | filename = directory + ns + "_"+ k8s_object + "_" + \ 54 | config + "_" + extension 55 | return filename 56 | 57 | # converts list data to csv 58 | def csv_out(data, headers, k8s_object, config, ns): 59 | directory = './reports/csv/' 60 | if not os.path.exists(directory): 61 | os.makedirs(directory) 62 | filename = Output.filename(directory, ns, k8s_object, config, 'report.csv') 63 | with open(filename, "w", newline="") as file: 64 | writer = csv.writer(file, delimiter=',') 65 | writer.writerow(i for i in headers) 66 | for j in data: 67 | x = Output.remove_unicode(j) 68 | writer.writerow(x) 69 | file.close() 70 | 71 | # generating json data 72 | def json_out(data, analysis, headers, k8s_object, config, ns): 73 | json_data = [] 74 | headers = [x.lower() for x in headers] 75 | for item in data: 76 | temp_dic = {} 77 | # storing json data in dict for each list in data 78 | for i in range(len(headers)): 79 | for j in range(len(item)): 80 | temp_dic.update({headers[i]:item[i]}) 81 | 82 | # appending all json dicts to form a list 83 | json_data.append(temp_dic) 84 
| directory = './reports/json/' 85 | if not os.path.exists(directory): 86 | os.makedirs(directory) 87 | # writing out json data in file based on object type and config being checked 88 | filename = Output.filename(directory, ns, k8s_object, config, 'report.json') 89 | f = open(filename, 'w') 90 | if analysis: 91 | json_data = {"object": k8s_object, 92 | "analysis": analysis, 93 | "data": json_data} 94 | else: 95 | json_data = {"object": k8s_object, 96 | "data": json_data} 97 | 98 | f.write(json.dumps(json_data)) 99 | f.close() 100 | 101 | return json.dumps(json_data) 102 | 103 | # prints table from lists of lists: data 104 | def print_table(data, headers, verbose, l): 105 | if verbose and len(data) != 0 and not l: 106 | table = columnar(data, headers, no_borders=True, \ 107 | patterns=patterns, row_sep='-') 108 | print (table) 109 | else: 110 | return 111 | 112 | # prints analysis in bar format with %age, count and message 113 | def bar(not_defined, data, message, k8s_object, color, l, logger): 114 | show_bar = [] 115 | if len(not_defined) == 0: 116 | return 117 | percentage = round(((100.0 * len(not_defined) / len(data))), 2) 118 | 119 | for i in range(25): 120 | if int(i) < percentage / 4: 121 | show_bar.append(u'\u2588') 122 | else: 123 | show_bar.append(u'\u2591') 124 | 125 | if not l: 126 | if percentage != 0: 127 | print (color + "{}".format("".join(show_bar)) + Output.RESET + \ 128 | " {}% | {} {} {}.".format(percentage, \ 129 | len(not_defined), message, k8s_object)) 130 | else: 131 | print (Output.GREEN + "[OK] All {} have config defined."\ 132 | .format(k8s_object) + Output.RESET) 133 | data = {"count": len(not_defined), 134 | "percent": percentage} 135 | return data 136 | -------------------------------------------------------------------------------- /objects/modules/output.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/dguyhasnoname/k8s-cluster-checker/99f4175086bc42793da6bca6c4ef25a9f8252c16/objects/modules/output.pyc -------------------------------------------------------------------------------- /objects/modules/process.py: -------------------------------------------------------------------------------- 1 | from packaging import version 2 | import os, re, time, requests, json, csv 3 | #from .logging import Logger 4 | from .output import Output 5 | 6 | # global logger 7 | # logger = Logger.get_logger('', '') 8 | 9 | 10 | class Check: 11 | # check security context 12 | def security_context(k8s_object, k8s_object_list, headers, v, ns, l, logger): 13 | data, config_not_defined, privileged_containers, run_as_user, \ 14 | allow_privilege_escalation, read_only_root_filesystem, \ 15 | run_as_non_root = [], [], [], [], [], [], [] 16 | for item in k8s_object_list.items: 17 | k8s_object_name = item.metadata.name 18 | if 'pods' in k8s_object: 19 | containers = item.spec.containers 20 | else: 21 | containers = item.spec.template.spec.containers 22 | for container in containers: 23 | if container.security_context is not None: 24 | data.append([item.metadata.namespace, k8s_object_name, container.name, \ 25 | container.security_context.allow_privilege_escalation, \ 26 | container.security_context.privileged, \ 27 | container.security_context.read_only_root_filesystem, \ 28 | container.security_context.run_as_non_root, \ 29 | container.security_context.run_as_user ]) 30 | if container.security_context.privileged: 31 | privileged_containers.append(True) 32 | if container.security_context.run_as_user: 33 | run_as_user.append(True) 34 | if container.security_context.allow_privilege_escalation: 35 | allow_privilege_escalation.append(True) 36 | if container.security_context.read_only_root_filesystem: 37 | read_only_root_filesystem.append(True) 38 | if container.security_context.run_as_non_root: 39 | run_as_non_root.append(True) 40 | else: 41 | 
data.append([item.metadata.namespace, k8s_object_name, container.name, \ 42 | u'\u2717', u'\u2717', u'\u2717', u'\u2717', u'\u2717']) 43 | config_not_defined.append(True) 44 | 45 | print ("\nsecurity_context definition: {} {}".format(len(k8s_object_list.items), \ 46 | k8s_object)) 47 | data_no_sec_context = Output.bar(config_not_defined, data, \ 48 | "containers have no security_context defined in running", \ 49 | k8s_object, Output.RED, l, logger) 50 | data_privileged = Output.bar(privileged_containers, data, \ 51 | 'containers are in prvilleged mode in running', k8s_object, Output.RED, l, logger) 52 | data_allow_privilege_escalation = Output.bar(allow_privilege_escalation, data, \ 53 | 'containers found in allow_privilege_escalation mode in running', \ 54 | k8s_object, Output.RED, l, logger) 55 | data_run_as_user = Output.bar(run_as_user, data, \ 56 | "containers have some user defined in running", 57 | k8s_object, Output.GREEN, l, logger) 58 | data_run_as_non_root = Output.bar(run_as_non_root, data, \ 59 | "containers are having non_root_user in running", k8s_object, Output.GREEN, l, logger) 60 | data_read_only_root_filesystem = Output.bar(read_only_root_filesystem, data, \ 61 | "containers only have read-only root filesystem in all running", \ 62 | k8s_object, Output.GREEN, l, logger) 63 | Output.print_table(data, headers, v, l) 64 | Output.csv_out(data, headers, k8s_object, 'security_context', ns) 65 | 66 | # creating analysis data for logging 67 | analysis = {"container_property": "security_context", 68 | "total_container_count": len(data), 69 | "no_security_context_defined_containers": data_no_sec_context, 70 | "privileged_containers": data_privileged, 71 | "allow_privilege_escalation_containers": data_allow_privilege_escalation, 72 | "run_as_user": data_run_as_user, 73 | "read_only_root_filesystem_containers": data_read_only_root_filesystem} 74 | json_data = Output.json_out(data, analysis, headers, k8s_object, 'security_context', ns) 75 | 76 | return 
json_data 77 | 78 | # check health probes defined 79 | def health_probes(k8s_object, k8s_object_list, headers, v, ns, l, logger): 80 | data, config_not_defined, readiness_probe, liveness_probe, both = \ 81 | [], [], [], [], [] 82 | for item in k8s_object_list.items: 83 | k8s_object_name = item.metadata.name 84 | if 'pods' in k8s_object: 85 | containers = item.spec.containers 86 | else: 87 | containers = item.spec.template.spec.containers 88 | for container in containers: 89 | if container.readiness_probe is not None and \ 90 | container.liveness_probe is not None: 91 | data.append([item.metadata.namespace, k8s_object_name, \ 92 | container.name, u'\u2714', u'\u2714']) 93 | both.append(True) 94 | elif container.readiness_probe is None and \ 95 | container.liveness_probe is not None: 96 | data.append([item.metadata.namespace, k8s_object_name, \ 97 | container.name, u'\u2717', u'\u2714']) 98 | liveness_probe.append(True) 99 | elif container.readiness_probe is not None and \ 100 | container.liveness_probe is None: 101 | data.append([item.metadata.namespace, k8s_object_name, \ 102 | container.name, u'\u2714', u'\u2717']) 103 | readiness_probe.append(True) 104 | else: 105 | data.append([item.metadata.namespace, k8s_object_name, \ 106 | container.name, u'\u2717', u'\u2717']) 107 | config_not_defined.append(False) 108 | 109 | logger.info ("health_probes definition: {} {}"\ 110 | .format(len(k8s_object_list.items), k8s_object)) 111 | data_no_health_probes = Output.bar(config_not_defined,data, \ 112 | "containers found with no health probes in running", \ 113 | k8s_object, Output.RED, l, logger) 114 | data_livness_probe = Output.bar(liveness_probe, data, \ 115 | "containers found with only liveness probe defined in running", \ 116 | k8s_object, Output.YELLOW, l, logger) 117 | data_readiness_probe = Output.bar(readiness_probe, data, \ 118 | "containers found with only readiness probe defined in running", \ 119 | k8s_object, Output.YELLOW, l, logger) 120 | data_all_probe = 
Output.bar(both, data, \ 121 | "containers found having both liveness and readiness probe defined in running", 122 | k8s_object, Output.GREEN, l, logger) 123 | Output.print_table(data, headers, v, l) 124 | Output.csv_out(data, headers, k8s_object, 'health_probes', ns) 125 | 126 | # creating analysis data for logging 127 | analysis = {"container_property": "health_probes", 128 | "total_container_count": len(data), 129 | "no_health_probes_defined_containers": data_no_health_probes, 130 | "only_liveness_probe_defined_containers": data_livness_probe, 131 | "only_readiness_probe_defined_containers": data_readiness_probe, 132 | "all_probes_defined_containers": data_all_probe} 133 | json_data = Output.json_out(data, analysis, headers, k8s_object, 'health_probes', ns) 134 | 135 | return json_data 136 | 137 | # check resource requests/limits 138 | def resources(k8s_object, k8s_object_list, headers, v, ns, l, logger): 139 | data, config_not_defined, limits, requests, both = [], [], [], [], [] 140 | for item in k8s_object_list.items: 141 | k8s_object_name = item.metadata.name 142 | if 'pods' in k8s_object: 143 | containers = item.spec.containers 144 | else: 145 | containers = item.spec.template.spec.containers 146 | for container in containers: 147 | if container.resources.limits is not None and \ 148 | container.resources.requests is not None: 149 | data.append([item.metadata.namespace, k8s_object_name, \ 150 | container.name, u'\u2714', u'\u2714']) 151 | both.append(True) 152 | elif container.resources.limits is None and \ 153 | container.resources.requests is not None: 154 | data.append([item.metadata.namespace, k8s_object_name, \ 155 | container.name, u'\u2717', u'\u2714']) 156 | requests.append(True) 157 | elif container.resources.limits is not None and \ 158 | container.resources.requests is None: 159 | data.append([item.metadata.namespace, k8s_object_name, \ 160 | container.name, u'\u2714', u'\u2717']) 161 | limits.append(True) 162 | else: 163 | 
data.append([item.metadata.namespace, k8s_object_name, \ 164 | container.name, u'\u2717', u'\u2717']) 165 | config_not_defined.append(False) 166 | logger.info ("resource definition: {} {}".format(len(k8s_object_list.items), \ 167 | k8s_object)) 168 | data_no_resources = Output.bar(config_not_defined,data, \ 169 | "containers found without resources defined in running", \ 170 | k8s_object, Output.RED, l, logger) 171 | data_requests = Output.bar(requests,data, \ 172 | "containers found with only requests defined in running", 173 | k8s_object, Output.YELLOW, l, logger) 174 | data_limits = Output.bar(limits,data, \ 175 | "containers found with only limits defined in running", \ 176 | k8s_object, Output.YELLOW, l, logger) 177 | data_all = Output.bar(both,data, \ 178 | "containers found with both limits and requests defined in running", \ 179 | k8s_object, Output.GREEN, l, logger) 180 | Output.print_table(data, headers, v, l) 181 | Output.csv_out(data, headers, k8s_object, 'resource_definition', ns) 182 | 183 | # creating analysis data for logging 184 | analysis = {"container_property": "resources", 185 | "total_container_count": len(data), 186 | "no_resources_defined_containers": data_no_resources, 187 | "only_limits_defined_containers": data_limits, 188 | "only_requests_defined_containers": data_requests, 189 | "all_resources_defined_containers": data_all} 190 | json_data = Output.json_out(data, analysis, headers, k8s_object, 'resource_definition', ns) 191 | 192 | return json_data 193 | 194 | # check for rollout strategy 195 | def strategy(k8s_object, k8s_object_list, headers, v, ns, l, logger): 196 | data = [] 197 | for item in k8s_object_list.items: 198 | k8s_object_name = item.metadata.name 199 | if item.spec.strategy is not None: 200 | data.append([item.metadata.name, item.spec.strategy.type]) 201 | Output.print_table(data, headers, v, l) 202 | Output.csv_out(data, headers, k8s_object, 'rollout_strategy', ns) 203 | json_data = Output.json_out(data, '', headers, 
k8s_object, 'rollout_strategy', ns) 204 | return json_data 205 | 206 | # check for single replica 207 | def replica(k8s_object, k8s_object_list, headers, v, ns, l, logger): 208 | data, single_replica_count, multi_replica_count = [], [], [] 209 | for item in k8s_object_list.items: 210 | k8s_object_name = item.metadata.name 211 | if item.spec.replicas is not None: 212 | data.append([item.metadata.namespace, item.metadata.name, \ 213 | item.spec.replicas]) 214 | if item.spec.replicas == 1: 215 | single_replica_count.append(True) 216 | else: 217 | multi_replica_count.append(True) 218 | 219 | if len(single_replica_count) > 0: 220 | logger.info ("single replica check: {} {}".format(len(k8s_object_list.items), \ 221 | k8s_object)) 222 | data_single_replica = Output.bar(single_replica_count, data, str(k8s_object) + \ 223 | ' are running with 1 replica in all', k8s_object, Output.RED, l, logger) 224 | Output.print_table(data, headers, v, l) 225 | Output.csv_out(data, headers, k8s_object, 'single_replica_deployment', ns) 226 | 227 | # creating analysis data for logging 228 | analysis = {"deployment_property": "single_relica", 229 | "total_deployment_count": len(data), 230 | "single_replica_deployment_count": data_single_replica} 231 | 232 | json_data = Output.json_out(data, analysis, headers, k8s_object, 'single_replica_deployment', ns) 233 | return json_data 234 | return data 235 | 236 | def tolerations_affinity_node_selector_priority(k8s_object, k8s_object_list, headers, v, ns, l, logger): 237 | data = [] 238 | affinity, node_selector, toleration = "", "", "" 239 | for item in k8s_object_list.items: 240 | k8s_object_name = item.metadata.name 241 | if 'pods' in k8s_object: 242 | tolerations = item.spec.tolerations 243 | node_selectors = item.spec.node_selector 244 | affinitys = item.spec.affinity 245 | priority_class_name = item.spec.priority_class_name 246 | else: 247 | tolerations = item.spec.template.spec.tolerations 248 | node_selectors = 
item.spec.template.spec.node_selector 249 | affinitys = item.spec.template.spec.affinity 250 | priority_class_name = item.spec.template.spec.priority_class_name 251 | 252 | if tolerations is not None: 253 | toleration = u'\u2714' 254 | else: 255 | tolerations = u'\u2717' 256 | if node_selectors is not None: 257 | node_selector = u'\u2714' 258 | else: 259 | node_selector = u'\u2717' 260 | if affinitys is None: 261 | affinity = u'\u2717' 262 | elif affinitys.pod_anti_affinity is not None or \ 263 | affinitys.pod_affinity is not None or \ 264 | affinitys.node_affinity is not None: 265 | affinity = u'\u2714' 266 | else: 267 | affinity = u'\u2717' 268 | data.append([item.metadata.namespace, k8s_object_name, \ 269 | node_selector, toleration, affinity, priority_class_name]) 270 | if v or l: print ("\ntolerations_affinity_node_selector_priority check: {} {}"\ 271 | .format(len(k8s_object_list.items), k8s_object)) 272 | Output.print_table(data, headers, v, l) 273 | Output.csv_out(data, headers, k8s_object, \ 274 | 'tolerations_affinity_node_selector_priority', ns) 275 | json_data = Output.json_out(data, '',headers, k8s_object, \ 276 | 'tolerations_affinity_node_selector_priority', ns) 277 | 278 | return json_data 279 | 280 | def qos(k8s_object, k8s_object_list, headers, v, ns, l, logger): 281 | data, guaranteed, besteffort, burstable = [], [], [], [] 282 | if not k8s_object_list: return 283 | for item in k8s_object_list.items: 284 | data.append([item.metadata.namespace, item.metadata.name, \ 285 | item.status.qos_class]) 286 | if 'Guaranteed' in item.status.qos_class: 287 | guaranteed.append([item.metadata.namespace, item.metadata.name]) 288 | elif 'Burstable' in item.status.qos_class: 289 | burstable.append([item.metadata.namespace, item.metadata.name]) 290 | else: 291 | besteffort.append([item.metadata.namespace, item.metadata.name]) 292 | 293 | logger.info ("QoS check: {} {}".format(len(k8s_object_list.items), \ 294 | k8s_object)) 295 | data_guaranteed = 
Output.bar(guaranteed, data, str(k8s_object) + \ 296 | ' are having Guaranteed QoS out of all', k8s_object, Output.GREEN, l, logger) 297 | data_burstable = Output.bar(burstable, data, str(k8s_object) + \ 298 | ' are having Burstable QoS out of all', k8s_object, Output.YELLOW, l, logger) 299 | data_besteffort = Output.bar(besteffort, data, str(k8s_object) + \ 300 | ' are having BestEffort QoS out of all', k8s_object, Output.RED, l, logger) 301 | Output.print_table(data, headers, v, l) 302 | Output.csv_out(data, headers, k8s_object, 'QoS', ns) 303 | 304 | # creating analysis data for logging 305 | analysis = {"container_property": "qos", 306 | "total_pods_count": len(data), 307 | "guaranteed_pods_count": data_guaranteed, 308 | "burstable_pods_count": data_burstable, 309 | "besteffort_pods_count": data_besteffort} 310 | json_data = Output.json_out(data, analysis, headers, k8s_object, 'QoS', ns) 311 | 312 | return json_data 313 | 314 | def image_pull_policy(k8s_object, k8s_object_list, headers, v, ns, l, logger): 315 | data, if_not_present, always, never= [], [], [], [] 316 | config = 'image pull-policy' 317 | for item in k8s_object_list.items: 318 | if 'pods' in k8s_object: 319 | containers = item.spec.containers 320 | else: 321 | containers = item.spec.template.spec.containers 322 | for container in containers: 323 | data.append([item.metadata.name, container.name, \ 324 | container.image, container.image_pull_policy]) 325 | 326 | for image in data: 327 | if 'Always' in image[-1]: 328 | always.append(image[3]) 329 | elif 'IfNotPresent' in image[-1]: 330 | if_not_present.append(True) 331 | else: 332 | never.append(True) 333 | 334 | print ("\n{}: {} {}".format(config, len(k8s_object_list.items), \ 335 | k8s_object)) 336 | data_if_not_present = Output.bar(if_not_present, data, \ 337 | 'containers have "IfNotPresent" image pull-policy in all', \ 338 | k8s_object, Output.YELLOW, l, logger) 339 | data_always = Output.bar(always, data, \ 340 | 'containers have "Always" 
class IngCheck:
    # checking mapping of ingress
    def get_ing_rules(ingress_rule, v):
        """Render ingress rules as "host [service:port]" lines.

        A rule with no host is rendered with a leading "-".
        NOTE(review): uses `backend.service_name` / `service_port`, i.e. the
        pre-networking.k8s.io/v1 backend shape — confirm client API version.
        """
        data = ""
        for i in ingress_rule:
            for j in i.http.paths:
                if i.host is None:
                    data = data + "-" + " [" + j.backend.service_name + ":" + \
                    str(j.backend.service_port) + "]" + "\n"
                else:
                    data = data + i.host + " [" + j.backend.service_name + ":" \
                    + str(j.backend.service_port) + "]" + "\n"
        return data

    def list_ingress(k8s_object_list, k8s_object, headers, v, ns , l, logger):
        """Tabulate ingresses with their rule counts and rule mappings."""
        data, total_rules_count = [], 0
        for i in k8s_object_list.items:
            data.append([i.metadata.namespace, i.metadata.name, \
            len(i.spec.rules), IngCheck.get_ing_rules(i.spec.rules,v)])
            total_rules_count += len(i.spec.rules)
        Output.print_table(data, headers, v, l)
        Output.csv_out(data, headers, k8s_object, 'ingress', ns)

        # creating analysis data for logging
        analysis = {"ingress_property": "ingress_rules",
                    "total_ingress_count": len(data),
                    "total_ingress_rules": total_rules_count
                    }
        json_data = Output.json_out(data, analysis, headers, k8s_object, 'ingress', ns)
        if l: logger.info(json_data)

        return json_data

class CtrlProp:
    def read_admission_controllers(k8scc_dir):
        """Return the raw text of conf/admission-controllers under k8scc_dir."""
        admission_controllers_list = []
        with open(os.path.join(k8scc_dir, 'conf/admission-controllers'), "r") as file:
            admission_controllers_list = file.read()
        return admission_controllers_list

    # read property file from conf dir, file name being passed from compare_properties function
    def read_object_file(filename):
        """Return the first whitespace-delimited token of every line in the file."""
        object_args = []
        with open(filename, "r") as file:
            for line in file:
                object_args.append(line.split(None, 1)[0])
        return object_args

    # gets file name from check_ctrl_plane_pods_properties_operation function in ctrl-plane.py
    def compare_properties(filename, commands):
        """Mark each running command arg with a check, then append a cross for
        every recommended arg (from the conf file) that is not set."""
        data, command_list = [], []
        object_args = CtrlProp.read_object_file(filename)
        for c in commands:
            command = c.rsplit("=")[0]
            command_list.append(command)
            data.append([c, u'\u2714'])
        # compares the properties in conf file and commands args set in ctrl pods
        diff_list = list(set(object_args).difference(command_list))
        diff_list.sort()
        for i in diff_list:
            data.append([i, u'\u2717'])
        return data

    def check_admission_controllers(commands, v, ns, l, k8scc_dir):
        """Report which admission plugins are enabled versus an important set."""
        data, admission_plugins_enabled, admission_plugins_not_enabled, \
        headers = [], [], [], ['ADMISSION_PLUGINS', 'ENABLED']
        important_admission_plugins = ['AlwaysPullImages', \
        'DenyEscalatingExec', 'LimitRange', 'NodeRestriction', \
        'PodSecurityPolicy', 'ResourceQuota', 'SecurityContextDeny']

        # checking which addmission controllers are enabled
        for c in commands:
            if 'enable-admission-plugins' in c:
                admission_plugins_enabled = (c.rsplit("=")[1]).split(",")
                for i in admission_plugins_enabled:
                    data.append([i, u'\u2714'])
        # FIX: read the full plugin list unconditionally — the analysis dict
        # below always references it, so it must not depend on the flag
        # being present in `commands`
        admission_plugins_list = CtrlProp.read_admission_controllers(k8scc_dir)

        # checking difference in addmission controllers
        admission_plugins_not_enabled = list(set(important_admission_plugins) - \
        set(admission_plugins_enabled))

        for i in admission_plugins_not_enabled:
            data.append([i, u'\u2717'])

        if v:
            # converting string admission_plugins_list into list and looping over
            for i in admission_plugins_list.split(", "):
                data.append([i, u'\u2717'])
        if not v: print ("\nStatus of important admission controllers:")
        Output.print_table(data, headers, True, l)
        Output.csv_out(data, headers, 'admission_controllers', '', ns)

        analysis = {"ctrl_plane_property": "admission_controllers_status",
                    "admission_plugins_enabled_count": len(admission_plugins_enabled),
                    "admission_plugins_enabled": admission_plugins_enabled,
                    "admission_plugins_not_enabled_count": len(admission_plugins_not_enabled),
                    "admission_plugins_not_enabled": admission_plugins_not_enabled,
                    "admission_plugins_available": admission_plugins_list
                    }
        json_data = Output.json_out(data, analysis, headers, 'admission_controllers', '', ns)

        return json_data

    def secure_scheduler_check(commands):
        """Alert on insecure kube-scheduler flags (bind address, profiling)."""
        for c in commands:
            if '--address' in c and '--address=127.0.0.1' not in c:
                print (Output.RED + "[ALERT] " + Output.RESET + \
                "Scheduler is not bound to a non-loopback insecure address \n")
            # FIX: operands were reversed (`c in '--profiling'`), which tested
            # whether the whole command was a substring of the literal flag
            if '--profiling' in c:
                print (Output.RED + "[ALERT] " + Output.RESET + \
                "Disable profiling for reduced attack surface.\n")
class Service:
    # checking type of services
    def check_service(k8s_object, k8s_object_list, l, logger):
        """Classify services as ClusterIP / LoadBalancer / other and draw a
        summary bar for each bucket. Returns the three bar results."""
        buckets = {'cluster_ip': [], 'lb': [], 'other': []}
        for svc in k8s_object_list.items:
            if 'ClusterIP' in svc.spec.type:
                key = 'cluster_ip'
            elif 'LoadBalancer' in svc.spec.type:
                key = 'lb'
            else:
                key = 'other'
            buckets[key].append([svc.metadata.namespace, svc.metadata.name])

        print ("\n{}: {} {}".format('service type: ', \
        len(k8s_object_list.items), k8s_object))
        data_cluster_ip = Output.bar(buckets['cluster_ip'], k8s_object_list.items, \
        'ClusterIP type', k8s_object, Output.CYAN, l, logger)
        data_lb = Output.bar(buckets['lb'], k8s_object_list.items, \
        'LoadBalancer type', k8s_object, Output.CYAN, l, logger)
        data_others = Output.bar(buckets['other'], k8s_object_list.items, \
        'others type', k8s_object, Output.RED, l, logger)

        return [data_cluster_ip, data_lb, data_others]

    def get_service(k8s_object, k8s_object_list, headers, v, ns, l, logger):
        """Tabulate services (namespace, name, type, cluster IP, first
        selector label) and log a type-count analysis."""
        rows = []
        for svc in k8s_object_list.items:
            selector = svc.spec.selector
            if selector:
                # display only the first selector key/value pair
                app_label = next(iter(selector))
                label_text = app_label + ": " + selector[app_label]
            else:
                label_text = "None"
            rows.append([svc.metadata.namespace, svc.metadata.name, \
            svc.spec.type, svc.spec.cluster_ip, label_text])

        type_counts = Service.check_service(k8s_object, k8s_object_list, l, logger)
        Output.csv_out(rows, headers, k8s_object, 'service', ns)

        # creating analysis data for logging
        analysis = {"service_property": "service_type",
                    "total_service_count": len(rows),
                    "cluster_ip_type_count": type_counts[0],
                    "lb_type_count": type_counts[1],
                    "others_type_count": type_counts[2]
                    }
        json_data = Output.json_out(rows, analysis, headers, k8s_object, 'service', ns)
        if l: logger.info(json_data)
        return [json_data, rows]
class Rbac:
    def get_rules(rules):
        """Flatten a role's rule list for display.

        Returns [api_groups, resources, verbs, rules_count]: the first three
        are newline-joined per-rule strings (quotes stripped from the list
        repr), the last is the number of rules.
        """
        data, api_groups, resources, verbs, rules_count = [], "", "", "", 0
        for i in rules:
            # strip quote characters from the list repr for readable output
            current_api_group = re.sub("[']", '', str(i.api_groups))
            if current_api_group != "":
                api_groups = api_groups + current_api_group + "\n"
            else:
                api_groups = api_groups + "''" + "\n"
            resources = resources + re.sub("[']", '', str(i.resources)) + "\n"
            verbs = verbs + re.sub("[']", '', str(i.verbs)) + "\n"
            # FIX: count each rule once — the original added len(rules) on
            # every iteration, inflating the total to len(rules)**2
            rules_count += 1
        data = [api_groups, resources, verbs, rules_count]
        return data

    # analysis RBAC for permissions
    def analyse_role(data, headers, k8s_object, ns, l, logger):
        """Flag risky RBAC grants: full (*/*/*) permission, delete on all
        resources, impersonate, and pod exec. Row layout differs between
        'roles' (has a namespace column) and clusterroles; the verbs column
        is always i[-1] and resources i[-2].
        """
        full_perm, full_perm_list, full_perm_list_json, delete_perm, \
        impersonate_perm, exec_perm = [], [], [], [], [], []
        data_api_specific_delete_perm, data_impersonate_perm, \
        data_full_delete_perm, data_exec_perm = '', '', '', ''
        for i in data:
            if '*' in i[-1] and '*' in i[-2] and '*' in i[-3]:
                full_perm.append([i[0]])
                # creating data for roles with full permissions on all resources and apigroups
                if k8s_object == 'roles':
                    full_perm_list.append([i[0], i[1], i[2], i[3], i[4], i[5]])
                    full_perm_list_json.append({"role_name": i[0],
                                                "namespace": i[1],
                                                "api_groups": i[3].strip('\n'),
                                                "resources": i[4].strip('\n'),
                                                "verbs": i[5].strip('\n')
                                                })
                # creating separate data(due to difference in columns) for clusterroles with full permissions on all resources and apigroups
                else:
                    full_perm_list.append([i[0], i[1], i[2], i[3], i[4]])
                    full_perm_list_json.append({"cluster_role_name": i[0],
                                                "api_groups": i[2].strip('\n'),
                                                "resources": i[3].strip('\n'),
                                                "verbs": i[4].strip('\n')
                                                })
            if 'delete' in i[-1] and '*' in i[-2]: delete_perm.append([i[0]])
            # FIX: impersonate is a verb, so test the verbs column (i[-1]);
            # the original read i[4], which is *resources* for 'roles' rows
            if 'impersonate' in i[-1]: impersonate_perm.append([i[0]])
            if 'exec' in i[-2]: exec_perm.append([i[0]])

        print ("\n{}: {}".format(k8s_object, len(data)))
        if len(full_perm):
            data_full_delete_perm = Output.bar(full_perm, data, \
            'full permission(on ALL RESOURCES and APIs) ' \
            + Output.RED + u'\u2620' + u'\u2620' + u'\u2620' + Output.RESET, \
            k8s_object, Output.RED, l, logger)
            Output.print_table(full_perm_list, headers, True, l)
        else:
            print (Output.GREEN + "[OK] " + Output.RESET + \
            "No {} full permission ".format(k8s_object))
        if len(delete_perm):
            data_api_specific_delete_perm = Output.bar(delete_perm, data, \
            'delete permission(on ALL RESOURCES on specfic APIs)', \
            k8s_object, Output.RED, l, logger)
        if len(impersonate_perm):
            data_impersonate_perm = Output.bar(impersonate_perm, data, \
            'impersonate permission(on specfic APIs)', \
            k8s_object, Output.RED, l, logger)
        if len(exec_perm):
            data_exec_perm = Output.bar(exec_perm, data, \
            'exec permission(on pods)', \
            k8s_object, Output.RED, l, logger)
        Output.csv_out(data, headers, 'rbac', k8s_object, ns)

        # creating analysis data for logging
        analysis = {"rbac_type": k8s_object,
                    "total_rbac_type_count": len(data),
                    "full_delete_perm_role": data_full_delete_perm,
                    "full_permission_role_list": full_perm_list_json,
                    "api_specific_delete_perm_role": data_api_specific_delete_perm,
                    "impersonate_perm_role": data_impersonate_perm,
                    "exec_perm_role": data_exec_perm
                    }
        json_data = Output.json_out(data, analysis, headers, 'rbac', k8s_object, ns)
        if l: logger.info(json_data)
        return json_data
class NameSpace:
    #calculating count for a speific object in a namespace
    def get_ns_object_details(deployments, ds, sts, pods, svc, ingress, jobs,\
        roles, role_bindings, ns, ns_data):
        """Append one row of per-namespace object counts to ns_data.

        Row layout: [ns, deployments, daemonsets, statefulsets, pods,
        services, ingresses, jobs, roles, rolebindings]. Returns ns_data.
        """
        def tally(obj_list):
            # number of items scoped to the namespace under inspection
            return sum(1 for item in obj_list.items
                       if item.metadata.namespace == ns)

        head_counts = [tally(src) for src in (deployments, ds, sts, pods, svc)]
        # ingress may legitimately be empty/None (API group not present)
        ing_count = tally(ingress) if ingress else 0
        tail_counts = [tally(src) for src in (jobs, roles, role_bindings)]
        ns_data.append([ns] + head_counts + [ing_count] + tail_counts)

        return ns_data

    # calculating count of different objects type in all namespaces
    def get_ns_details(ns_list, deployments, ds, sts, pods, svc, ingress, \
        jobs, roles, role_bindings):
        """Build count rows for every namespace in ns_list, or for the single
        namespace when ns_list is a plain string."""
        ns_data = []
        if type(ns_list) != str:
            names = [item.metadata.name for item in ns_list.items]
        else:
            names = [ns_list]
        for ns in names:
            data = NameSpace.get_ns_object_details(deployments, ds, sts, \
                pods, svc, ingress, jobs, roles, role_bindings, ns, ns_data)
        return data
class Nodes:
    # module-level accumulator shared by the get_latest_* helpers below;
    # node_version_check renders and summarises it
    global version_check
    version_check = []
    outdated = Output.RED + 'outdated' + Output.RESET
    latest = Output.GREEN + 'latest' + Output.RESET

    # checking latest k8s version and comparing it with installed k8s version
    def get_latest_k8s_version(kubelet_version, logger):
        """Fetch the latest stable k8s release and record a comparison row."""
        session = requests.Session()
        # FIX: use the session that was opened (the original called
        # requests.get directly, leaving the session unused)
        ver = session.get("https://storage.googleapis.com/kubernetes-release/release/stable.txt")
        session.close()
        latest_k8s_version = ver.text
        if version.parse(str(kubelet_version)) < version.parse(str(latest_k8s_version)):
            logger.warning("Cluster is not running with latest kubernetes version: {}"\
                .format(latest_k8s_version))
            status = Nodes.outdated
        else:
            status = Nodes.latest

        return version_check.append(['K8S', latest_k8s_version, kubelet_version, status])

    # checking latest OS version and comparing it with installed OS version
    def get_latest_os_version(os, logger):
        """Compare the node OS (from its os_image string) to the latest
        upstream release for Flatcar / CoreOS / Ubuntu.

        NOTE(review): the parameter is named `os` and shadows the os module
        inside this function; kept for interface compatibility.
        """
        latest_os_version, current_os_version, status = [''] * 3
        if 'Flatcar' in os:
            session = requests.Session()
            ver = session.get("https://stable.release.flatcar-linux.net/amd64-usr/current/version.txt")
            session.close()
            latest_os_version = re.findall('(FLATCAR_VERSION=)(.+)', ver.text)
            # assumes os_image like "Flatcar Container Linux by Kinvolk <ver> ..." — TODO confirm
            current_os_version = os.split()[5]

            if version.parse(str(current_os_version)) < version.parse(str(latest_os_version[0][1])):
                logger.warning("Cluster nodes are not running on latest {}{}"\
                    .format(latest_os_version[0][0], latest_os_version[0][1]))
                status = Nodes.outdated
                latest_os_version = latest_os_version[0][1]
            else:
                status = Nodes.latest
                latest_os_version = latest_os_version[0][1]

        elif 'CoreOS' in os:
            logger.warning("Cluster nodes are running on CoreOS which is DPERECATED: https://coreos.com/os/eol/. " + \
            "PLEASE CONSIDER CHANGING THE DEPRECATED OS!")
            latest_os_version = 'EOL'
            status = 'EOL'
        elif 'Ubuntu' in os:
            current_os_version = re.sub('[^0-9.]','', os)
            session = requests.Session()
            # FIX: route the request through the session (was requests.get)
            ver = session.get("https://api.launchpad.net/devel/ubuntu/series")
            session.close()
            for x in ver.json()['entries']:
                if 'Current Stable Release' in x['status']:
                    latest_os_version = x['version']
            if version.parse(str(current_os_version)) < version.parse(str(latest_os_version)):
                logger.warning("Cluster nodes are not running on latest Ubuntu version.")
                status = Nodes.outdated
            else:
                status = Nodes.latest
        else:
            latest_os_version, current_os_version, status = ['OS not supported'] * 3

        return version_check.append(['OS', latest_os_version, current_os_version, status])

    # checking latest docker version and comparing it with installed docker version
    def get_latest_docker_version(docker_version, logger):
        """Compare installed docker version to the latest GitHub release tag."""
        ver = requests.get("https://api.github.com/repositories/7691631/releases/latest")
        latest_docker_version = ver.json()['tag_name']
        if version.parse(str(docker_version)) < version.parse(str(latest_docker_version)):
            logger.warning("Cluster nodes are not running on latest docker version: {}"\
                .format(latest_docker_version))
            status = Nodes.outdated
        else:
            status = Nodes.latest

        return version_check.append(['DOCKER', latest_docker_version, docker_version, status])

    def node_version_check(current_os_version, docker_version, kubelet_version, l, logger):
        """Run all component version checks and summarise outdated entries."""
        headers, outdated_nodes = ['COMPONENT', 'LATEST_VERSION', 'INSTALLED_VERSION', 'STATUS'], []
        Nodes.get_latest_os_version(current_os_version, logger)
        Nodes.get_latest_docker_version(docker_version, logger)
        Nodes.get_latest_k8s_version(kubelet_version, logger)
        for i in version_check:
            # FIX: the status strings are wrapped in ANSI colour codes, so the
            # original exact-membership test `i[3] in ['outdated', 'EOL']`
            # never matched the coloured 'outdated' value — use substring tests
            if 'outdated' in i[3] or 'EOL' in i[3]:
                outdated_nodes.append(True)
        data_outdated = Output.bar(outdated_nodes, version_check, \
        'version checks reported as', 'outdated', Output.RED, l, logger)
        Output.print_table(version_check, headers, True, l)
        Output.csv_out(version_check, headers, 'node', 'version', '')

        # creating analysis report
        analysis = {"node_property": "version",
                    "total_version_check": len(version_check),
                    "outdated_version_components": data_outdated
                    }
        json_data = Output.json_out(version_check, analysis, headers, 'node', 'version', '')

        return json_data

class CRDs:
    def check_ns_crd(k8s_object_list, k8s_object, data, headers, v, ns, l, logger):
        """Bucket CRDs by scope (Namespaced / Cluster / other), draw a bar per
        bucket and log a scope-count analysis over the supplied `data` rows."""
        ns_crds, cluster_crds, other_crds = [], [], []
        for item in k8s_object_list.items:
            if 'Namespaced' in item.spec.scope:
                ns_crds.append([item.spec.group, item.metadata.name, \
                item.spec.scope])
            elif 'Cluster' in item.spec.scope:
                cluster_crds.append([item.spec.group, item.metadata.name, \
                item.spec.scope])
            else:
                other_crds.append([item.spec.group, item.metadata.name, \
                item.spec.scope])
        data_ns_scope = Output.bar(ns_crds, k8s_object_list.items, \
        'Namespaced scope', k8s_object, Output.CYAN, l, logger)
        data_cluster_scope = Output.bar(cluster_crds, k8s_object_list.items, \
        'Cluster scope', k8s_object, Output.CYAN, l, logger)
        # FIX: the third bar previously overwrote data_cluster_scope, so the
        # analysis reported the "other" count under both keys
        data_other_scope = Output.bar(other_crds, k8s_object_list.items, \
        'Other scope', k8s_object, Output.CYAN, l, logger)

        Output.print_table(data, headers, v, l)
        Output.csv_out(data, headers, k8s_object, '', ns)

        # creating analysis data for logging
        analysis = {"crd_property": "crd_scope",
                    "total_crd_count": len(data),
                    "namespace_scope_crd_count": data_ns_scope,
                    "cluster_scope_crd_count": data_cluster_scope,
                    "other_scope_crd_count": data_other_scope
                    }
        json_data = Output.json_out(data, analysis, headers, k8s_object, '', ns)
        if l: logger.info(json_data)
        return json_data
# objects/namespace.py — per-namespace inventory and workload checks.
import time, os, argparse, json, sys
from concurrent.futures import ThreadPoolExecutor
start_time = time.time()
from modules.main import ArgParse
from modules import process as k8s
from modules.logging import Logger
from modules.get_pods import K8sPods
from modules.get_svc import K8sService
from modules.get_deploy import K8sDeploy
from modules.get_ds import K8sDaemonSet
from modules.get_sts import K8sStatefulSet
from modules.get_ns import K8sNameSpace
from modules.get_ingress import K8sIngress
from modules.get_jobs import K8sJobs
# NOTE(review): duplicate of the K8sService import above — harmless; confirm
# before removing
from modules.get_svc import K8sService
from modules.get_rbac import K8sNameSpaceRole, K8sNameSpaceRoleBinding

class Namespace:
    """Fetches all objects of a namespace (or all namespaces) in parallel and
    runs the standard per-workload checks from modules.process on them."""

    def __init__(self, logger):
        self.logger = logger
        # full namespace list fetched once up front; reused by get_ns_data
        self.all_ns_list = K8sNameSpace.get_ns(self.logger)

    def get_object_data(self, fun, k8s_object, ns, v, l):
        """Run security/probe/resource checks on one already-fetched object
        list.

        `fun` is the object list itself (not a callable, despite the name).
        Services get only the service report; workload types additionally get
        security-context, probe and resource checks, and deployments/
        statefulsets a single-replica check.
        """
        k8s_object_list = fun
        if len(k8s_object_list.items):
            if not 'services' in k8s_object:
                # NOTE(review): header spellings 'RUNA_AS_USER' and
                # 'READINESS_PROPBE' kept as-is — they appear verbatim in
                # other callers of these checks
                k8s.Check.security_context(k8s_object, k8s_object_list, \
                ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \
                'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', \
                'RUNA_AS_USER'], v, ns, l, self.logger)

                k8s.Check.health_probes(k8s_object, k8s_object_list, \
                ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'READINESS_PROPBE', \
                'LIVENESS_PROBE'], v, ns, l, self.logger)

                k8s.Check.resources(k8s_object, k8s_object_list, \
                ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS'], \
                v, ns, l, self.logger)

                if k8s_object in ['deployments','statefulsets']:
                    k8s.Check.replica(k8s_object + 'ns', k8s_object_list, \
                    ['NAMESPACE', 'DEPLOYMENT', 'REPLICA_COUNT'], v, ns, l, self.logger)
            else:
                k8s.Service.get_service(k8s_object, k8s_object_list, \
                ['NAMESPACE', 'SERVICE', 'SERVICE_TYPE', 'CLUSTER_IP', \
                'SELECTOR'], v, ns, l, self.logger)
        else:
            self.logger.warning ("No {} found!".format(k8s_object))

    def get_ns_data(self, v, ns, l):
        """Fetch all object types (in threads), print per-namespace counts,
        optionally (verbose) run the full per-namespace object checks, and
        report empty namespaces.

        Returns [count rows, pods, svc, deployments, ds, jobs, ingress].
        """
        data, sum_list, empty_ns = [], [], []
        if not ns:
            ns = 'all'
            ns_list = self.all_ns_list
        else:
            ns_list = ns

        # getting objects list in threads
        with ThreadPoolExecutor(max_workers=10) as executor:
            temp_deploy = executor.submit(K8sDeploy.get_deployments, ns, self.logger)
            temp_ds = executor.submit(K8sDaemonSet.get_damemonsets, ns, self.logger)
            temp_sts = executor.submit(K8sStatefulSet.get_sts, ns, self.logger)
            temp_pods = executor.submit(K8sPods.get_pods, ns, self.logger)
            temp_svc = executor.submit(K8sService.get_svc, ns, self.logger)
            temp_ingress = executor.submit(K8sIngress.get_ingress, ns, self.logger)
            temp_jobs = executor.submit(K8sJobs.get_jobs, ns, self.logger)
            temp_role = executor.submit(K8sNameSpaceRole.list_namespaced_role, ns, self.logger)
            temp_role_binding = \
            executor.submit(K8sNameSpaceRoleBinding.list_namespaced_role_binding, ns, self.logger)

        # stroing data from threads ran above
        deployments = temp_deploy.result()
        ds = temp_ds.result()
        sts = temp_sts.result()
        pods = temp_pods.result()
        svc = temp_svc.result()
        ingress = temp_ingress.result()
        jobs = temp_jobs.result()
        roles = temp_role.result()
        role_bindings = temp_role_binding.result()

        # getting count of each ns objects and printing in table
        print ("\n{} namespace details:".format(ns))
        data = k8s.NameSpace.get_ns_details(ns_list, deployments, ds, sts, \
        pods, svc, ingress, jobs, roles, role_bindings)

        # getting total object-wise count across the cluster
        total_ns, total_deploy, total_ds, total_sts, total_pods, total_svc, \
        total_ing , total_jobs, total_roles, total_role_bindings \
        = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        for i in data:
            total_ns += 1
            total_deploy = total_deploy + i[1]
            total_ds = total_ds + i[2]
            total_sts = total_sts + i[3]
            total_pods = total_pods + i[4]
            total_svc = total_svc + i[5]
            total_ing = total_ing + i[6]
            total_jobs = total_jobs + i[7]
            total_roles = total_roles + i[8]
            total_role_bindings = total_role_bindings + i[9]

            # a namespace with no workloads at all (ignoring some built-ins)
            if i[1] == 0 and i[2] == 0 and i[3] == 0 and i[4] == 0 and \
                not i[0] in ['default', 'kube-node-lease', 'kube-public', 'local']:
                empty_ns.append([i[0]])

        # calculating cluster-wide count of objects if namespace is no provided
        if type(ns_list) != str:
            data = k8s.Output.append_hyphen(data, '--------')
            data.append(["Total: " + str(total_ns), total_deploy, total_ds,
            total_sts, total_pods, total_svc, total_ing, total_jobs, \
            total_roles, total_role_bindings ])

        headers = ['NAMESPACE', 'DEPLOYMENTS', 'DAEMONSETS', 'STATEFULSETS', \
        'PODS', 'SERVICE', 'INGRESS', 'JOBS', 'ROLES', 'ROLE_BINDINGS']
        k8s.Output.print_table(data, headers, True, l)

        # NOTE(review): key 'total_servcies' and missing pods total kept as-is
        analysis = {"namespace_namespace_property": "namespace_object_count",
                    "total_namespaces": total_ns,
                    "total_deployments": total_deploy,
                    "total_daemonsets": total_ds,
                    "total_statefulsets": total_sts,
                    "total_servcies": total_svc,
                    "total_ingresses": total_ing,
                    "total_jobs": total_jobs,
                    "total_roles": total_roles,
                    "total_rolebindings": total_role_bindings}

        # data[:-2] drops the hyphen separator row and the totals row
        json_data_all_ns_detail = k8s.Output.json_out(data[:-2], analysis, headers, 'namespace', 'namespace_details', '')
        if l: self.logger.info(json_data_all_ns_detail)

        # get namespace wise object details. Will give output in verbose mode
        # NOTE(review): nested helper — closes over nothing but is called with
        # self explicitly from the verbose branch below
        def get_all_object_data(self, ns, v, l):
            print (k8s.Output.BOLD + "\nNamespace: " + \
            k8s.Output.RESET + "{}".format(ns))

            Namespace.get_object_data(self, K8sDeploy.get_deployments(ns, self.logger), \
            'deployments', ns, v, l)
            Namespace.get_object_data(self, K8sDaemonSet.get_damemonsets(ns, self.logger), \
            'damemonsets', ns, v, l)
            Namespace.get_object_data(self, K8sStatefulSet.get_sts(ns, self.logger), \
            'statefulsets', ns, v, l)
            Namespace.get_object_data(self, K8sJobs.get_jobs(ns, self.logger), \
            'jobs', ns, v, l)
            Namespace.get_object_data(self, K8sService.get_svc(ns, self.logger), \
            'services', ns, v, l)

        if v:
            if type(ns_list) != str:
                # all namespaces: walk each one with a separator between them
                for item in ns_list.items:
                    ns = item.metadata.name
                    k8s.Output.separator(k8s.Output.GREEN, '-', l)
                    get_all_object_data(self, ns, True, l)
            else:
                get_all_object_data(self, ns, v, l)

        # getting namespaces which are empty
        if len(empty_ns) > 0:
            k8s.Output.separator(k8s.Output.GREEN, '-', l)
            print (k8s.Output.YELLOW + "\n[WARNING] " + k8s.Output.RESET + \
            "Below {} namespaces have no workloads running: "\
            .format(len(empty_ns)))
            k8s.Output.print_table(empty_ns, headers, True, l)
            # creating single list of namespace for json parsing
            empyt_ns_list = [item for sublist in empty_ns for item in sublist]
            analysis = {"namespace_property": "empty_namespace",
                        "empty_namespace_count": len(empty_ns),
                        "empty_namespace_list": empyt_ns_list
                        }

            if l: self.logger.info(json.dumps(analysis))

        return [ data , pods, svc, deployments, ds, jobs, ingress ]

def call_all(v, ns, l, logger):
    # thin entry point used by main()
    call = Namespace(logger)
    call.get_ns_data(v, ns, l)

def main():
    args = ArgParse.arg_parse()
    # args is [u, verbose, ns, l, format, silent]
    logger = Logger.get_logger(args.format, args.silent)
    if args:
        call_all(args.verbose, args.namespace, args.logging, logger)
        k8s.Output.time_taken(start_time)

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print(k8s.Output.RED + "[ERROR] " \
        + k8s.Output.RESET + 'Interrupted from keyboard!')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
# objects/nodes.py — node inventory (roles, capacity, versions) and
# latest-version checks.
import sys, time, os, getopt, argparse, re
start_time = time.time()
from modules.main import ArgParse
from modules.logging import Logger
from modules import process as k8s
from modules.get_nodes import K8sNodes

class _Nodes:
    """Collects per-node details and runs OS/docker/k8s version checks."""

    def __init__(self, logger):
        self.logger = logger

    def get_nodes_details(self, v, l):
        """Tabulate node details, print a summary (or full table in verbose
        mode), and run the component version checks.

        NOTE(review): data rows carry 13 columns against 11 headers — the
        trailing volumes_used/volumes_attached columns feed the summary math
        below and are not shown as headers.
        """
        data = []
        logger = self.logger
        k8s_object_list = K8sNodes.get_nodes(logger)
        headers = ['NODE_NAME', 'K8S_VERSION', 'ROLE', 'NODE_CPU', 'NODE_MEM_GB', \
        'VOLUMES_USED/ATTACHED', 'POD_CIDR', 'OS_NAME', 'DOCKER_VERSION', 'INSTANCE_TYPE', 'REGION']
        for item in k8s_object_list.items:
            # capacity['memory'] is like '16384Ki'; digits / 1e6 ~ GB (approx)
            node_memory_gb = round((int(re.sub('\D', '', item.status.capacity['memory'])) / 1000000), 1)
            # container_runtime_version is like 'docker://19.3.8'
            docker_version = item.status.node_info.container_runtime_version.rsplit('//', 1)[1]

            # derive the node role from whichever well-known label is present
            if 'kubernetes.io/role' in item.metadata.labels:
                tag = item.metadata.labels['kubernetes.io/role']
            elif 'node.kubernetes.io/role' in item.metadata.labels:
                tag = item.metadata.labels['node.kubernetes.io/role']
            elif 'node-role.kubernetes.io/master' in item.metadata.labels:
                tag = 'master'
            elif 'node-role.kubernetes.io/node' in item.metadata.labels:
                tag = 'node'
            elif 'node-role.kubernetes.io/etcd' in item.metadata.labels:
                tag = 'etcd'
            else:
                tag = 'others'
            if 'node.kubernetes.io/instance-type' in item.metadata.labels:
                instance_type = item.metadata.labels['node.kubernetes.io/instance-type']
            else:
                instance_type = u'\u2717'
            if 'topology.kubernetes.io/region' in item.metadata.labels:
                region = item.metadata.labels['topology.kubernetes.io/region']
            else:
                region = u'\u2717'
            if item.status.volumes_in_use:
                volumes_used = len(item.status.volumes_in_use)
            else:
                volumes_used = u'\u2717'
            volumes = ""
            if item.status.volumes_attached:
                volumes_attached = len(item.status.volumes_attached)
                volumes = str(volumes_used) + '/' + str(volumes_attached)
            else:
                volumes_attached = u'\u2717'
                volumes = u'\u2717'

            data.append([item.metadata.name, item.status.node_info.kubelet_version, \
            tag, item.status.capacity['cpu'], \
            node_memory_gb, volumes, item.spec.pod_cidr, item.status.node_info.os_image, \
            docker_version, instance_type, region, volumes_used, volumes_attached])

        k8s.Output.csv_out(data, headers, 'nodes', 'detail', '')
        json_out = k8s.Output.json_out(data, '', headers, 'nodes', 'detail', '')

        if l: logger.info(json_out)

        # cluster-wide totals derived from the per-node rows
        total_cpu, total_mem, masters, nodes, etcd, others, \
        total_vol = 0, 0, 0, 0, 0, 0, 0
        for i in data:
            total_cpu += int(i[3])
            total_mem += i[4]
            if i[2] == 'master': masters += 1
            if i[2] == 'node': nodes += 1
            if i[2] == 'etcd': etcd += 1
            if i[2] == 'others': others += 1
            # i[11] is volumes_used; a cross mark means none reported
            if i[11] != u'\u2717': total_vol += i[11]

        total_nodes = 'total: ' + str(masters+nodes+etcd+others)
        node_types = 'masters: ' + str(masters) + "\n" + 'worker: ' \
        + str(nodes) + "\n" + 'etcd: ' + str(etcd) + "\n" + \
        "others: " + str(others)
        data = k8s.Output.append_hyphen(data, '---------')

        # NOTE(review): `item` and `docker_version` intentionally leak from
        # the loop above — the totals row shows the LAST node's versions,
        # assuming a homogeneous cluster; TODO confirm
        data.append([total_nodes, item.status.node_info.kubelet_version, \
        node_types, total_cpu, f'{round(total_mem, 2)}GB', total_vol, u'\u2717', \
        item.status.node_info.os_image, docker_version, u'\u2717', \
        u'\u2717', '', ''])
        if v:
            k8s.Output.print_table(data, headers, v, l)
        else:
            # print summary of nodes from last line of data list
            for i in data[-1:]:
                short_data = [[i[2], i[1], i[3], i[4], i[7], i[8], i[5]]]
                short_data.append([''] * 7)
                short_data.append(['total: ' \
                + str(masters+nodes+etcd+others), '', '', '', '', '', ''])
            headers = ['TOTAL_NODES', 'K8S_VERSION', 'TOTAL_CPU', \
            'TOTAL_MEM_GB', 'OS_NAME', 'DOCKER_VERSION', 'VOLUMES_IN_USE']
            k8s.Output.print_table(short_data, headers, True, l)

        logger.info ("Checking for latest and installed versions.")
        data_version_check = k8s.Nodes.node_version_check(item.status.node_info.os_image, \
        docker_version, item.status.node_info.kubelet_version, l, logger)

        if l: logger.info(data_version_check)

def call_all(v, l, logger):
    # thin entry point used by main()
    call = _Nodes(logger)
    call.get_nodes_details(v, l)

def main():
    args = ArgParse.arg_parse()
    # args is [u, verbose, ns, l, format, silent]
    logger = Logger.get_logger(args.format, args.silent)
    if args:
        call_all(args.verbose, args.logging, logger)
        k8s.Output.time_taken(start_time)

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print(k8s.Output.RED + "[ERROR] " \
        + k8s.Output.RESET + 'Interrupted from keyboard!')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
-------------------------------------------------------------------------------- 1 | import time, os, argparse, sys 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules.logging import Logger 5 | from modules import process as k8s 6 | from modules.get_pods import K8sPods 7 | 8 | class _Pods: 9 | def __init__(self, namespace, logger): 10 | self.logger = logger 11 | if not namespace: 12 | self.namespace = 'all' 13 | else: 14 | self.namespace = namespace 15 | self.k8s_object_list = K8sPods.get_pods(self.namespace, self.logger) 16 | self.k8s_object = 'pods' 17 | 18 | def get_namespaced_pod_list(self, v, l): 19 | data = [] 20 | headers = ['NAMESPACE', 'POD'] 21 | for item in self.k8s_object_list.items: 22 | data.append([item.metadata.namespace, item.metadata.name]) 23 | if v: print ("Total pods: {}".format(len(data))) 24 | k8s.Output.print_table(data, headers, v) 25 | return json.dumps(data) 26 | 27 | def check_pod_security(self, v, l): 28 | headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 29 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'] 30 | data = k8s.Check.security_context(self.k8s_object, self.k8s_object_list, headers, \ 31 | v, self.namespace, l, self.logger) 32 | if l: self.logger.info(data) 33 | 34 | def check_pod_health_probes(self, v, l): 35 | headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'READINESS_PROPBE', \ 36 | 'LIVENESS_PROBE'] 37 | data = k8s.Check.health_probes(self.k8s_object, self.k8s_object_list, headers, \ 38 | v, self.namespace, l, self.logger) 39 | if l: self.logger.info(data) 40 | 41 | def check_pod_resources(self, v, l): 42 | headers = ['NAMESPACE', 'POD', 'CONTAINER_NAME', 'LIMITS', 'REQUESTS'] 43 | data = k8s.Check.resources(self.k8s_object, self.k8s_object_list, headers, \ 44 | v, self.namespace, l, self.logger) 45 | if l: self.logger.info(data) 46 | 47 | def check_pod_qos(self, v, l): 48 | headers = ['NAMESPACE', 'POD', 'QoS'] 49 | data = k8s.Check.qos(self.k8s_object, 
self.k8s_object_list, headers, \ 50 | v, self.namespace, l, self.logger) 51 | if l: self.logger.info(data) 52 | 53 | def check_pod_tolerations_affinity_node_selector_priority(self, v, l): 54 | headers = ['NAMESPACE', 'POD', 'NODE_SELECTOR', 'TOLERATIONS', \ 55 | 'AFFINITY', 'PRIORITY_CLASS'] 56 | data = k8s.Check.tolerations_affinity_node_selector_priority(self.k8s_object, \ 57 | self.k8s_object_list, headers, v, self.namespace, l, self.logger) 58 | if l: self.logger.info(data) 59 | 60 | def check_image_pull_policy(self, v, l): 61 | headers = ['DEPLOYMENT', 'CONTAINER_NAME', 'IMAGE', 'IMAGE_PULL_POLICY'] 62 | data = k8s.Check.image_pull_policy(self.k8s_object, self.k8s_object_list, headers, \ 63 | v, self.namespace, l, self.logger) 64 | if l: self.logger.info(data) 65 | 66 | def call_all(v, ns, l, logger): 67 | call = _Pods(ns, logger) 68 | call.check_pod_security(v, l) 69 | call.check_pod_health_probes(v, l) 70 | call.check_pod_resources(v, l) 71 | call.check_pod_qos(v, l) 72 | call.check_image_pull_policy(v, l) 73 | call.check_pod_tolerations_affinity_node_selector_priority(v, l) 74 | 75 | def main(): 76 | args = ArgParse.arg_parse() 77 | # args is [u, verbose, ns, l, format, silent] 78 | logger = Logger.get_logger(args.format, args.silent) 79 | if args: 80 | call_all(args.verbose, args.namespace, args.logging, logger) 81 | k8s.Output.time_taken(start_time) 82 | 83 | if __name__ == "__main__": 84 | try: 85 | main() 86 | except KeyboardInterrupt: 87 | print(k8s.Output.RED + "[ERROR] " \ 88 | + k8s.Output.RESET + 'Interrupted from keyboard!') 89 | try: 90 | sys.exit(0) 91 | except SystemExit: 92 | os._exit(0) -------------------------------------------------------------------------------- /objects/rbac.py: -------------------------------------------------------------------------------- 1 | import time, os, argparse, re, sys 2 | from concurrent.futures import ThreadPoolExecutor 3 | start_time = time.time() 4 | from modules.main import ArgParse 5 | from 
modules.logging import Logger 6 | from modules import process as k8s 7 | from modules.get_rbac import K8sClusterRole, K8sClusterRoleBinding, K8sNameSpaceRole, K8sNameSpaceRoleBinding 8 | 9 | class ClusterRBAC: 10 | def __init__(self, namespace, logger): 11 | self.namespace = namespace 12 | self.logger = logger 13 | if not self.namespace: self.namespace = 'all' 14 | # pulling rbac data in threads for fast execution 15 | 16 | with ThreadPoolExecutor(max_workers=5) as executor: 17 | tmp_cluster_role_list = \ 18 | executor.submit(K8sClusterRole.list_cluster_role, self.logger) 19 | tmp_cluster_role_binding_list = \ 20 | executor.submit(K8sClusterRoleBinding.list_cluster_role_binding, self.logger) 21 | tmp_ns_role_list = \ 22 | executor.submit(K8sNameSpaceRole.list_namespaced_role, self.namespace, self.logger) 23 | tmp_ns_role_binding_list = \ 24 | executor.submit(K8sNameSpaceRoleBinding.list_namespaced_role_binding, self.namespace, self.logger) 25 | 26 | self.cluster_role_list = tmp_cluster_role_list.result() 27 | self.cluster_role_binding_list = tmp_cluster_role_binding_list.result() 28 | self.ns_role_list = tmp_ns_role_list.result() 29 | self.ns_role_binding_list = tmp_ns_role_binding_list.result() 30 | 31 | def get_rbac_count(self, v, l): 32 | headers = ['CLUSTER_ROLE', 'CLUSTER_ROLE_BINDING', 'ROLE', \ 33 | 'ROLE_BINDING'] 34 | k8s.Output.print_table([[len(self.cluster_role_list.items), \ 35 | len(self.cluster_role_binding_list.items), len(self.ns_role_list.items), \ 36 | len(self.ns_role_binding_list.items)]], headers, True, l) 37 | 38 | def get_cluster_role(self, v, l): 39 | k8s_object = "clusterroles" 40 | data = [] 41 | headers = ['CLUSTER_ROLE', 'RULES', 'API_GROUPS', 'RESOURCES', 'VERBS'] 42 | 43 | for item in self.cluster_role_list.items: 44 | if item.rules: 45 | rules = k8s.Rbac.get_rules(item.rules) 46 | data.append([item.metadata.name, len(item.rules), \ 47 | rules[0], rules[1], rules[2]]) 48 | else: 49 | data.append([item.metadata.name, "-", "-", "-", 
"-"]) 50 | k8s.Rbac.analyse_role(data, headers, k8s_object, 'all', l, self.logger) 51 | data = k8s.Output.append_hyphen(data, '-----------') 52 | data.append(["Total: " + str(len(self.cluster_role_list.items)), \ 53 | rules[3], "-", "-", "-"]) 54 | k8s.Output.print_table(data, headers, v, l) 55 | 56 | def get_cluster_role_binding(self, v, l): 57 | data, rules_count = [], 0 58 | headers = ['CLUSTER_ROLE_BINDING', 'CLUSTER_ROLE', \ 59 | 'SERVICE_ACCOUNT', 'NAMESPACE'] 60 | 61 | for item in self.cluster_role_binding_list.items: 62 | if item.subjects: 63 | for i in item.subjects: 64 | data.append([item.metadata.name, item.role_ref.name, \ 65 | i.name, i.namespace]) 66 | else: 67 | data.append([item.metadata.name, item.role_ref.name, '', '']) 68 | data = k8s.Output.append_hyphen(data, '-----------') 69 | data.append(["Total: " + str(len(self.cluster_role_binding_list.items)), \ 70 | "-", "-", "-"]) 71 | k8s.Output.print_table(data, headers, v, l) 72 | k8s.Output.csv_out(data, headers, 'rbac', 'cluster_role_binding', 'all') 73 | json_data = k8s.Output.json_out(data[:-2], '', headers, 'rbac', \ 74 | 'cluster_role_binding', 'all') 75 | if l: self.logger.info(json_data) 76 | 77 | def get_ns_role(self, v, l): 78 | data = [] 79 | k8s_object = 'roles' 80 | headers = ['ROLE', 'NAMESPACE', 'RULES', 'API_GROUPS', 'RESOURCES', 'VERBS'] 81 | for item in self.ns_role_list.items: 82 | if item.rules: 83 | rules = k8s.Rbac.get_rules(item.rules) 84 | data.append([item.metadata.name, item.metadata.namespace, \ 85 | len(item.rules), rules[0], rules[1], rules[2]]) 86 | else: 87 | data.append([item.metadata.name, item.metadata.namespace, \ 88 | "-", "-", "-", "-"]) 89 | k8s.Rbac.analyse_role(data, headers, k8s_object, self.namespace, l, self.logger) 90 | data = k8s.Output.append_hyphen(data, '---------') 91 | data.append(["Total: " + str(len(self.ns_role_list.items)), \ 92 | "-", "-", "-", "-", "-"]) 93 | k8s.Output.print_table(data, headers, v, l) 94 | 95 | def get_ns_role_binding(self, v, 
l): 96 | data = [] 97 | headers = ['ROLE_BINDING', 'NAMESPACE', 'ROLE', 'GROUP_BINDING'] 98 | for item in self.ns_role_binding_list.items: 99 | if item.subjects: 100 | subjects = "" 101 | for i in item.subjects: 102 | if len(item.subjects) > 1: 103 | subjects = subjects + i.name + '\n' 104 | else: 105 | subjects = i.name 106 | data.append([item.metadata.name, item.metadata.namespace, \ 107 | item.role_ref.name, subjects]) 108 | else: 109 | data.append([item.metadata.name, item.metadata.namespace, \ 110 | item.role_ref.name, 'None']) 111 | data = k8s.Output.append_hyphen(data, '---------') 112 | data.append(["Total: " + str(len(self.ns_role_binding_list.items)), \ 113 | "-", "-", "-"]) 114 | k8s.Output.print_table(data, headers, v, l) 115 | k8s.Output.csv_out(data, headers, 'rbac', 'ns_role_binding', self.namespace) 116 | json_data = k8s.Output.json_out(data[:-2], '', headers, 'rbac', \ 117 | 'ns_role_binding', self.namespace) 118 | if l: self.logger.info(json_data) 119 | 120 | def call_all(v, namespace, l, logger): 121 | call = ClusterRBAC(namespace, logger) 122 | call.get_rbac_count(v, l) 123 | if not namespace: 124 | call.get_cluster_role(v, l) 125 | call.get_cluster_role_binding(v, l) 126 | call.get_ns_role(v, l) 127 | call.get_ns_role_binding(v, l) 128 | 129 | def main(): 130 | args = ArgParse.arg_parse() 131 | # args is [u, verbose, ns, l, format, silent] 132 | logger = Logger.get_logger(args.format, args.silent) 133 | if args: 134 | call_all(args.verbose, args.namespace, args.logging, logger) 135 | k8s.Output.time_taken(start_time) 136 | 137 | if __name__ == "__main__": 138 | try: 139 | main() 140 | except KeyboardInterrupt: 141 | print(k8s.Output.RED + "[ERROR] " + \ 142 | k8s.Output.RESET + 'Interrupted from keyboard!') 143 | try: 144 | sys.exit(0) 145 | except SystemExit: 146 | os._exit(0) -------------------------------------------------------------------------------- /objects/serviceaccounts.py: 
-------------------------------------------------------------------------------- 1 | import time, os, json, sys 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules.logging import Logger 5 | from modules import process as k8s 6 | from modules.get_svc_acc import K8sSvcAcc 7 | 8 | class ServiceAccount: 9 | def __init__(self, namespace, logger): 10 | self.namespace = namespace 11 | self.logger = logger 12 | if not self.namespace: 13 | self.namespace = 'all' 14 | self.k8s_object_list = K8sSvcAcc.get_svc_acc(self.namespace) 15 | self.k8s_object = 'serviceaccount' 16 | 17 | def get_namespaced_sa_list(self, v, l): 18 | data = [] 19 | headers = ['NAMESPACE', 'SERVICE_ACCOUNT', 'SECRET'] 20 | for item in self.k8s_object_list.items: 21 | for j in item.secrets: 22 | data.append([item.metadata.namespace, item.metadata.name, j.name]) 23 | if v: print ("Total service accounts: {}".format(len(data))) 24 | k8s.Output.print_table(data, headers, True, l) 25 | return data 26 | 27 | def call_all(v, namespace, l, logger): 28 | call = ServiceAccount(namespace, logger) 29 | call.get_namespaced_sa_list(v, l) 30 | 31 | def main(): 32 | args = ArgParse.arg_parse() 33 | # args is [u, verbose, ns, l, format, silent] 34 | logger = Logger.get_logger(args.format, args.silent) 35 | if args: 36 | call_all(args.verbose, args.namespace, args.logging, logger) 37 | k8s.Output.time_taken(start_time) 38 | 39 | if __name__ == "__main__": 40 | try: 41 | main() 42 | except KeyboardInterrupt: 43 | print(k8s.Output.RED + "[ERROR] " \ 44 | + k8s.Output.RESET + 'Interrupted from keyboard!') 45 | try: 46 | sys.exit(0) 47 | except SystemExit: 48 | os._exit(0) -------------------------------------------------------------------------------- /objects/services.py: -------------------------------------------------------------------------------- 1 | import sys, time, os, getopt 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules.logging import Logger 5 | 
from modules import process as k8s 6 | from modules.get_svc import K8sService 7 | 8 | class _Service: 9 | def __init__(self, namespace, logger): 10 | self.logger = logger 11 | if not namespace: 12 | self.namespace = 'all' 13 | else: 14 | self.namespace = namespace 15 | self.k8s_object_list = K8sService.get_svc(self.namespace, self.logger) 16 | self.k8s_object = 'services' 17 | 18 | def list_service(self, v, l): 19 | headers = ['NAMESPACE', 'SERVICE', 'SERVICE_TYPE', 'CLUSTER_IP', \ 20 | 'SELECTOR'] 21 | data = k8s.Service.get_service(self.k8s_object, self.k8s_object_list, headers, \ 22 | v, self.namespace, l, self.logger) 23 | 24 | data = k8s.Output.append_hyphen(data[1], '---------') 25 | data.append(["Total: " , len(data) - 1 , '-', '-', '-']) 26 | k8s.Output.print_table(data, headers, v, l) 27 | 28 | def call_all(v, namespace, l, logger): 29 | call = _Service(namespace, logger) 30 | call.list_service(v, l) 31 | 32 | def main(): 33 | args = ArgParse.arg_parse() 34 | # args is [u, verbose, ns, l, format, silent] 35 | logger = Logger.get_logger(args.format, args.silent) 36 | if args: 37 | call_all(args.verbose, args.namespace, args.logging, logger) 38 | k8s.Output.time_taken(start_time) 39 | 40 | if __name__ == "__main__": 41 | try: 42 | main() 43 | except KeyboardInterrupt: 44 | print(k8s.Output.RED + "[ERROR] " \ 45 | + k8s.Output.RESET + 'Interrupted from keyboard!') 46 | try: 47 | sys.exit(0) 48 | except SystemExit: 49 | os._exit(0) -------------------------------------------------------------------------------- /objects/statefulsets.py: -------------------------------------------------------------------------------- 1 | import sys, time, os, getopt 2 | start_time = time.time() 3 | from modules.main import ArgParse 4 | from modules.logging import Logger 5 | from modules import process as k8s 6 | from modules.get_sts import K8sStatefulSet 7 | 8 | class _Sts: 9 | def __init__(self, namespace, logger): 10 | self.namespace = namespace 11 | self.logger = logger 12 | 
if not self.namespace: 13 | self.namespace = 'all' 14 | self.k8s_object_list = K8sStatefulSet.get_sts(self.namespace, self.logger) 15 | self.k8s_object = 'statefulsets' 16 | 17 | def check_sts_security(self, v, l): 18 | headers = ['NAMESPACE', 'STATEFULSET', 'CONTAINER_NAME', 'PRIVILEGED_ESC', \ 19 | 'PRIVILEGED', 'READ_ONLY_FS', 'RUN_AS_NON_ROOT', 'RUNA_AS_USER'] 20 | data = k8s.Check.security_context(self.k8s_object, self.k8s_object_list, headers, \ 21 | v, self.namespace, l, self.logger) 22 | if l: self.logger.info(data) 23 | 24 | def check_sts_health_probes(self, v, l): 25 | headers = ['NAMESPACE', 'STATEFULSET', 'CONTAINER_NAME', \ 26 | 'READINESS_PROPBE', 'LIVENESS_PROBE'] 27 | data = k8s.Check.health_probes(self.k8s_object, self.k8s_object_list, headers, \ 28 | v, self.namespace, l, self.logger) 29 | if l: self.logger.info(data) 30 | 31 | def check_sts_resources(self, v, l): 32 | headers = ['NAMESPACE', 'STATEFULSET', 'CONTAINER_NAME', \ 33 | 'LIMITS', 'REQUESTS'] 34 | data = k8s.Check.resources(self.k8s_object, self.k8s_object_list, headers, \ 35 | v, self.namespace, l, self.logger) 36 | if l: self.logger.info(data) 37 | 38 | def check_sts_tolerations_affinity_node_selector_priority(self, v, l): 39 | headers = ['NAMESPACE', 'STATEFULSET', 'NODE_SELECTOR', \ 40 | 'TOLERATIONS', 'AFFINITY', 'PRIORITY_CLASS'] 41 | data = k8s.Check.tolerations_affinity_node_selector_priority(self.k8s_object, \ 42 | self.k8s_object_list, headers, v, self.namespace, l, self.logger) 43 | if l: self.logger.info(data) 44 | 45 | def call_all(v, ns, l, logger): 46 | call = _Sts(ns, logger) 47 | call.check_sts_security(v, l) 48 | call.check_sts_health_probes(v, l) 49 | call.check_sts_resources(v, l) 50 | call.check_sts_tolerations_affinity_node_selector_priority(v, l) 51 | 52 | def main(): 53 | args = ArgParse.arg_parse() 54 | # args is [u, verbose, ns, l, format, silent] 55 | logger = Logger.get_logger(args.format, args.silent) 56 | if args: 57 | call_all(args.verbose, args.namespace, 
args.logging, logger) 58 | k8s.Output.time_taken(start_time) 59 | 60 | if __name__ == "__main__": 61 | try: 62 | main() 63 | except KeyboardInterrupt: 64 | print(k8s.Output.RED + "[ERROR] " \ 65 | + k8s.Output.RESET + 'Interrupted from keyboard!') 66 | try: 67 | sys.exit(0) 68 | except SystemExit: 69 | os._exit(0) -------------------------------------------------------------------------------- /objects/test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import warnings 3 | from modules.logging import Logger 4 | 5 | 6 | logger = Logger.get_logger('', '') 7 | class TestK8sClusterChecker(unittest.TestCase): 8 | def test_cluster(self): 9 | import cluster as cluster 10 | warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*") 11 | self.assertRaises(TypeError, cluster.call_all(True, True, logger), True) 12 | with self.assertRaises(Exception) as x: 13 | print("Exception ignored: {}".format(x.exception)) 14 | def test_nodes(self): 15 | import nodes as nodes 16 | warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*") 17 | with self.assertRaises(Exception) as x: 18 | print("Exception ignored: {}".format(x.exception)) 19 | def test_namespace(self): 20 | import namespace as namespace 21 | warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*") 22 | self.assertRaises(TypeError, namespace.call_all(True, 'all', True, logger), True) 23 | with self.assertRaises(Exception) as x: 24 | print("Exception ignored: {}".format(x.exception)) 25 | def test_pods(self): 26 | import pods as pods 27 | warnings.filterwarnings("ignore", category=ResourceWarning, message="unclosed.*") 28 | self.assertRaises(TypeError, pods.call_all(True, 'all', True, logger), True) 29 | with self.assertRaises(Exception) as x: 30 | print("Exception ignored: {}".format(x.exception)) -------------------------------------------------------------------------------- 
/requirements.txt:
--------------------------------------------------------------------------------
1 | pandas
2 | kubernetes
3 | argparse  # NOTE(review): argparse is in the standard library since Python 3.2; the PyPI package is unnecessary on the python:3.8 base image
4 | columnar
5 | click
6 | requests
7 | futures  # NOTE(review): Python 2 backport of concurrent.futures; not needed on Python 3 and its install can fail there -- confirm and drop
8 | packaging
9 | xlsxwriter
--------------------------------------------------------------------------------