├── .github ├── dependabot.yml ├── mlc_config.json └── workflows │ ├── codeql-analysis.yml │ ├── docs.yml │ ├── python.yml │ └── shellcheck.yml ├── .gitignore ├── AWS ├── README.md ├── aws_cspm_benchmark.py ├── requirements.txt └── setup.cfg ├── Azure ├── README.md ├── azure_cspm_benchmark.py ├── requirements.txt └── setup.cfg ├── CODE_OF_CONDUCT.md ├── DEVELOPMENT.md ├── GCP ├── README.md ├── gcp_cspm_benchmark.py ├── requirements.txt └── setup.cfg ├── LICENSE ├── README.md ├── benchmark.sh └── setup.cfg /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: pip 5 | directory: "/AWS" 6 | schedule: 7 | interval: weekly 8 | open-pull-requests-limit: 10 9 | - package-ecosystem: pip 10 | directory: "/Azure" 11 | schedule: 12 | interval: weekly 13 | open-pull-requests-limit: 10 14 | - package-ecosystem: github-actions 15 | directory: "/" 16 | schedule: 17 | interval: monthly 18 | open-pull-requests-limit: 10 19 | -------------------------------------------------------------------------------- /.github/mlc_config.json: -------------------------------------------------------------------------------- 1 | { 2 | } 3 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [ main ] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [ main ] 20 | schedule: 21 | - cron: '30 7 * * 5' 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: [ 'python' ] 36 | 37 | steps: 38 | - name: Checkout repository 39 | uses: actions/checkout@v4 40 | 41 | # Initializes the CodeQL tools for scanning. 42 | - name: Initialize CodeQL 43 | uses: github/codeql-action/init@v3 44 | with: 45 | languages: ${{ matrix.language }} 46 | # If you wish to specify custom queries, you can do so here or in a config file. 47 | # By default, queries listed here will override any specified in a config file. 48 | # Prefix the list here with "+" to use these queries and those in the config file. 49 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 50 | 51 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 52 | # If this step fails, then you should remove it and run the build manually (see below) 53 | - name: Autobuild 54 | uses: github/codeql-action/autobuild@v3 55 | 56 | # ℹ️ Command-line programs to run using the OS shell. 
57 | # 📚 https://git.io/JvXDl 58 | 59 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 60 | # and modify them (or add more) to build your code if your project 61 | # uses a compiled language 62 | 63 | #- run: | 64 | # make bootstrap 65 | # make release 66 | 67 | - name: Perform CodeQL Analysis 68 | uses: github/codeql-action/analyze@v3 69 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Docs 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | paths: 9 | - '**.md' 10 | jobs: 11 | markdown-link-check: 12 | name: Broken Links 13 | runs-on: ubuntu-latest 14 | steps: 15 | - name: Checkout 16 | uses: actions/checkout@v4 17 | with: 18 | submodules: recursive 19 | - name: Run link check 20 | uses: gaurav-nelson/github-action-markdown-link-check@v1 21 | with: 22 | use-quiet-mode: 'no' 23 | use-verbose-mode: 'yes' 24 | check-modified-files-only: 'yes' 25 | config-file: '.github/mlc_config.json' 26 | base-branch: main 27 | -------------------------------------------------------------------------------- /.github/workflows/python.yml: -------------------------------------------------------------------------------- 1 | name: Python Lint 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | aws: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ['3.x'] 15 | 16 | steps: 17 | - uses: actions/checkout@v4 18 | - name: Set up Python ${{ matrix.python-version }} 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | - name: Install dependencies 23 | run: | 24 | cd AWS 25 | python -m pip install -r requirements.txt 26 | pip install flake8 pylint bandit 27 | - name: Lint with flake8 28 | run: | 29 | cd AWS 30 | flake8 aws_cspm_benchmark.py 31 | - name: Lint with pylint 32 | run: | 33 | cd AWS 34 | pylint aws_cspm_benchmark.py 35 | - name: Lint with bandit 36 | run: | 37 | cd AWS 38 | bandit -l -i -r . 39 | 40 | azure: 41 | runs-on: ubuntu-latest 42 | strategy: 43 | matrix: 44 | python-version: ['3.x'] 45 | steps: 46 | - uses: actions/checkout@v4 47 | - name: Set up Python ${{ matrix.python-version }} 48 | uses: actions/setup-python@v5 49 | with: 50 | python-version: ${{ matrix.python-version }} 51 | - name: Install dependencies 52 | run: | 53 | cd Azure 54 | python -m pip install -r requirements.txt 55 | pip install flake8 pylint bandit 56 | - name: Lint with flake8 57 | run: | 58 | cd Azure 59 | flake8 azure_cspm_benchmark.py 60 | - name: Lint with pylint 61 | run: | 62 | cd Azure 63 | pylint azure_cspm_benchmark.py 64 | - name: Lint with bandit 65 | run: | 66 | cd Azure 67 | bandit -l -i -r . 
68 | 69 | gcp: 70 | runs-on: ubuntu-latest 71 | strategy: 72 | matrix: 73 | python-version: ['3.x'] 74 | steps: 75 | - uses: actions/checkout@v4 76 | - name: Set up Python ${{ matrix.python-version }} 77 | uses: actions/setup-python@v5 78 | with: 79 | python-version: ${{ matrix.python-version }} 80 | - name: Install dependencies 81 | run: | 82 | cd GCP 83 | python -m pip install -r requirements.txt 84 | pip install flake8 pylint bandit 85 | - name: Lint with flake8 86 | run: | 87 | cd GCP 88 | flake8 gcp_cspm_benchmark.py 89 | - name: Lint with pylint 90 | run: | 91 | cd GCP 92 | pylint gcp_cspm_benchmark.py 93 | - name: Lint with bandit 94 | run: | 95 | cd GCP 96 | bandit -l -i -r . 97 | -------------------------------------------------------------------------------- /.github/workflows/shellcheck.yml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: [ main ] 4 | pull_request: 5 | branches: [ main ] 6 | 7 | jobs: 8 | bash: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - name: Run ShellCheck 13 | uses: ludeeus/action-shellcheck@master 14 | with: 15 | format: tty 16 | scandir: './' 17 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in the cloud-benchmark directory 2 | cloud-benchmark/* 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | pip-wheel-metadata/ 27 | share/python-wheels/ 28 | *.egg-info/ 29 | .installed.cfg 30 | *.egg 31 | MANIFEST 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .nox/ 47 | .coverage 48 | .coverage.* 49 | .cache 50 | nosetests.xml 51 | coverage.xml 52 | *.cover 53 | *.py,cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | db.sqlite3-journal 66 | 67 | # Flask stuff: 68 | instance/ 69 | .webassets-cache 70 | 71 | # Scrapy stuff: 72 | .scrapy 73 | 74 | # Sphinx documentation 75 | docs/_build/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow
98 | __pypackages__/
99 | 
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 | 
104 | # SageMath parsed files
105 | *.sage.py
106 | 
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 | 
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 | 
120 | # Rope project settings
121 | .ropeproject
122 | 
123 | # mkdocs documentation
124 | /site
125 | 
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 | 
131 | # Pyre type checker
132 | .pyre/
--------------------------------------------------------------------------------
/AWS/README.md:
--------------------------------------------------------------------------------
1 | # Cloud-Benchmark - AWS
2 | 
3 | This script is a read-only utility that counts cloud resources in your AWS account. If you run this in your organization account, it will discover resources in all accounts in your organization.
4 | 
5 | No changes will be made to your account. No data is sent anywhere; all findings remain in your cloud shell environment.
6 | 
7 | ## How it works
8 | This script can run against an individual AWS account or all child accounts in an AWS Organization. When running the script in CloudShell, it will establish the session using the AWS Identity currently signed in. When running the script in your local environment, it will establish the session based on your AWS CLI configuration. Please see [Local Environment Instructions](../README.md) for more details. If your AWS Identity is in the AWS Organization Management account, the script will use the default role `OrganizationAccountAccessRole` (or custom role if provided) to switch into each child account. If your AWS Identity is not in an AWS Organization Management account, the script will only process resources in this single account. Upon completion, a CSV report is generated with the findings.
9 | 
10 | ## Reported Resources
11 | Reported Resources will include a count of each of the following resource types per AWS Region:
12 | 
13 | | Resource | Description |
14 | | :--- | :--- |
15 | | Terminated VMs | Terminated EC2 Instances |
16 | | Running VMs | Running EC2 Instances |
17 | | Terminated Kubernetes Nodes | Terminated EKS Nodes |
18 | | Running Kubernetes Nodes | Running EKS Nodes |
19 | | Active EKS Fargate Profiles | Active EKS Fargate Profiles for each EKS Cluster. Excludes any existing Falcon Profiles, e.g. fp-falcon* |
20 | | ECS Service Fargate Tasks | DesiredCount of tasks in Active ECS Services.
Excludes standalone tasks or tasks that are scheduled outside of Services | 21 | 22 | ## How to use 23 | 24 | ### Initialize execution environment 25 | 26 | Open AWS Cloud Shell ([overview](https://aws.amazon.com/cloudshell/), [documentation](https://docs.aws.amazon.com/cloudshell/latest/userguide/welcome.html)) using one of the direct links: 27 | 28 | | Region | Link | 29 | | :--- | :--- | 30 | | us-east-1 | **[Virginia, United States](https://us-east-1.console.aws.amazon.com/cloudshell/home?region=us-east-1)** | 31 | | us-east-2 | **[Ohio, United States](https://us-east-2.console.aws.amazon.com/cloudshell/home?region=us-east-2)** | 32 | | us-west-2 | **[Oregon, United States](https://us-west-2.console.aws.amazon.com/cloudshell/home?region=us-west-2)** | 33 | | eu-west-1 | **[Ireland](https://eu-west-1.console.aws.amazon.com/cloudshell/home?region=eu-west-1)** | 34 | | ap-northeast-1 | **[Tokyo, Japan](https://ap-northeast-1.console.aws.amazon.com/cloudshell/home?region=ap-northeast-1)** | 35 | 36 | ### Example 37 | 38 | ```shell 39 | curl https://raw.githubusercontent.com/CrowdStrike/cloud-resource-estimator/main/benchmark.sh | bash 40 | ``` 41 | 42 | ### Collect the findings 43 | 44 | ```shell 45 | cat ./cloud-benchmark/*benchmark.csv 46 | ``` 47 | 48 | ### Provide Custom IAM Role Name 49 | 50 | ```shell 51 | export AWS_ASSUME_ROLE_NAME="custom-role-name" 52 | ``` 53 | -------------------------------------------------------------------------------- /AWS/aws_cspm_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | aws-cspm-benchmark.py 3 | 4 | Assists with provisioning calculations by retrieving a count of 5 | all billable resources attached to an AWS account. 6 | """ 7 | import argparse 8 | import csv 9 | import boto3 10 | import botocore 11 | from tabulate import tabulate 12 | 13 | 14 | data = [] 15 | headers = { 16 | 'account_id': 'AWS Account ID', 17 | "region": "Region", 18 | "vms_terminated": "Terminated VMs", 19 | "vms_running": "Running VMs", 20 | 'kubenodes_terminated': "Terminated Kubernetes Nodes", 21 | 'kubenodes_running': "Running Kubernetes Nodes", 22 | 'fargate_profiles': "Active EKS Fargate Profiles", 23 | 'fargate_tasks': "ECS Service Fargate Tasks" 24 | } 25 | totals = { 26 | "region": "TOTAL", 27 | 'account_id': 'TOTAL', 28 | "vms_terminated": 0, 29 | "vms_running": 0, 30 | 'kubenodes_terminated': 0, 31 | 'kubenodes_running': 0, 32 | 'fargate_profiles': 0, 33 | 'fargate_tasks': 0 34 | } 35 | 36 | 37 | def parse_args(): 38 | parser = argparse.ArgumentParser( 39 | description="Analyze AWS accounts and regions for EC2 instances and Kubernetes nodes.") 40 | parser.add_argument( 41 | "-r", "--role_name", 42 | default="OrganizationAccountAccessRole", 43 | help="Specify a custom role name to assume into.") 44 | parser.add_argument( 45 | "-R", "--regions", 46 | help="Specify which AWS regions to analyze.") 47 | return parser.parse_args() 48 | 49 | 50 | class AWSOrgAccess: 51 | def __init__(self): 52 | self.master_session = boto3.session.Session() 53 | self.master_sts = self.master_session.client('sts') 54 | self.master_account_id = self.master_sts.get_caller_identity()["Account"] 55 | 56 | def accounts(self): 57 | try: 58 | client = boto3.client('organizations') 59 | response = client.list_accounts() 60 | accounts = response['Accounts'] 61 | next_token = response.get('NextToken', None) 62 | 63 | while next_token: 64 | response = client.list_accounts(NextToken=next_token) 65 | accounts += response['Accounts'] 66 | 
next_token = response.get('NextToken', None)
67 | 
68 |             # We only want accounts that are in ACTIVE state
69 |             # Permissible values are: 'ACTIVE'|'SUSPENDED'|'PENDING_CLOSURE'
70 |             active_accounts = [a for a in accounts if a['Status'] == 'ACTIVE']
71 | 
72 |             return [handle for a in active_accounts if (handle := self.aws_handle(a))]  # one assume-role per account
73 |         except client.exceptions.AccessDeniedException:
74 |             print("Cannot autodiscover adjacent accounts: cannot list accounts within the AWS organization")
75 |             return [AWSHandle()]
76 |         except client.exceptions.AWSOrganizationsNotInUseException:
77 |             print("This account is not a member of an AWS Organization")
78 |             return [AWSHandle()]
79 | 
80 |     def aws_handle(self, account):
81 |         if account['Id'] == self.master_account_id:
82 |             return AWSHandle(aws_session=self.master_session, account_id=self.master_account_id)
83 | 
84 |         # Check if new_session returns a session object
85 |         session = self.new_session(account['Id'])
86 |         if session:
87 |             return AWSHandle(aws_session=session, account_id=account['Id'])
88 | 
89 |         return None
90 | 
91 |     def new_session(self, account_id):
92 |         try:
93 |             credentials = self.master_sts.assume_role(
94 |                 RoleArn=f'arn:aws:iam::{account_id}:role/{args.role_name}',
95 |                 RoleSessionName=account_id
96 |             )
97 |             return boto3.session.Session(
98 |                 aws_access_key_id=credentials['Credentials']['AccessKeyId'],
99 |                 aws_secret_access_key=credentials['Credentials']['SecretAccessKey'],
100 |                 aws_session_token=credentials['Credentials']['SessionToken'],
101 |                 region_name='us-east-1'
102 |             )
103 |         except self.master_sts.exceptions.ClientError as exc:
104 |             # Print the error and continue.
105 |             # Handle what to do with accounts that cannot be accessed
106 |             # due to assuming role errors.
107 |             print("Cannot access adjacent account: ", account_id, exc)
108 |             return None
109 | 
110 | 
111 | class AWSHandle:
112 |     EKS_TAGS = ['eks:cluster-name', 'alpha.eksctl.io/nodegroup-type', 'aws:eks:cluster-name', 'eks:nodegroup-name']
113 | 
114 |     def __init__(self, aws_session=None, account_id=None):
115 |         self.aws_session = aws_session if aws_session else boto3.session.Session()
116 |         self.acc_id = account_id
117 | 
118 |     @property
119 |     def regions(self):
120 |         active_regions = []
121 |         response = self.ec2.describe_regions()
122 |         active_regions = [region['RegionName'] for region in response['Regions']]
123 |         return active_regions
124 | 
125 |     def ec2_instances(self, aws_region):
126 |         client = self.aws_session.client('ec2', aws_region)
127 | 
128 |         response = client.describe_instances(MaxResults=1000)
129 |         instances = response['Reservations']
130 |         next_token = response['NextToken'] if 'NextToken' in response else None
131 | 
132 |         while next_token:
133 |             response = client.describe_instances(MaxResults=1000, NextToken=next_token)
134 |             instances += response['Reservations']
135 |             next_token = response['NextToken'] if 'NextToken' in response else None
136 | 
137 |         return instances
138 | 
139 |     @property
140 |     def ec2(self):
141 |         return self.aws_session.client("ec2")
142 | 
143 |     @classmethod
144 |     def is_vm_kubenode(cls, vm):
145 |         return any(True for tag in vm.get('Tags', []) if tag['Key'] in cls.EKS_TAGS)
146 | 
147 |     @classmethod
148 |     def is_vm_running(cls, vm):
149 |         return vm['State']['Name'] != 'stopped'
150 | 
151 |     @property
152 |     def account_id(self):
153 |         if self.acc_id is None:
154 |             sts = self.aws_session.client('sts')
155 |             self.acc_id = sts.get_caller_identity()["Account"]
156 | 
157 |         return self.acc_id
158 | 
159 |     def fargate_profiles(self, aws_region):
160 |         active_profiles = 0
161 | 
162 |         client = self.aws_session.client('eks', aws_region)  # note: EKS/ECS paginate with lowercase 'nextToken' keys
163 | 
164 |         response = client.list_clusters(maxResults=100)
165 |         clusters = response['clusters']
166 |         next_token = response['nextToken'] if 'nextToken' in response else None
167 | 
168 |         while next_token:
169 |             response = client.list_clusters(maxResults=100, nextToken=next_token)
170 |             clusters += response['clusters']
171 |             next_token = response['nextToken'] if 'nextToken' in response else None
172 | 
173 |         for c in clusters:
174 |             response = client.list_fargate_profiles(clusterName=c, maxResults=100)
175 |             fargate_profiles = response['fargateProfileNames']
176 |             next_token = response['nextToken'] if 'nextToken' in response else None
177 | 
178 |             while next_token:
179 |                 response = client.list_fargate_profiles(clusterName=c, maxResults=100, nextToken=next_token)
180 |                 fargate_profiles += response['fargateProfileNames']
181 |                 next_token = response['nextToken'] if 'nextToken' in response else None
182 | 
183 |             # Count ACTIVE Fargate profiles in each EKS cluster, excluding Falcon's own fp-falcon* profiles
184 |             for p in fargate_profiles:
185 |                 if 'fp-falcon' not in p:
186 |                     response = client.describe_fargate_profile(clusterName=c, fargateProfileName=p)
187 |                     if 'ACTIVE' in response['fargateProfile']['status']:
188 |                         active_profiles += 1
189 | 
190 |         return active_profiles
191 | 
192 |     def fargate_tasks(self, aws_region):
193 |         active_tasks = 0
194 | 
195 |         client = self.aws_session.client('ecs', aws_region)
196 | 
197 |         response = client.list_clusters(maxResults=100)
198 |         cluster_arns = response['clusterArns']
199 |         next_token = response['nextToken'] if 'nextToken' in response else None
200 | 
201 |         while next_token:
202 |             response = client.list_clusters(maxResults=100, nextToken=next_token)
203 |             cluster_arns += response['clusterArns']
204 |             next_token = response['nextToken'] if 'nextToken' in response else None
205 | 
206 |         for c in cluster_arns:
207 |             response = client.list_services(cluster=c, maxResults=100, launchType='FARGATE')
208 |             service_arns = response['serviceArns']
209 |             next_token = response['nextToken'] if 'nextToken' in response else None
210 | 
211 |             while next_token:
212 |                 response = client.list_services(cluster=c, maxResults=100, launchType='FARGATE', nextToken=next_token)
213 |                 service_arns += response['serviceArns']
214 |                 next_token = response['nextToken'] if 'nextToken' in response else None
215 | 
216 |             # Sum 'desiredCount' across each ACTIVE ECS service definition
217 |             for a in service_arns:
218 |                 response = client.describe_services(cluster=c, services=[a])
219 |                 for s in response['services']:
220 |                     if 'ACTIVE' in s['status']:
221 |                         active_tasks += s['desiredCount']
222 | 
223 |         return active_tasks
224 | 
225 | 
226 | args = parse_args()
227 | 
228 | for aws in AWSOrgAccess().accounts():  # noqa: C901
229 |     if args.regions:
230 |         regions = [x.strip() for x in args.regions.split(',')]
231 |     else:
232 |         regions = aws.regions
233 |     for RegionName in regions:
234 | 
235 |         # Setup the branch
236 |         print(f"Processing {RegionName}")
237 |         # Create the row for our output table
238 |         row = {'account_id': aws.account_id, 'region': RegionName,
239 |                'vms_terminated': 0, 'vms_running': 0,
240 |                'kubenodes_terminated': 0, 'kubenodes_running': 0,
241 |                'fargate_profiles': 0, 'fargate_tasks': 0}
242 | 
243 |         # Count ec2 instances
244 |         try:
245 |             for reservation in aws.ec2_instances(RegionName):
246 |                 for instance in reservation['Instances']:
247 |                     typ = 'kubenode' if AWSHandle.is_vm_kubenode(instance) else 'vm'
248 |                     state = 'running' if AWSHandle.is_vm_running(instance) else 'terminated'
249 |                     key = f"{typ}s_{state}"
250 |                     row[key] += 1
251 |         except botocore.exceptions.ClientError as e:
252 |             print(e)
253 |         try:
254 |             # Count Fargate Profiles
255 |             profile_count = aws.fargate_profiles(RegionName)
256 |             key = "fargate_profiles"
257 |             row[key] += profile_count
258 |         except botocore.exceptions.ClientError as e:
259 |             print(e)
260 |         try:
261 |             # Count Fargate Tasks
262 |             task_count = aws.fargate_tasks(RegionName)
263 |             key = "fargate_tasks"
264 |             row[key] += task_count
265 |         except botocore.exceptions.ClientError as e:
266 |             print(e)
267 | 
268 |         for k in ['vms_terminated', 'vms_running', 'kubenodes_terminated',
269 |                   'kubenodes_running', 'fargate_profiles', 'fargate_tasks']:
270 |             totals[k] += row[k]
271 | 
272 |         # Add the row to our display table
273 |         data.append(row)
274 | # Add in our grand totals to the display table
275 | data.append(totals)
276 | 
277 | # Output our results
278 | print(tabulate(data, headers=headers, tablefmt="grid"))
279 | 
280 | with open('aws-benchmark.csv', 'w', newline='', encoding='utf-8') as csv_file:
281 |     csv_writer = csv.DictWriter(csv_file, fieldnames=headers.keys())
282 |     csv_writer.writeheader()
283 |     csv_writer.writerows(data)
284 | 
285 | print("\nCSV file stored in: ./aws-benchmark.csv\n\n")
286 | 
287 | 
288 | #     .wwwwwwww.
289 | #   .w"  "WW"  "w.
290 | #  ."   /\  /\   ".
291 | #  |\    o  o    /|
292 | #  \|  ___\/___  |/
293 | #  / \ \_v__v_/ / \
294 | # / | \________/ | \
295 | # >  \   WWWW   /  <
296 | #  \   \  ""  /   /
297 | #   \   \    /   /
298 | #  The Count says...
299 | #
300 | #  That's ONE server, TWO servers ... AH AH AH!
301 | 
--------------------------------------------------------------------------------
/AWS/requirements.txt:
--------------------------------------------------------------------------------
1 | tabulate
2 | boto3
3 | botocore
--------------------------------------------------------------------------------
/AWS/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | max-line-length = 120
3 | max-complexity = 10
4 | 
5 | [pylint.MASTER]
6 | disable=C0301,C0116,C0115,C0103
--------------------------------------------------------------------------------
/Azure/README.md:
--------------------------------------------------------------------------------
1 | # Cloud-Benchmark - Azure
2 | 
3 | This script is a read-only utility that counts cloud resources in your Azure account.
4 | No changes will be made to your account. No data is sent anywhere; all findings remain in your cloud shell environment.
5 | 
6 | ## How to use
7 | 
8 | ### Initialize execution environment
9 | 
10 | - Log in with Azure, using an account that has read access to all your Azure tenants/subscriptions
11 | - Navigate to [Azure Cloud Shell](https://shell.azure.com) and choose the Bash option
12 | 
13 | ### Run the script
14 | 
15 | ```shell
16 | curl https://raw.githubusercontent.com/CrowdStrike/cloud-resource-estimator/main/benchmark.sh | bash
17 | ```
18 | 
19 | ### Collect the findings
20 | 
21 | ```shell
22 | cat ./cloud-benchmark/*benchmark.csv
23 | ```
--------------------------------------------------------------------------------
/Azure/azure_cspm_benchmark.py:
--------------------------------------------------------------------------------
1 | """
2 | azure-cspm-benchmark.py
3 | 
4 | Assists with provisioning calculations by retrieving a count
5 | of all billable resources attached to an Azure subscription.
6 | """ 7 | 8 | import csv 9 | import logging 10 | 11 | from functools import cached_property, lru_cache 12 | from azure.identity import AzureCliCredential 13 | from azure.mgmt.resource import ResourceManagementClient, SubscriptionClient 14 | from azure.mgmt.containerservice import ContainerServiceClient 15 | from azure.mgmt.compute import ComputeManagementClient 16 | from azure.mgmt.containerinstance import ContainerInstanceManagementClient 17 | import msrestazure.tools 18 | from tabulate import tabulate 19 | 20 | headers = { 21 | 'tenant_id': 'Azure Tenant ID', 22 | 'subscription_id': 'Azure Subscription ID', 23 | 'aks_nodes': 'Kubernetes Nodes', 24 | 'vms': 'Virtual Machines', 25 | 'aci_containers': 'Container Instances' 26 | } 27 | 28 | 29 | class AzureHandle: 30 | def __init__(self): 31 | # Acquire a credential object using CLI-based authentication. 32 | self.creds = AzureCliCredential() 33 | 34 | @cached_property 35 | def subscriptions(self): 36 | return list(self.subscription_client.subscriptions.list()) 37 | 38 | @property 39 | def tenants(self): 40 | return list(self.subscription_client.tenants.list()) 41 | 42 | def aci_resources(self, subscription_id): 43 | client = self.resource_client(subscription_id) 44 | return client.resources.list(filter="resourceType eq 'microsoft.containerinstance/containergroups'") 45 | 46 | def aks_resources(self, subscription_id): 47 | client = self.resource_client(subscription_id) 48 | return client.resources.list(filter="resourceType eq 'microsoft.containerservice/managedclusters'") 49 | 50 | def vmss_resources(self, subscription_id): 51 | client = self.resource_client(subscription_id) 52 | return client.resources.list(filter="resourceType eq 'Microsoft.Compute/virtualMachineScaleSets'") 53 | 54 | def vms_resources(self, subscription_id): 55 | client = self.resource_client(subscription_id) 56 | return client.resources.list(filter="resourceType eq 'Microsoft.Compute/virtualMachines'") 57 | 58 | def managed_clusters(self, subscription_id): 59 | return self.container_client(subscription_id).managed_clusters.list() 60 | 61 | def rhos_clusters(self, subscription_id): 62 | return self.container_client(subscription_id).open_shift_managed_clusters.list() 63 | 64 | def container_vmss(self, aks_resource): 65 | parsed_id = msrestazure.tools.parse_resource_id(aks_resource.id) 66 | client = self.container_client(parsed_id['subscription']) 67 | return client.agent_pools.list(resource_group_name=parsed_id['resource_group'], 68 | resource_name=parsed_id['resource_name']) 69 | 70 | def container_aci(self, aci_resource): 71 | parsed_id = msrestazure.tools.parse_resource_id(aci_resource.id) 72 | client = self.container_instance_client(parsed_id['subscription']) 73 | return client.container_groups.get(resource_group_name=parsed_id['resource_group'], 74 | container_group_name=parsed_id['resource_name']).containers 75 | 76 | def vms_inside_vmss(self, vmss_resource): 77 | parsed_id = msrestazure.tools.parse_resource_id(vmss_resource.id) 78 | client = ComputeManagementClient(self.creds, parsed_id['subscription']) 79 | return client.virtual_machine_scale_set_vms.list(resource_group_name=parsed_id['resource_group'], 80 | virtual_machine_scale_set_name=vmss_resource.name) 81 | 82 | @lru_cache 83 | def container_client(self, subscription_id): 84 | return ContainerServiceClient(self.creds, subscription_id) 85 | 86 | @lru_cache 87 | def container_instance_client(self, subscription_id): 88 | return ContainerInstanceManagementClient(self.creds, subscription_id) 89 | 90 | @lru_cache 
91 |     def resource_client(self, subscription_id):
92 |         return ResourceManagementClient(self.creds, subscription_id)
93 | 
94 |     @cached_property
95 |     def subscription_client(self):
96 |         return SubscriptionClient(self.creds)
97 | 
98 | 
99 | # Set to logging.INFO for less verbose output
100 | LOG_LEVEL = logging.DEBUG
101 | log = logging.getLogger('azure')
102 | log.setLevel(LOG_LEVEL)
103 | ch = logging.StreamHandler()
104 | ch.setLevel(LOG_LEVEL)
105 | formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%Y-%m-%d %H:%M:%S')
106 | ch.setFormatter(formatter)
107 | log.addHandler(ch)
108 | 
109 | for mod in ['azure.identity._internal.decorators', 'azure.core.pipeline.policies.http_logging_policy']:
110 |     logging.getLogger(mod).setLevel(logging.WARNING)
111 | 
112 | 
113 | data = []
114 | totals = {'tenant_id': 'totals', 'subscription_id': 'totals', 'aks_nodes': 0, 'vms': 0, 'aci_containers': 0}
115 | az = AzureHandle()
116 | 
117 | log.info("You have access to %d subscription(s) within %d tenant(s)", len(az.subscriptions), len(az.tenants))
118 | for subscription in az.subscriptions:
119 |     row = {'tenant_id': subscription.tenant_id, 'subscription_id': subscription.subscription_id,
120 |            'aks_nodes': 0, 'vms': 0, 'aci_containers': 0}
121 |     log.info("Processing Azure subscription: %s (id=%s)", subscription.display_name, subscription.subscription_id)
122 | 
123 |     vmss_list = list(az.vmss_resources(subscription.subscription_id))  # fetched once; reused in step (2)
124 | 
125 |     # (1) Process AKS
126 |     for aks in az.aks_resources(subscription.subscription_id):
127 |         for node_pool in az.container_vmss(aks):
128 |             log.info("Identified node pool: '%s' within AKS: '%s' with %d node(s)",
129 |                      node_pool.name, aks.name, node_pool.count)
130 |             row['aks_nodes'] += node_pool.count
131 | 
132 |     # (2) Process VMSS
133 |     for vmss in vmss_list:
134 |         if vmss.tags is not None and 'aks-managed-createOperationID' in vmss.tags:
135 |             # AKS resources already accounted for above
136 |             continue
137 | 
138 |         vm_count = sum(1 for vm in az.vms_inside_vmss(vmss))
139 |         log.info("Identified %d vm resource(s) inside Scale Set: '%s'", vm_count, vmss.name)
140 |         row['vms'] += vm_count
141 | 
142 |     # (3) Process ACI
143 |     for aci in az.aci_resources(subscription.subscription_id):
144 |         container_count = sum(1 for container in az.container_aci(aci))
145 |         log.info("Identified %d container resource(s) inside Container Group: '%s'", container_count, aci.name)
146 |         row['aci_containers'] += container_count
147 | 
148 |     # (4) Process VMs
149 |     vm_count = sum(1 for vm in az.vms_resources(subscription.subscription_id))
150 |     log.info('Identified %d vm resource(s) outside of Scale Sets', vm_count)
151 |     row['vms'] += vm_count
152 |     data.append(row)
153 | 
154 |     totals['vms'] += row['vms']
155 |     totals['aks_nodes'] += row['aks_nodes']
156 |     totals['aci_containers'] += row['aci_containers']
157 | 
158 | data.append(totals)
159 | 
160 | # Output our results
161 | print(tabulate(data, headers=headers, tablefmt="grid"))
162 | 
163 | with open('az-benchmark.csv', 'w', newline='', encoding='utf-8') as csv_file:
164 |     csv_writer = csv.DictWriter(csv_file, fieldnames=headers.keys())
165 |     csv_writer.writeheader()
166 |     csv_writer.writerows(data)
167 | 
168 | log.info("CSV summary has been exported to ./az-benchmark.csv file")
--------------------------------------------------------------------------------
/Azure/requirements.txt:
--------------------------------------------------------------------------------
1 | #
2 | # This file is autogenerated by
pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile requirements.in 6 | # 7 | azure-identity 8 | azure-mgmt-resource 9 | azure-mgmt-containerservice 10 | azure-mgmt-compute 11 | azure-mgmt-containerinstance 12 | msrestazure 13 | pyjwt>=2.4.0 # not directly required, pinned by Snyk to avoid a vulnerability 14 | tabulate -------------------------------------------------------------------------------- /Azure/setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | max-complexity = 10 4 | 5 | [pylint.MASTER] 6 | disable=C0301,C0116,C0115 -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Cloud Benchmark Community Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 
58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at 63 | CrowdStrike. 64 | All complaints will be reviewed and investigated promptly and fairly. 65 | 66 | All community leaders are obligated to respect the privacy and security of the 67 | reporter of any incident. 68 | 69 | ## Enforcement Guidelines 70 | 71 | Community leaders will follow these Community Impact Guidelines in determining 72 | the consequences for any action they deem in violation of this Code of Conduct: 73 | 74 | ### 1. Correction 75 | 76 | **Community Impact**: Use of inappropriate language or other behavior deemed 77 | unprofessional or unwelcome in the community. 78 | 79 | **Consequence**: A private, written warning from community leaders, providing 80 | clarity around the nature of the violation and an explanation of why the 81 | behavior was inappropriate. A public apology may be requested. 82 | 83 | ### 2. Warning 84 | 85 | **Community Impact**: A violation through a single incident or series 86 | of actions. 87 | 88 | **Consequence**: A warning with consequences for continued behavior. No 89 | interaction with the people involved, including unsolicited interaction with 90 | those enforcing the Code of Conduct, for a specified period of time. This 91 | includes avoiding interactions in community spaces as well as external channels 92 | like social media. Violating these terms may lead to a temporary or 93 | permanent ban. 94 | 95 | ### 3. Temporary Ban 96 | 97 | **Community Impact**: A serious violation of community standards, including 98 | sustained inappropriate behavior. 99 | 100 | **Consequence**: A temporary ban from any sort of interaction or public 101 | communication with the community for a specified period of time. No public or 102 | private interaction with the people involved, including unsolicited interaction 103 | with those enforcing the Code of Conduct, is allowed during this period. 104 | Violating these terms may lead to a permanent ban. 105 | 106 | ### 4. Permanent Ban 107 | 108 | **Community Impact**: Demonstrating a pattern of violation of community 109 | standards, including sustained inappropriate behavior, harassment of an 110 | individual, or aggression toward or disparagement of classes of individuals. 111 | 112 | **Consequence**: A permanent ban from any sort of public interaction within 113 | the community. 114 | 115 | ## Attribution 116 | 117 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 118 | version 2.0, available at 119 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 120 | 121 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 122 | enforcement ladder](https://github.com/mozilla/diversity). 123 | 124 | [homepage]: https://www.contributor-covenant.org 125 | 126 | For answers to common questions about this code of conduct, see the FAQ at 127 | https://www.contributor-covenant.org/faq. Translations are available at 128 | https://www.contributor-covenant.org/translations. -------------------------------------------------------------------------------- /DEVELOPMENT.md: -------------------------------------------------------------------------------- 1 | # Developer Guide 2 | 3 | This guide is intended to provide an overview of the CrowdStrike CWP / Horizon Benchmark Utilities project and explain how to contribute to the development of the benchmark scripts for AWS, Azure, and GCP. 
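For quick orientation, each provider's script can also be run directly once its dependencies are installed. The following is a minimal example for AWS, assuming your AWS CLI credentials are already configured; the `--regions` flag is optional, and the other providers follow the same pattern:

```shell
python3 -m venv ./cloud-benchmark-dev
source ./cloud-benchmark-dev/bin/activate
pip3 install -r AWS/requirements.txt
python3 AWS/aws_cspm_benchmark.py --regions us-east-1
```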
4 | 
5 | ## Project Overview
6 | 
7 | The project aims to provide a set of scripts for auditing cloud resources across AWS, Azure, and GCP. The main `benchmark.sh` script handles argument parsing, checking for Python3 and pip installations, and running the appropriate benchmarking script for each supported cloud provider. The benchmarking scripts themselves are written in Python, and the main script downloads the necessary files and installs Python dependencies before running them.
8 | 
9 | ## Directory Structure
10 | 
11 | The project is structured as follows:
12 | 
13 | ```terminal
14 | .
15 | ├── AWS
16 | │   ├── README.md
17 | │   ├── requirements.txt
18 | │   └── aws_cspm_benchmark.py
19 | ├── Azure
20 | │   ├── README.md
21 | │   ├── requirements.txt
22 | │   └── azure_cspm_benchmark.py
23 | ├── GCP
24 | │   ├── README.md
25 | │   ├── requirements.txt
26 | │   └── gcp_cspm_benchmark.py
27 | └── benchmark.sh
28 | ```
29 | 
30 | Each cloud provider has its own directory, containing a README file, a requirements.txt file for Python dependencies, and the corresponding benchmark script.
31 | 
32 | ## Contributing to the Benchmark Scripts
33 | 
34 | To contribute to the development of the benchmark scripts, follow these steps:
35 | 
36 | 1. **Fork the repository**: Create a fork of the main repository on your GitHub account.
37 | 
38 | 2. **Clone your fork**: Clone your fork of the repository to your local machine.
39 | 
40 | 3. **Set up a virtual environment**: It's a good practice to set up a virtual environment for your development work. You can do this by running:
41 | 
42 |    ```shell
43 |    python3 -m venv ./cloud-benchmark-dev
44 |    source ./cloud-benchmark-dev/bin/activate
45 |    ```
46 | 
47 | 4. **Install Python dependencies**: Install the necessary Python dependencies for the cloud provider you're working on:
48 | 
49 |    ```shell
50 |    pip3 install -r path/to/provider/requirements.txt
51 |    ```
52 | 
53 | 5. **Modify the benchmark script**: Make changes to the appropriate benchmark script (e.g., `aws_cspm_benchmark.py`, `azure_cspm_benchmark.py`, or `gcp_cspm_benchmark.py`) according to your contribution.
54 | 
55 | 6. **Test your changes**: Run the modified benchmark script to test your changes and ensure they work as expected.
56 | 
57 | 7. **Commit and push your changes**: Commit your changes to your fork and push them to your remote GitHub repository.
58 | 
59 | 8. **Create a pull request**: Open a pull request to merge your changes into the main repository.
60 | 
61 | ## Coding Guidelines
62 | 
63 | When contributing to the benchmark scripts, keep these coding guidelines in mind:
64 | 
65 | - Follow the [PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) for Python code.
66 | - Use meaningful variable and function names.
67 | - Include docstrings for functions and classes to explain their purpose and usage.
68 | - Keep functions small and focused on a single task.
69 | 
70 | By following these guidelines and the contribution steps outlined above, you can help improve this project and make it more useful for everyone.
--------------------------------------------------------------------------------
/GCP/README.md:
--------------------------------------------------------------------------------
1 | # Cloud-Benchmark - GCP
2 | 
3 | This script is a read-only utility that counts cloud resources in your GCP account. It will autodiscover all GCP projects.
4 | 
5 | No changes will be made to your account. No data is sent anywhere; all findings remain in your cloud shell environment.
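Under the hood, project autodiscovery uses the Cloud Resource Manager API. The following is a minimal sketch of what the script does internally, assuming Application Default Credentials are available (as they are in Cloud Shell):

```python
from google.cloud.resourcemanager import ProjectsClient

# search_projects() yields every project the caller can view; the
# benchmark script iterates these and counts resources in each one.
for project in ProjectsClient().search_projects():
    print(project.project_id, project.state)
```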
6 | 7 | ## How to use 8 | 9 | ### Initialize execution environment 10 | 11 | [![Open GCP Cloud Shell](https://gstatic.com/cloudssh/images/open-btn.svg)](https://shell.cloud.google.com/cloudshell/editor?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2FCrowdStrike%2Fcloud-resource-estimator) 12 | 13 | ### Run the script 14 | 15 | ```shell 16 | ./benchmark.sh 17 | ``` 18 | 19 | ### Collect the findings 20 | 21 | ```shell 22 | cat ./cloud-benchmark/*benchmark.csv 23 | ``` 24 | -------------------------------------------------------------------------------- /GCP/gcp_cspm_benchmark.py: -------------------------------------------------------------------------------- 1 | """ 2 | gcp-cspm-benchmark.py 3 | 4 | Assists with provisioning calculations by retrieving a count 5 | of all billable resources attached to a GCP project. 6 | """ 7 | 8 | import csv 9 | import logging 10 | import os 11 | from functools import cached_property 12 | from typing import List, Dict, Any 13 | from tabulate import tabulate 14 | import google.api_core.exceptions 15 | from google.cloud.resourcemanager import ProjectsClient 16 | from google.cloud.resourcemanager_v3.types import Project 17 | from google.cloud import compute 18 | from googleapiclient import discovery 19 | from googleapiclient.errors import HttpError 20 | 21 | # Suppress gRPC and absl logs 22 | os.environ["GRPC_VERBOSITY"] = "ERROR" 23 | os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" 24 | 25 | # Configuration for logging 26 | LOG_LEVEL = logging.DEBUG 27 | log = logging.getLogger("gcp") 28 | log.setLevel(LOG_LEVEL) 29 | ch = logging.StreamHandler() 30 | ch.setLevel(LOG_LEVEL) 31 | formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s", "%Y-%m-%d %H:%M:%S") 32 | ch.setFormatter(formatter) 33 | log.addHandler(ch) 34 | 35 | 36 | class GCP: 37 | def projects(self) -> List[Project]: 38 | return ProjectsClient().search_projects() 39 | 40 | def list_instances(self, project_id: str): 41 | request = compute.AggregatedListInstancesRequest(max_results=50, project=project_id) 42 | return self.instances_client.aggregated_list(request=request) 43 | 44 | def clusters(self, project_id: str) -> List[Dict[str, Any]]: 45 | service = discovery.build("container", "v1") 46 | endpoint = service.projects().zones().clusters() # pylint: disable=no-member 47 | request = endpoint.list(projectId=project_id, zone="-") 48 | response = request.execute() 49 | return response.get("clusters", []) 50 | 51 | def list_cloud_run_services(self, project_id: str) -> List[Dict[str, Any]]: 52 | service = discovery.build("run", "v1") 53 | parent = f"projects/{project_id}/locations/-" 54 | request = service.projects().locations().services().list(parent=parent) # pylint: disable=no-member 55 | response = request.execute() 56 | return response.get("items", []) 57 | 58 | def list_cloud_run_jobs(self, project_id: str) -> List[Dict[str, Any]]: 59 | service = discovery.build("run", "v1") 60 | parent = f"namespaces/{project_id}" 61 | request = service.namespaces().jobs().list(parent=parent) # pylint: disable=no-member 62 | response = request.execute() 63 | return response.get("items", []) 64 | 65 | @cached_property 66 | def instances_client(self) -> compute.InstancesClient: 67 | return compute.InstancesClient() 68 | 69 | @classmethod 70 | def is_vm_kubenode(cls, instance: compute.Instance) -> bool: 71 | if any(k.key == "kubeconfig" for k in instance.metadata.items): 72 | return True 73 | 74 | if instance.labels: 75 | gke_indicators = ["goog-gke-node", "gke-cluster", "k8s-", "kubernetes"] 76 | for key, _ 
in instance.labels.items(): 77 | if any(indicator in key.lower() for indicator in gke_indicators): 78 | return True 79 | 80 | if instance.name and "gke-" in instance.name: 81 | return True 82 | 83 | return False 84 | 85 | @classmethod 86 | def is_vm_running(cls, instance: compute.Instance) -> bool: 87 | return instance.status != "TERMINATED" 88 | 89 | @classmethod 90 | def is_cluster_autopilot(cls, cluster: Dict[str, Any]) -> bool: 91 | return cluster.get("autopilot", {}).get("enabled", False) 92 | 93 | @classmethod 94 | def get_autopilot_active_nodes(cls, cluster: Dict[str, Any]) -> int: 95 | return cluster.get("currentNodeCount", 0) 96 | 97 | 98 | def process_gcp_project(gcp_project: Project) -> Dict[str, Any]: 99 | if gcp_project.state == Project.State.DELETE_REQUESTED: 100 | log.info("Skipping GCP project %s (project pending deletion)", gcp_project.display_name) 101 | return {} 102 | 103 | result = { 104 | "project_id": gcp_project.project_id, 105 | "kubenodes_running": 0, 106 | "kubenodes_terminated": 0, 107 | "vms_running": 0, 108 | "vms_terminated": 0, 109 | "autopilot_clusters": 0, 110 | "autopilot_nodes": 0, 111 | "cloud_run_services": 0, 112 | "cloud_run_jobs": 0, 113 | } 114 | 115 | log.info("Processing GCP project: %s", gcp_project.display_name) 116 | 117 | fail_safe(count_instances, gcp_project, result) 118 | fail_safe(count_autopilot_clusters, gcp_project, result) 119 | fail_safe(count_cloud_run_services, gcp_project, result) 120 | fail_safe(count_cloud_run_jobs, gcp_project, result) 121 | fail_safe(validate_and_adjust_kube_counts, gcp_project, result) 122 | 123 | return result 124 | 125 | 126 | def fail_safe(count_func, *args) -> None: 127 | try: 128 | count_func(*args) 129 | except google.api_core.exceptions.Forbidden as exc: 130 | if "Compute Engine API has not been used" in str(exc): 131 | log_warning("compute.googleapis.com", project.display_name) 132 | add_message(project.project_id, exc.errors[0]["message"]) 133 | else: 134 | log.error("Unexpected error for project: %s: %s", project.display_name, exc) 135 | except HttpError as exc: 136 | if exc.status_code == 403 and "SERVICE_DISABLED" in str(exc): 137 | log_warning(get_service_disabled_name(exc), project.display_name) 138 | add_message(project.project_id, exc.reason) 139 | else: 140 | log.error("Unexpected error for project: %s: %s", project.display_name, exc) 141 | except Exception as exc: # pylint: disable=broad-except 142 | log.error("Unexpected error for project: %s: %s", project.display_name, exc) 143 | 144 | 145 | def log_warning(api: str, project_name: str) -> None: 146 | api_names = { 147 | "compute.googleapis.com": "Compute Engine", 148 | "container.googleapis.com": "Kubernetes Engine", 149 | "run.googleapis.com": "Cloud Run", 150 | } 151 | message = f"Unable to process {api_names[api]} API for project: {project_name}." 
152 | log.warning(message) 153 | 154 | 155 | def add_message(project_id: str, message: str) -> None: 156 | if project_id not in service_disabled_calls: 157 | service_disabled_calls[project_id] = [] 158 | service_disabled_calls[project_id].append(message) 159 | 160 | 161 | def get_service_disabled_name(exc: HttpError) -> str: 162 | for detail in exc.error_details: 163 | if detail.get("@type") == "type.googleapis.com/google.rpc.ErrorInfo": 164 | return detail["metadata"]["service"] 165 | return None 166 | 167 | 168 | def validate_and_adjust_kube_counts(gcp_project: Project, result: Dict[str, Any]) -> None: 169 | """Compare instance-detected kube nodes with GKE API reported nodes and adjust if needed.""" 170 | try: 171 | if gcp_project.project_id in service_disabled_calls: 172 | api_errors = service_disabled_calls[gcp_project.project_id] 173 | if any("container" in err.lower() for err in api_errors): 174 | message = ( 175 | f"Skipping validation for project {gcp_project.project_id} due to container API access issues" 176 | ) 177 | log.debug(message) 178 | return 179 | 180 | standard_node_count = 0 181 | for cluster in gcp.clusters(gcp_project.project_id): 182 | if not GCP.is_cluster_autopilot(cluster): 183 | standard_node_count += cluster.get("currentNodeCount", 0) 184 | 185 | detected_nodes = result["kubenodes_running"] 186 | 187 | if standard_node_count > detected_nodes: 188 | 189 | discrepancy = standard_node_count - detected_nodes 190 | message = ( 191 | f"Project {gcp_project.project_id}: GKE API reports {standard_node_count} nodes, " 192 | f"but only {detected_nodes} were detected via instance metadata. " 193 | f"Adjusting count to {standard_node_count} (added {discrepancy} nodes)" 194 | ) 195 | log.warning(message) 196 | 197 | result["kubenodes_running"] = standard_node_count 198 | 199 | except Exception as e: # pylint: disable=broad-except 200 | message = f"Error validating node counts for project {gcp_project.project_id}: {str(e)}" 201 | log.error(message) 202 | 203 | 204 | def count_autopilot_clusters(gcp_project: Project, result: Dict[str, int]): 205 | for cluster in gcp.clusters(gcp_project.project_id): 206 | if GCP.is_cluster_autopilot(cluster): 207 | result["autopilot_clusters"] += 1 208 | result["autopilot_nodes"] += GCP.get_autopilot_active_nodes(cluster) 209 | 210 | 211 | def count_instances(gcp_project: Project, result: Dict[str, int]): 212 | for _zone, response in gcp.list_instances(gcp_project.project_id): 213 | if response.instances: 214 | for instance in response.instances: 215 | typ = "kubenode" if GCP.is_vm_kubenode(instance) else "vm" 216 | state = "running" if GCP.is_vm_running(instance) else "terminated" 217 | key = f"{typ}s_{state}" 218 | result[key] += 1 219 | 220 | 221 | def count_cloud_run_services(gcp_project: Project, result: Dict[str, int]): 222 | services = gcp.list_cloud_run_services(gcp_project.project_id) 223 | result["cloud_run_services"] = len(services) 224 | 225 | 226 | def count_cloud_run_jobs(gcp_project: Project, result: Dict[str, int]): 227 | jobs = gcp.list_cloud_run_jobs(gcp_project.project_id) 228 | result["cloud_run_jobs"] = len(jobs) 229 | 230 | 231 | data = [] 232 | service_disabled_calls = {} 233 | headers = { 234 | "project_id": "Project ID", 235 | "kubenodes_running": "K8s Nodes (Running)", 236 | "kubenodes_terminated": "K8s Nodes (Terminated)", 237 | "vms_running": "VMs (Running)", 238 | "vms_terminated": "VMs (Terminated)", 239 | "autopilot_clusters": "Autopilot Clusters", 240 | "autopilot_nodes": "Autopilot Nodes (Running)", 241 | 
"cloud_run_services": "Cloud Run Services", 242 | "cloud_run_jobs": "Cloud Run Jobs", 243 | } 244 | totals = { 245 | "project_id": "totals", 246 | "kubenodes_running": 0, 247 | "kubenodes_terminated": 0, 248 | "vms_running": 0, 249 | "vms_terminated": 0, 250 | "autopilot_clusters": 0, 251 | "autopilot_nodes": 0, 252 | "cloud_run_services": 0, 253 | "cloud_run_jobs": 0, 254 | } 255 | 256 | gcp = GCP() 257 | 258 | projects = gcp.projects() 259 | if not projects: 260 | log.error("No GCP projects found") 261 | exit(1) # pylint: disable=consider-using-sys-exit 262 | 263 | for project in gcp.projects(): 264 | row = process_gcp_project(project) 265 | if row: 266 | data.append(row) 267 | for k in totals: 268 | if k != "project_id": 269 | totals[k] += row[k] 270 | 271 | data.append(totals) 272 | 273 | # Output our results 274 | print(tabulate(data, headers=headers, tablefmt="grid", maxheadercolwidths=[10, 15, 15, 10, 15, 15, 15, 15, 12])) 275 | 276 | with open("gcp-benchmark.csv", "w", newline="", encoding="utf-8") as csv_file: 277 | csv_writer = csv.DictWriter(csv_file, fieldnames=headers.keys()) 278 | csv_writer.writeheader() 279 | csv_writer.writerows(data) 280 | 281 | log.info("CSV file saved to: ./gcp-benchmark.csv") 282 | 283 | if service_disabled_calls: 284 | MSG = ( 285 | "Some API service calls were disabled in certain projects, preventing data processing. " 286 | "These APIs might be intentionally disabled in your environment. " 287 | "Details have been captured and saved to: ./gcp-exceptions.txt for your review." 288 | ) 289 | log.warning(MSG) 290 | 291 | with open("gcp-exceptions.txt", "w", encoding="utf-8") as f: 292 | for project, messages in service_disabled_calls.items(): 293 | f.write(f"Project ID: {project}\n") 294 | for msg in set(messages): 295 | f.write(f"- {msg}\n") 296 | f.write("\n") 297 | -------------------------------------------------------------------------------- /GCP/requirements.txt: -------------------------------------------------------------------------------- 1 | google-cloud-compute 2 | google-cloud-run 3 | google-cloud-resource-manager 4 | google-api-python-client 5 | oauth2client 6 | tabulate 7 | -------------------------------------------------------------------------------- /GCP/setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 120 3 | max-complexity = 10 4 | 5 | [pylint.MASTER] 6 | disable=C0301,C0116,C0115,C0114 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This is free and unencumbered software released into the public domain. 2 | 3 | Anyone is free to copy, modify, publish, use, compile, sell, or 4 | distribute this software, either in source code form or as a compiled 5 | binary, for any purpose, commercial or non-commercial, and by any 6 | means. 7 | 8 | In jurisdictions that recognize copyright laws, the author or authors 9 | of this software dedicate any and all copyright interest in the 10 | software to the public domain. We make this dedication for the benefit 11 | of the public at large and to the detriment of our heirs and 12 | successors. We intend this dedication to be an overt act of 13 | relinquishment in perpetuity of all present and future rights to this 14 | software under copyright law. 
/LICENSE:
--------------------------------------------------------------------------------
1 | This is free and unencumbered software released into the public domain.
2 | 
3 | Anyone is free to copy, modify, publish, use, compile, sell, or
4 | distribute this software, either in source code form or as a compiled
5 | binary, for any purpose, commercial or non-commercial, and by any
6 | means.
7 | 
8 | In jurisdictions that recognize copyright laws, the author or authors
9 | of this software dedicate any and all copyright interest in the
10 | software to the public domain. We make this dedication for the benefit
11 | of the public at large and to the detriment of our heirs and
12 | successors. We intend this dedication to be an overt act of
13 | relinquishment in perpetuity of all present and future rights to this
14 | software under copyright law.
15 | 
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 | IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 | OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 | ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 | OTHER DEALINGS IN THE SOFTWARE.
23 | 
24 | For more information, please refer to <https://unlicense.org>
25 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![CrowdStrike Falcon](https://raw.githubusercontent.com/CrowdStrike/falconpy/main/docs/asset/cs-logo.png)
2 | 
3 | 
4 | # CrowdStrike CWP / Horizon Benchmark Utilities
5 | 
6 | These utilities have been developed to assist you in calculating the overall size of a cloud deployment.
7 | 
8 | ## Running an audit
9 | 
10 | The `benchmark.sh` entrypoint script helps you perform sizing calculations for your cloud resources. It detects the cloud provider (AWS, Azure, or GCP) and downloads the necessary scripts to perform the calculation. You can also pass one or more cloud providers as arguments.
11 | 
12 | ***Configuration:***
13 | 
14 | The script recognizes the following environment variables:
15 | 
16 | - `AWS_ASSUME_ROLE_NAME`: The name of the AWS role to assume (optional)
17 | - `AWS_REGIONS`: The name of the AWS Region or a comma-delimited list of AWS Regions to target (optional)
18 | 
19 | To use, please export the variable in your environment prior to running the script:
20 | 
21 | ```shell
22 | export AWS_ASSUME_ROLE_NAME="Example-Role-Name"
23 | export AWS_REGIONS="us-east-1,us-east-2"
24 | ```
25 | 
26 | ***Usage:***
27 | 
28 | ```shell
29 | ./benchmark.sh [aws|azure|gcp]...
30 | ```
31 | 
32 | Below are two different ways to execute the script.
33 | 
34 | ### In Cloud Shell
35 | 
36 | To execute the script in your environment using Cloud Shell, follow the appropriate guide based on your cloud provider:
37 | 
38 | - [AWS](AWS/README.md)
39 | - [Azure](Azure/README.md)
40 | - [GCP](GCP/README.md)
41 | 
42 | ### In your Local Environment
43 | 
44 | For those who prefer to run the script locally, or would like to run the script against more than one cloud provider at a time, follow the instructions below:
45 | 
46 | #### Requirements
47 | 
48 | - Python 3
49 | - pip
50 | - curl
51 | - Appropriate cloud provider CLI ([AWS](https://aws.amazon.com/cli/), [Azure](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli), [GCP](https://cloud.google.com/sdk/docs/install))
52 | 
53 | #### Steps
54 | 
55 | 1. Download the script:
56 | 
57 |    ```shell
58 |    curl -O https://raw.githubusercontent.com/CrowdStrike/cloud-resource-estimator/main/benchmark.sh
59 |    ```
60 | 
61 | 1. Set execution permissions:
62 | 
63 |    ```shell
64 |    chmod +x benchmark.sh
65 |    ```
66 | 
67 | 1. Example: Run the script against AWS and Azure:
68 | 
69 |    ```shell
70 |    ./benchmark.sh aws azure
71 |    ```
72 | 
73 | ---
74 | 
75 | **Alternatively, you can run the script directly from the URL:**
76 | 
77 | - Run the script against AWS and Azure:
78 | 
79 |   ```shell
80 |   curl https://raw.githubusercontent.com/CrowdStrike/cloud-resource-estimator/main/benchmark.sh | bash -s -- aws azure
81 |   ```
82 | 
83 | - Run the script and let it determine the available cloud providers:
84 | 
85 |   ```shell
86 |   curl https://raw.githubusercontent.com/CrowdStrike/cloud-resource-estimator/main/benchmark.sh | bash
87 |   ```
88 | 
89 | ## Development
90 | 
91 | Please review our [Developer Guide](DEVELOPMENT.md) for more information on how to contribute to this project.
92 | 
93 | ## License
94 | 
95 | These scripts are provided to the community, for free, under the Unlicense. As such, these scripts
96 | carry no formal support, express or implied.
97 | 
98 | ## Questions?
99 | 
100 | Please review our [Code of Conduct](CODE_OF_CONDUCT.md) and then submit an issue or pull request.
101 | We will address the issue as quickly as possible.
102 | 
--------------------------------------------------------------------------------
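When you audit more than one provider in a single run, each provider's script writes its own `*benchmark.csv` into the `cloud-benchmark` working directory created by `benchmark.sh` (shown below). A rough sketch for tallying the combined `totals` rows afterwards; it assumes every provider's CSV ends with a row labeled `totals` in its first column, as the GCP script above does:

```python
# Sketch: sum the "totals" rows across all per-provider benchmark CSVs.
# Column layouts differ per provider, so this simply sums every numeric
# cell in each totals row into one grand total.
import csv
import glob

grand_total = 0
for path in glob.glob("cloud-benchmark/*benchmark.csv"):
    with open(path, newline="", encoding="utf-8") as fh:
        for row in csv.reader(fh):
            if row and row[0] == "totals":
                grand_total += sum(int(v) for v in row[1:] if v)
print(f"Combined resource count: {grand_total}")
```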
/benchmark.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Universal cloud provider provisioning calculator
3 | # Based on the cloud provider, downloads the necessary scripts
4 | # to perform a sizing calculation.
5 | 
6 | base_url=https://raw.githubusercontent.com/CrowdStrike/Cloud-Benchmark/main
7 | 
8 | # Usage message
9 | usage() {
10 |     echo """
11 | Usage: $0 [aws|azure|gcp]...
12 | 
13 | More than one cloud provider can be specified.
14 | If no cloud provider is specified, the script will attempt to detect the provider.
15 | ----------------------------------------------------------------------------------
16 | 
17 | The script recognizes the following environment variables:
18 | 
19 | - AWS_ASSUME_ROLE_NAME: The name of the AWS role to assume (optional)
20 | - AWS_REGIONS: The name of the AWS Region to target or a comma-delimited list of AWS Regions to target (optional)"""
21 | }
22 | 
23 | # Check if the system has Python3 and pip installed
24 | check_python3() {
25 |     if ! type python3 >/dev/null 2>&1; then
26 |         echo "Python3 not found. Please install Python3 and try again."
27 |         exit 1
28 |     fi
29 |     if ! type pip3 >/dev/null 2>&1; then
30 |         echo "Pip not found. Please install pip and try again."
31 |         exit 1
32 |     fi
33 | }
34 | 
35 | # Ensures the provided cloud provider arg is valid
36 | is_valid_cloud() {
37 |     local cloud="$1"
38 |     local lower_cloud
39 |     lower_cloud=$(echo "$cloud" | tr '[:upper:]' '[:lower:]')
40 | 
41 |     case "$lower_cloud" in
42 |         aws)
43 |             echo "AWS"
44 |             return 0
45 |             ;;
46 |         azure)
47 |             echo "Azure"
48 |             return 0
49 |             ;;
50 |         gcp)
51 |             echo "GCP"
52 |             return 0
53 |             ;;
54 |         *)
55 |             return 1
56 |             ;;
57 |     esac
58 | }
59 | 
60 | # Calls the python script for the specified cloud provider with the
61 | # appropriate arguments
62 | call_benchmark_script() {
63 |     local cloud="$1"
64 |     local file="$2"
65 |     local args=()
66 | 
67 |     case "$cloud" in
68 |         AWS)
69 |             [[ -n $AWS_ASSUME_ROLE_NAME ]] && args+=("-r" "$AWS_ASSUME_ROLE_NAME")
70 |             [[ -n $AWS_REGIONS ]] && args+=("-R" "$AWS_REGIONS")
71 |             # Below is how we would pass in additional arguments if needed
72 |             # [[ -n $AWS_EXAMPLE ]] && args+=("-t" "$AWS_EXAMPLE")
73 |             ;;
74 |         Azure)
75 |             ;;
76 |         GCP)
77 |             ;;
78 |         *)
79 |             echo "Invalid cloud provider specified: $cloud"
80 |             usage
81 |             exit 1
82 |             ;;
83 |     esac
84 | 
85 |     python3 "${file}" "${args[@]}"
86 | }
87 | 
88 | audit() {
89 |     CLOUD="$1"
90 |     echo "Working in cloud: ${CLOUD}"
91 |     cloud=$(echo "$CLOUD" | tr '[:upper:]' '[:lower:]')
92 | 
93 |     curl -s -o requirements.txt "${base_url}/${CLOUD}/requirements.txt"
94 |     echo "Installing python dependencies for communicating with ${CLOUD} into (~/cloud-benchmark)"
95 | 
96 |     python3 -m pip install --disable-pip-version-check -qq -r requirements.txt
97 |     file="${cloud}_cspm_benchmark.py"
98 |     curl -s -o "${file}" "${base_url}/${CLOUD}/${file}"
99 | 
100 |     call_benchmark_script "$CLOUD" "${file}"
101 | }
102 | 
103 | check_python3
104 | 
105 | python3 -m venv ./cloud-benchmark
106 | pushd ./cloud-benchmark >/dev/null || exit
107 | # shellcheck source=/dev/null
108 | source ./bin/activate
109 | 
110 | # MAIN ROUTINE
111 | found_provider=false
112 | 
113 | # If arguments are provided, audit the specified providers
114 | for arg in "$@"; do
115 |     result=$(is_valid_cloud "$arg")
116 |     # shellcheck disable=SC2181
117 |     if [ $? -eq 0 ]; then
118 |         audit "$result"
119 |         found_provider=true
120 |     else
121 |         echo "Invalid cloud provider specified: $arg"
122 |         # Exit only if found_provider is false. This means that if the user
123 |         # specifies a valid cloud provider, but also an invalid one, we will
124 |         # still run the audit for the valid provider.
125 |         if [ "$found_provider" = false ]; then
126 |             usage
127 |             popd >/dev/null && exit 1
128 |         fi
129 |     fi
130 | done
131 | 
132 | # If no arguments provided, auto-detect the available cloud providers
133 | if [ $# -eq 0 ]; then
134 |     echo "Determining cloud provider..."
135 |     if type aws >/dev/null 2>&1; then
136 |         audit "AWS"
137 |         found_provider=true
138 |     fi
139 |     if type az >/dev/null 2>&1; then
140 |         audit "Azure"
141 |         found_provider=true
142 |     fi
143 | 
144 |     if type gcloud >/dev/null 2>&1; then
145 |         audit "GCP"
146 |         found_provider=true
147 |     fi
148 | fi
149 | 
150 | if [ "$found_provider" = false ]; then
151 |     echo "No supported cloud provider found."
152 |     usage
153 |     popd >/dev/null && exit 1
154 | fi
155 | 
156 | popd >/dev/null || exit
157 | deactivate
158 | 
159 | echo "Type the following command to view your cloud counts:"
160 | echo "cat ./cloud-benchmark/*benchmark.csv"
161 | 
162 | # END
163 | #
164 | #            -''--.
165 | #          _`>   `\.-'<
166 | #       _.'     _     '._
167 | #     .'   _.='   '=._   '.
168 | #     >_   /  /_\ /_\  \   _<    - jgs
169 | #       / (  \o/\\o/  ) \
170 | #       >._\ .-,_)-. /_.<
171 | #           /__/ \__\
172 | #             '---'     E=mc^2
173 | #
174 | #
175 | 
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | # General flake8, pylint settings
2 | [flake8]
3 | max-line-length = 120
4 | max-complexity = 10
5 | 
6 | [pylint.MASTER]
7 | disable=C0301,C0116,C0115,C0114
8 | 
--------------------------------------------------------------------------------
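`call_benchmark_script` above translates environment variables into CLI flags (`-r` for the role name, `-R` for regions) before invoking the downloaded Python script. The AWS script itself lives in `AWS/aws_cspm_benchmark.py` and is not shown in this section; the following is a minimal argparse sketch that would accept the flags benchmark.sh passes. The destination names and help text are illustrative assumptions, not the script's actual interface:

```python
# Sketch of an argument parser compatible with the flags benchmark.sh passes
# for AWS. See AWS/aws_cspm_benchmark.py for the real CLI definition.
import argparse

parser = argparse.ArgumentParser(description="AWS sizing benchmark (sketch)")
parser.add_argument("-r", "--role-name", dest="role_name", default=None,
                    help="Name of the AWS role to assume (optional)")
parser.add_argument("-R", "--regions", dest="regions", default=None,
                    help="Comma-delimited list of AWS Regions to target (optional)")
args = parser.parse_args()

# Mirror the AWS_REGIONS convention: a comma-delimited list becomes a Python list.
regions = args.regions.split(",") if args.regions else []
print(f"role={args.role_name!r} regions={regions}")
```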