├── .gitignore
├── README.md
├── outpost.py
└── requirements.txt

/.gitignore:
--------------------------------------------------------------------------------
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Virtualenv
bin/
pyvenv.cfg

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
#
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Outpost
## AWS Testing and Reporting Management

Outpost is a simple tool that generates AWS configuration profiles for AssumeRole, tests that the configured accounts are accessible, and generates report findings from ScoutSuite scan results.

- Run ScoutSuite
- Parse the results
- ✨Generate Report Findings✨

## Installation

Outpost requires Python 3 and boto3 to run.

Install the dependencies.

```sh
virtualenv -p python3 .
source bin/activate
pip install -r requirements.txt
python outpost.py --help
```

## Usage

```sh
usage: outpost.py [-h] --command COMMAND [--config-creds] [--assume ASSUME] [--accounts ACCOUNTS] [--primary PRIMARY] [--token TOKEN]
                  [--secret-key SECRET_KEY] [--access-key ACCESS_KEY] [--directory DIRECTORY] [--risk RISK] [--project PROJECT]

optional arguments:
  -h, --help            show this help message and exit
  --command COMMAND     Commands: generate, report, testaccounts
  --config-creds        Configure creds while using the generate command
  --assume ASSUME       Optional assume role ARN, use ACCOUNT_ID for placeholder. (e.g. arn:aws:iam::ACCOUNT_ID:role/ROLENAME)
  --accounts ACCOUNTS   File containing account numbers (one account per line)
  --primary PRIMARY     Primary account used to assume roles
  --token TOKEN         Primary AWS session token
  --secret-key SECRET_KEY
                        Primary AWS secret access key
  --access-key ACCESS_KEY
                        Primary AWS access key
  --directory DIRECTORY
                        Parent directory of ScoutSuite report(s)
  --risk RISK           Select finding risk to report: danger, warning
  --project PROJECT     Project name for report details
```
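As a rough end-to-end sketch (the account IDs, role name, and profile names below are placeholders, and ScoutSuite's exact flags may vary between versions), a typical engagement flow looks like this:

```sh
# accounts.txt holds one AWS account ID per line
printf '111111111111\n222222222222\n' > accounts.txt

# 1. Generate ~/.aws/config profile entries (one per account) to copy into place
python outpost.py --command generate \
    --accounts accounts.txt \
    --primary my-primary-profile \
    --assume arn:aws:iam::ACCOUNT_ID:role/ROLENAME

# 2. Verify that each generated profile can actually assume its role
python outpost.py --command testaccounts --accounts accounts.txt

# 3. Run ScoutSuite once per profile (profile names match the account IDs),
#    writing each report into its own folder
scout aws --profile 111111111111 --report-dir ~/Engagements/CustomerName/reports/account1/
```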
## Example Usage
Outpost recursively searches all report folders for `scoutsuite_results*.js` files to parse. This means that if a root reports folder contains several separate ScoutSuite reports, you should pass that root folder as the `--directory` value.
An example structure could be:

```sh
~/Engagements/CustomerName/reports/account1/
~/Engagements/CustomerName/reports/account2/
~/Engagements/CustomerName/reports/account3/
```

You would run the following command to parse all three account reports:

```sh
python outpost.py --command report --directory ~/Engagements/CustomerName/reports/ --project Customer_2022-123
```

## Results
The tool creates a folder called `data_archive` within the root folder of the ScoutSuite results. Inside it, a file named `<project>.txt` (taken from the `--project` value) is generated with the findings text, which can be copied directly into the report. Additionally, a supplemental file for each finding is created within the folder, listing the accounts and the specific resources that are affected.
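For illustration (the finding file names below are made up), the resulting layout looks roughly like this; the project file contains the `FSH:`/`SEVM:` prefixed sections written by the tool, and each per-finding file holds one `account_id,resource_id` line per flagged resource:

```sh
~/Engagements/CustomerName/reports/
└── data_archive/
    ├── Customer_2022-123.txt                      # report findings text (FSH:/SEVM: sections)
    ├── ec2_security_group_opens_all_ports.txt     # account_id,resource_id per flagged resource
    └── s3_bucket_no_default_encryption.txt
```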
--------------------------------------------------------------------------------
/outpost.py:
--------------------------------------------------------------------------------
import argparse
import json
import os
import re
import boto3
from pathlib import Path

parser = argparse.ArgumentParser()
parser.add_argument('--command', type=str, required=True, help='Commands: generate, report, testaccounts')
parser.add_argument('--config-creds', action='store_true', help='Configure creds while using the generate command')

# Generator
parser.add_argument('--assume', type=str, help='Optional assume role ARN, use ACCOUNT_ID for placeholder. (e.g. arn:aws:iam::ACCOUNT_ID:role/ROLENAME)')
parser.add_argument('--accounts', type=str, help='File containing account numbers (one account per line)')
parser.add_argument('--primary', default=None, type=str, help='Primary account used to assume roles')
parser.add_argument('--token', default=None, type=str, help='Primary AWS session token')
parser.add_argument('--secret-key', default=None, type=str, help='Primary AWS secret access key')
parser.add_argument('--access-key', default=None, type=str, help='Primary AWS access key')

# Reporter
parser.add_argument('--directory', type=str, help='Parent directory of ScoutSuite report(s)')
parser.add_argument('--risk', type=str, default='danger', help='Select finding risk to report: danger, warning')
parser.add_argument('--project', type=str, default='aws-report', help='Project name for report details')


def main(args):
    if args.command == 'report':
        if args.directory is None:
            parser.error('Error with --command report: requires --directory')

        findings = find_results_files(args.directory)
        #pretty_print_findings(findings)
        show_findings_by_risk(findings, args.risk, args.directory, args.project)
        save_finding_details(args.directory, findings, args.risk)

    elif args.command == 'testaccounts':
        if args.accounts is None:
            parser.error('Error with --command testaccounts: requires --accounts')

        accounts = load_accounts(args.accounts)
        test_accounts(accounts)

    elif args.command == 'generate':
        configure_creds_ready = False

        if args.assume is None or args.accounts is None or args.primary is None:
            parser.error('Error with --command generate: requires --assume, --accounts, --primary')

        if args.config_creds:
            if args.primary is None or args.secret_key is None or args.access_key is None:
                parser.error('Error with --config-creds: requires --primary, --access-key, and --secret-key')
            configure_creds_ready = True

        accounts = load_accounts(args.accounts)
        config = generate_config(args.assume, accounts, args.primary)

        print(r'Copy the following configuration entries into ~/.aws/config or %USERPROFILE%\.aws\config')
        print('----------------------------------------------------------------------------------------')
        for config_entry in config:
            entry = json.loads(config_entry)
            profile_name = entry['profile_name']
            role_arn = entry['role_arn']
            source_profile = entry['source_profile']

            # Adjacent string literals are concatenated so each profile entry prints
            # without stray leading spaces on the continuation lines.
            print(
                f'[profile {profile_name}]\n'
                'output = json\n'
                'cli_pager = \n'
                f'role_arn = {role_arn}\n'
                f'source_profile = {source_profile}\n'
            )

        if configure_creds_ready:
            configure_creds(args.primary, args.access_key, args.secret_key, args.token)


def load_accounts(account_file):
    with open(account_file, 'r') as fh:
        return [x.strip() for x in fh.readlines()]


def generate_config(arn, accounts, primary):
    aws_config = []
    for account in accounts:
        # Replace the ACCOUNT_ID placeholder in the role ARN with the actual account number
        new_arn = re.sub('account_id', account, arn, flags=re.IGNORECASE)

        config_entry = json.dumps(
            {
                'profile_name': account,
                'role_arn': new_arn,
                'source_profile': primary
            }
        )
        aws_config.append(config_entry)
    return aws_config


def test_accounts(accounts):
    print(f'Testing accounts: {len(accounts)}')
    print('----------------------------------------------------------------------------------------')
    for account in accounts:
        try:
            session = boto3.Session(profile_name=account)
            sts = session.client('sts')
            identity = sts.get_caller_identity()

            arn = identity['Arn']
            print(f'Account: {account} -> Status: VALID ({arn})')
        except Exception:
            print(f'Account: {account} -> Status: UNAUTHORIZED OR INVALID\n')


def configure_creds(primary, access_key, secret_key, session_token=None):
    token = ''

    if session_token is not None:
        token = f'aws_session_token={session_token}'

    print(r'Copy the following credentials into ~/.aws/credentials or %USERPROFILE%\.aws\credentials')
    print('----------------------------------------------------------------------------------------')

    print(
        f'[{primary}]\n'
        'output=json\n'
        'cli_pager=\n'
        f'aws_access_key_id={access_key}\n'
        f'aws_secret_access_key={secret_key}\n'
        f'{token}\n'
    )


def friendly_finding_name(description):
    return re.sub(r"[^a-zA-Z0-9 ]", "", description)


def file_finding_name(description):
    description = re.sub(r"[^a-zA-Z0-9 ]", "", description)
    return description.replace(' ', '_').lower()

def show_findings_by_risk(findings, risk, directory, project):
    append_report_details(directory, project, 'FSH:Amazon Web Services (AWS) Findings\n')

    for finding in findings:
        details = findings[finding]

        if details['level'].lower() == risk.lower():
            accounts = details['accounts']
            total_accounts = len(details['accounts'])
            description = details['description']
            rationale = details['rationale']
            service = details['service']
            compliance = details['compliance']
            remediation = details['remediation']

            if not remediation:
                remediation = '{{ TODO Fill in recommendations }}'

            clean_finding = friendly_finding_name(description)
            file_finding = file_finding_name(description)
            finding_title = clean_finding
            observation = f'Observation:\nWe observed that {total_accounts} active AWS account(s) had {service} resources with security issues pertaining to {description}.\n\n'
            observation += f'Details pertaining to the affected resources have been recorded in the data archive file named: {file_finding}.txt\n\n'
            for acct in accounts:
                observation += f'{acct}\n'

            discussion = f'Discussion:\n {rationale}\n{{ TODO Explain more information }}\n'
            recommendation = f'Recommendations:\nIn efforts to remediate these issues, we suggest:\n- {remediation}'

            finding_details = f'{finding_title}\n\n{observation}\n\n{discussion}\n\n{recommendation}\n\n'
            append_report_details(directory, project, f'SEVM:{finding_details}\n')

            print(finding_details)
            print('----------------------------------------------------------------------------')


def pretty_print_findings(findings):
    print(json.dumps(findings, sort_keys=True, indent=4))


def pretty_findings(findings):
    return json.dumps(findings, sort_keys=True, indent=4)


def check_finding(findings, key):
    return key in findings


def enum_finding(findings, include_value=True, recursive=True):
    stack = list(findings.items())
    visited = set()
    while stack:
        k, v = stack.pop()
        if isinstance(v, dict) and recursive:
            if k not in visited:
                stack.extend(v.items())
        else:
            if include_value:
                print(f'{k}: {v}')
            else:
                print(k)
        visited.add(k)


def append_report_details(directory, project, details):
    save_folder = f'{directory}/data_archive'
    report_path = f'{save_folder}/{project}.txt'
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    with open(report_path, 'a+') as rf:
        rf.write(details)


def save_finding_details(directory, findings, risk):
    save_folder = f'{directory}/data_archive'
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    for finding in findings:
        details = findings[finding]

        if details['level'].lower() == risk.lower():
            accounts = details['accounts']
            description = details['description']

            file_name = file_finding_name(description)
            with open(f'{save_folder}/{file_name}.txt', 'a+') as sf:
                for account_id in accounts:
                    for resource_id in details['accounts'][account_id]['account_resources']:
                        sf.write(f'{account_id},{resource_id}\n')


def save_service_details(directory, account_id, service_name, service_details):
    save_folder = f'{directory}/services'
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)

    with open(f'{save_folder}/{service_name}_{account_id}.json', 'w') as sf:
        sf.write(json.dumps(service_details))

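# For orientation, find_results_files() below expects each scoutsuite_results*.js file to
# start with a JavaScript assignment line followed by a JSON document, roughly like the
# illustrative, heavily trimmed sketch here (real ScoutSuite output contains many more fields,
# and the finding name and values are made up):
#
#   scoutsuite_results =
#   {
#       "account_id": "111111111111",
#       "provider_name": "aws",
#       "services": {
#           "ec2": {
#               "findings": {
#                   "ec2-example-finding": {
#                       "description": "...",
#                       "rationale": "...",
#                       "remediation": "...",
#                       "service": "EC2",
#                       "level": "danger",
#                       "checked_items": 10,
#                       "flagged_items": 2,
#                       "items": ["resource-path-1", "resource-path-2"]
#                   }
#               }
#           }
#       }
#   }
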
def find_results_files(directory):
    all_results = [path for path in Path(directory).rglob('scoutsuite_results*.js')]
    all_findings = {}
    service_data_cats = {}

    for all_result in all_results:
        with open(all_result.resolve(), 'r') as rf:
            # Skip the leading "scoutsuite_results =" JavaScript assignment line so the
            # remainder of the file can be parsed as plain JSON.
            next(rf)
            json_results = json.load(rf)

        account_id = json_results['account_id']
        provider_name = json_results['provider_name']

        for service_name in json_results['services']:
            service_details = json_results['services'][service_name]

            if service_name not in service_data_cats:
                service_data_cats[service_name] = set()

            for service_detail in service_details:
                service_data_cats[service_name].add(service_detail)

            for finding_name in json_results['services'][service_name]['findings']:
                finding = json_results['services'][service_name]['findings'][finding_name]

                finding_path = finding.get('path', '')
                finding_rationale = finding.get('rationale', '')
                finding_remediation = finding.get('remediation', '')
                finding_service = finding.get('service', '')
                finding_checked_items = finding.get('checked_items', 0)
                finding_compliance = finding.get('compliance', '')
                finding_description = finding.get('description', '')
                finding_flagged_items = finding.get('flagged_items', 0)
                finding_items = finding.get('items', [])
                finding_level = finding.get('level', '')

                if not check_finding(all_findings, finding_description):
                    if finding_flagged_items > 0:
                        all_findings[finding_description] = {}
                        all_findings[finding_description]['rationale'] = finding_rationale
                        all_findings[finding_description]['remediation'] = finding_remediation
                        all_findings[finding_description]['service'] = finding_service
                        all_findings[finding_description]['compliance'] = finding_compliance
                        all_findings[finding_description]['description'] = finding_description
                        all_findings[finding_description]['level'] = finding_level
                        all_findings[finding_description]['accounts'] = {}

                if finding_flagged_items > 0:
                    all_findings[finding_description]['accounts'][account_id] = {}
                    all_findings[finding_description]['accounts'][account_id]['account_resources'] = finding_items
                    all_findings[finding_description]['accounts'][account_id]['checked_items'] = finding_checked_items
                    all_findings[finding_description]['accounts'][account_id]['flagged_items'] = finding_flagged_items

    return all_findings


if __name__ == '__main__':
    args = parser.parse_args()
    main(args)
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
boto3
--------------------------------------------------------------------------------