├── .gitignore
├── INSTALL.md
├── LICENSE
├── Makefile
├── README.md
├── aws-inventory
│   ├── Makefile
│   ├── cloudformation
│   │   └── Inventory-Template.yaml
│   ├── lambda
│   │   ├── Makefile
│   │   ├── common.py
│   │   ├── get_billing_data.py
│   │   ├── html_templates
│   │   │   ├── account_inventory.html
│   │   │   ├── foreign_inventory.html
│   │   │   └── vpc_inventory.html
│   │   ├── inventory-accessanalyzer-analyzers.py
│   │   ├── inventory-accessanalyzer-findings.py
│   │   ├── inventory-ami.py
│   │   ├── inventory-buckets.py
│   │   ├── inventory-cft.py
│   │   ├── inventory-client-vpn.py
│   │   ├── inventory-cloudfront.py
│   │   ├── inventory-cloudtrail.py
│   │   ├── inventory-cw-alarm.py
│   │   ├── inventory-dx.py
│   │   ├── inventory-ebs-snapshot.py
│   │   ├── inventory-ebs-volume.py
│   │   ├── inventory-ecr.py
│   │   ├── inventory-ecs.py
│   │   ├── inventory-elb.py
│   │   ├── inventory-eni.py
│   │   ├── inventory-es.py
│   │   ├── inventory-firehose.py
│   │   ├── inventory-guardduty.py
│   │   ├── inventory-health-report.py
│   │   ├── inventory-iam.py
│   │   ├── inventory-instances-sg.py
│   │   ├── inventory-kms.py
│   │   ├── inventory-lambdas.py
│   │   ├── inventory-rds.py
│   │   ├── inventory-redshift.py
│   │   ├── inventory-route53.py
│   │   ├── inventory-sagemaker.py
│   │   ├── inventory-secrets.py
│   │   ├── inventory-shield.py
│   │   ├── inventory-ssm.py
│   │   ├── inventory-support-cases.py
│   │   ├── inventory-transit-gateway.py
│   │   ├── inventory-trusted-advisor.py
│   │   ├── inventory-vpc.py
│   │   ├── inventory-waf.py
│   │   ├── inventory-worklink.py
│   │   ├── new_account_handler.py
│   │   ├── pull_organization_data.py
│   │   ├── report-accounts.py
│   │   ├── report-foreign.py
│   │   ├── report-unified-credential-report.py
│   │   ├── report-vpcs.py
│   │   ├── requirements.txt
│   │   ├── setup.cfg
│   │   └── trigger_account_actions.py
│   └── test-events
│       └── test_event.json
├── bin
│   ├── ddb_backups.sh
│   ├── promote.py
│   ├── pull_errors.py
│   ├── sync_resources.sh
│   └── trigger_inventory.sh
├── cloudformation
│   ├── antiope-Template.yaml
│   ├── antiope-bucket-ImportTemplate.yaml
│   └── antiope-bucket-Template.yaml
├── cognito
│   ├── Makefile
│   ├── cloudformation
│   │   └── Cognito-Template.yaml
│   └── post-deploy.sh
├── docs
│   ├── AddingNewResources.md
│   ├── AntiopeBucket.md
│   ├── CONTRIBUTING.md
│   ├── ConfigServiceNotes.md
│   ├── Customizations.md
│   ├── cloudformation
│   │   └── SecurityCrossAccountRoleTemplate.yaml
│   ├── resource_types.md
│   └── sample-custom-stack
│       ├── Makefile
│       ├── cloudformation
│       │   ├── COMPANY-Template.yaml
│       │   └── PREFIX-ENV-COMPANY-customization-Manifest-SAMPLE.yaml
│       └── lambda
│           ├── Makefile
│           ├── requirements.txt
│           └── setup.cfg
├── get_bucket.sh
├── lambda_layer
│   ├── Makefile
│   ├── requirements.txt
│   └── setup.cfg
├── requirements.txt
└── search-cluster
    ├── Makefile
    ├── cloudformation
    │   └── SearchCluster-Template.yaml
    ├── lambda
    │   ├── Makefile
    │   ├── ingest_s3.py
    │   ├── requirements.txt
    │   └── setup.cfg
    ├── mappings-v7
    │   ├── default.json
    │   ├── resources_cloudformation_stack.json
    │   ├── resources_cloudtrail_trail.json
    │   ├── resources_ec2_vpc.json
    │   ├── resources_lambda_function.json
    │   ├── resources_route53_hostedzone.json
    │   ├── resources_secretsmanager_secret.json
    │   └── resources_ssm_managedinstance.json
    ├── mappings
    │   ├── azure_resources_vm_instance.json
    │   ├── default.json
    │   ├── resources_cloudformation_stack.json
    │   ├── resources_cloudtrail_trail.json
    │   ├── resources_ec2_vpc.json
    │   ├── resources_lambda_function.json
    │   ├── resources_route53_hostedzone.json
    │   ├── resources_secretsmanager_secret.json
    │   └── resources_ssm_managedinstance.json
    └── scripts
        ├── Makefile
        ├── create_index.py
        ├── create_kibana_index.py
        ├── delete_es_index.py
        ├── es_snapshot.py
        ├── list_es_index.py
        ├── mappings.py
        ├── post-deploy.sh
        ├── reindex_resources.py
        └── setup.cfg
/.gitignore:
--------------------------------------------------------------------------------
1 | 
2 | # Generic places I've shoved things from a scratch/perspective
3 | Scratch/
4 | Notes.md
5 | 
6 | # Build-time crud
7 | *.zip
8 | *-test-event.json
9 | *-config.json
10 | *dist-info
11 | 
12 | # Deploy specific env vars passed to makefile. Not to be shared.
13 | config.*
14 | 
15 | aws-inventory/lambda/antiope/*
16 | 
17 | 
18 | # Build-time crud for the search cluster lambda
19 | search-cluster/*/*dist-info
20 | search-cluster/*/*.zip
21 | search-cluster/*/urllib3*
22 | search-cluster/*/requests*
23 | search-cluster/*/lib/
24 | search-cluster/*/chardet/
25 | search-cluster/*/certifi/
26 | search-cluster/*/idna/
27 | search-cluster/*/bin/
28 | search-cluster/*/*.rst
29 | search-cluster/*/LICENSE
30 | search-cluster/*/NOTICE
31 | search-cluster/*/elasticsearch5/
32 | search-cluster/*/elasticsearch/
33 | 
34 | # Build-time crud for the inventory lambda
35 | lambda_layer/python/
36 | 
37 | # don't commit the config-files
38 | config-files/
39 | 
40 | # Manifests are inputs to Cloudformation. They contain deploy specific things (like payer accounts)
41 | *Manifest.yaml
42 | cloudformation/*Transformed*.yaml
43 | 
44 | # Test events also contain sensitive data
45 | *test-events/*
46 | 
47 | # Do not want creds in github!
48 | azure_cred.json
49 | 
50 | # Don't commit error reports
51 | *-Errors.html
52 | 
53 | # Created by https://www.gitignore.io/api/osx,python
54 | 
55 | ### OSX ###
56 | *.DS_Store
57 | .AppleDouble
58 | .LSOverride
59 | 
60 | # Icon must end with two \r
61 | Icon
62 | 
63 | # Thumbnails
64 | ._*
65 | 
66 | # Files that might appear in the root of a volume
67 | .DocumentRevisions-V100
68 | .fseventsd
69 | .Spotlight-V100
70 | .TemporaryItems
71 | .Trashes
72 | .VolumeIcon.icns
73 | .com.apple.timemachine.donotpresent
74 | 
75 | # Directories potentially created on remote AFP share
76 | .AppleDB
77 | .AppleDesktop
78 | Network Trash Folder
79 | Temporary Items
80 | .apdisk
81 | 
82 | ### Python ###
83 | # Byte-compiled / optimized / DLL files
84 | __pycache__/
85 | *.py[cod]
86 | *$py.class
87 | 
88 | # C extensions
89 | *.so
90 | 
91 | # Distribution / packaging
92 | .Python
93 | env/
94 | build/
95 | develop-eggs/
96 | dist/
97 | downloads/
98 | eggs/
99 | .eggs/
100 | # lib/
101 | lib64/
102 | parts/
103 | sdist/
104 | var/
105 | wheels/
106 | *.egg-info/
107 | .installed.cfg
108 | *.egg
109 | 
110 | # PyInstaller
111 | # Usually these files are written by a python script from a template
112 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
113 | *.manifest
114 | *.spec
115 | 
116 | # Installer logs
117 | pip-log.txt
118 | pip-delete-this-directory.txt
119 | 
120 | # Unit test / coverage reports
121 | htmlcov/
122 | .tox/
123 | .coverage
124 | .coverage.*
125 | .cache
126 | nosetests.xml
127 | coverage.xml
128 | *,cover
129 | .hypothesis/
130 | 
131 | # Translations
132 | *.mo
133 | *.pot
134 | 
135 | # Django stuff:
136 | *.log
137 | local_settings.py
138 | 
139 | # Flask stuff:
140 | instance/
141 | .webassets-cache
142 | 
143 | # Scrapy stuff:
144 | .scrapy
145 | 
146 | # Sphinx documentation
147 | docs/_build/
148 | 
149 | # PyBuilder
150 | target/
151 | 
152 | # Jupyter Notebook
153 | .ipynb_checkpoints
154 | 
155 | # pyenv
156 | .python-version
157 | 
158 | # celery beat schedule file
159 | celerybeat-schedule
160 | 
161 | # SageMath parsed files
162 | *.sage.py
163 | 
164 | # dotenv
165 | .env
166 | 
167 | # virtualenv
168 | .venv
169 | venv/
170 | ENV/
171 | 
172 | # Spyder project settings
173 | .spyderproject
174 | .spyproject
175 | 
176 | # Rope project settings
177 | .ropeproject
178 | 
179 | # mkdocs documentation
180 | /site
181 | 
182 | 
183 | # End of https://www.gitignore.io/api/osx,python
184 | 
185 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Antiope
2 | AWS Inventory & Compliance Framework
3 | 
4 | 
5 | # Purpose
6 | Antiope (pronounced An-Tie-Oh-Pee) is intended to be an open-source framework for managing resources across hundreds of AWS Accounts. From a trusted Security Account, Antiope will leverage Cross Account Assume Roles to gather up resource data and store them in an inventory bucket. This bucket can then be indexed by ELK or your SIEM of choice to provide easy searching of resources across hundreds of AWS accounts.
7 | 
8 | ## What it currently collects
9 | Antiope is given a list of AWS Organizational parent accounts, and will inventory all of the AWS accounts under those parents. For each of the parent & child accounts it will then gather:
10 | 
11 | 1. S3 Buckets, and associated attributes of the bucket
12 | 1. VPCs, and the number of EC2 Instances in each VPC
13 | 1. Route53 Hosted Zones
14 | 1. Route53 Registered Domains
15 | 1. EC2 Instances
16 | 1. EC2 Security Groups
17 | 1. IAM Users
18 | 1. IAM Roles (and the AWS accounts that are trusted by the roles)
19 | 1. All Elastic Network Interfaces (ENIs) in each VPC, and any PublicIP addresses associated to the ENIs
20 | 
21 | All resources are dropped as individual json files into the S3 Bucket of your choosing under `/Resources/<resource_type>/<resource_id>.json`
22 | 
23 | ## What you can do with what it collects
24 | Right now, the primary function of the collection is to solve the needle-in-the-haystack problem. By aggregating all the resources across all accounts & regions into a single place, finding resources like IP addresses becomes much easier. Antiope is also starting to track where inter-account trust is occurring and creating a record of accounts outside your organization that are trusted by one or more accounts in your organization.
25 | 
26 | Finally, the Elasticsearch cluster is being used as a data repository for threat-hunting scripts to find things like open Elasticsearch domains or de-referenced CloudFront origins. Those hunting scripts can be found in antiope/search-cluster/scripts/hunt-*
27 | 
28 | 
29 | # Modular Structure
30 | Antiope is somewhat modular in that there are currently three nested CloudFormation stacks that comprise the core functionality.
These are intended to be used if needed and ignored if your enterprise has a better solution. The current three modules are:
31 | 
32 | ## Cognito Stack
33 | This stack creates the UserPool and IDPool for Cognito authentication to the Kibana endpoint in the Search Cluster and as a way to authenticate access to the reports stored in the S3 bucket. It's a fairly bare-bones Cognito install and can be extended to leverage SAML federation with your enterprise identity store.
34 | 
35 | ## AWS Inventory Stack
36 | This stack creates the Lambda functions, DynamoDB tables, StepFunctions, and associated glue required to collect the resource data across all of the accounts under all of your organizational parents.
37 | 
38 | ## Search Cluster Stack
39 | This stack creates the (optional) Amazon Elasticsearch cluster for searching the resource objects gathered by the inventory stack. This stack also creates the SQS & Lambda pipeline to detect when new objects are added to the bucket and make sure those objects are indexed.
40 | 
41 | ## GCP Inventory Stack
42 | Currently a work-in-progress, this stack replicates the aws-inventory stack functionality for GCP Projects.
43 | 
44 | ## Compliance Stack
45 | This will be a future addition to Antiope where Turner open-sources the Cloud Security Scorecards we've built for creating executive and technical owner visibility into security issues in each account.
46 | 
47 | ## Local Customizations
48 | Because the triggering of inventory and account detection is based on SNS Topics and state machines, it is easy to add your own enterprise's customizations into the Antiope fold. We're just implementing this now, so more details will be here soon.
49 | 
50 | 
51 | ## Structure of the Bucket
52 | 
53 | <pre>
54 |     /CredentialReports/ - Individual Credential Reports for all the accounts and combined reports to see them all as a single CSV
55 |     /Reports/ - Reports of AWS accounts generated by the inventory phase
56 |     /Resources/ - All the json files collected in the Inventory Phase
57 |     /Health/ - All the Personal Health Events
58 |     /PublicIPs/ - All the public IP addresses in your accounts.
59 |     /deploy-packages/ - location of the zip files hosting the lambda & cloudformation templates
60 |     /config-files/ - place to sync config files used to manage Antiope
61 | </pre>
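
As an illustration (not part of the codebase), here is a minimal boto3 sketch for pulling inventoried resources back out of the bucket; the bucket name, prefix, and object key below are hypothetical examples:

```python
#!/usr/bin/env python3
"""Minimal sketch: read Antiope inventory objects back out of the bucket."""
import json
import boto3

BUCKET = "my-antiope-bucket"  # assumption: your inventory bucket name

s3 = boto3.client('s3')

# List a few resource documents under the Resources/ prefix
resp = s3.list_objects_v2(Bucket=BUCKET, Prefix="Resources/ec2/vpc/", MaxKeys=10)
for obj in resp.get('Contents', []):
    print(obj['Key'])

# Fetch and parse a single resource document (hypothetical key)
body = s3.get_object(Bucket=BUCKET, Key="Resources/ec2/vpc/vpc-0123456789abcdef0.json")['Body']
resource = json.load(body)
print(resource['awsAccountId'], resource['resourceType'])
```

Every document carries the same top-level fields (`awsAccountId`, `resourceType`, `configuration`, etc.), so downstream indexers can treat them uniformly.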
62 | 63 | ### Resource Prefix: 64 | Most resources use the normal resource prefix (vpc- for VPC, i- for Instances, etc). Where the unique identifier for the resource didn't have a prefix, or where the resource name can be duplicated across accounts, Antiope prepends a resource prefix. The following prefixes are inventoried: 65 | 66 | * bucket 67 | * domain - Domains Registered via Route53 Domains. Each domain is globally unique, so AWS accounts aren't part of the object key 68 | * hostedzone - Domains hosted in Route53. There can be multiple hosted zones with the same domain name, so the HostedZone ID is used 69 | * role - IAM Roles. These are not globally unique, so the account_id is part of the object name 70 | * user - IAM Users. These are not globally unique, so the account_id is part of the object name 71 | -------------------------------------------------------------------------------- /aws-inventory/Makefile: -------------------------------------------------------------------------------- 1 | 2 | ifndef env 3 | # $(error env is not set) 4 | env ?= dev 5 | endif 6 | 7 | include ../config-files/config.$(env) 8 | export 9 | 10 | # MAIN_STACK_NAME is custom to your deployment and should be the same for all Antiope Stacks 11 | ifndef MAIN_STACK_NAME 12 | $(error MAIN_STACK_NAME is not set) 13 | endif 14 | 15 | ifndef BUCKET 16 | $(error BUCKET is not set) 17 | endif 18 | 19 | ifndef version 20 | export version := $(shell date +%Y%b%d-%H%M) 21 | endif 22 | 23 | RESOURCE_PREFIX=$(MAIN_STACK_NAME)-aws-inventory 24 | 25 | # Name of the Zip file with all the function code and dependencies 26 | export LAMBDA_PACKAGE=$(RESOURCE_PREFIX)-lambda-$(version).zip 27 | 28 | # List of all the functions deployed by this stack. Required for "make update" to work. 
29 | FUNCTIONS = $(RESOURCE_PREFIX)-pull-organization-data \ 30 | $(RESOURCE_PREFIX)-ia2-analyzer-inventory \ 31 | $(RESOURCE_PREFIX)-ia2-org-findings \ 32 | $(RESOURCE_PREFIX)-ami-inventory \ 33 | $(RESOURCE_PREFIX)-bucket-inventory \ 34 | $(RESOURCE_PREFIX)-cloudformation-inventory \ 35 | $(RESOURCE_PREFIX)-cloudfront-inventory \ 36 | $(RESOURCE_PREFIX)-cloudtrail-inventory \ 37 | $(RESOURCE_PREFIX)-cw-alarm-inventory \ 38 | $(RESOURCE_PREFIX)-client-vpn-inventory \ 39 | $(RESOURCE_PREFIX)-dx-inventory \ 40 | $(RESOURCE_PREFIX)-ebs-snap-inventory \ 41 | $(RESOURCE_PREFIX)-ebs-vol-inventory \ 42 | $(RESOURCE_PREFIX)-ecr-inventory \ 43 | $(RESOURCE_PREFIX)-ecs-inventory \ 44 | $(RESOURCE_PREFIX)-elb-inventory \ 45 | $(RESOURCE_PREFIX)-eni-inventory \ 46 | $(RESOURCE_PREFIX)-es-inventory \ 47 | $(RESOURCE_PREFIX)-firehose-inventory \ 48 | $(RESOURCE_PREFIX)-transit-gw-inventory \ 49 | $(RESOURCE_PREFIX)-guardduty-inventory \ 50 | $(RESOURCE_PREFIX)-health-inventory \ 51 | $(RESOURCE_PREFIX)-iam-inventory \ 52 | $(RESOURCE_PREFIX)-instances-sg-inventory \ 53 | $(RESOURCE_PREFIX)-kms-inventory \ 54 | $(RESOURCE_PREFIX)-lambdas-inventory \ 55 | $(RESOURCE_PREFIX)-rds-inventory \ 56 | $(RESOURCE_PREFIX)-route53-inventory \ 57 | $(RESOURCE_PREFIX)-secrets-inventory \ 58 | $(RESOURCE_PREFIX)-shield-inventory \ 59 | $(RESOURCE_PREFIX)-ssm-inventory \ 60 | $(RESOURCE_PREFIX)-sqs-inventory \ 61 | $(RESOURCE_PREFIX)-support-inventory \ 62 | $(RESOURCE_PREFIX)-ta-inventory \ 63 | $(RESOURCE_PREFIX)-vpc-inventory \ 64 | $(RESOURCE_PREFIX)-waf-inventory \ 65 | $(RESOURCE_PREFIX)-trigger-inventory \ 66 | $(RESOURCE_PREFIX)-get-billing-data \ 67 | $(RESOURCE_PREFIX)-create-account-report \ 68 | $(RESOURCE_PREFIX)-create-cred-report \ 69 | $(RESOURCE_PREFIX)-create-foreign-account-report \ 70 | $(RESOURCE_PREFIX)-create-vpc-report \ 71 | $(RESOURCE_PREFIX)-worklink-inventory \ 72 | $(RESOURCE_PREFIX)-new_account_handler 73 | 74 | .PHONY: $(FUNCTIONS) 75 | 76 | # 77 | # Lambda Targets 78 | # 79 | clean: 80 | cd lambda && $(MAKE) clean 81 | 82 | test: 83 | cd lambda && $(MAKE) test 84 | 85 | package: 86 | cd lambda && $(MAKE) package 87 | 88 | zipfile: 89 | cd lambda && $(MAKE) zipfile 90 | 91 | # Update the Lambda Code without modifying the CF Stack 92 | update: package $(FUNCTIONS) 93 | for f in $(FUNCTIONS) ; do \ 94 | aws lambda update-function-code --region $(AWS_DEFAULT_REGION) --function-name $$f --zip-file fileb://lambda/$(LAMBDA_PACKAGE) ; \ 95 | done 96 | 97 | # Update one specific function. 
Called as "make fupdate function=$(MAIN_STACK_NAME)-aws-inventory-ecs-inventory"
98 | fupdate: zipfile
99 | 	aws lambda update-function-code --region $(AWS_DEFAULT_REGION) --function-name $(function) --zip-file fileb://lambda/$(LAMBDA_PACKAGE)
100 | 
101 | #
102 | # Purging Targets
103 | #
104 | purge-tables:
105 | 	purge_ddb_table.py --table $(RESOURCE_PREFIX)-accounts --key_attribute account_id --force
106 | 	purge_ddb_table.py --table $(RESOURCE_PREFIX)-billing-data --key_attribute account_id --force
107 | 	purge_ddb_table.py --table $(RESOURCE_PREFIX)-vpc-inventory --key_attribute vpc_id --force
108 | 
109 | purge-logs:
110 | 	for f in $(FUNCTIONS) ; do \
111 | 		aws logs delete-log-group --log-group-name /aws/lambda/$$f ; \
112 | 	done
113 | 
114 | #
115 | # Post Install Targets
116 | #
117 | 
118 | post-deploy: expire-logs
119 | 
120 | expire-logs:
121 | 	for f in $(FUNCTIONS) ; do \
122 | 		aws logs put-retention-policy --log-group-name /aws/lambda/$$f --retention-in-days 5 ; \
123 | 	done
124 | 
--------------------------------------------------------------------------------
/aws-inventory/lambda/Makefile:
--------------------------------------------------------------------------------
1 | 
2 | 
3 | # Static, not sure if needed??
4 | PYTHON=python3
5 | PIP=pip3
6 | 
7 | FILES = common.py \
8 | 	get_billing_data.py \
9 | 	inventory-accessanalyzer-analyzers.py \
10 | 	inventory-accessanalyzer-findings.py \
11 | 	inventory-ami.py \
12 | 	inventory-buckets.py \
13 | 	inventory-cft.py \
14 | 	inventory-cloudfront.py \
15 | 	inventory-cloudtrail.py \
16 | 	inventory-client-vpn.py \
17 | 	inventory-cw-alarm.py \
18 | 	inventory-dx.py \
19 | 	inventory-ebs-snapshot.py \
20 | 	inventory-ebs-volume.py \
21 | 	inventory-ecr.py \
22 | 	inventory-ecs.py \
23 | 	inventory-elb.py \
24 | 	inventory-eni.py \
25 | 	inventory-es.py \
26 | 	inventory-firehose.py \
27 | 	inventory-guardduty.py \
28 | 	inventory-health-report.py \
29 | 	inventory-iam.py \
30 | 	inventory-instances-sg.py \
31 | 	inventory-kms.py \
32 | 	inventory-lambdas.py \
33 | 	inventory-redshift.py \
34 | 	inventory-rds.py \
35 | 	inventory-route53.py \
36 | 	inventory-sagemaker.py \
37 | 	inventory-secrets.py \
38 | 	inventory-shield.py \
39 | 	inventory-ssm.py \
40 | 	inventory-support-cases.py \
41 | 	inventory-trusted-advisor.py \
42 | 	inventory-vpc.py \
43 | 	inventory-transit-gateway.py \
44 | 	inventory-waf.py \
45 | 	inventory-worklink.py \
46 | 	new_account_handler.py \
47 | 	pull_organization_data.py \
48 | 	report-accounts.py \
49 | 	report-foreign.py \
50 | 	report-unified-credential-report.py \
51 | 	report-vpcs.py \
52 | 	trigger_account_actions.py
53 | 
54 | DEPENDENCIES=antiope
55 | 
56 | package: test clean deps zipfile
57 | 
58 | #
59 | # Lambda function management
60 | #
61 | 
62 | clean:
63 | 	rm -rf __pycache__ *.zip *.dist-info $(DEPENDENCIES)
64 | 
65 | # Create the package Zip. Assumes all tests were done
66 | zipfile: $(FILES) html_templates
67 | 	cp -a ../../antiope-aws-module/antiope .
68 | 	zip -r $(LAMBDA_PACKAGE) $^ $(DEPENDENCIES)
69 | 
70 | test: $(FILES)
71 | 	for f in $^; do $(PYTHON) -m py_compile $$f; if [ $$? -ne 0 ] ; then echo "$$f FAILS" ; exit 1; fi done
72 | 
73 | deps:
74 | 	$(PIP) install -r requirements.txt -t . --upgrade
75 | 	cp -a ../../antiope-aws-module/antiope .
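# NOTE: deps installs the pip requirements into this directory and vendors the
# local antiope module alongside them, so both ship inside the zip that the
# zipfile target builds.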
76 | 
77 | 
78 | pep8: $(FILES)
79 | 	pycodestyle $^
80 | 
--------------------------------------------------------------------------------
/aws-inventory/lambda/common.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import time
4 | import datetime
5 | from dateutil import tz
6 | import logging
7 | 
8 | import boto3
9 | from botocore.exceptions import ClientError
10 | 
11 | from antiope.aws_account import *
12 | from antiope.foreign_aws_account import *
13 | logger = logging.getLogger()  # module logger; used by save_resource_to_s3() and capture_error() below
14 | 
15 | def parse_tags(tagset):
16 |     """Convert the tagset as returned by AWS into a normal dict of {"tagkey": "tagvalue"}"""
17 |     output = {}
18 |     for tag in tagset:
19 |         # aws is inconsistent with tags sometimes they use caps and sometimes not
20 |         if 'Key' in tag:
21 |             output[tag['Key']] = tag['Value']
22 |         if 'key' in tag:
23 |             output[tag['key']] = tag['value']
24 |     return(output)
25 | 
26 | 
27 | def save_resource_to_s3(prefix, resource_id, resource):
28 |     """Saves the resource to S3 in prefix with the object name of resource_id.json"""
29 |     if "/" in resource_id:
30 |         logger.error(f"{resource_id} contains a / character and cannot be safely stored in S3 under {prefix}")
31 |         resource_id = resource_id.replace("/", "-")  # str.replace() returns a new string; the result must be assigned
32 | 
33 |     s3client = boto3.client('s3')
34 |     try:
35 |         object_key = "Resources/{}/{}.json".format(prefix, resource_id)
36 |         s3client.put_object(
37 |             Body=json.dumps(resource, sort_keys=True, default=str, indent=2),
38 |             Bucket=os.environ['INVENTORY_BUCKET'],
39 |             ContentType='application/json',
40 |             Key=object_key,
41 |         )
42 |     except ClientError as e:
43 |         logger.error("Unable to save object {}: {}".format(object_key, e))
44 | 
45 | 
46 | def get_active_accounts(table_name=None):
47 |     """Returns an array of all active AWS accounts as AWSAccount objects"""
48 | 
49 |     # Reuse an AntiopeConfig object to avoid breaking on the 1024 file limit in lambda
50 |     antiope_config = AntiopeConfig()
51 | 
52 |     account_ids = get_account_ids(status="ACTIVE", table_name=table_name)
53 |     output = []
54 |     for a in account_ids:
55 |         output.append(AWSAccount(a, config=antiope_config))
56 |     return(output)
57 | 
58 | 
59 | def get_foreign_accounts():
60 |     """Returns an array of all foreign and trusted AWS accounts as ForeignAWSAccount objects"""
61 |     foreign_account_ids = get_account_ids(status="FOREIGN")
62 |     trusted_account_ids = get_account_ids(status="TRUSTED")
63 |     output = []
64 |     for a in trusted_account_ids:
65 |         output.append(ForeignAWSAccount(a))
66 |     for a in foreign_account_ids:
67 |         output.append(ForeignAWSAccount(a))
68 |     return(output)
69 | 
70 | 
71 | def get_account_ids(status=None, table_name=None):
72 |     """return an array of account_ids from the Accounts table. Optionally, filter by status"""
73 |     dynamodb = boto3.resource('dynamodb')
74 |     if table_name:
75 |         account_table = dynamodb.Table(table_name)
76 |     else:
77 |         account_table = dynamodb.Table(os.environ['ACCOUNT_TABLE'])
78 | 
79 |     account_list = []
80 |     response = account_table.scan(
81 |         AttributesToGet=['account_id', 'account_status']
82 |     )
83 |     while 'LastEvaluatedKey' in response:
84 |         # Means that DynamoDB didn't return the full set, so ask for more.
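        # (A scan returns at most 1 MB of items per call; keep paging with ExclusiveStartKey until LastEvaluatedKey is absent.)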
85 |         account_list = account_list + response['Items']
86 |         response = account_table.scan(
87 |             AttributesToGet=['account_id', 'account_status'],
88 |             ExclusiveStartKey=response['LastEvaluatedKey']
89 |         )
90 |     account_list = account_list + response['Items']
91 |     output = []
92 |     for a in account_list:
93 |         if status is None:  # Then we get everything
94 |             output.append(a['account_id'])
95 |         elif a['account_status'] == status:  # this is what we asked for
96 |             output.append(a['account_id'])
97 |         # Otherwise, don't bother.
98 |     return(output)
99 | 
100 | 
101 | def capture_error(event, context, error, message):
102 |     '''When an exception is thrown, this function will publish an SQS message for later retrieval'''
103 |     sqs_client = boto3.client('sqs')
104 | 
105 |     queue_url = os.environ['ERROR_QUEUE']
106 | 
107 |     body = {
108 |         'event': event,
109 |         'function_name': context.function_name,
110 |         'aws_request_id': context.aws_request_id,
111 |         'log_group_name': context.log_group_name,
112 |         'log_stream_name': context.log_stream_name,
113 |         'error': str(error),
114 |         'message': message
115 |     }
116 | 
117 |     logger.info(f"Sending Lambda Exception Message: {body}")
118 |     response = sqs_client.send_message(QueueUrl=queue_url, MessageBody=json.dumps(body))
119 |     return(body)
120 | 
121 | 
122 | def set_debug(event, logger):
123 |     """Given the event, and using the environment, decide if the logger default should be overridden."""
124 |     if 'debug' in event and event['debug']:
125 |         logger.setLevel(logging.DEBUG)
126 | 
127 |     if 'DEBUG' in os.environ and os.environ['DEBUG'] == "True":
128 |         logger.setLevel(logging.DEBUG)
129 |     return(logger)
130 | 
131 | 
132 | class LambdaRunningOutOfTime(Exception):
133 |     '''raised by functions when the timeout is about to be hit'''
--------------------------------------------------------------------------------
/aws-inventory/lambda/get_billing_data.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.exceptions import ClientError
3 | 
4 | import json
5 | import os
6 | import time
7 | import datetime
8 | from dateutil import tz
9 | 
10 | from antiope.aws_account import *
11 | from common import *
12 | 
13 | import logging
14 | logger = logging.getLogger()
15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | 
20 | 
21 | # Lambda main routine
22 | def handler(event, context):
23 |     set_debug(event, logger)
24 | 
25 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
26 |     message = json.loads(event['Records'][0]['Sns']['Message'])
27 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
28 | 
29 |     try:
30 |         dynamodb = boto3.resource('dynamodb')
31 |         billing_table = dynamodb.Table(os.environ['BILLING_TABLE'])
32 | 
33 |         # We process the account we're told to via the SNS Message that invoked us.
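        # e.g. message == {"account_id": "123456789012"}  (hypothetical example payload)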
34 | account_id = message['account_id'] 35 | target_account = AWSAccount(account_id) 36 | 37 | billing_data = get_current_spend(target_account) 38 | if billing_data is None: 39 | logger.error("No billing data returned for {}".format(account_id)) 40 | return(event) 41 | 42 | response = billing_table.put_item( 43 | Item={ 44 | 'account_id': target_account.account_id, 45 | 'datetime': str(billing_data['Timestamp']), 46 | 'estimated_charges': str(billing_data['Maximum']) 47 | } 48 | ) 49 | logger.info("Saved new est charges of {} for {}({})".format(str(billing_data['Maximum']), target_account.account_name, target_account.account_id)) 50 | 51 | return(event) 52 | except Exception as e: 53 | logger.error("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 54 | raise 55 | # end handler() 56 | 57 | 58 | def get_current_spend(account): 59 | cwm_client = account.get_client('cloudwatch', region="us-east-1") 60 | 61 | try: 62 | response = cwm_client.get_metric_statistics( 63 | Namespace='AWS/Billing', 64 | MetricName='EstimatedCharges', 65 | Dimensions=[ 66 | { 67 | 'Name': 'Currency', 68 | 'Value': 'USD' 69 | }, 70 | ], 71 | StartTime=datetime.datetime.now() - datetime.timedelta(hours = 24), 72 | EndTime=datetime.datetime.now(), 73 | Period=21600, # 6 hours 74 | Statistics=['Maximum'], 75 | Unit='None' 76 | ) 77 | logger.debug(json.dumps(response, sort_keys=True, indent=2, default=str)) 78 | max_point = None 79 | for point in response['Datapoints']: 80 | if max_point is None: 81 | max_point = point 82 | continue 83 | if point['Maximum'] > max_point['Maximum']: 84 | # logger.info("{} is more than {}".format(point['Maximum'], max_point['Maximum'])) 85 | max_point = point 86 | return(max_point) 87 | except KeyError as e: 88 | logger.error("KeyError getting spend: {} -- Response: {}".format(e, response)) 89 | return(None) 90 | except IndexError as e: 91 | logger.error("IndexError getting spend: {} -- Response: {}".format(e, response)) 92 | return(None) 93 | except ClientError as e: 94 | logger.error("ClientError getting spend: {}".format(e)) 95 | return(None) 96 | -------------------------------------------------------------------------------- /aws-inventory/lambda/html_templates/account_inventory.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | AWS Account Inventory 4 | 5 | 6 | 7 |
AWS Account Inventory
8 | Total Active Accounts: ${account_count} 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | % for row in accounts: 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | %endfor 32 | 33 |
<th>Account Name</th><th>Account ID</th><th>Parent</th><th>Root Email</th><th>Status</th><th>Assume Role Link</th>
<td>${row['account_name']}</td><td>${row['account_id']}</td><td>${row['payer_name']}</td><td>${row['root_email']}</td><td>${row['account_status']}</td><td>${row['assume_role_link']}</td>
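<!-- One table row is emitted per account; the ${...} placeholders are filled in by the report-generating lambda. -->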
34 | Page Generated on ${timestamp} 35 | -------------------------------------------------------------------------------- /aws-inventory/lambda/html_templates/foreign_inventory.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Foreign AWS Account Inventory 4 | 5 | 6 | 7 |
Foreign AWS Account Inventory
8 | Total Active Accounts: ${account_count} 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | % for row in accounts: 21 | 22 | 23 | 24 | 25 | 26 | 27 | %endfor 28 | 29 |
<th>Account Name</th><th>Account ID</th><th>Status</th><th>AMI Source</th>
<td>${row['account_name']}</td><td>${row['account_id']}</td><td>${row['account_status']}</td><td>${row['ami_source']}</td>
30 | Page Generated on ${timestamp} 31 | -------------------------------------------------------------------------------- /aws-inventory/lambda/html_templates/vpc_inventory.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | VPC Inventory 5 | 6 | 7 | 8 |
VPC Inventory
9 | Total Active Accounts: ${account_count}
10 | Total Active VPCs: ${vpc_count} 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | % for row in vpcs: 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | %endfor 40 | 41 |
<th>Account Name</th><th>Account ID</th><th>VPC ID</th><th>VPC Name</th><th>Region</th><th>CIDR Block</th><th>Running Instances</th><th>Stopped Instances</th><th>Last Update Time</th>
<td>${row['account']['account_name']}</td><td>${row['account']['account_id']}</td><td>${row['vpc']['vpc_id']}</td><td>${row['vpc']['name']}</td><td>${row['vpc']['region']}</td><td>${row['vpc']['cidr_block']}</td><td>${row['vpc']['instance_states']['running']}</td><td>${row['vpc']['instance_states']['stopped']}</td><td>${row['vpc']['last_seen']}</td>
42 | Page Generated on ${timestamp} UTC
43 | 
44 | 
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-accessanalyzer-analyzers.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.exceptions import ClientError
3 | import json
4 | import os
5 | import time
6 | import datetime  # module import: the code below calls datetime.datetime.now()
7 | from dateutil import tz
8 | 
9 | from antiope.aws_account import *
10 | from common import *
11 | 
12 | import logging
13 | logger = logging.getLogger()
14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
15 | logging.getLogger('botocore').setLevel(logging.WARNING)
16 | logging.getLogger('boto3').setLevel(logging.WARNING)
17 | logging.getLogger('urllib3').setLevel(logging.WARNING)
18 | 
19 | RESOURCE_PATH = "accessanalyzer/analyzer"
20 | RESOURCE_TYPE = "AWS::AccessAnalyzer::Analyzer"
21 | 
22 | def lambda_handler(event, context):
23 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
24 |     message = json.loads(event['Records'][0]['Sns']['Message'])
25 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
26 | 
27 |     try:
28 | 
29 |         account_id = message['account_id']
30 |         target_account = AWSAccount(message['account_id'])
31 | 
32 |         regions = target_account.get_regions()
33 |         if 'region' in message:
34 |             regions = [message['region']]
35 | 
36 |         # inventory the Access Analyzer analyzers in each region
37 |         for r in regions:
38 |             client = target_account.get_client('accessanalyzer', region=r)
39 |             analyzer = get_analyzer(target_account, client, r)
40 | 
41 | 
42 |     except AntiopeAssumeRoleError as e:
43 |         logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
44 |         return()
48 |     except ClientError as e:
49 |         if e.response['Error']['Code'] == 'AccessDeniedException' or e.response['Error']['Code'] == 'UnauthorizedOperation':
50 |             logger.error(f"AccessDeniedException/UnauthorizedOperation for access-analyzer in {target_account.account_name}({target_account.account_id}): {e}")
51 |             return()
52 |         else:
53 |             logger.critical("AWS Error getting info for {}: {}".format(account_id, e))
54 |             capture_error(message, context, e, "ClientError for {}: {}".format(account_id, e))
55 |             raise
56 |     except Exception as e:
57 |         logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
58 |         capture_error(message, context, e, "General Exception for {}: {}".format(account_id, e))
59 |         raise
60 | 
61 | 
62 | def get_analyzer(target_account, client, region):
63 |     analyzers = []
64 |     response = client.list_analyzers()
65 |     while 'nextToken' in response:
66 |         analyzers += response['analyzers']
67 |         response = client.list_analyzers(nextToken=response['nextToken'])
68 |     analyzers += response['analyzers']
69 | 
70 |     # There may be no analyzers in this region, so we return none
71 |     if len(analyzers) == 0:
72 |         return(None)
73 | 
74 |     for a in analyzers:
75 |         resource_item = {}
76 |         resource_item['awsAccountId'] = target_account.account_id
77 |         resource_item['awsAccountName'] = target_account.account_name
78 |         resource_item['resourceType'] = RESOURCE_TYPE
79 |         resource_item['source'] = "Antiope"
80 |         resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
81 |         resource_item['awsRegion'] = region
82 | 
        resource_item['configuration'] = a
83 |         resource_item['supplementaryConfiguration'] = {}
84 |         resource_item['resourceId'] = f"{target_account.account_id}-{region}-{a['name']}"
85 |         resource_item['ARN'] = a['arn']
86 |         resource_item['errors'] = {}
87 |         save_resource_to_s3(RESOURCE_PATH, resource_item['resourceId'], resource_item)
88 | 
89 |     # There can currently only be one analyzer. When that changes this part needs to be fixed
90 |     return(analyzers[0]['arn'])  # Arn is needed for getting findings
91 | 
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-cft.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.exceptions import ClientError
3 | 
4 | import json
5 | import os
6 | import time
7 | import datetime  # module import: the code below uses datetime.datetime, datetime.timezone and datetime.timedelta
8 | from dateutil import tz
9 | 
10 | from antiope.aws_account import *
11 | from common import *
12 | 
13 | import logging
14 | logger = logging.getLogger()
15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | 
20 | 
21 | RESOURCE_PATH = "cloudformation/stack"
22 | RESOURCE_TYPE = "AWS::CloudFormation::Stack"
23 | 
24 | 
25 | def lambda_handler(event, context):
26 |     set_debug(event, logger)
27 | 
28 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
29 |     message = json.loads(event['Records'][0]['Sns']['Message'])
30 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
31 | 
32 |     last_run_time = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(minutes=35)  # FIXME: don't hardcode the lookback window
33 | 
34 |     try:
35 |         target_account = AWSAccount(message['account_id'])
36 | 
37 |         regions = target_account.get_regions()
38 |         if 'region' in message:
39 |             regions = [message['region']]
40 | 
41 |         for r in regions:
42 |             cf_client = target_account.get_client('cloudformation', region=r)
43 |             response = cf_client.describe_stacks()
44 |             while 'NextToken' in response:
45 |                 process_stacks(target_account, cf_client, r, response['Stacks'], last_run_time)
46 |                 response = cf_client.describe_stacks(NextToken=response['NextToken'])
47 |             process_stacks(target_account, cf_client, r, response['Stacks'], last_run_time)
48 | 
49 |     except AntiopeAssumeRoleError as e:
50 |         logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
51 |         return()
52 |     except ClientError as e:
53 |         logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
54 |         capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
55 |         raise
56 |     except Exception as e:
57 |         logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
58 |         capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
59 |         raise
60 | 
61 | 
62 | def process_stacks(target_account, cf_client, region, stacks, last_run_time):
63 | 
64 |     start_time = int(time.time())
65 |     count = 0
66 | 
67 |     for stack in stacks:
68 | 
69 |         # FIXME, support a full inventory somehow
70 |         if 'LastUpdatedTime' in stack and stack['LastUpdatedTime'] < last_run_time:
71 |             # Don't inventory what's been done before
72 |             continue
73 |         if 'LastUpdatedTime' not in stack and stack['CreationTime'] < last_run_time:
74 |             # Don't inventory what's been done before
75 |             continue
76 | 
77 |         logger.debug("Processing stack {} for {} in {}".format(stack['StackId'], target_account.account_id, region))
78 |         count += 1
79 | 
80 |         resource_item = {}
81 |         resource_item['awsAccountId'] = target_account.account_id
82 |         resource_item['awsAccountName'] = target_account.account_name
83 |         resource_item['resourceType'] = RESOURCE_TYPE
84 |         resource_item['source'] = "Antiope"
85 |         resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
86 |         resource_item['awsRegion'] = region
87 |         resource_item['configuration'] = stack
88 |         if 'Tags' in stack:
89 |             resource_item['tags'] = parse_tags(stack['Tags'])
90 |         resource_item['supplementaryConfiguration'] = {}
91 |         # StackId is really an ARN which isn't suitable as an S3 Key. The part after the last "/" is unique, but the name is helpful too.
92 |         resource_item['resourceId'] = stack['StackId'].split(":")[-1].replace("/", "-")
93 |         resource_item['errors'] = {}
94 |         resource_item['resourceName'] = stack['StackName']
95 |         resource_item['ARN'] = stack['StackId']
96 |         resource_item['resourceCreationTime'] = stack['CreationTime']
97 |         save_resource_to_s3(RESOURCE_PATH, resource_item['resourceId'], resource_item)
98 | 
99 |     end_time = int(time.time())
100 |     logger.debug(f"process_stacks() took {end_time - start_time} sec to process {count} stacks")
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-cloudfront.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.exceptions import ClientError
3 | 
4 | import json
5 | import os
6 | import time
7 | import datetime
8 | from dateutil import tz
9 | 
10 | from antiope.aws_account import *
11 | from common import *
12 | 
13 | import logging
14 | logger = logging.getLogger()
15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | 
20 | RESOURCE_PATH = "cloudfront/distribution"
21 | RESOURCE_TYPE = "AWS::CloudFront::Distribution"
22 | 
23 | 
24 | def lambda_handler(event, context):
25 |     set_debug(event, logger)
26 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
27 |     message = json.loads(event['Records'][0]['Sns']['Message'])
28 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
29 | 
30 |     try:
31 | 
32 |         target_account = AWSAccount(message['account_id'])
33 | 
34 |         # Cloudfront is a global service
35 |         cf_client = target_account.get_client('cloudfront')
36 | 
37 |         resource_item = {}
38 |         resource_item['awsAccountId'] = target_account.account_id
39 |         resource_item['awsAccountName'] = target_account.account_name
40 |         resource_item['resourceType'] = RESOURCE_TYPE
41 |         resource_item['source'] = "Antiope"
42 | 
43 |         distributions = list_distributions(cf_client, target_account)
44 |         logger.debug(f"Found {len(distributions)} distributions for account {target_account.account_name}({target_account.account_id})")
45 |         for distribution in distributions:
46 | 
47 |             resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
48 |             resource_item['configuration'] = distribution
49 |             resource_item['supplementaryConfiguration'] = {}
50 |             resource_item['resourceId'] = distribution['Id']
51 |             resource_item['resourceName'] = distribution['DomainName']
52 |             resource_item['ARN'] =
distribution['ARN'] 53 | resource_item['errors'] = {} 54 | 55 | save_resource_to_s3(RESOURCE_PATH, distribution['Id'], resource_item) 56 | 57 | except AntiopeAssumeRoleError as e: 58 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 59 | return() 60 | except ClientError as e: 61 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 62 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 63 | raise 64 | except Exception as e: 65 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 66 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 67 | raise 68 | 69 | 70 | def list_distributions(cf_client, target_account): 71 | distributions = [] 72 | response = cf_client.list_distributions() 73 | while 'NextMarker' in response['DistributionList']: 74 | for i in response['DistributionList']['Items']: 75 | distributions.append(i) 76 | response = cf_client.list_distributions(Marker=response['DistributionList']['NextMarker']) 77 | if 'Items' not in response['DistributionList']: 78 | return(distributions) 79 | for i in response['DistributionList']['Items']: 80 | distributions.append(i) 81 | return(distributions) 82 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-cloudtrail.py: -------------------------------------------------------------------------------- 1 | 2 | import boto3 3 | from botocore.exceptions import ClientError 4 | 5 | import json 6 | import os 7 | import time 8 | import datetime 9 | from dateutil import tz 10 | 11 | from antiope.aws_account import * 12 | from common import * 13 | 14 | import logging 15 | logger = logging.getLogger() 16 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 17 | logging.getLogger('botocore').setLevel(logging.WARNING) 18 | logging.getLogger('boto3').setLevel(logging.WARNING) 19 | logging.getLogger('urllib3').setLevel(logging.WARNING) 20 | 21 | RESOURCE_PATH = "cloudtrail/trail" 22 | RESOURCE_TYPE = "AWS::CloudTrail::Trail" 23 | 24 | 25 | def lambda_handler(event, context): 26 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 27 | message = json.loads(event['Records'][0]['Sns']['Message']) 28 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 29 | 30 | try: 31 | target_account = AWSAccount(message['account_id']) 32 | for r in target_account.get_regions(): 33 | discover_trails(target_account, r) 34 | 35 | except AntiopeAssumeRoleError as e: 36 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 37 | return() 38 | except ClientError as e: 39 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 40 | logger.error("Antiope doesn't have proper permissions to this account") 41 | return(event) 42 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 43 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 44 | raise 45 | except Exception as e: 46 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 47 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 48 | raise 49 | 50 | 51 | def discover_trails(target_account, region): 52 | '''Iterate across all regions to discover CloudTrails''' 
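    # A trail ARN looks like arn:aws:cloudtrail:us-east-1:123456789012:trail/my-trail;
    # the split(":")[3] check below compares the trail's home region to the region being queried.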
53 | 
54 |     ct_client = target_account.get_client('cloudtrail', region=region)
55 |     response = ct_client.describe_trails()
56 | 
57 |     resource_item = {}
58 |     resource_item['awsAccountId'] = target_account.account_id
59 |     resource_item['awsAccountName'] = target_account.account_name
60 |     resource_item['resourceType'] = RESOURCE_TYPE
61 |     resource_item['awsRegion'] = region
62 |     resource_item['source'] = "Antiope"
63 | 
64 |     for trail in response['trailList']:
65 | 
66 |         # CloudTrail will return trails from other regions if that trail is collecting events from the region where the api call was made
67 |         if region != trail['TrailARN'].split(":")[3]:
68 |             # Move along if the region of the trail is not the region we're making the call to
69 |             continue
70 | 
71 |         resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
72 |         resource_item['configuration'] = trail
73 |         # resource_item['tags'] = ct_client.list_tags(ResourceIdList=[ trail['TrailARN'] ] )
74 |         resource_item['supplementaryConfiguration'] = {}
75 |         resource_item['resourceId'] = "{}-{}-{}".format(trail['Name'], target_account.account_id, region)
76 |         resource_item['resourceName'] = trail['Name']
77 |         resource_item['ARN'] = trail['TrailARN']
78 |         resource_item['errors'] = {}
79 | 
80 |         event_response = ct_client.get_event_selectors(TrailName=trail['Name'])
81 |         resource_item['supplementaryConfiguration']['EventSelectors'] = event_response['EventSelectors']
82 | 
83 |         status_response = ct_client.get_trail_status(Name=trail['Name'])
84 |         resource_item['supplementaryConfiguration']['Status'] = status_response
85 |         del(resource_item['supplementaryConfiguration']['Status']['ResponseMetadata'])
86 | 
87 |         save_resource_to_s3(RESOURCE_PATH, resource_item['resourceId'], resource_item)
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-ebs-snapshot.py:
--------------------------------------------------------------------------------
1 | 
2 | import boto3
3 | from botocore.exceptions import ClientError
4 | 
5 | import json
6 | import os
7 | import time
8 | import datetime  # module import: the code below calls datetime.datetime.now()
9 | from dateutil import tz
10 | 
11 | from antiope.aws_account import *
12 | from common import *
13 | 
14 | import logging
15 | logger = logging.getLogger()
16 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
17 | logging.getLogger('botocore').setLevel(logging.WARNING)
18 | logging.getLogger('boto3').setLevel(logging.WARNING)
19 | logging.getLogger('urllib3').setLevel(logging.WARNING)
20 | 
21 | 
22 | SNAPSHOT_RESOURCE_PATH = "ec2/snapshot"
23 | SNAPSHOT_TYPE = "AWS::EC2::Snapshot"
24 | 
25 | 
26 | def lambda_handler(event, context):
27 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
28 |     message = json.loads(event['Records'][0]['Sns']['Message'])
29 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
30 | 
31 |     try:
32 |         target_account = AWSAccount(message['account_id'])
33 |         for r in target_account.get_regions():
34 |             discover_snapshots(target_account, r)
35 | 
36 |     except AntiopeAssumeRoleError as e:
37 |         logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
38 |         return()
39 |     except ClientError as e:
40 |         if e.response['Error']['Code'] == 'UnauthorizedOperation':
41 |             logger.error("Antiope doesn't have proper permissions to this account")
42 |             return(event)
43 |         logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
44 |         capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
45 |         raise
46 |     except Exception as e:
47 |         logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
48 |         capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
49 |         raise
50 | 
51 | 
52 | def discover_snapshots(account, region):
53 |     '''
54 |     Discover EBS Snapshots owned by this account
55 | 
56 |     '''
57 | 
58 |     snapshots = []
59 | 
60 |     ec2_client = account.get_client('ec2', region=region)
61 |     response = ec2_client.describe_snapshots(OwnerIds=[account.account_id])
62 |     while 'NextToken' in response:  # Gotta Catch 'em all!
63 |         snapshots += response['Snapshots']
64 |         response = ec2_client.describe_snapshots(OwnerIds=[account.account_id], NextToken=response['NextToken'])
65 |     snapshots += response['Snapshots']
66 |     logger.info(f"Retrieved {len(snapshots)} snapshots for {account.account_name}({account.account_id})")
67 | 
68 |     for snap in snapshots:
69 |         resource_item = {}
70 |         resource_item['awsAccountId'] = account.account_id
71 |         resource_item['awsAccountName'] = account.account_name
72 |         resource_item['resourceType'] = SNAPSHOT_TYPE
73 |         resource_item['source'] = "Antiope"
74 |         resource_item['awsRegion'] = region
75 |         resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
76 |         resource_item['configuration'] = snap
77 |         if 'Tags' in snap:
78 |             resource_item['tags'] = parse_tags(snap['Tags'])
79 |         resource_item['supplementaryConfiguration'] = {}
80 |         resource_item['resourceId'] = snap['SnapshotId']
81 |         resource_item['resourceName'] = snap['SnapshotId']
82 |         resource_item['errors'] = {}
83 |         save_resource_to_s3(SNAPSHOT_RESOURCE_PATH, resource_item['resourceId'], resource_item)
84 | 
85 | 
86 | def json_serial(obj):
87 |     """JSON serializer for objects not serializable by default json code"""
88 | 
89 |     if isinstance(obj, (datetime.datetime, datetime.date)):
90 |         return obj.isoformat()
91 |     raise TypeError("Type %s not serializable" % type(obj))
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-ebs-volume.py:
--------------------------------------------------------------------------------
1 | 
2 | import boto3
3 | from botocore.exceptions import ClientError
4 | import json
5 | import os
6 | import time
7 | import datetime  # module import: the code below calls datetime.datetime.now()
8 | from dateutil import tz
9 | 
10 | from antiope.aws_account import *
11 | from common import *
12 | 
13 | import logging
14 | logger = logging.getLogger()
15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | 
20 | VOLUME_RESOURCE_PATH = "ec2/volume"
21 | VOLUME_TYPE = "AWS::EC2::Volume"
22 | 
23 | 
24 | def lambda_handler(event, context):
25 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
26 |     message = json.loads(event['Records'][0]['Sns']['Message'])
27 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
28 | 
29 |     try:
30 |         target_account = AWSAccount(message['account_id'])
31 |         for r in target_account.get_regions():
32 |             discover_volumes(target_account, r)
33 | 
34 |     except AntiopeAssumeRoleError as e:
35 |         logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
36 |         return()
37 |     except ClientError as e:
38 |         if e.response['Error']['Code'] == 'UnauthorizedOperation':
39 |             logger.error("Antiope doesn't have proper permissions to this account")
40 |             return(event)
41 |         logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
42 |         capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
43 |         raise
44 |     except Exception as e:
45 |         logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
46 |         capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
47 |         raise
48 | 
49 | 
50 | def discover_volumes(account, region):
51 |     '''
52 |     Discover EBS Volumes
53 | 
54 |     '''
55 | 
56 |     volumes = []
57 | 
58 |     ec2_client = account.get_client('ec2', region=region)
59 |     response = ec2_client.describe_volumes()
60 |     while 'NextToken' in response:  # Gotta Catch 'em all!
61 |         volumes += response['Volumes']
62 |         response = ec2_client.describe_volumes(NextToken=response['NextToken'])
63 |     volumes += response['Volumes']
64 | 
65 |     for vol in volumes:
66 |         resource_item = {}
67 |         resource_item['awsAccountId'] = account.account_id
68 |         resource_item['awsAccountName'] = account.account_name
69 |         resource_item['resourceType'] = VOLUME_TYPE
70 |         resource_item['source'] = "Antiope"
71 |         resource_item['awsRegion'] = region
72 |         resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
73 |         resource_item['configuration'] = vol
74 |         if 'Tags' in vol:
75 |             resource_item['tags'] = parse_tags(vol['Tags'])
76 |         resource_item['supplementaryConfiguration'] = {}
77 |         resource_item['resourceId'] = vol['VolumeId']
78 |         resource_item['resourceName'] = vol['VolumeId']
79 |         resource_item['errors'] = {}
80 |         save_resource_to_s3(VOLUME_RESOURCE_PATH, resource_item['resourceId'], resource_item)
81 | 
82 | 
83 | def json_serial(obj):
84 |     """JSON serializer for objects not serializable by default json code"""
85 | 
86 |     if isinstance(obj, (datetime.datetime, datetime.date)):
87 |         return obj.isoformat()
88 |     raise TypeError("Type %s not serializable" % type(obj))
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-ecr.py:
--------------------------------------------------------------------------------
1 | 
2 | import boto3
3 | from botocore.exceptions import ClientError
4 | import json
5 | import os
6 | import time
7 | import datetime  # module import: the code below calls datetime.datetime.now()
8 | from dateutil import tz
9 | 
10 | from antiope.aws_account import *
11 | from common import *
12 | 
13 | import logging
14 | logger = logging.getLogger()
15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO')))
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | 
20 | RESOURCE_PATH = "ecr/repository"
21 | RESOURCE_TYPE = "AWS::ECR::Repository"
22 | 
23 | 
24 | def lambda_handler(event, context):
25 |     logger.debug("Received event: " + json.dumps(event, sort_keys=True))
26 |     message = json.loads(event['Records'][0]['Sns']['Message'])
27 |     logger.info("Received message: " + json.dumps(message, sort_keys=True))
28 | 
29 |     try:
30 |         target_account = AWSAccount(message['account_id'])
31 |         for r in target_account.get_regions():
32 |             try:
33 |                 discover_repos(target_account, r)
34 |             except ClientError as e:
35 |                 # Move onto next region if we get access denied. This is probably SCPs
36 |                 if e.response['Error']['Code'] == 'AccessDeniedException':
37 |                     logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})")
38 |                     continue
39 |                 else:
40 |                     raise  # pass on to the next handler
41 | 
42 | 
43 |     except AntiopeAssumeRoleError as e:
44 |         logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id))
45 |         return()
46 |     except ClientError as e:
47 |         if e.response['Error']['Code'] == 'UnauthorizedOperation':
48 |             logger.error("Antiope doesn't have proper permissions to this account")
49 |             return(event)
50 |         logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e))
51 |         capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e))
52 |         raise
53 |     except Exception as e:
54 |         logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context)))
55 |         capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e))
56 |         raise
57 | 
58 | 
59 | def discover_repos(target_account, region):
60 |     '''Discover the ECR repositories in the given region'''
61 | 
62 |     repos = []
63 |     client = target_account.get_client('ecr', region=region)
64 |     response = client.describe_repositories(registryId=target_account.account_id)
65 |     while 'nextToken' in response:  # Gotta Catch 'em all!
66 |         repos += response['repositories']
67 |         response = client.describe_repositories(nextToken=response['nextToken'])
68 |     repos += response['repositories']
69 | 
70 |     for r in repos:
71 |         process_repo(client, r, target_account, region)
72 | 
73 | 
74 | def process_repo(client, repo, target_account, region):
75 |     resource_item = {}
76 |     resource_item['awsAccountId'] = target_account.account_id
77 |     resource_item['awsAccountName'] = target_account.account_name
78 |     resource_item['resourceType'] = RESOURCE_TYPE
79 |     resource_item['awsRegion'] = region
80 |     resource_item['source'] = "Antiope"
81 |     resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now())
82 |     resource_item['configuration'] = repo
83 |     resource_item['supplementaryConfiguration'] = {}
84 |     resource_item['resourceId'] = "{}-{}-{}".format(target_account.account_id, region, repo['repositoryName'].replace("/", "-"))
85 |     resource_item['resourceName'] = repo['repositoryName']
86 |     resource_item['ARN'] = repo['repositoryArn']
87 |     resource_item['resourceCreationTime'] = repo['createdAt']
88 |     resource_item['errors'] = {}
89 | 
90 |     try:
91 |         response = client.get_repository_policy(repositoryName=repo['repositoryName'])
92 |         if 'policyText' in response:
93 |             resource_item['supplementaryConfiguration']['ResourcePolicy'] = json.loads(response['policyText'])
94 |     except ClientError as e:
95 |         if e.response['Error']['Code'] == 'RepositoryPolicyNotFoundException':
96 |             pass
97 |         else:
98 |             raise
99 | 
100 |     save_resource_to_s3(RESOURCE_PATH, resource_item['resourceId'], resource_item)
--------------------------------------------------------------------------------
/aws-inventory/lambda/inventory-es.py:
--------------------------------------------------------------------------------
1 | import boto3
2 | from botocore.exceptions import ClientError
3 | import json
4 | import os
5 | import time
6 | import datetime  # module import: the code below calls datetime.datetime.now()
7 | from dateutil import tz
8 | 
9 | from antiope.aws_account import *
10 | from common import *
11 | 
12 | import logging
13 | logger =
logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | RESOURCE_PATH = "es/domain" 20 | RESOURCE_TYPE = "AWS::Elasticsearch::Domain" 21 | 22 | 23 | def lambda_handler(event, context): 24 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 25 | message = json.loads(event['Records'][0]['Sns']['Message']) 26 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 27 | 28 | try: 29 | 30 | target_account = AWSAccount(message['account_id']) 31 | 32 | regions = target_account.get_regions() 33 | if 'region' in message: 34 | regions = [message['region']] 35 | 36 | # describe ES Domains 37 | for r in regions: 38 | try: 39 | es_client = target_account.get_client('es', region=r) 40 | 41 | resource_item = {} 42 | resource_item['awsAccountId'] = target_account.account_id 43 | resource_item['awsAccountName'] = target_account.account_name 44 | resource_item['resourceType'] = RESOURCE_TYPE 45 | resource_item['awsRegion'] = r 46 | resource_item['source'] = "Antiope" 47 | 48 | for domain_name in list_domains(es_client, target_account, r): 49 | response = es_client.describe_elasticsearch_domain(DomainName=domain_name) 50 | domain = response['DomainStatus'] 51 | 52 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 53 | resource_item['configuration'] = domain 54 | resource_item['supplementaryConfiguration'] = {} 55 | resource_item['resourceId'] = domain['DomainId'] 56 | resource_item['resourceName'] = domain['DomainName'] 57 | resource_item['ARN'] = domain['ARN'] 58 | resource_item['errors'] = {} 59 | 60 | if domain['AccessPolicies']: 61 | # The ES Domains' Access policy is returned as a string. Here we parse the json and reapply it to the dict 62 | resource_item['supplementaryConfiguration']['AccessPolicies'] = json.loads(domain['AccessPolicies']) 63 | 64 | object_name = "{}-{}-{}".format(domain_name, r, target_account.account_id) 65 | save_resource_to_s3(RESOURCE_PATH, object_name, resource_item) 66 | 67 | except ClientError as e: 68 | # Move onto next region if we get access denied. 
This is probably SCPs 69 | if e.response['Error']['Code'] == 'AccessDeniedException': 70 | logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})") 71 | continue 72 | else: 73 | raise # pass on to the next handler 74 | 75 | except AntiopeAssumeRoleError as e: 76 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 77 | return() 78 | except ClientError as e: 79 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 80 | logger.error("Antiope doesn't have proper permissions to this account") 81 | return(event) 82 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 83 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 84 | raise 85 | except Exception as e: 86 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 87 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 88 | raise 89 | 90 | 91 | def list_domains(es_client, target_account, region): 92 | domain_names = [] 93 | response = es_client.list_domain_names() # This call doesn't support pagination 94 | if 'DomainNames' not in response: 95 | logger.info("No ElasticSearch domains returned by list_domain_names() for {}({}) in {}".format( 96 | target_account.account_name, 97 | target_account.account_id, 98 | region 99 | )) 100 | else: 101 | for d in response['DomainNames']: 102 | domain_names.append(d['DomainName']) 103 | return(domain_names) 104 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-firehose.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | FIREHOSE_PATH = "kinesisfirehose/deliverystream" 20 | 21 | 22 | def lambda_handler(event, context): 23 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 24 | message = json.loads(event['Records'][0]['Sns']['Message']) 25 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 26 | 27 | try: 28 | target_account = AWSAccount(message['account_id']) 29 | for r in target_account.get_regions(): 30 | try: 31 | discover_firehose(target_account, r) 32 | except ClientError as e: 33 | # Move onto next region if we get access denied.
This is probably SCPs 34 | if e.response['Error']['Code'] == 'AccessDeniedException': 35 | logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})") 36 | continue 37 | else: 38 | raise # pass on to the next handler 39 | 40 | except AntiopeAssumeRoleError as e: 41 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 42 | return() 43 | except ClientError as e: 44 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 45 | logger.error("Antiope doesn't have proper permissions to this account") 46 | return(event) 47 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 48 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 49 | raise 50 | except Exception as e: 51 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 52 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 53 | raise 54 | 55 | 56 | def discover_firehose(target_account, region): 57 | '''Discover all the Kinesis Data Firehose Delivery Streams in this region''' 58 | 59 | firehoses = [] 60 | client = target_account.get_client('firehose', region=region) 61 | response = client.list_delivery_streams() 62 | while 'HasMoreDeliveryStreams' in response and response['HasMoreDeliveryStreams'] is True: # Gotta Catch 'em all! 63 | firehoses += response['DeliveryStreamNames'] 64 | response = client.list_delivery_streams(ExclusiveStartDeliveryStreamName=response['DeliveryStreamNames'][-1]) 65 | firehoses += response['DeliveryStreamNames'] 66 | 67 | logger.debug(f"Discovered {len(firehoses)} Firehose Delivery Streams in {target_account.account_name}") 68 | 69 | for firehose_name in firehoses: 70 | delivery_stream = client.describe_delivery_stream(DeliveryStreamName=firehose_name)['DeliveryStreamDescription'] 71 | 72 | resource_item = {} 73 | resource_item['awsAccountId'] = target_account.account_id 74 | resource_item['awsAccountName'] = target_account.account_name 75 | resource_item['resourceType'] = "AWS::KinesisFirehose::DeliveryStream" 76 | resource_item['source'] = "Antiope" 77 | 78 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 79 | resource_item['awsRegion'] = region 80 | resource_item['configuration'] = delivery_stream 81 | # TODO Tags?
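# One possible way to close out that TODO, left here as a commented, untested sketch
# (an assumption, not part of the original file). It assumes parse_tags() accepts the
# Key/Value list that Firehose returns, as it does elsewhere in this codebase, and it
# only fetches the first page of tags:
#
#   tag_response = client.list_tags_for_delivery_stream(DeliveryStreamName=delivery_stream['DeliveryStreamName'])
#   if tag_response.get('Tags'):
#       resource_item['tags'] = parse_tags(tag_response['Tags'])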
82 | resource_item['supplementaryConfiguration'] = {} 83 | resource_item['resourceId'] = f"{target_account.account_id}-{region}-{delivery_stream['DeliveryStreamName']}" 84 | resource_item['resourceName'] = delivery_stream['DeliveryStreamName'] 85 | resource_item['ARN'] = delivery_stream['DeliveryStreamARN'] 86 | resource_item['errors'] = {} 87 | 88 | save_resource_to_s3(FIREHOSE_PATH, resource_item['resourceId'], resource_item) 89 | 90 | 91 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-guardduty.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | RESOURCE_PATH = "guardduty/detector" 20 | RESOURCE_TYPE = "AWS::GuardDuty::Detector" 21 | 22 | 23 | def lambda_handler(event, context): 24 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 25 | message = json.loads(event['Records'][0]['Sns']['Message']) 26 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 27 | 28 | try: 29 | target_account = AWSAccount(message['account_id']) 30 | for r in target_account.get_regions(): 31 | discover_detectors(target_account, r) 32 | 33 | except AntiopeAssumeRoleError as e: 34 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 35 | return() 36 | except ClientError as e: 37 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 38 | logger.error("Antiope doesn't have proper permissions to this account") 39 | return(event) 40 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 41 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 42 | raise 43 | except Exception as e: 44 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 45 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 46 | raise 47 | 48 | 49 | def discover_detectors(target_account, region): 50 | '''Discover all the GuardDuty Detectors in this region''' 51 | 52 | detector_ids = [] 53 | client = target_account.get_client('guardduty', region=region) 54 | response = client.list_detectors() 55 | while 'NextToken' in response: # Gotta Catch 'em all!
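# Note: boto3's GuardDuty client returns pagination state under a capitalized 'NextToken' key and accepts it back as the NextToken= parameter.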
56 | detector_ids += response['DetectorIds'] 57 | response = client.list_detectors(NextToken=response['NextToken']) 58 | detector_ids += response['DetectorIds'] 59 | 60 | for d in detector_ids: 61 | process_detector(client, d, target_account, region) 62 | 63 | 64 | def process_detector(client, detector_id, target_account, region): 65 | 66 | response = client.get_detector(DetectorId=detector_id) 67 | 68 | del response['ResponseMetadata'] # We don't need this 69 | 70 | resource_item = {} 71 | resource_item['awsAccountId'] = target_account.account_id 72 | resource_item['awsAccountName'] = target_account.account_name 73 | resource_item['resourceType'] = RESOURCE_TYPE 74 | resource_item['awsRegion'] = region 75 | resource_item['source'] = "Antiope" 76 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 77 | resource_item['configuration'] = response 78 | resource_item['supplementaryConfiguration'] = {} 79 | resource_item['resourceId'] = detector_id 80 | resource_item['resourceName'] = f"Detector-{target_account.account_id}-{region}" 81 | resource_item['resourceCreationTime'] = response['CreatedAt'] 82 | resource_item['errors'] = {} 83 | 84 | response = client.get_master_account(DetectorId=detector_id) 85 | if 'Master' in response: 86 | resource_item['supplementaryConfiguration']['Master'] = response['Master'] 87 | 88 | save_resource_to_s3(RESOURCE_PATH, resource_item['resourceId'], resource_item) 89 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-health-report.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | 20 | def lambda_handler(event, context): 21 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 22 | message = json.loads(event['Records'][0]['Sns']['Message']) 23 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 24 | 25 | try: 26 | target_account = AWSAccount(message['account_id']) 27 | health_client = target_account.get_client('health') 28 | 29 | data = {} 30 | 31 | arn_list = [] 32 | try: 33 | response = health_client.describe_events( 34 | filter={ 35 | 'eventStatusCodes': ['upcoming'], 36 | 'eventTypeCodes': ['AWS_EC2_INSTANCE_REBOOT_MAINTENANCE_SCHEDULED'] 37 | } 38 | ) 39 | for e in response['events']: 40 | arn_list.append(e['arn']) 41 | 42 | logger.info("Got {} events for account {}".format(len(arn_list), target_account.account_name)) 43 | 44 | if len(arn_list) != 0: 45 | response = health_client.describe_event_details(eventArns=arn_list) 46 | data['details'] = response['successfulSet'] 47 | 48 | response = health_client.describe_affected_entities(filter={'eventArns': arn_list}) 49 | data['entities'] = response['entities'] 50 | except ClientError as e: 51 | if e.response['Error']['Code'] == 'SubscriptionRequiredException': 52 | msg = "{}({}) does not have Enterprise subscription".format(target_account.account_name, target_account.account_id) 53 |
data['error'] = msg 54 | logger.error(msg) 55 | 56 | s3client = boto3.client('s3') 57 | s3response = s3client.put_object( 58 | # ACL='public-read', #FIXME 59 | Body=json.dumps(data, sort_keys=True, default=str, indent=2), 60 | Bucket=os.environ['INVENTORY_BUCKET'], 61 | ContentType='application/json', 62 | Key="Health/{}.json".format(target_account.account_id), 63 | ) 64 | 65 | except AntiopeAssumeRoleError as e: 66 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 67 | return() 68 | except ClientError as e: 69 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 70 | logger.error("Antiope doesn't have proper permissions to this account") 71 | return(event) 72 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 73 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 74 | raise 75 | except Exception as e: 76 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 77 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 78 | raise 79 | 80 | 81 | def json_serial(obj): 82 | """JSON serializer for objects not serializable by default json code""" 83 | 84 | if isinstance(obj, (datetime.datetime, datetime.date)): 85 | return obj.isoformat() 86 | raise TypeError("Type %s not serializable" % type(obj)) 87 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-redshift.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | CLUSTER_RESOURCE_PATH = "redshift/clusters" 20 | CLUSTER_TYPE = "AWS::Redshift::Cluster" 21 | 22 | def lambda_handler(event, context): 23 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 24 | message = json.loads(event['Records'][0]['Sns']['Message']) 25 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 26 | 27 | try: 28 | target_account = AWSAccount(message['account_id']) 29 | for r in target_account.get_regions(): 30 | discover_clusters(target_account, r) 31 | 32 | except AntiopeAssumeRoleError as e: 33 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 34 | return() 35 | except ClientError as e: 36 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 37 | logger.error("Antiope doesn't have proper permissions to this account") 38 | return(event) 39 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 40 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 41 | raise 42 | except Exception as e: 43 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 44 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 45 | raise 46 | 47 | 48 | def
discover_clusters(account, region): 49 | '''Discover all the Redshift Clusters in this region''' 50 | 51 | clusters = [] 52 | 53 | client = account.get_client('redshift', region=region) 54 | response = client.describe_clusters() 55 | while 'Marker' in response: # Gotta Catch 'em all! 56 | clusters += response['Clusters'] 57 | response = client.describe_clusters(Marker=response['Marker']) 58 | clusters += response['Clusters'] 59 | 60 | for c in clusters: 61 | name = c['ClusterIdentifier'] 62 | 63 | resource_item = {} 64 | resource_item['awsAccountId'] = account.account_id 65 | resource_item['awsAccountName'] = account.account_name 66 | resource_item['resourceType'] = CLUSTER_TYPE 67 | resource_item['source'] = "Antiope" 68 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 69 | resource_item['awsRegion'] = region 70 | resource_item['configuration'] = c 71 | resource_item['supplementaryConfiguration'] = {} 72 | resource_item['resourceName'] = c['ClusterIdentifier'] 73 | resource_item['resourceId'] = f"{account.account_id}-{region}-{c['ClusterIdentifier']}" 74 | resource_item['resourceCreationTime'] = c['ClusterCreateTime'] 75 | resource_item['errors'] = {} 76 | resource_item['tags'] = parse_tags(c['Tags']) 77 | 78 | 79 | save_resource_to_s3(CLUSTER_RESOURCE_PATH, resource_item['resourceId'], resource_item) 80 | 81 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-sagemaker.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | NOTEBOOK_RESOURCE_PATH = "sagemaker/notebook" 20 | NOTEBOOK_TYPE = "AWS::SageMaker::NotebookInstance" 21 | 22 | def lambda_handler(event, context): 23 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 24 | message = json.loads(event['Records'][0]['Sns']['Message']) 25 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 26 | 27 | try: 28 | target_account = AWSAccount(message['account_id']) 29 | for r in target_account.get_regions(): 30 | discover_notebooks(target_account, r) 31 | 32 | except AntiopeAssumeRoleError as e: 33 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 34 | return() 35 | except ClientError as e: 36 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 37 | logger.error("Antiope doesn't have proper permissions to this account") 38 | return(event) 39 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 40 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 41 | raise 42 | except Exception as e: 43 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 44 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 45 | raise 46 | 47 | 48 | def discover_notebooks(account, region): 49 | '''Discover all SageMaker
Notebook Instances in this region''' 50 | 51 | notebooks = [] 52 | 53 | client = account.get_client('sagemaker', region=region) 54 | response = client.list_notebook_instances() 55 | while 'NextToken' in response: # Gotta Catch 'em all! 56 | notebooks += response['NotebookInstances'] 57 | response = client.list_notebook_instances(NextToken=response['NextToken']) 58 | notebooks += response['NotebookInstances'] 59 | 60 | for nb in notebooks: 61 | name = nb['NotebookInstanceName'] 62 | 63 | details = client.describe_notebook_instance(NotebookInstanceName=name) 64 | 65 | resource_item = {} 66 | resource_item['awsAccountId'] = account.account_id 67 | resource_item['awsAccountName'] = account.account_name 68 | resource_item['resourceType'] = NOTEBOOK_TYPE 69 | resource_item['source'] = "Antiope" 70 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 71 | resource_item['awsRegion'] = region 72 | resource_item['configuration'] = details 73 | resource_item['supplementaryConfiguration'] = {} 74 | resource_item['resourceName'] = details['NotebookInstanceName'] 75 | resource_item['resourceId'] = f"{account.account_id}-{region}-{details['NotebookInstanceName']}" 76 | resource_item['ARN'] = details['NotebookInstanceArn'] 77 | resource_item['resourceCreationTime'] = details['CreationTime'] 78 | resource_item['errors'] = {} 79 | 80 | try: 81 | tags = client.list_tags(ResourceArn=details['NotebookInstanceArn']) 82 | resource_item['tags'] = parse_tags(tags['Tags']) 83 | except (ClientError, KeyError, IndexError): 84 | pass # If Tags aren't present or whatever, just ignore 85 | 86 | save_resource_to_s3(NOTEBOOK_RESOURCE_PATH, resource_item['resourceId'], resource_item) 87 | 88 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-secrets.py: -------------------------------------------------------------------------------- 1 | 2 | import boto3 3 | from botocore.exceptions import ClientError, EndpointConnectionError 4 | import json 5 | import os 6 | import time 7 | import datetime 8 | from dateutil import tz 9 | 10 | from antiope.aws_account import * 11 | from common import * 12 | 13 | import logging 14 | logger = logging.getLogger() 15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 16 | logging.getLogger('botocore').setLevel(logging.WARNING) 17 | logging.getLogger('boto3').setLevel(logging.WARNING) 18 | logging.getLogger('urllib3').setLevel(logging.WARNING) 19 | 20 | 21 | RESOURCE_PATH = "secretsmanager/secret" 22 | 23 | 24 | def lambda_handler(event, context): 25 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 26 | message = json.loads(event['Records'][0]['Sns']['Message']) 27 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 28 | 29 | try: 30 | target_account = AWSAccount(message['account_id']) 31 | for r in target_account.get_regions(): 32 | try: 33 | discover_secrets(target_account, r) 34 | except ClientError as e: 35 | # Move onto next region if we get access denied.
This is probably SCPs 36 | if e.response['Error']['Code'] == 'AccessDeniedException': 37 | logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})") 38 | continue 39 | else: 40 | raise # pass on to the next handler 41 | 42 | except AntiopeAssumeRoleError as e: 43 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 44 | return() 45 | except ClientError as e: 46 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 47 | logger.error("Antiope doesn't have proper permissions to this account") 48 | return(event) 49 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 50 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 51 | raise 52 | except Exception as e: 53 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 54 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 55 | raise 56 | 57 | 58 | def discover_secrets(target_account, region): 59 | '''Discover all the Secrets Manager Secrets in this region''' 60 | 61 | try: 62 | secrets = [] 63 | client = target_account.get_client('secretsmanager', region=region) 64 | response = client.list_secrets() 65 | while 'NextToken' in response: # Gotta Catch 'em all! 66 | secrets += response['SecretList'] 67 | response = client.list_secrets(NextToken=response['NextToken']) 68 | secrets += response['SecretList'] 69 | 70 | for s in secrets: 71 | process_secret(client, s, target_account, region) 72 | 73 | except EndpointConnectionError as e: 74 | logger.info("Region {} not supported".format(region)) 75 | 76 | 77 | def process_secret(client, secret, target_account, region): 78 | resource_item = {} 79 | resource_item['awsAccountId'] = target_account.account_id 80 | resource_item['awsAccountName'] = target_account.account_name 81 | resource_item['resourceType'] = "AWS::SecretsManager::Secret" 82 | resource_item['source'] = "Antiope" 83 | resource_item['awsRegion'] = region 84 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 85 | resource_item['configuration'] = secret 86 | resource_item['supplementaryConfiguration'] = {} 87 | resource_item['resourceId'] = "{}-{}-{}".format(target_account.account_id, region, secret['Name'].replace("/", "-")) 88 | resource_item['resourceName'] = secret['Name'] 89 | resource_item['errors'] = {} 90 | resource_item['ARN'] = secret['ARN'] 91 | 92 | try: 93 | response = client.get_resource_policy(SecretId=secret['ARN']) 94 | if 'ResourcePolicy' in response: 95 | resource_item['supplementaryConfiguration']['ResourcePolicy'] = json.loads(response['ResourcePolicy']) 96 | except ClientError as e: 97 | if e.response['Error']['Code'] == "AccessDeniedException": 98 | resource_item['errors']['ResourcePolicy'] = e.response['Error']['Message'] 99 | else: 100 | raise 101 | 102 | if 'Tags' in secret: 103 | resource_item['tags'] = parse_tags(secret['Tags']) 104 | 105 | save_resource_to_s3(RESOURCE_PATH, resource_item['resourceId'], resource_item) 106 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-ssm.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime
7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | INSTANCE_RESOURCE_PATH = "ssm/managedinstance" 20 | 21 | 22 | def lambda_handler(event, context): 23 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 24 | message = json.loads(event['Records'][0]['Sns']['Message']) 25 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 26 | 27 | try: 28 | target_account = AWSAccount(message['account_id']) 29 | 30 | regions = target_account.get_regions() 31 | if 'region' in message: 32 | regions = [message['region']] 33 | 34 | # describe ec2 instances 35 | for r in regions: 36 | try: 37 | client = target_account.get_client('ssm', region=r) 38 | process_instances(target_account, client, r) 39 | except ClientError as e: 40 | # Move onto next region if we get access denied. This is probably SCPs 41 | if e.response['Error']['Code'] == 'AccessDeniedException': 42 | logger.error(f"AccessDeniedException for region {r} in function {context.function_name} for {target_account.account_name}({target_account.account_id})") 43 | continue 44 | else: 45 | raise # pass on to the next handler 46 | 47 | except AntiopeAssumeRoleError as e: 48 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 49 | return() 50 | except ClientError as e: 51 | if e.response['Error']['Code'] == 'UnauthorizedOperation': 52 | logger.error("Antiope doesn't have proper permissions to this account") 53 | return(event) 54 | logger.critical("AWS Error getting info for {}: {}".format(message['account_id'], e)) 55 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 56 | raise 57 | except Exception as e: 58 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 59 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 60 | raise 61 | 62 | 63 | def process_instances(target_account, client, region): 64 | 65 | instances = get_all_instances(client) 66 | logger.info("Found {} managed instances for {} in {}".format(len(instances), target_account.account_id, region)) 67 | 68 | # dump info about instances to S3 as json 69 | for instance in instances: 70 | resource_item = {} 71 | resource_item['awsAccountId'] = target_account.account_id 72 | resource_item['awsAccountName'] = target_account.account_name 73 | resource_item['resourceType'] = "AWS::SSM::ManagedInstanceInventory" 74 | resource_item['source'] = "Antiope" 75 | resource_item['configurationItemCaptureTime'] = str(datetime.datetime.now()) 76 | resource_item['awsRegion'] = region 77 | resource_item['configuration'] = instance 78 | resource_item['supplementaryConfiguration'] = {} 79 | resource_item['resourceId'] = instance['InstanceId'] 80 | resource_item['errors'] = {} 81 | save_resource_to_s3(INSTANCE_RESOURCE_PATH, resource_item['resourceId'], resource_item) 82 | 83 | 84 | def get_all_instances(client): 85 | output = [] 86 | response = client.describe_instance_information() 87 | while 'NextToken' in response: 88 | output += response['InstanceInformationList'] 89 | response =
client.describe_instance_information(NextToken=response['NextToken']) 90 | output += response['InstanceInformationList'] 91 | return(output) 92 | -------------------------------------------------------------------------------- /aws-inventory/lambda/inventory-support-cases.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from dateutil import tz 8 | 9 | from antiope.aws_account import * 10 | from common import * 11 | 12 | import logging 13 | logger = logging.getLogger() 14 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 15 | logging.getLogger('botocore').setLevel(logging.WARNING) 16 | logging.getLogger('boto3').setLevel(logging.WARNING) 17 | logging.getLogger('urllib3').setLevel(logging.WARNING) 18 | 19 | RESOURCE_PATH = "support/case" 20 | 21 | 22 | def lambda_handler(event, context): 23 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 24 | message = json.loads(event['Records'][0]['Sns']['Message']) 25 | logger.info("Received message: " + json.dumps(message, sort_keys=True)) 26 | 27 | get_all = False 28 | if 'get-all-support-cases' in message: 29 | get_all = True 30 | 31 | try: 32 | target_account = AWSAccount(message['account_id']) 33 | support_client = target_account.get_client('support', region="us-east-1") # Support API is in us-east-1 only 34 | cases = get_cases(target_account, support_client, get_all) 35 | except AntiopeAssumeRoleError as e: 36 | logger.error("Unable to assume role into account {}({})".format(target_account.account_name, target_account.account_id)) 37 | return() 38 | except ClientError as e: 39 | if e.response['Error']['Code'] == "SubscriptionRequiredException": 40 | logger.error("Premium support is not enabled in {}({})".format(target_account.account_name, target_account.account_id)) 41 | capture_error(message, context, e, "Premium support is not enabled in {}({})".format(target_account.account_name, target_account.account_id)) 42 | return() 43 | else: 44 | logger.critical("AWS Error getting info for {}: {}".format(target_account.account_name, e)) 45 | capture_error(message, context, e, "ClientError for {}: {}".format(message['account_id'], e)) 46 | raise 47 | except Exception as e: 48 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, message, vars(context))) 49 | capture_error(message, context, e, "General Exception for {}: {}".format(message['account_id'], e)) 50 | raise 51 | 52 | 53 | def get_cases(target_account, client, get_all): 54 | '''Get a list of all the support cases for this account (optionally including resolved ones) and process each''' 55 | cases = [] 56 | response = client.describe_cases(includeResolvedCases=get_all) 57 | while 'NextToken' in response: 58 | for c in response['cases']: 59 | process_case(target_account, client, c) 60 | response = client.describe_cases(includeResolvedCases=get_all, NextToken=response['NextToken']) 61 | for c in response['cases']: 62 | process_case(target_account, client, c) 63 | 64 | 65 | def process_case(target_account, client, c): 66 | '''Save a single support case to S3 as a resource item''' 67 | 68 | resource_item = {} 69 | resource_item['awsAccountId'] = target_account.account_id 70 | resource_item['awsAccountName'] = target_account.account_name 71 | resource_item['resourceType'] = "AWS::Support::Case" 72 | resource_item['source'] = "Antiope" 73 | 74 | resource_item['configurationItemCaptureTime'] =
str(datetime.datetime.now()) 75 | resource_item['configuration'] = c 76 | resource_item['supplementaryConfiguration'] = {} 77 | resource_item['resourceId'] = c['caseId'] 78 | resource_item['resourceName'] = c['displayId'] 79 | resource_item['errors'] = {} 80 | 81 | save_resource_to_s3(RESOURCE_PATH, f"{target_account.account_id}-{c['caseId']}", resource_item) 82 | -------------------------------------------------------------------------------- /aws-inventory/lambda/new_account_handler.py: -------------------------------------------------------------------------------- 1 | 2 | import boto3 3 | from botocore.exceptions import ClientError 4 | from boto3.dynamodb.types import TypeDeserializer 5 | import json 6 | import os 7 | import time 8 | from datetime import datetime, timezone 9 | from dateutil import tz 10 | 11 | from antiope.aws_account import * 12 | from common import * 13 | 14 | import logging 15 | logger = logging.getLogger() 16 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 17 | logging.getLogger('botocore').setLevel(logging.WARNING) 18 | logging.getLogger('boto3').setLevel(logging.WARNING) 19 | logging.getLogger('urllib3').setLevel(logging.WARNING) 20 | 21 | 22 | def lambda_handler(event, context): 23 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 24 | 25 | try: 26 | for record in event['Records']: 27 | if record['eventSource'] != "aws:dynamodb": 28 | continue 29 | if record['eventName'] == "INSERT": 30 | ddb_record = record['dynamodb']['NewImage'] 31 | logger.debug(ddb_record) 32 | account_id = ddb_record['account_id']['S'] 33 | account_status = ddb_record['account_status']['S'] 34 | json_record = deserialize(ddb_record) 35 | if account_status == "ACTIVE": 36 | send_message(json_record, os.environ['ACTIVE_TOPIC']) 37 | elif account_status == "FOREIGN": 38 | send_message(json_record, os.environ['FOREIGN_TOPIC']) 39 | except ClientError as e: 40 | logger.critical("AWS Error for {}: {}".format(account_id, e)) 41 | capture_error(event, context, e, f"ClientError for {account_id}") 42 | raise 43 | except Exception as e: 44 | logger.critical("{}\nMessage: {}\nContext: {}".format(e, event, vars(context))) 45 | capture_error(event, context, e, f"General Exception for {account_id}") 46 | raise 47 | 48 | 49 | 50 | def send_message(record, topic): 51 | print("Sending Message: {}".format(record)) 52 | sns_client = boto3.client('sns') 53 | try: 54 | sns_client.publish( 55 | TopicArn=topic, 56 | Subject="NewAccount", 57 | Message=json.dumps(record, sort_keys=True, default=str), 58 | ) 59 | except ClientError as e: 60 | logger.error('Error publishing message: {}'.format(e)) 61 | 62 | 63 | def deserialize(ddb_record): 64 | # This is probably a semi-dangerous hack.
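# (TypeDeserializer converts items from DynamoDB's wire format, e.g. {'S': 'some-value'}, back into plain Python values.)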
65 | # https://github.com/boto/boto3/blob/e353ecc219497438b955781988ce7f5cf7efae25/boto3/dynamodb/types.py#L233 66 | ds = TypeDeserializer() 67 | output = {} 68 | for k, v in ddb_record.items(): 69 | output[k] = ds.deserialize(v) 70 | return(output) 71 | -------------------------------------------------------------------------------- /aws-inventory/lambda/report-accounts.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from mako.template import Template 8 | 9 | from antiope.aws_account import * 10 | from antiope.config import AccountLookupError 11 | from common import * 12 | 13 | import logging 14 | logger = logging.getLogger() 15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 16 | logging.getLogger('botocore').setLevel(logging.WARNING) 17 | logging.getLogger('boto3').setLevel(logging.WARNING) 18 | logging.getLogger('urllib3').setLevel(logging.WARNING) 19 | 20 | assume_role_link = "{}" 21 | 22 | 23 | # Lambda main routine 24 | def handler(event, context): 25 | logger.info("Received event: " + json.dumps(event, sort_keys=True)) 26 | 27 | # We will make an HTML Table and a JSON file with this data 28 | 29 | json_data = [] 30 | 31 | # Cache account_name for all the parent accounts 32 | payers = {} 33 | 34 | # Data to be saved to S3 and used to generate the template report 35 | json_data = {"accounts": []} 36 | 37 | # account_list.txt file comes from this 38 | account_list = [] 39 | 40 | # Get and then sort the list of accounts by name, case insensitive. 41 | active_accounts = get_active_accounts() 42 | active_accounts.sort(key=lambda x: x.account_name.lower()) 43 | 44 | for a in active_accounts: 45 | logger.info(a.account_name) 46 | 47 | # Add the account ID to this array 48 | account_list.append(str(a.account_id)) 49 | 50 | # We don't want to save the entire object's attributes. 51 | j = a.db_record.copy() 52 | 53 | try: 54 | if str(a.payer_id) in payers: 55 | j['payer_name'] = payers[str(a.payer_id)] 56 | else: 57 | payer = AWSAccount(str(a.payer_id)) 58 | j['payer_name'] = payer.account_name 59 | payers[payer.account_id] = payer.account_name 60 | except AccountLookupError: 61 | logger.debug("Unable to find the payer in the database.
Must be an orphan") 62 | j['payer_name'] = "Unknown Payer" 63 | payers[str(a.payer_id)] = "Unknown Payer" 64 | 65 | # Build the cross account role link 66 | if hasattr(a, 'cross_account_role') and a.cross_account_role is not None: 67 | j['assume_role_link'] = assume_role_link.format(a.account_id, os.environ['ROLE_NAME'], a.account_name, os.environ['ROLE_NAME']) 68 | else: 69 | j['assume_role_link'] = "No Cross Account Role" 70 | json_data['accounts'].append(j) 71 | 72 | json_data['timestamp'] = datetime.datetime.now() 73 | json_data['account_count'] = len(active_accounts) 74 | json_data['bucket'] = os.environ['INVENTORY_BUCKET'] 75 | 76 | fh = open("html_templates/account_inventory.html", "r") 77 | mako_body = fh.read() 78 | result = Template(mako_body).render(**json_data) 79 | 80 | # Save HTML and json to S3 81 | s3_client = boto3.client('s3') 82 | try: 83 | response = s3_client.put_object( 84 | # ACL='public-read', 85 | Body=result, 86 | Bucket=os.environ['INVENTORY_BUCKET'], 87 | ContentType='text/html', 88 | Key='Reports/account_inventory.html', 89 | ) 90 | 91 | # Save a txt file of all the active account IDs 92 | response = s3_client.put_object( 93 | # ACL='public-read', 94 | Body="\n".join(account_list), 95 | Bucket=os.environ['INVENTORY_BUCKET'], 96 | ContentType='text/plain', 97 | Key='Reports/account_list.txt', 98 | ) 99 | 100 | # Save the JSON to S3 101 | response = s3_client.put_object( 102 | # ACL='public-read', 103 | Body=json.dumps(json_data, sort_keys=True, indent=2, default=str), 104 | Bucket=os.environ['INVENTORY_BUCKET'], 105 | ContentType='application/json', 106 | Key='Reports/account_inventory.json', 107 | ) 108 | except ClientError as e: 109 | logger.error("ClientError saving report: {}".format(e)) 110 | raise 111 | 112 | return(event) 113 | -------------------------------------------------------------------------------- /aws-inventory/lambda/report-foreign.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from mako.template import Template 8 | 9 | from antiope.foreign_aws_account import * 10 | from antiope.aws_account import * 11 | from common import * 12 | 13 | import logging 14 | logger = logging.getLogger() 15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 16 | logging.getLogger('botocore').setLevel(logging.WARNING) 17 | logging.getLogger('boto3').setLevel(logging.WARNING) 18 | logging.getLogger('urllib3').setLevel(logging.WARNING) 19 | 20 | assume_role_link = "{}" 21 | 22 | 23 | # Lambda main routine 24 | def handler(event, context): 25 | logger.info("Received event: " + json.dumps(event, sort_keys=True)) 26 | 27 | # We will make an HTML Table and a JSON file with this data 28 | 29 | json_data = [] 30 | 31 | # Cache account_name for all the parent accounts 32 | payers = {} 33 | 34 | # Data to be saved to S3 and used to generate the template report 35 | json_data = {"accounts": []} 36 | 37 | # Get and then sort the list of accounts by name, case insensitive. 38 | active_accounts = get_foreign_accounts() 39 | active_accounts.sort(key=lambda x: x.account_name.lower()) 40 | 41 | for a in active_accounts: 42 | logger.info(a.account_name) 43 | 44 | # We don't want to save the entire object's attributes.
45 | j = a.db_record.copy() 46 | if 'ami_source' not in j: 47 | j['ami_source'] = False 48 | 49 | # Build the cross account role link 50 | json_data['accounts'].append(j) 51 | 52 | json_data['timestamp'] = datetime.datetime.now() 53 | json_data['account_count'] = len(active_accounts) 54 | json_data['bucket'] = os.environ['INVENTORY_BUCKET'] 55 | 56 | # Render the Webpage 57 | fh = open("html_templates/foreign_inventory.html", "r") 58 | mako_body = fh.read() 59 | result = Template(mako_body).render(**json_data) 60 | 61 | # Save HTML and json to S3 62 | s3_client = boto3.client('s3') 63 | try: 64 | response = s3_client.put_object( 65 | # ACL='public-read', 66 | Body=result, 67 | Bucket=os.environ['INVENTORY_BUCKET'], 68 | ContentType='text/html', 69 | Key='Reports/foreign_inventory.html', 70 | ) 71 | 72 | # Save the JSON to S3 73 | response = s3_client.put_object( 74 | # ACL='public-read', 75 | Body=json.dumps(json_data, sort_keys=True, indent=2, default=str), 76 | Bucket=os.environ['INVENTORY_BUCKET'], 77 | ContentType='application/json', 78 | Key='Reports/foreign_inventory.json', 79 | ) 80 | except ClientError as e: 81 | logger.error("ClientError saving report: {}".format(e)) 82 | raise 83 | 84 | return(event) 85 | -------------------------------------------------------------------------------- /aws-inventory/lambda/report-vpcs.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | import datetime 7 | from mako.template import Template 8 | 9 | from antiope.aws_account import * 10 | from antiope.vpc import * 11 | from common import * 12 | 13 | import logging 14 | logger = logging.getLogger() 15 | logger.setLevel(getattr(logging, os.getenv('LOG_LEVEL', default='INFO'))) 16 | logging.getLogger('botocore').setLevel(logging.WARNING) 17 | logging.getLogger('boto3').setLevel(logging.WARNING) 18 | logging.getLogger('urllib3').setLevel(logging.WARNING) 19 | 20 | 21 | # Lambda main routine 22 | def handler(event, context): 23 | set_debug(event, logger) 24 | logger.debug("Received event: " + json.dumps(event, sort_keys=True)) 25 | 26 | # We will make an HTML Table and a JSON file with this data 27 | json_data = {"vpcs": []} 28 | 29 | # Get and then sort the list of accounts by name, case insensitive.
30 | active_accounts = get_active_accounts() 31 | active_accounts.sort(key=lambda x: x.account_name.lower()) 32 | 33 | for a in active_accounts: 34 | logger.debug(a.account_name) 35 | 36 | for v in a.get_vpcs(): 37 | logger.debug(f"\t{v.vpc_id}") 38 | j = {} 39 | j['vpc'] = v.db_record.copy() 40 | j['account'] = a.db_record.copy() 41 | 42 | # Skip VPCs that have nothing in them 43 | if not hasattr(v, "instance_states"): 44 | continue 45 | if v.instance_states['running'] == 0 and v.instance_states['stopped'] == 0: 46 | continue 47 | 48 | j['instance_states'] = v.instance_states 49 | json_data['vpcs'].append(j) 50 | 51 | # Add some summary data for the Template 52 | json_data['timestamp'] = datetime.datetime.now() 53 | json_data['vpc_count'] = len(json_data['vpcs']) 54 | json_data['account_count'] = len(active_accounts) 55 | json_data['bucket'] = os.environ['INVENTORY_BUCKET'] 56 | 57 | # Render the Webpage 58 | fh = open("html_templates/vpc_inventory.html", "r") 59 | mako_body = fh.read() 60 | result = Template(mako_body).render(**json_data) 61 | 62 | # Save HTML and json to S3 63 | s3_client = boto3.client('s3') 64 | try: 65 | response = s3_client.put_object( 66 | # ACL='public-read', 67 | Body=result, 68 | Bucket=os.environ['INVENTORY_BUCKET'], 69 | ContentType='text/html', 70 | Key='Reports/vpc_inventory.html', 71 | ) 72 | 73 | # Save the JSON to S3 74 | response = s3_client.put_object( 75 | # ACL='public-read', 76 | Body=json.dumps(json_data, sort_keys=True, indent=2, default=str), 77 | Bucket=os.environ['INVENTORY_BUCKET'], 78 | ContentType='application/json', 79 | Key='Reports/vpc_inventory.json', 80 | ) 81 | except ClientError as e: 82 | logger.error("ClientError saving report: {}".format(e)) 83 | raise 84 | 85 | return(event) 86 | 87 | 88 | if __name__ == '__main__': 89 | 90 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 91 | # create console handler and set level to debug 92 | ch = logging.StreamHandler() 93 | ch.setLevel(logging.DEBUG) 94 | 95 | # create formatter 96 | # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 97 | formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') 98 | # add formatter to ch 99 | ch.setFormatter(formatter) 100 | # add ch to logger 101 | logger.addHandler(ch) 102 | 103 | os.environ['VPC_TABLE'] = "turner-antiope-dev-aws-inventory-vpc-inventory" 104 | os.environ['ACCOUNT_TABLE'] = "turner-antiope-dev-aws-inventory-accounts" 105 | os.environ['INVENTORY_BUCKET'] = "turner-antiope-dev" 106 | os.environ['ROLE_NAME'] = "GTO-ISO-Audit" 107 | os.environ['ROLE_SESSION_NAME'] = "Antiope" 108 | 109 | handler({}, {}) 110 | -------------------------------------------------------------------------------- /aws-inventory/lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /aws-inventory/lambda/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | -------------------------------------------------------------------------------- /aws-inventory/lambda/trigger_account_actions.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from botocore.exceptions import ClientError 3 | import json 4 | import os 5 | import time 6 | 7 | import logging 8 | logger = logging.getLogger() 9 | logger.setLevel(getattr(logging,
os.getenv('LOG_LEVEL', default='INFO'))) 10 | logging.getLogger('botocore').setLevel(logging.WARNING) 11 | logging.getLogger('boto3').setLevel(logging.WARNING) 12 | logging.getLogger('urllib3').setLevel(logging.WARNING) 13 | 14 | hurry_up = 800.00 # after this number of seconds, stop delaying between publish and just send the rest. We want to finish before we expire. 15 | # TODO - have this function return unfinished work to the step function for another pass. 16 | 17 | # Increase this number to shorten the interval between SNS Publish calls. 18 | # The last digit of the account_id is divided by this number to create the number of seconds of delay. 19 | accel_factor = int(os.environ['ACCEL_FACTOR']) 20 | 21 | 22 | # Lambda main routine 23 | def handler(event, context): 24 | logger.info("Received event: " + json.dumps(event, sort_keys=True)) 25 | 26 | client = boto3.client('sns') 27 | 28 | start_time = time.time() 29 | 30 | for account_id in event['account_list']: 31 | 32 | message = event.copy() 33 | del(message['account_list']) # Don't need to send this along to each lambda 34 | message['account_id'] = account_id # Which account to process 35 | 36 | # Unless 'nowait' is set, sleep up to 9/accel_factor seconds (based on the last digit of the account ID) before sending the message. 37 | if 'nowait' in event and event['nowait'] is True: 38 | response = client.publish( 39 | TopicArn=os.environ['TRIGGER_ACCOUNT_INVENTORY_ARN'], 40 | Message=json.dumps(message) 41 | ) 42 | else: 43 | # if we've still got more than hurry_up time left, do the delay. 44 | logger.debug(f"{time.time()} - {start_time} < {hurry_up}") 45 | if time.time() - start_time < hurry_up: 46 | delay = int(message['account_id'][-1:]) / accel_factor 47 | logger.debug(f"Delaying {delay} sec for account {account_id}") 48 | time.sleep(delay) 49 | logger.debug(f"Publishing for {account_id}") 50 | response = client.publish( 51 | TopicArn=os.environ['TRIGGER_ACCOUNT_INVENTORY_ARN'], 52 | Message=json.dumps(message) 53 | ) 54 | 55 | return(event) 56 | 57 | # end handler() 58 | 59 | ############################################## 60 | -------------------------------------------------------------------------------- /aws-inventory/test-events/test_event.json: -------------------------------------------------------------------------------- 1 | { 2 | "Records": [ 3 | { 4 | "EventSource": "aws:sns", 5 | "EventSubscriptionArn": "arn:aws:sns:us-west-2:123456789012:PREFIX-ENV-aws-inventory-TriggerAccountInventoryFunctionTopic-FNORD:globldygook", 6 | "EventVersion": "1.0", 7 | "Sns": { 8 | "Message": "{\"account_id\": \"ACCOUNT_ID_GOES_HERE\"}", 9 | "MessageAttributes": {}, 10 | "SignatureVersion": "1", 11 | "SigningCertUrl": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-ac565b8b1a6c5d002d285f9598aa1d9b.pem", 12 | "Subject": null, 13 | "Timestamp": "2019-01-08T16:46:19.869Z", 14 | "TopicArn": "arn:aws:sns:us-west-2:123456789012:PREFIX-ENV-aws-inventory-TriggerAccountInventoryFunctionTopic-FNORD", 15 | "Type": "Notification", 16 | "UnsubscribeUrl": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789012:PREFIX-ENV-aws-inventory-TriggerAccountInventoryFunctionTopic-FNORD:globldygook" 17 | } 18 | } 19 | ] 20 | } -------------------------------------------------------------------------------- /bin/ddb_backups.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PREFIX=$1 4 | 5 | if [ -z $PREFIX ] ; then 6 | echo "Usage: $0 <prefix>" 7 | exit 1 8 | fi 9 | 10 | DATE=`date +%Y-%m-%d-%H%M` 11 | 12 | TABLES=`aws
dynamodb list-tables --output text | awk '{print $NF}' | grep $PREFIX` 13 | 14 | for t in $TABLES ; do 15 | BACKUPNAME="${t}-${DATE}" 16 | aws dynamodb create-backup --table-name $t --backup-name $BACKUPNAME 17 | done -------------------------------------------------------------------------------- /bin/sync_resources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | BUCKET=$1 5 | if [ -z $BUCKET ] ; then 6 | echo "Must specify bucket name" 7 | exit 1 8 | fi 9 | 10 | 11 | aws s3 sync s3://$BUCKET/Resources/ Resources 12 | 13 | open Resources -------------------------------------------------------------------------------- /bin/trigger_inventory.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RESOURCEID="MasterStateMachine" 4 | 5 | # if [ ! -x jq ] ; then 6 | # echo "jq not installed or not in path" 7 | # exit 1 8 | # fi 9 | 10 | STACKNAME=$1 11 | EVENT=$2 12 | if [ -z $STACKNAME ] ; then 13 | echo "Must specify STACKNAME" 14 | exit 1 15 | fi 16 | 17 | # If the user didn't pass in an event file, then get the payer list via the stack's parameters (using this ugly jq command) 18 | if [ -z $EVENT ] ; then 19 | EVENT="${STACKNAME}-test-event.json" # file to save the event as 20 | EVENTJSON=`aws cloudformation describe-stacks --stack-name ${STACKNAME} | jq -r '.Stacks[].Parameters[]|select(.ParameterKey=="pEventJson").ParameterValue'` 21 | if [ -z "$EVENTJSON" ] ; then 22 | echo "Didn't find the payer list in stack ${STACKNAME}. Aborting..." 23 | exit 1 24 | fi 25 | echo "$EVENTJSON" > $EVENT 26 | elif [ ! -f $EVENT ] ; then 27 | echo "Cannot find file $EVENT. Aborting..." 28 | exit 1 29 | fi 30 | 31 | DATE=`date +%Y-%m-%d-%H-%M` 32 | STATEMACHINE_ARN=`aws cloudformation describe-stack-resources --stack-name ${STACKNAME} --output text | grep ${RESOURCEID} | awk '{print $3}'` 33 | if [ -z $STATEMACHINE_ARN ] ; then 34 | echo "Unable to find StateMachine Arn for Stack ${STACKNAME}. Aborting..."
35 | exit 1 36 | fi 37 | 38 | aws stepfunctions start-execution --state-machine-arn ${STATEMACHINE_ARN} --name "make-trigger-${DATE}" --input file://$EVENT 39 | -------------------------------------------------------------------------------- /cloudformation/antiope-bucket-ImportTemplate.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Description: Create and Manage the Antiope S3 Bucket (and event notifications) 3 | 4 | Parameters: 5 | 6 | pBucketName: 7 | Description: Name of the Antiope Bucket to hold all the data 8 | Type: String 9 | 10 | Resources: 11 | AntiopeBucket: 12 | Type: AWS::S3::Bucket 13 | DeletionPolicy: Retain 14 | # DependsOn: AntiopeBucketNotificationTopicPolicy 15 | Properties: 16 | AccessControl: Private 17 | BucketEncryption: 18 | ServerSideEncryptionConfiguration: 19 | - ServerSideEncryptionByDefault: 20 | SSEAlgorithm: AES256 21 | BucketName: !Ref pBucketName 22 | 23 | 24 | -------------------------------------------------------------------------------- /cloudformation/antiope-bucket-Template.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Description: Create and Manage the Antiope S3 Bucket (and event notifications) 3 | 4 | Parameters: 5 | 6 | pBucketName: 7 | Description: Name of the Antiope Bucket to hold all the data 8 | Type: String 9 | 10 | Resources: 11 | 12 | AntiopeBucket: 13 | Type: AWS::S3::Bucket 14 | DeletionPolicy: Retain 15 | DependsOn: AntiopeBucketNotificationTopicPolicy 16 | Properties: 17 | AccessControl: Private 18 | BucketEncryption: 19 | ServerSideEncryptionConfiguration: 20 | - ServerSideEncryptionByDefault: 21 | SSEAlgorithm: AES256 22 | BucketName: !Ref pBucketName 23 | # Additional Configuration options to come back and revisit. 24 | # LifecycleConfiguration: <- I don't think we'd ever want to expire resources, but maybe over time? 25 | # LoggingConfiguration: <- Probably unnecessary, but if someone needs it for compliance 26 | # MetricsConfigurations: <- Might be useful to see metrics on the primary keys of the bucket 27 | # InventoryConfiguration: <- Might be useful to pull out the Resources/ objects into a specific report 28 | NotificationConfiguration: 29 | TopicConfigurations: 30 | - Event: 's3:ObjectCreated:*' 31 | Topic: !Ref ResourceNotificationTopic 32 | Filter: 33 | S3Key: 34 | Rules: 35 | - Name: prefix 36 | Value: "Resources/" 37 | - Name: suffix 38 | Value: ".json" 39 | OwnershipControls: 40 | Rules: 41 | - ObjectOwnership: BucketOwnerPreferred 42 | PublicAccessBlockConfiguration: 43 | BlockPublicAcls: True 44 | BlockPublicPolicy: True 45 | IgnorePublicAcls: True 46 | RestrictPublicBuckets: False # This rule also prohibits Cross-Account bucket access 47 | 48 | # TODO 49 | # What Bucket Policy is needed? 
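# One possible answer to that TODO, sketched here as an assumption (commented out, not
# part of the original template): a minimal bucket policy that denies non-TLS access.
#
# AntiopeBucketPolicy:
#   Type: AWS::S3::BucketPolicy
#   Properties:
#     Bucket: !Ref AntiopeBucket
#     PolicyDocument:
#       Version: '2012-10-17'
#       Statement:
#         - Sid: DenyInsecureTransport
#           Effect: Deny
#           Principal: '*'
#           Action: 's3:*'
#           Resource:
#             - !Sub "arn:aws:s3:::${pBucketName}"
#             - !Sub "arn:aws:s3:::${pBucketName}/*"
#           Condition:
#             Bool:
#               'aws:SecureTransport': 'false'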
50 | 51 | ResourceNotificationTopic: 52 | Type: AWS::SNS::Topic 53 | Properties: 54 | DisplayName: !Sub "Destination of PutObject calls from ${pBucketName}" 55 | TopicName: !Sub "${pBucketName}-Resources-PutObject" 56 | 57 | # This Policy can be reused for any future Topics 58 | AntiopeBucketNotificationTopicPolicy: 59 | Type: AWS::SNS::TopicPolicy 60 | Properties: 61 | Topics: 62 | - !Ref ResourceNotificationTopic 63 | PolicyDocument: 64 | Version: '2012-10-17' 65 | Id: AllowAntiopeBucket 66 | Statement: 67 | - Sid: AllowAntiopeBucketPublish 68 | Effect: Allow 69 | Principal: 70 | AWS: "*" 71 | Action: 72 | - SNS:Publish 73 | Resource: 74 | - !Ref ResourceNotificationTopic 75 | Condition: 76 | ArnLike: 77 | aws:SourceArn: !Sub "arn:aws:s3:*:*:${pBucketName}" 78 | StringEquals: 79 | aws:SourceAccount: !Ref AWS::AccountId 80 | 81 | Outputs: 82 | 83 | Bucket: 84 | Value: !Ref pBucketName 85 | Description: Antiope Bucket Name 86 | 87 | BucketArn: 88 | Value: !GetAtt AntiopeBucket.Arn 89 | Description: Antiope Bucket ARN 90 | 91 | BucketDomainName: 92 | Value: !GetAtt AntiopeBucket.DomainName 93 | Description: The IPv4 DNS name of the Antiope Bucket 94 | 95 | ResourceNotificationTopicArn: 96 | Value: !Ref ResourceNotificationTopic 97 | Description: ARN of the Topic where Resources PutObject events are sent 98 | 99 | ResourceNotificationTopicName: 100 | Value: !GetAtt ResourceNotificationTopic.TopicName 101 | Description: Name of the Topic where Resources PutObject events are sent -------------------------------------------------------------------------------- /cognito/Makefile: -------------------------------------------------------------------------------- 1 | 2 | include ../config-files/config.$(env) 3 | export 4 | 5 | ifndef MAIN_STACK_NAME 6 | $(error MAIN_STACK_NAME is not set) 7 | endif 8 | 9 | ifndef BUCKET 10 | $(error BUCKET is not set) 11 | endif 12 | 13 | .PHONY: $(FUNCTIONS) 14 | 15 | # Run the post-deploy steps 16 | post-deploy: 17 | ./post-deploy.sh 18 | 19 | # 20 | # Cloudformation Targets 21 | # 22 | 23 | 24 | 25 | templates: 26 | aws s3 sync public s3://$(BUCKET)/public 27 | 28 | api-deploy: 29 | $(eval API_ID := $(shell aws apigateway get-rest-apis --query 'items[?name==`$(COGNITO_STACK_NAME)`].id' --output text --region $(AWS_DEFAULT_REGION))) 30 | aws apigateway create-deployment --rest-api-id $(API_ID) --region $(AWS_DEFAULT_REGION) --stage-name "Reports" 31 | -------------------------------------------------------------------------------- /cognito/post-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Execute Post-Deploy steps for Cognito Stack 4 | 5 | if [ -z "$MAIN_STACK_NAME" ] ; then 6 | echo "MAIN_STACK_NAME is not in the environment. Is this being executed from the Makefile, or with config.ENV sourced?" 7 | exit 1 8 | fi 9 | 10 | if [ -z "$BUCKET" ] ; then 11 | echo "BUCKET is not in the environment. Is this being executed from the Makefile, or with config.ENV sourced?" 12 | exit 1 13 | fi 14 | 15 | if [ -z "$AWS_DEFAULT_REGION" ] ; then 16 | echo "AWS_DEFAULT_REGION is not in the environment. Is this being executed from the Makefile, or with config.ENV sourced?" 17 | exit 1 18 | fi 19 | 20 | export COGNITO_STACK_NAME=`aws cloudformation describe-stacks --stack-name $MAIN_STACK_NAME --query 'Stacks[0].Outputs[?OutputKey==\`CognitoStackName\`].OutputValue' --output text --region $AWS_DEFAULT_REGION` 21 | echo "Discovered Cognito Stack is $COGNITO_STACK_NAME" 22 | 23 | ID=`aws cloudformation describe-stacks --stack-name $COGNITO_STACK_NAME --query
'Stacks[0].Outputs[?OutputKey==\`CognitoUserPoolId\`].OutputValue' --output text --region $AWS_DEFAULT_REGION` 24 | if [ -z "`aws cognito-idp describe-user-pool-domain --domain $MAIN_STACK_NAME --output text --region $AWS_DEFAULT_REGION`" ] ; then 25 | echo "Configuring Cognito domain $MAIN_STACK_NAME for $ID" 26 | aws cognito-idp create-user-pool-domain --user-pool-id $ID --domain $MAIN_STACK_NAME --region $AWS_DEFAULT_REGION 27 | else 28 | echo "Cognito already configured for $ID" 29 | fi 30 | 31 | # aws cognito-idp delete-user-pool-domain --user-pool-id $ID --domain $MAIN_STACK_NAME --region us-east-1 -------------------------------------------------------------------------------- /docs/AntiopeBucket.md: -------------------------------------------------------------------------------- 1 | # Managing the Antiope Bucket 2 | 3 | 4 | ## Creating a New Antiope Bucket 5 | 6 | To create a fresh Antiope bucket, use the CFT in [cloudformation/antiope-bucket-Template.yaml](../cloudformation/antiope-bucket-Template.yaml). 7 | 8 | Steps to deploy: 9 | 1. Generate a manifest: 10 | ```bash 11 | cft-generate-manifest -m config-files/antiope-bucket-Manifest.yaml -t cloudformation/antiope-bucket-Template.yaml 12 | ``` 13 | 2. Edit the `config-files/antiope-bucket-Manifest.yaml` and set the stack name and pBucketName 14 | 3. Deploy the CloudFormation Stack with: 15 | ```bash 16 | cft-deploy -m config-files/antiope-bucket-Manifest.yaml 17 | ``` 18 | 19 | Now you can proceed to deploy the rest of Antiope. 20 | 21 | ## Importing an existing Antiope Bucket into CloudFormation 22 | 23 | If, for whatever reason, you have an existing Antiope bucket you wish to use, you can import it into CloudFormation and then update the bucket stack to include the other resources. 24 | 25 | CloudFormation import has some significant limitations. Not all resources _can_ be imported, and all resources in a template _must_ be imported. To work around these limitations, there is a barebones CFT that can be used to import the existing bucket into CloudFormation. Once imported, the stack can be updated to use the main template. The steps to import an existing bucket are as follows: 26 | 27 | 1. Create an import change set: 28 | ```bash 29 | aws cloudformation create-change-set --output text \ 30 | --stack-name antiope-bucket \ 31 | --change-set-name bucket-import \ 32 | --parameters ParameterKey=pBucketName,ParameterValue=REPLACE_WITH_YOUR_BUCKET_NAME \ 33 | --template-body file://cloudformation/antiope-bucket-ImportTemplate.yaml \ 34 | --change-set-type IMPORT \ 35 | --resources-to-import ResourceType=AWS::S3::Bucket,LogicalResourceId=AntiopeBucket,ResourceIdentifier={BucketName=REPLACE_WITH_YOUR_BUCKET_NAME} 36 | ``` 37 | 2. Review the change set 38 | ```bash 39 | aws cloudformation describe-change-set --change-set-name bucket-import --stack-name antiope-bucket 40 | ``` 41 | 3. Execute the change set 42 | ```bash 43 | aws cloudformation execute-change-set --change-set-name bucket-import --stack-name antiope-bucket 44 | ``` 45 | 4. Validate the new stack is in `IMPORT_COMPLETE` state 46 | 5. Now update the new stack with the full-featured template. First generate a manifest: 47 | ```bash 48 | cft-generate-manifest -m config-files/antiope-bucket-Manifest.yaml -t cloudformation/antiope-bucket-Template.yaml 49 | ``` 50 | 6. Edit the `config-files/antiope-bucket-Manifest.yaml` and set the stack name and pBucketName to the values used for the import 51 | 7. 
Deploy the CloudFormation Stack with: 52 | ```bash 53 | cft-deploy -m config-files/antiope-bucket-Manifest.yaml --force 54 | ``` -------------------------------------------------------------------------------- /docs/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Antiope 2 | 3 | * See notes on [Adding New Resources](AddingNewResources.md) 4 | * See notes on [Creating your own enterprise customizations](Customizations.md) 5 | 6 | ### Deploying for development 7 | The individual module Makefiles have many targets for ease of development. 8 | 9 | * `make install` will bundle the lambda zip file, push to S3 and create/update the cloudformation stack _and_ run the post-deploy scripts 10 | * `make deploy` will bundle the lambda zip file, push to S3 and create/update the cloudformation stack (not running the post-deploy scripts) 11 | * `make update` will bundle the lambda zip and update each function directly, bypassing CloudFormation - useful for iterative development. 12 | * `make fupdate function=FUNCTION_NAME` will bundle the lambda zip and update only a single function - also useful for development 13 | * `make purge` will purge the DDB Tables. 14 | * `make clean` gets rid of the python build artifacts 15 | 16 | These targets do part of the process: 17 | * `make test` validates the Python & CFT Syntax 18 | * `make package` and `make upload` create the lambda zipfile and push it to S3 19 | * In the lambda subdirectory `make deps` will pip install the requirements and bring in the library files (done prior to the lambda bundle) 20 | 21 | ### Promoting code from lower environments 22 | Once you've got functional code in your development environment, promotion to a QA or Prod environment is easy. 23 | 1. First make sure your cloudformation stacks are running the latest & greatest by running a `make deploy`. If you've only run `make update` or `make fupdate`, the package bundled by CloudFormation won't contain your latest code, and that bundled package is what gets promoted. 24 | 2. Find the TemplateURL in the outputs of your CloudFormation stack. This is a pointer to the transformed nested stacks and packaged code. 25 | 3. Create a cft-deploy manifest for the new environment. 26 | 4. Create a config.ENV for the new environment (where ENV is something like "qa" or "prod") 27 | 5. If you're moving across accounts, make sure your production account has bucket-policy access to your dev environment's `deploy-scripts` prefix 28 | 6. With environment credentials to the production account, run `make promote template=TEMPLATE_URL_FROM_STEP_2 env=prod` 29 | 7. Make sure to run the post-deploy tasks if this is the first time deploying to prod. `make post-deploy env=prod` -------------------------------------------------------------------------------- /docs/Customizations.md: -------------------------------------------------------------------------------- 1 | # Making Enterprise customizations to Antiope 2 | 3 | Antiope is designed to be a framework and starting point for your Cloud Security Inventory, Compliance and Vulnerability needs. Your organization may have other things you need to track, and the Antiope framework _should_ help support that. 4 | 5 | ## Integration Points 6 | 7 | ### Inventory Topics 8 | There are four SNS Topics created by the inventory stack: 9 | 10 | * TriggerAccountInventoryFunctionTopic: All enterprise accounts get a message published to this topic during the inventory run.
You can subscribe additional inventory Lambda functions to this topic, and they will be invoked for each account during the inventory pass. 11 | * TriggerPayerInventoryFunctionTopic: The same as the TriggerAccountInventoryFunctionTopic, but messages are published only for the payer accounts 12 | * NewAccountNotificationTopic: When Antiope discovers a new AWS account in your organization, a message is published to this topic. 13 | * ForeignAccountNotificationTopic: When Antiope discovers a new AWS account that is _trusted_ but not part of your organization, a message is published to this topic. 14 | 15 | ### Antiope StepFunction 16 | At the conclusion of the Inventory StepFunction, Antiope can hand off to another custom StepFunction. Here you can create additional reports or conduct post-inventory analysis of the results. Pass the ARN of this StepFunction to the `pDeployCustomStackStateMachineArn` parameter of the main Antiope template. 17 | 18 | 19 | ### SNS Messages Published to each topic 20 | 21 | TODO: Document these. 22 | 23 | 24 | ## Creating your own Enterprise Customization Stack 25 | 26 | **Note:** Do not put your Enterprise stack into the main Antiope Repo. The Enterprise stack is meant to include things that probably should not be open-sourced. 27 | 28 | A sample framework for a company custom stack is in `docs/sample-custom-stack`. 29 | 30 | ### Deployment Order 31 | 32 | Because the customized Lambda Functions and StepFunctions tie into the existing Antiope structure, you must first deploy Antiope. You can use cft-deploy's ability to import its parameters from other stacks to provide the references for that structure. 33 | 34 | 1. Deploy the main Antiope 35 | 2. Create a manifest in the custom repo to point to the Antiope Stacks 36 | 3. Deploy the custom stack 37 | 4. Get the StepFunction ARN from the custom stack 38 | 5. Add the StepFunction ARN to the manifest file for Antiope and re-deploy. -------------------------------------------------------------------------------- /docs/cloudformation/SecurityCrossAccountRoleTemplate.yaml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Description: This template creates a cross-account role for audit from the defined Security account 3 | Parameters: 4 | 5 | SecurityAccountNumber: 6 | Description: The 12 digit AWS account number to grant access to.
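    # Illustrative example value: "123456789012" (replace with your own security/audit account ID)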
7 | MinLength: '12' 8 | AllowedPattern: '[0-9]+' 9 | MaxLength: '12' 10 | Type: String 11 | 12 | RoleName: 13 | Description: The name of the Role to be created 14 | Default: 'Security-Audit' 15 | Type: String 16 | 17 | Resources: 18 | SecurityCrossAccountRole: 19 | Type: AWS::IAM::Role 20 | Properties: 21 | RoleName: !Ref RoleName 22 | ManagedPolicyArns: 23 | - arn:aws:iam::aws:policy/SecurityAudit 24 | - arn:aws:iam::aws:policy/ReadOnlyAccess 25 | Path: / 26 | AssumeRolePolicyDocument: 27 | Version: '2012-10-17' 28 | Statement: 29 | - Action: sts:AssumeRole 30 | Sid: '' 31 | Effect: Allow 32 | Principal: 33 | AWS: !Join ['', ['arn:aws:iam::', !Ref 'SecurityAccountNumber',':root']] 34 | Policies: 35 | - PolicyName: IAMSimulateResources 36 | PolicyDocument: 37 | Version: "2012-10-17" 38 | Statement: 39 | - Effect: "Allow" 40 | Action: "iam:Simulate*" 41 | Resource: "*" 42 | - PolicyName: SecurityTools 43 | PolicyDocument: 44 | Version: "2012-10-17" 45 | Statement: 46 | - Effect: "Allow" 47 | Action: 48 | - "trustedAdvisor:*" 49 | - "health:*" 50 | - "aws-portal:View*" 51 | - "acm:*" 52 | - "inspector:*" 53 | - "support:*" 54 | Resource: "*" 55 | - PolicyName: ProtectSensitiveData 56 | PolicyDocument: 57 | Version: "2012-10-17" 58 | Statement: 59 | - Sid: DenyGetObject 60 | Effect: "Deny" 61 | Action: 62 | - "s3:GetObject*" 63 | - "s3:ListMultipartUploadParts" 64 | - "s3:GetReplicationConfiguration" 65 | - "s3:ListObjects" 66 | Resource: "*" 67 | - Sid: DenyListBucketOnTaggedBuckets 68 | Effect: "Deny" 69 | Action: 70 | - "s3:ListBucket*" 71 | Resource: "*" 72 | - Sid: BlockDynamoDB 73 | Effect: "Deny" 74 | Action: 75 | - dynamodb:GetItem 76 | - dynamodb:BatchGetItem 77 | - dynamodb:Query 78 | - dynamodb:GetRecords 79 | - dynamodb:Scan 80 | Resource: "*" 81 | - PolicyName: PermitEnableGuardDuty 82 | PolicyDocument: 83 | Version: "2012-10-17" 84 | Statement: 85 | - Sid: FullGuardDuty 86 | Effect: "Allow" 87 | Action: 88 | - "guardduty:*" 89 | Resource: "*" 90 | - Sid: EnableGuardDutyRole 91 | Effect: "Allow" 92 | Action: 93 | - "iam:CreateRole" 94 | - "iam:AttachRolePolicy" 95 | - "iam:CreatePolicy" 96 | - "iam:PutRolePolicy" 97 | - "iam:PassRole" 98 | - "iam:Delete*" 99 | Resource: "*" #FIXME 100 | - Effect: "Allow" 101 | Action: "iam:CreateServiceLinkedRole" 102 | Resource: "*" 103 | Condition: 104 | StringLike: 105 | iam:AWSServiceName: "guardduty.amazonaws.com" 106 | 107 | Outputs: 108 | RoleARN: 109 | Description: The ARN of the Audit role that can be assumed by the Security account. 110 | Value: !GetAtt [SecurityCrossAccountRole, Arn] 111 | TemplateVersion: 112 | Value: 1.0.0 113 | -------------------------------------------------------------------------------- /docs/resource_types.md: -------------------------------------------------------------------------------- 1 | # Antiope Resource Type Registry 2 | 3 | In order to eventually support either Lambda or Config Service collection of data, I'm aligning the Resource Type values across both methods. These generally map to the [resource type in CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html) as well.
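For illustration, here is a minimal sketch of a resource document as indexed by the search cluster (field names come from the mappings in `search-cluster/mappings/`; all values below are made-up placeholders), showing where one of these type strings lands:

```json
{
  "resourceType": "AWS::EC2::VPC",
  "resourceId": "vpc-0123456789abcdef0",
  "resourceName": "example-vpc",
  "awsAccountId": "123456789012",
  "awsAccountName": "example-account",
  "awsRegion": "us-east-1",
  "source": "Antiope",
  "configurationItemCaptureTime": "2019-03-27 12:00:00.000000",
  "configuration": {"CidrBlock": "10.0.0.0/16"},
  "supplementaryConfiguration": {},
  "tags": {},
  "errors": {}
}
```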
4 | 5 | ## Config Service Resource Types 6 | 7 | * AWS::ACM::Certificate (not yet inventoried by Antiope) 8 | * AWS::AutoScaling::AutoScalingGroup (not yet inventoried by Antiope) 9 | * AWS::AutoScaling::LaunchConfiguration (not yet inventoried by Antiope) 10 | * AWS::CloudFormation::Stack 11 | * AWS::CloudFront::Distribution 12 | * AWS::CloudTrail::Trail 13 | * AWS::CloudWatch::Alarm (not yet inventoried by Antiope) 14 | * AWS::CodeBuild::Project (not yet inventoried by Antiope) 15 | * AWS::CodePipeline::Pipeline (not yet inventoried by Antiope) 16 | * AWS::Config::ResourceCompliance (not yet inventoried by Antiope) 17 | * AWS::DynamoDB::Table (not yet inventoried by Antiope) 18 | * AWS::EC2::EIP 19 | * AWS::EC2::Instance 20 | * AWS::EC2::InternetGateway (not yet inventoried by Antiope) 21 | * AWS::EC2::NetworkAcl (not yet inventoried by Antiope) 22 | * AWS::EC2::NetworkInterface 23 | * AWS::EC2::RouteTable (not yet inventoried by Antiope) 24 | * AWS::EC2::SecurityGroup 25 | * AWS::EC2::Subnet (not yet inventoried by Antiope) 26 | * AWS::EC2::VPC 27 | * AWS::EC2::VPNGateway 28 | * AWS::EC2::Volume 29 | * AWS::ElasticBeanstalk::Application (not yet inventoried by Antiope) 30 | * AWS::ElasticBeanstalk::ApplicationVersion (not yet inventoried by Antiope) 31 | * AWS::ElasticBeanstalk::Environment (not yet inventoried by Antiope) 32 | * AWS::ElasticLoadBalancing::LoadBalancer 33 | * AWS::ElasticLoadBalancingV2::LoadBalancer 34 | * AWS::IAM::Group (not yet inventoried by Antiope) 35 | * AWS::IAM::Policy (not yet inventoried by Antiope) 36 | * AWS::IAM::Role 37 | * AWS::IAM::User 38 | * AWS::Lambda::Function 39 | * AWS::RDS::DBInstance (not yet inventoried by Antiope) 40 | * AWS::RDS::DBSecurityGroup (not yet inventoried by Antiope) 41 | * AWS::RDS::DBSnapshot (not yet inventoried by Antiope) 42 | * AWS::RDS::DBSubnetGroup (not yet inventoried by Antiope) 43 | * AWS::S3::Bucket 44 | 45 | 46 | ## Antiope Custom Resource Types (not supported by Config Service) 47 | 48 | * AWS::ECR::Repository 49 | * AWS::ECS::Cluster 50 | * AWS::ECS::Task 51 | * AWS::KMS::Key 52 | * AWS::Route53::Domain 53 | * AWS::Route53::HostedZone 54 | * AWS::Elasticsearch::Domain 55 | 56 | -------------------------------------------------------------------------------- /docs/sample-custom-stack/Makefile: -------------------------------------------------------------------------------- 1 | 2 | ifndef env 3 | # $(error env is not set) 4 | env ?= dev 5 | endif 6 | 7 | ifdef CONFIG 8 | include $(CONFIG) 9 | export 10 | else 11 | include config.$(env) 12 | export 13 | endif 14 | 15 | # STACK_PREFIX is custom to your deployment and should be the same for all Antiope Stacks 16 | ifndef STACK_PREFIX 17 | $(error STACK_PREFIX is not set) 18 | endif 19 | 20 | ifndef BUCKET 21 | $(error BUCKET is not set) 22 | endif 23 | 24 | 25 | ifndef version 26 | export version := $(shell date +%Y%b%d-%H%M) 27 | endif 28 | 29 | # Specific to this stack 30 | export STACK_NAME=COMPANY-customization 31 | # Filename for the CFT to deploy 32 | export STACK_TEMPLATE=cloudformation/COMPANY-Template.yaml 33 | 34 | # Name of the Zip file with all the function code and dependencies 35 | export LAMBDA_PACKAGE=$(STACK_NAME)-lambda-$(version).zip 36 | 37 | # The full name of the stack in Cloudformation. This must match the manifest file 38 | export FULL_STACK_NAME=$(STACK_PREFIX)-$(env)-$(STACK_NAME) 39 | 40 | # Name of the manifest file. 
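# e.g. with STACK_PREFIX=antiope and env=dev (illustrative values), this expands to cloudformation/antiope-dev-COMPANY-customization-Manifest.yaml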
41 | export manifest=cloudformation/$(FULL_STACK_NAME)-Manifest.yaml 42 | 43 | # location in the Antiope bucket where we drop lambda-packages 44 | export OBJECT_KEY=deploy-packages/$(LAMBDA_PACKAGE) 45 | 46 | # For uploading CFT to S3 47 | export TEMPLATE_KEY ?= deploy-packages/$(STACK_NAME)-Template-$(version).yaml 48 | export TEMPLATE_URL ?= https://s3.amazonaws.com/$(BUCKET)/$(TEMPLATE_KEY) 49 | 50 | # List of all the functions deployed by this stack. Required for "make update" to work. 51 | FUNCTIONS = $(FULL_STACK_NAME)-populate-contacts \ 52 | $(FULL_STACK_NAME)-create-account-report 53 | 54 | .PHONY: $(FUNCTIONS) 55 | 56 | # Run all tests 57 | test: cfn-validate 58 | cd lambda && $(MAKE) test 59 | 60 | # Do everything 61 | deploy: package upload cfn-deploy templates 62 | 63 | clean: 64 | cd lambda && $(MAKE) clean 65 | 66 | # 67 | # Cloudformation Targets 68 | # 69 | 70 | # Upload template to S3 71 | cfn-upload: $(STACK_TEMPLATE) 72 | aws s3 cp $(STACK_TEMPLATE) s3://$(BUCKET)/$(TEMPLATE_KEY) 73 | 74 | # Validate the template 75 | cfn-validate: cfn-upload $(STACK_TEMPLATE) 76 | cft-validate --region $(AWS_DEFAULT_REGION) --s3-url s3://$(BUCKET)/$(TEMPLATE_KEY) 77 | 78 | # Deploy the stack 79 | cfn-deploy: cfn-validate $(manifest) 80 | cft-deploy -m $(manifest) --template-url $(TEMPLATE_URL) pLambdaZipFile=$(OBJECT_KEY) pBucketName=$(BUCKET) --force 81 | 82 | # 83 | # Lambda Targets 84 | # 85 | package: 86 | cd lambda && $(MAKE) package 87 | 88 | zipfile: 89 | cd lambda && $(MAKE) zipfile 90 | 91 | upload: package 92 | aws s3 cp lambda/$(LAMBDA_PACKAGE) s3://$(BUCKET)/$(OBJECT_KEY) 93 | 94 | # # Update the Lambda Code without modifying the CF Stack 95 | update: package $(FUNCTIONS) 96 | for f in $(FUNCTIONS) ; do \ 97 | aws lambda update-function-code --function-name $$f --zip-file fileb://lambda/$(LAMBDA_PACKAGE) ; \ 98 | done 99 | 100 | # Update one specific function. Called as "make fupdate function=-aws-inventory-ecs-inventory" 101 | fupdate: zipfile 102 | aws lambda update-function-code --function-name $(function) --zip-file fileb://lambda/$(LAMBDA_PACKAGE) 103 | 104 | 105 | templates: 106 | aws s3 sync html_templates s3://$(BUCKET)/Templates/ 107 | 108 | purge-logs: 109 | for f in $(FUNCTIONS) ; do \ 110 | aws logs delete-log-group --log-group-name /aws/lambda/$$f ; \ 111 | done 112 | 113 | expire-logs: 114 | for f in $(FUNCTIONS) ; do \ 115 | aws logs put-retention-policy --log-group-name /aws/lambda/$$f --retention-in-days 5 ; \ 116 | done 117 | 118 | get_stepfunction_arn: 119 | aws cloudformation describe-stacks --stack-name $(FULL_STACK_NAME) --query 'Stacks[0].Outputs[?OutputKey==`CustomProcessStateMachineArn`].OutputValue' --output text --region $(AWS_DEFAULT_REGION) 120 | 121 | 122 | -------------------------------------------------------------------------------- /docs/sample-custom-stack/cloudformation/PREFIX-ENV-COMPANY-customization-Manifest-SAMPLE.yaml: -------------------------------------------------------------------------------- 1 | # deploy_stack.rb Manifest file generated from cloudformation/COMPANY-Template.yaml on Sun Jan 20 14:24:18 EST 2019 2 | 3 | 4 | # These control how and where the cloudformation is executed 5 | StackName: PREFIX-ENV-COMPANY-customization 6 | OnFailure: DO_NOTHING # accepts DO_NOTHING, ROLLBACK, DELETE 7 | Region: us-west-2 8 | TimeOut: 15m 9 | 10 | # Parameters: 11 | # There are two kinds of parameters, regular and sourced.
12 | # Regular parameters are static and defined in the Parameters: section of this yaml file 13 | # Sourced parameters are those that cft-deploy will go and fetch from other Stacks. 14 | # This sample stack uses Sourced Parameters to pull its references 15 | # from the Antiope inventory stack (see the SourcedParameters section below) 16 | 17 | ########### 18 | # Parameters to the cloudformation stack that are defined manually. 19 | ########### 20 | Parameters: 21 | 22 | # Memory footprint for Lambdas processing lots of resources 23 | pLargeLambdaSize: 768 24 | 25 | # Default name of the AssumeRole to assume into accounts 26 | pRoleName: Security-Audit 27 | 28 | # Memory footprint for smaller Lambda 29 | pSmallLambdaSize: 128 30 | 31 | ########### 32 | # These stacks are needed by the SourcedParameters section 33 | ########### 34 | DependsOnStacks: 35 | - PREFIX-ENV-aws-inventory 36 | 37 | ########### 38 | # Parameters that come from other deployed stacks. 39 | # Valid Sections are Resources, Outputs, Parameters 40 | # 41 | # Hint: Get your list of resources this way: 42 | # aws cloudformation describe-stack-resources --stack-name MSC-DEV-VPC-EAST-1 --output text | awk '{print $2, " ", $3, " " $5}' 43 | ########### 44 | SourcedParameters: 45 | 46 | # Name of Account Table from Antiope Inventory Stack 47 | pAccountTable: PREFIX-ENV-aws-inventory.Resources.AccountDBTable 48 | 49 | # Name of BillingData Table from Antiope Inventory Stack 50 | pBillingDataTable: PREFIX-ENV-aws-inventory.Resources.HistoricalBillingDataTable 51 | 52 | # Name of VPC Table from Antiope Inventory Stack 53 | pVPCTable: PREFIX-ENV-aws-inventory.Resources.VpcInventoryDBTable 54 | 55 | # Main Antiope Bucket Name 56 | pBucketName: PREFIX-ENV-aws-inventory.Parameters.pBucketName 57 | 58 | ########### 59 | # Tags that apply to the stack. Will be inherited by some resources. 60 | ########### 61 | Tags: 62 | Name: PREFIX-ENV-COMPANY-customization 63 | 64 | 65 | ########### 66 | # Stack Policies protect resources from accidental deletion or replacement 67 | # for the definition of stack policies see: 68 | # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#stack-policy-reference 69 | ########### 70 | StackPolicy: 71 | # All resources should be modifiable. 72 | - Resource: "*" 73 | Effect: Allow 74 | Principal: "*" 75 | Action: 76 | - "Update:Modify" 77 | - "Update:Delete" 78 | - "Update:Replace" 79 | 80 | 81 | # Preinstall script will build the zip and upload the Lambda code to the S3 bucket 82 | # Anything inside a {{ }} is a stack parameter (Regular or Sourced) 83 | # PreInstallScript: | 84 | # #!/bin/bash -xe 85 | 86 | # PostInstall and PostUpdate Script. Anything inside {{ }} is replaced by a stack output 87 | # PostInstallScript: | 88 | # #!/bin/bash -xe 89 | 90 | # PostUpdateScript: | 91 | # #!/bin/bash -xe 92 | 93 | 94 | # End of Manifest 95 | -------------------------------------------------------------------------------- /docs/sample-custom-stack/lambda/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Python/pip executables used for packaging and tests; assumes python3/pip3 are on PATH
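# These can be overridden on the command line if needed, e.g. "make package PYTHON=python3.8 PIP=pip3.8" (illustrative versions)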
4 | PYTHON=python3 5 | PIP=pip3 6 | 7 | FILES=PUT Your Function Files Here 8 | 9 | DEPENDENCIES=PUT Any Dependency Directories to Package Here 10 | 11 | package: test clean deps zipfile 12 | 13 | # 14 | # Lambda function management 15 | # 16 | 17 | clean: 18 | rm -rf __pycache__ *.zip *.dist-info $(DEPENDENCIES) lib 19 | 20 | # # Create the package Zip. Assumes all tests were done 21 | zipfile: $(FILES) $(DEPENDENCIES) 22 | cp -a $(ANTIOPE_PATH)/lib . 23 | zip -r $(LAMBDA_PACKAGE) $^ lib 24 | 25 | test: $(FILES) 26 | for f in $^; do $(PYTHON) -m py_compile $$f; if [ $$? -ne 0 ] ; then echo "$$f FAILS" ; exit 1; fi ; done 27 | 28 | deps: 29 | $(PIP) install -r requirements.txt -t . --upgrade 30 | -------------------------------------------------------------------------------- /docs/sample-custom-stack/lambda/requirements.txt: -------------------------------------------------------------------------------- 1 | PyYAML 2 | mako -------------------------------------------------------------------------------- /docs/sample-custom-stack/lambda/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | -------------------------------------------------------------------------------- /get_bucket.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while /usr/bin/true ; do 4 | aws s3 mb s3://warnermedia-antiope-qa 5 | if [ $? -eq 0 ] ; then 6 | exit 0 7 | fi 8 | sleep 150 9 | done 10 | -------------------------------------------------------------------------------- /lambda_layer/Makefile: -------------------------------------------------------------------------------- 1 | 2 | ifndef pythonver 3 | # $(error pythonver is not set) 4 | pythonver ?= python3.6 5 | endif 6 | 7 | ifndef env 8 | # $(error env is not set) 9 | env ?= dev 10 | endif 11 | 12 | include ../config-files/config.$(env) 13 | export 14 | 15 | 16 | ifndef BUCKET 17 | $(error BUCKET is not set) 18 | endif 19 | 20 | ifndef version 21 | export version := $(shell date +%Y%b%d-%H%M) 22 | endif 23 | 24 | export LAYER_PACKAGE=Antiope-$(env)-aws-lambda-layer-$(version).zip 25 | export OBJECT_KEY=$(DEPLOY_PREFIX)/$(LAYER_PACKAGE) 26 | 27 | 28 | # Python/pip executables used for packaging; assumes python3/pip3 are on PATH 29 | PYTHON=python3 30 | PIP=pip3 31 | 32 | layer: clean deps zipfile upload 33 | 34 | # 35 | # Lambda function management 36 | # 37 | 38 | clean: 39 | rm -rf python *.zip 40 | 41 | # # Create the package Zip.
Assumes all tests were done 42 | zipfile: 43 | zip -r $(LAYER_PACKAGE) python 44 | 45 | deps: 46 | $(PIP) install -r requirements.txt -t python/lib/$(pythonver)/site-packages/ --upgrade 47 | 48 | upload: 49 | aws s3 cp $(LAYER_PACKAGE) s3://$(BUCKET)/$(OBJECT_KEY) 50 | @echo "LAYER_URL=$(OBJECT_KEY)" >> ../config.$(env) 51 | 52 | pep8: $(FILES) 53 | pycodestyle $^ 54 | -------------------------------------------------------------------------------- /lambda_layer/requirements.txt: -------------------------------------------------------------------------------- 1 | #openpyxl==2.5.0 2 | PyYAML 3 | semver 4 | mako 5 | boto3 6 | requests 7 | requests_aws4auth 8 | elasticsearch 9 | -------------------------------------------------------------------------------- /lambda_layer/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 2 | elasticsearch 3 | python-dateutil 4 | PyYAML 5 | requests 6 | requests_aws4auth 7 | semver 8 | -------------------------------------------------------------------------------- /search-cluster/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | ifndef env 4 | # $(error env is not set) 5 | env ?= dev 6 | endif 7 | 8 | include ../config-files/config.$(env) 9 | export 10 | 11 | ifndef MAIN_STACK_NAME 12 | $(error MAIN_STACK_NAME is not set) 13 | endif 14 | 15 | ifndef BUCKET 16 | $(error BUCKET is not set) 17 | endif 18 | 19 | ifndef version 20 | export version := $(shell date +%Y%b%d-%H%M) 21 | endif 22 | 23 | # Specific to this stack 24 | RESOURCE_PREFIX=$(MAIN_STACK_NAME)-search-cluster 25 | 26 | # Name of the Zip file with all the function code and dependencies 27 | export LAMBDA_PACKAGE ?= $(RESOURCE_PREFIX)-lambda-$(version).zip 28 | 29 | # List of all the functions deployed by this stack. Required for "make update" to work. 30 | FUNCTIONS = $(RESOURCE_PREFIX)-ingest-s3 31 | 32 | .PHONY: $(FUNCTIONS) 33 | 34 | post-deploy: 35 | ./scripts/post-deploy.sh 36 | 37 | # 38 | # Lambda Targets 39 | # 40 | test: 41 | cd lambda && $(MAKE) test 42 | 43 | clean: 44 | cd lambda && $(MAKE) clean 45 | cd scripts && $(MAKE) clean 46 | rm -f notification_template-$(MAIN_STACK_NAME).json 47 | 48 | package: 49 | cd lambda && $(MAKE) package 50 | 51 | # # Update the Lambda Code without modifying the CF Stack 52 | update: package $(FUNCTIONS) 53 | for f in $(FUNCTIONS) ; do \ 54 | aws lambda update-function-code --region $(AWS_DEFAULT_REGION) --function-name $$f --zip-file fileb://lambda/$(LAMBDA_PACKAGE) ; \ 55 | done 56 | 57 | # Update one specific function. Called as "make fupdate function=-aws-inventory-ecs-inventory" 58 | fupdate: package 59 | aws lambda update-function-code --region $(AWS_DEFAULT_REGION) --function-name $(function) --zip-file fileb://lambda/$(LAMBDA_PACKAGE) 60 | 61 | # 62 | # House Cleaning Functions (for the Purge) 63 | # 64 | purge-logs: 65 | for f in $(FUNCTIONS) ; do \ 66 | aws --region $(AWS_DEFAULT_REGION) logs delete-log-group --log-group-name /aws/lambda/$$f ; \ 67 | done 68 | 69 | 70 | -------------------------------------------------------------------------------- /search-cluster/lambda/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Python/pip executables used for packaging and tests; assumes python3/pip3 are on PATH
4 | PYTHON=python3 5 | PIP=pip3 6 | 7 | FILES=ingest_s3.py 8 | 9 | DEPENDENCIES= 10 | 11 | package: test clean deps zipfile 12 | 13 | # 14 | # Lambda function management 15 | # 16 | 17 | clean: 18 | rm -rf __pycache__ *.zip *.dist-info HISTORY.rst bin README.rst LICENSE NOTICE bin $(DEPENDENCIES) 19 | 20 | 21 | # # Create the package Zip. Assumes all tests were done 22 | zipfile: $(FILES) $(DEPENDENCIES) 23 | zip -r $(LAMBDA_PACKAGE) $^ 24 | 25 | test: $(FILES) 26 | for f in $^; do $(PYTHON) -m py_compile $$f; if [ $$? -ne 0 ] ; then echo "$$f FAILS" ; exit 1; fi done 27 | 28 | deps: 29 | $(PIP) install -r requirements.txt -t . --upgrade 30 | 31 | pep8: $(FILES) 32 | pycodestyle $^ 33 | -------------------------------------------------------------------------------- /search-cluster/lambda/requirements.txt: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/turnerlabs/antiope/ca141dbc467168902b96250e3bb054ced24b4ca8/search-cluster/lambda/requirements.txt -------------------------------------------------------------------------------- /search-cluster/lambda/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "properties": { 4 | "ARN": { 5 | "fields": { 6 | "keyword": { 7 | "ignore_above": 256, 8 | "type": "keyword" 9 | } 10 | }, 11 | "type": "keyword" 12 | }, 13 | "awsAccountId": { 14 | "fields": { 15 | "keyword": { 16 | "ignore_above": 13, 17 | "type": "keyword" 18 | } 19 | }, 20 | "type": "keyword" 21 | }, 22 | "awsAccountName": { 23 | "fields": { 24 | "keyword": { 25 | "ignore_above": 256, 26 | "type": "keyword" 27 | } 28 | }, 29 | "type": "keyword" 30 | }, 31 | "awsRegion": { 32 | "fields": { 33 | "keyword": { 34 | "ignore_above": 20, 35 | "type": "keyword" 36 | } 37 | }, 38 | "type": "keyword" 39 | }, 40 | "configuration": { 41 | "type": "object" 42 | }, 43 | "configurationItemCaptureTime": { 44 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 45 | "type": "date" 46 | }, 47 | "errors": { 48 | "type": "object" 49 | }, 50 | "resourceId": { 51 | "fields": { 52 | "keyword": { 53 | "ignore_above": 256, 54 | "type": "keyword" 55 | } 56 | }, 57 | "type": "keyword" 58 | }, 59 | "resourceName": { 60 | "fields": { 61 | "keyword": { 62 | "ignore_above": 256, 63 | "type": "keyword" 64 | } 65 | }, 66 | "type": "text" 67 | }, 68 | "resourceType": { 69 | "fields": { 70 | "keyword": { 71 | "ignore_above": 256, 72 | "type": "keyword" 73 | } 74 | }, 75 | "type": "keyword" 76 | }, 77 | "source": { 78 | "fields": { 79 | "keyword": { 80 | "ignore_above": 256, 81 | "type": "keyword" 82 | } 83 | }, 84 | "type": "keyword" 85 | }, 86 | "supplementaryConfiguration": { 87 | "type": "object" 88 | }, 89 | "tags": { 90 | "type": "object", 91 | "enabled": false 92 | } 93 | }, 94 | "_meta": { 95 | "antiope_mapping_version": "2019-03-27" 96 | } 97 | } 98 | 99 | } 100 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_cloudformation_stack.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "properties": { 4 | "ARN": { 5 | "fields": { 6 | "keyword": { 7 | "ignore_above": 256, 8 | "type": "keyword" 9 | } 10 | }, 11 | "type": "keyword" 12 | 
}, 13 | "awsAccountId": { 14 | "fields": { 15 | "keyword": { 16 | "ignore_above": 13, 17 | "type": "keyword" 18 | } 19 | }, 20 | "type": "keyword" 21 | }, 22 | "awsAccountName": { 23 | "fields": { 24 | "keyword": { 25 | "ignore_above": 256, 26 | "type": "keyword" 27 | } 28 | }, 29 | "type": "keyword" 30 | }, 31 | "awsRegion": { 32 | "fields": { 33 | "keyword": { 34 | "ignore_above": 20, 35 | "type": "keyword" 36 | } 37 | }, 38 | "type": "keyword" 39 | }, 40 | "configuration": { 41 | "type": "object", 42 | "properties": { 43 | "Outputs": { 44 | "type": "nested", 45 | "include_in_parent": true, 46 | "properties": { 47 | "OutputKey": {"type": "keyword"}, 48 | "OutputValue": {"type": "keyword"} 49 | } 50 | }, 51 | "Parameters": { 52 | "type": "nested", 53 | "include_in_parent": true, 54 | "properties": { 55 | "ParameterKey": {"type": "keyword"}, 56 | "ParameterValue": {"type": "keyword"} 57 | } 58 | } 59 | } 60 | }, 61 | "configurationItemCaptureTime": { 62 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 63 | "type": "date" 64 | }, 65 | "errors": { 66 | "type": "object" 67 | }, 68 | "resourceId": { 69 | "fields": { 70 | "keyword": { 71 | "ignore_above": 256, 72 | "type": "keyword" 73 | } 74 | }, 75 | "type": "keyword" 76 | }, 77 | "resourceName": { 78 | "fields": { 79 | "keyword": { 80 | "ignore_above": 256, 81 | "type": "keyword" 82 | } 83 | }, 84 | "type": "text" 85 | }, 86 | "resourceType": { 87 | "fields": { 88 | "keyword": { 89 | "ignore_above": 256, 90 | "type": "keyword" 91 | } 92 | }, 93 | "type": "keyword" 94 | }, 95 | "source": { 96 | "fields": { 97 | "keyword": { 98 | "ignore_above": 256, 99 | "type": "keyword" 100 | } 101 | }, 102 | "type": "keyword" 103 | }, 104 | "supplementaryConfiguration": { 105 | "type": "object" 106 | }, 107 | "tags": { 108 | "type": "object", 109 | "enabled": false 110 | } 111 | }, 112 | "_meta": { 113 | "antiope_mapping_version": "2019-03-27" 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_cloudtrail_trail.json: -------------------------------------------------------------------------------- 1 | { 2 | "settings": { 3 | "index.mapping.ignore_malformed": true 4 | }, 5 | "mappings": { 6 | "properties": { 7 | "ARN": { 8 | "fields": { 9 | "keyword": { 10 | "ignore_above": 256, 11 | "type": "keyword" 12 | } 13 | }, 14 | "type": "keyword" 15 | }, 16 | "awsAccountId": { 17 | "fields": { 18 | "keyword": { 19 | "ignore_above": 13, 20 | "type": "keyword" 21 | } 22 | }, 23 | "type": "keyword" 24 | }, 25 | "awsAccountName": { 26 | "fields": { 27 | "keyword": { 28 | "ignore_above": 256, 29 | "type": "keyword" 30 | } 31 | }, 32 | "type": "keyword" 33 | }, 34 | "awsRegion": { 35 | "fields": { 36 | "keyword": { 37 | "ignore_above": 20, 38 | "type": "keyword" 39 | } 40 | }, 41 | "type": "keyword" 42 | }, 43 | "configuration": { 44 | "type": "object" 45 | }, 46 | "configurationItemCaptureTime": { 47 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 48 | "type": "date" 49 | }, 50 | "errors": { 51 | "type": "object" 52 | }, 53 | "resourceId": { 54 | "fields": { 55 | "keyword": { 56 | "ignore_above": 256, 57 | "type": "keyword" 58 | } 59 | }, 60 | "type": "keyword" 61 | }, 62 | "resourceName": { 63 | "fields": { 64 | "keyword": { 65 | "ignore_above": 256, 66 | "type": "keyword" 67 | } 68 | }, 69 | "type": "text" 70 | }, 71 | "resourceType": { 72 | "fields": { 73 | "keyword": { 74 | "ignore_above": 256, 75 | "type": "keyword" 76 | } 77 | }, 78 | "type": "keyword" 79 | }, 80 | "source": { 81 | 
"fields": { 82 | "keyword": { 83 | "ignore_above": 256, 84 | "type": "keyword" 85 | } 86 | }, 87 | "type": "keyword" 88 | }, 89 | "supplementaryConfiguration": { 90 | "type": "object", 91 | "properties": { 92 | "EventSelectors": { 93 | "type": "nested", 94 | "include_in_parent": true 95 | } 96 | } 97 | }, 98 | "tags": { 99 | "type": "object", 100 | "enabled": false 101 | } 102 | }, 103 | "_meta": { 104 | "antiope_mapping_version": "2019-03-27" 105 | } 106 | } 107 | } 108 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_ec2_vpc.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "properties": { 4 | "ARN": { 5 | "fields": { 6 | "keyword": { 7 | "ignore_above": 256, 8 | "type": "keyword" 9 | } 10 | }, 11 | "type": "keyword" 12 | }, 13 | "awsAccountId": { 14 | "fields": { 15 | "keyword": { 16 | "ignore_above": 13, 17 | "type": "keyword" 18 | } 19 | }, 20 | "type": "keyword" 21 | }, 22 | "awsAccountName": { 23 | "fields": { 24 | "keyword": { 25 | "ignore_above": 256, 26 | "type": "keyword" 27 | } 28 | }, 29 | "type": "keyword" 30 | }, 31 | "awsRegion": { 32 | "fields": { 33 | "keyword": { 34 | "ignore_above": 20, 35 | "type": "keyword" 36 | } 37 | }, 38 | "type": "keyword" 39 | }, 40 | "configuration": { 41 | "type": "object" 42 | }, 43 | "configurationItemCaptureTime": { 44 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 45 | "type": "date" 46 | }, 47 | "errors": { 48 | "type": "object" 49 | }, 50 | "resourceId": { 51 | "fields": { 52 | "keyword": { 53 | "ignore_above": 256, 54 | "type": "keyword" 55 | } 56 | }, 57 | "type": "keyword" 58 | }, 59 | "resourceName": { 60 | "fields": { 61 | "keyword": { 62 | "ignore_above": 256, 63 | "type": "keyword" 64 | } 65 | }, 66 | "type": "text" 67 | }, 68 | "resourceType": { 69 | "fields": { 70 | "keyword": { 71 | "ignore_above": 256, 72 | "type": "keyword" 73 | } 74 | }, 75 | "type": "keyword" 76 | }, 77 | "source": { 78 | "fields": { 79 | "keyword": { 80 | "ignore_above": 256, 81 | "type": "keyword" 82 | } 83 | }, 84 | "type": "keyword" 85 | }, 86 | "supplementaryConfiguration": { 87 | "type": "object", 88 | "properties": { 89 | "DXVirtualInterfaces": { 90 | "type": "nested", 91 | "include_in_parent": true, 92 | "properties": { 93 | "virtualInterfaceId": {"type": "keyword"}, 94 | "connectionId": {"type": "keyword"} 95 | } 96 | }, 97 | "VpnConnections": { 98 | "type": "nested", 99 | "include_in_parent": true, 100 | "properties": { 101 | "CustomerGatewayId": {"type": "keyword"}, 102 | "VpnGatewayId": {"type": "keyword"}, 103 | "VpnConnectionId": {"type": "keyword"} 104 | } 105 | }, 106 | "directConnectGatewayAssociations": { 107 | "type": "nested", 108 | "include_in_parent": true, 109 | "properties": { 110 | "virtualGatewayId": {"type": "keyword"}, 111 | "directConnectGatewayId": {"type": "keyword"} 112 | } 113 | } 114 | } 115 | }, 116 | "tags": { 117 | "type": "object", 118 | "enabled": false 119 | } 120 | }, 121 | "_meta": { 122 | "antiope_mapping_version": "2019-03-27" 123 | } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_lambda_function.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "properties": { 4 | "ARN": { 5 | "fields": { 6 | "keyword": { 7 | "ignore_above": 256, 8 | "type": "keyword" 9 | } 10 | }, 11 | "type": "keyword" 12 | }, 13 | "awsAccountId": { 14 | 
"fields": { 15 | "keyword": { 16 | "ignore_above": 13, 17 | "type": "keyword" 18 | } 19 | }, 20 | "type": "keyword" 21 | }, 22 | "awsAccountName": { 23 | "fields": { 24 | "keyword": { 25 | "ignore_above": 256, 26 | "type": "keyword" 27 | } 28 | }, 29 | "type": "keyword" 30 | }, 31 | "awsRegion": { 32 | "fields": { 33 | "keyword": { 34 | "ignore_above": 20, 35 | "type": "keyword" 36 | } 37 | }, 38 | "type": "keyword" 39 | }, 40 | "configuration": { 41 | "type": "object", 42 | "properties": { 43 | "Environment": { 44 | "dynamic": false, 45 | "properties": {} 46 | } 47 | } 48 | }, 49 | "configurationItemCaptureTime": { 50 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 51 | "type": "date" 52 | }, 53 | "errors": { 54 | "type": "object" 55 | }, 56 | "resourceId": { 57 | "fields": { 58 | "keyword": { 59 | "ignore_above": 256, 60 | "type": "keyword" 61 | } 62 | }, 63 | "type": "keyword" 64 | }, 65 | "resourceName": { 66 | "fields": { 67 | "keyword": { 68 | "ignore_above": 256, 69 | "type": "keyword" 70 | } 71 | }, 72 | "type": "text" 73 | }, 74 | "resourceType": { 75 | "fields": { 76 | "keyword": { 77 | "ignore_above": 256, 78 | "type": "keyword" 79 | } 80 | }, 81 | "type": "keyword" 82 | }, 83 | "source": { 84 | "fields": { 85 | "keyword": { 86 | "ignore_above": 256, 87 | "type": "keyword" 88 | } 89 | }, 90 | "type": "keyword" 91 | }, 92 | "supplementaryConfiguration": { 93 | "type": "object" 94 | }, 95 | "tags": { 96 | "type": "object", 97 | "enabled": false 98 | } 99 | }, 100 | "_meta": { 101 | "antiope_mapping_version": "2019-03-27" 102 | } 103 | }, 104 | "settings": { 105 | "index.mapping.total_fields.limit": 1000, 106 | "index.mapping.ignore_malformed": true 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_route53_hostedzone.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "properties": { 4 | "ARN": { 5 | "fields": { 6 | "keyword": { 7 | "ignore_above": 256, 8 | "type": "keyword" 9 | } 10 | }, 11 | "type": "keyword" 12 | }, 13 | "awsAccountId": { 14 | "fields": { 15 | "keyword": { 16 | "ignore_above": 13, 17 | "type": "keyword" 18 | } 19 | }, 20 | "type": "keyword" 21 | }, 22 | "awsAccountName": { 23 | "fields": { 24 | "keyword": { 25 | "ignore_above": 256, 26 | "type": "keyword" 27 | } 28 | }, 29 | "type": "keyword" 30 | }, 31 | "awsRegion": { 32 | "fields": { 33 | "keyword": { 34 | "ignore_above": 20, 35 | "type": "keyword" 36 | } 37 | }, 38 | "type": "keyword" 39 | }, 40 | "configuration": { 41 | "type": "object" 42 | }, 43 | "configurationItemCaptureTime": { 44 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 45 | "type": "date" 46 | }, 47 | "errors": { 48 | "type": "object" 49 | }, 50 | "resourceId": { 51 | "fields": { 52 | "keyword": { 53 | "ignore_above": 256, 54 | "type": "keyword" 55 | } 56 | }, 57 | "type": "keyword" 58 | }, 59 | "resourceName": { 60 | "fields": { 61 | "keyword": { 62 | "ignore_above": 256, 63 | "type": "keyword" 64 | } 65 | }, 66 | "type": "text" 67 | }, 68 | "resourceType": { 69 | "fields": { 70 | "keyword": { 71 | "ignore_above": 256, 72 | "type": "keyword" 73 | } 74 | }, 75 | "type": "keyword" 76 | }, 77 | "source": { 78 | "fields": { 79 | "keyword": { 80 | "ignore_above": 256, 81 | "type": "keyword" 82 | } 83 | }, 84 | "type": "keyword" 85 | }, 86 | "supplementaryConfiguration": { 87 | "type": "object", 88 | "properties": { 89 | "ResourceRecordSets": { 90 | "type": "nested", 91 | "include_in_parent": true, 92 | 
"properties": { 93 | "Type": {"type": "keyword"}, 94 | "Name": {"type": "keyword"}, 95 | "AliasTarget": {"type": "object"} 96 | } 97 | } 98 | } 99 | }, 100 | "tags": { 101 | "type": "object", 102 | "enabled": false 103 | } 104 | }, 105 | "_meta": { 106 | "antiope_mapping_version": "2019-03-27" 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_secretsmanager_secret.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "properties": { 4 | "ARN": { 5 | "fields": { 6 | "keyword": { 7 | "ignore_above": 256, 8 | "type": "keyword" 9 | } 10 | }, 11 | "type": "keyword" 12 | }, 13 | "awsAccountId": { 14 | "fields": { 15 | "keyword": { 16 | "ignore_above": 13, 17 | "type": "keyword" 18 | } 19 | }, 20 | "type": "keyword" 21 | }, 22 | "awsAccountName": { 23 | "fields": { 24 | "keyword": { 25 | "ignore_above": 256, 26 | "type": "keyword" 27 | } 28 | }, 29 | "type": "keyword" 30 | }, 31 | "awsRegion": { 32 | "fields": { 33 | "keyword": { 34 | "ignore_above": 20, 35 | "type": "keyword" 36 | } 37 | }, 38 | "type": "keyword" 39 | }, 40 | "configuration": { 41 | "type": "object", 42 | "properties": { 43 | "SecretVersionsToStages": { 44 | "dynamic": false, 45 | "properties": {} 46 | } 47 | } 48 | }, 49 | "configurationItemCaptureTime": { 50 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 51 | "type": "date" 52 | }, 53 | "errors": { 54 | "type": "object" 55 | }, 56 | "resourceId": { 57 | "fields": { 58 | "keyword": { 59 | "ignore_above": 256, 60 | "type": "keyword" 61 | } 62 | }, 63 | "type": "keyword" 64 | }, 65 | "resourceName": { 66 | "fields": { 67 | "keyword": { 68 | "ignore_above": 256, 69 | "type": "keyword" 70 | } 71 | }, 72 | "type": "text" 73 | }, 74 | "resourceType": { 75 | "fields": { 76 | "keyword": { 77 | "ignore_above": 256, 78 | "type": "keyword" 79 | } 80 | }, 81 | "type": "keyword" 82 | }, 83 | "source": { 84 | "fields": { 85 | "keyword": { 86 | "ignore_above": 256, 87 | "type": "keyword" 88 | } 89 | }, 90 | "type": "keyword" 91 | }, 92 | "supplementaryConfiguration": { 93 | "type": "object" 94 | }, 95 | "tags": { 96 | "type": "object", 97 | "enabled": false 98 | } 99 | }, 100 | "_meta": { 101 | "antiope_mapping_version": "2019-03-27" 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /search-cluster/mappings-v7/resources_ssm_managedinstance.json: -------------------------------------------------------------------------------- 1 | { 2 | "settings": { 3 | "index.mapping.ignore_malformed": true 4 | }, 5 | "mappings": { 6 | "properties": { 7 | "ARN": { 8 | "fields": { 9 | "keyword": { 10 | "ignore_above": 256, 11 | "type": "keyword" 12 | } 13 | }, 14 | "type": "keyword" 15 | }, 16 | "awsAccountId": { 17 | "fields": { 18 | "keyword": { 19 | "ignore_above": 13, 20 | "type": "keyword" 21 | } 22 | }, 23 | "type": "keyword" 24 | }, 25 | "awsAccountName": { 26 | "fields": { 27 | "keyword": { 28 | "ignore_above": 256, 29 | "type": "keyword" 30 | } 31 | }, 32 | "type": "keyword" 33 | }, 34 | "awsRegion": { 35 | "fields": { 36 | "keyword": { 37 | "ignore_above": 20, 38 | "type": "keyword" 39 | } 40 | }, 41 | "type": "keyword" 42 | }, 43 | "configuration": { 44 | "type": "object", 45 | "properties": { 46 | "LastPingDateTime": { 47 | "format": "epoch_second", 48 | "type": "date" 49 | }, 50 | "LastSuccessfulAssociationExecutionDate": { 51 | "format": "yyyy-MM-dd HH:mm:ss+00:00", 52 | "type": 
"date" 53 | }, 54 | "LastAssociationExecutionDate": { 55 | "format": "yyyy-MM-dd HH:mm:ss+00:00", 56 | "type": "date" 57 | } 58 | } 59 | }, 60 | "configurationItemCaptureTime": { 61 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 62 | "type": "date" 63 | }, 64 | "errors": { 65 | "type": "object" 66 | }, 67 | "resourceId": { 68 | "fields": { 69 | "keyword": { 70 | "ignore_above": 256, 71 | "type": "keyword" 72 | } 73 | }, 74 | "type": "keyword" 75 | }, 76 | "resourceName": { 77 | "fields": { 78 | "keyword": { 79 | "ignore_above": 256, 80 | "type": "keyword" 81 | } 82 | }, 83 | "type": "text" 84 | }, 85 | "resourceType": { 86 | "fields": { 87 | "keyword": { 88 | "ignore_above": 256, 89 | "type": "keyword" 90 | } 91 | }, 92 | "type": "keyword" 93 | }, 94 | "source": { 95 | "fields": { 96 | "keyword": { 97 | "ignore_above": 256, 98 | "type": "keyword" 99 | } 100 | }, 101 | "type": "keyword" 102 | }, 103 | "supplementaryConfiguration": { 104 | "type": "object", 105 | "properties": { 106 | "EventSelectors": { 107 | "type": "nested", 108 | "include_in_parent": true 109 | } 110 | } 111 | }, 112 | "tags": { 113 | "type": "object", 114 | "enabled": false 115 | } 116 | }, 117 | "_meta": { 118 | "antiope_mapping_version": "2019-03-27" 119 | } 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /search-cluster/mappings/azure_resources_vm_instance.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "azureSubscriptionId": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 40, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "azureSubscriptionName": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 256, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 23 | "azureTenantId": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 40, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "azureTenantName": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 256, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object" 43 | }, 44 | "configurationItemCaptureTime": { 45 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 46 | "type": "date" 47 | }, 48 | "errors": { 49 | "type": "object" 50 | }, 51 | "resourceId": { 52 | "fields": { 53 | "keyword": { 54 | "ignore_above": 256, 55 | "type": "keyword" 56 | } 57 | }, 58 | "type": "keyword" 59 | }, 60 | "resourceType": { 61 | "fields": { 62 | "keyword": { 63 | "ignore_above": 256, 64 | "type": "keyword" 65 | } 66 | }, 67 | "type": "keyword" 68 | }, 69 | "source": { 70 | "fields": { 71 | "keyword": { 72 | "ignore_above": 256, 73 | "type": "keyword" 74 | } 75 | }, 76 | "type": "keyword" 77 | }, 78 | "supplementaryConfiguration": { 79 | "type": "object" 80 | } 81 | }, 82 | "_meta": { 83 | "antiope_mapping_version": "2020-11-19" 84 | } 85 | } 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /search-cluster/mappings/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "ARN": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 256, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "awsAccountId": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 13, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 
23 | "awsAccountName": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 256, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "awsRegion": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 20, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object" 43 | }, 44 | "configurationItemCaptureTime": { 45 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 46 | "type": "date" 47 | }, 48 | "errors": { 49 | "type": "object" 50 | }, 51 | "resourceId": { 52 | "fields": { 53 | "keyword": { 54 | "ignore_above": 256, 55 | "type": "keyword" 56 | } 57 | }, 58 | "type": "keyword" 59 | }, 60 | "resourceName": { 61 | "fields": { 62 | "keyword": { 63 | "ignore_above": 256, 64 | "type": "keyword" 65 | } 66 | }, 67 | "type": "text" 68 | }, 69 | "resourceType": { 70 | "fields": { 71 | "keyword": { 72 | "ignore_above": 256, 73 | "type": "keyword" 74 | } 75 | }, 76 | "type": "keyword" 77 | }, 78 | "source": { 79 | "fields": { 80 | "keyword": { 81 | "ignore_above": 256, 82 | "type": "keyword" 83 | } 84 | }, 85 | "type": "keyword" 86 | }, 87 | "supplementaryConfiguration": { 88 | "type": "object" 89 | }, 90 | "tags": { 91 | "type": "object", 92 | "enabled": false 93 | } 94 | }, 95 | "_meta": { 96 | "antiope_mapping_version": "2019-03-27" 97 | } 98 | } 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /search-cluster/mappings/resources_cloudformation_stack.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "ARN": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 256, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "awsAccountId": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 13, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 23 | "awsAccountName": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 256, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "awsRegion": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 20, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object", 43 | "properties": { 44 | "Outputs": { 45 | "type": "nested", 46 | "include_in_parent": true, 47 | "properties": { 48 | "OutputKey": {"type": "keyword"}, 49 | "OutputValue": {"type": "keyword"} 50 | } 51 | }, 52 | "Parameters": { 53 | "type": "nested", 54 | "include_in_parent": true, 55 | "properties": { 56 | "ParameterKey": {"type": "keyword"}, 57 | "ParameterValue": {"type": "keyword"} 58 | } 59 | } 60 | } 61 | }, 62 | "configurationItemCaptureTime": { 63 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 64 | "type": "date" 65 | }, 66 | "errors": { 67 | "type": "object" 68 | }, 69 | "resourceId": { 70 | "fields": { 71 | "keyword": { 72 | "ignore_above": 256, 73 | "type": "keyword" 74 | } 75 | }, 76 | "type": "keyword" 77 | }, 78 | "resourceName": { 79 | "fields": { 80 | "keyword": { 81 | "ignore_above": 256, 82 | "type": "keyword" 83 | } 84 | }, 85 | "type": "text" 86 | }, 87 | "resourceType": { 88 | "fields": { 89 | "keyword": { 90 | "ignore_above": 256, 91 | "type": "keyword" 92 | } 93 | }, 94 | "type": "keyword" 95 | }, 96 | "source": { 97 | "fields": { 98 | "keyword": { 99 | "ignore_above": 256, 100 | "type": "keyword" 101 | } 102 | }, 103 | "type": "keyword" 104 | }, 105 | "supplementaryConfiguration": { 106 | "type": "object" 107 | }, 108 | "tags": { 109 
| "type": "object", 110 | "enabled": false 111 | } 112 | }, 113 | "_meta": { 114 | "antiope_mapping_version": "2019-03-27" 115 | } 116 | } 117 | } 118 | } 119 | -------------------------------------------------------------------------------- /search-cluster/mappings/resources_cloudtrail_trail.json: -------------------------------------------------------------------------------- 1 | { 2 | "settings": { 3 | "index.mapping.ignore_malformed": true 4 | }, 5 | "mappings": { 6 | "_doc": { 7 | "properties": { 8 | "ARN": { 9 | "fields": { 10 | "keyword": { 11 | "ignore_above": 256, 12 | "type": "keyword" 13 | } 14 | }, 15 | "type": "keyword" 16 | }, 17 | "awsAccountId": { 18 | "fields": { 19 | "keyword": { 20 | "ignore_above": 13, 21 | "type": "keyword" 22 | } 23 | }, 24 | "type": "keyword" 25 | }, 26 | "awsAccountName": { 27 | "fields": { 28 | "keyword": { 29 | "ignore_above": 256, 30 | "type": "keyword" 31 | } 32 | }, 33 | "type": "keyword" 34 | }, 35 | "awsRegion": { 36 | "fields": { 37 | "keyword": { 38 | "ignore_above": 20, 39 | "type": "keyword" 40 | } 41 | }, 42 | "type": "keyword" 43 | }, 44 | "configuration": { 45 | "type": "object" 46 | }, 47 | "configurationItemCaptureTime": { 48 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 49 | "type": "date" 50 | }, 51 | "errors": { 52 | "type": "object" 53 | }, 54 | "resourceId": { 55 | "fields": { 56 | "keyword": { 57 | "ignore_above": 256, 58 | "type": "keyword" 59 | } 60 | }, 61 | "type": "keyword" 62 | }, 63 | "resourceName": { 64 | "fields": { 65 | "keyword": { 66 | "ignore_above": 256, 67 | "type": "keyword" 68 | } 69 | }, 70 | "type": "text" 71 | }, 72 | "resourceType": { 73 | "fields": { 74 | "keyword": { 75 | "ignore_above": 256, 76 | "type": "keyword" 77 | } 78 | }, 79 | "type": "keyword" 80 | }, 81 | "source": { 82 | "fields": { 83 | "keyword": { 84 | "ignore_above": 256, 85 | "type": "keyword" 86 | } 87 | }, 88 | "type": "keyword" 89 | }, 90 | "supplementaryConfiguration": { 91 | "type": "object", 92 | "properties": { 93 | "EventSelectors": { 94 | "type": "nested", 95 | "include_in_parent": true 96 | } 97 | } 98 | }, 99 | "tags": { 100 | "type": "object", 101 | "enabled": false 102 | } 103 | }, 104 | "_meta": { 105 | "antiope_mapping_version": "2019-03-27" 106 | } 107 | } 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /search-cluster/mappings/resources_ec2_vpc.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "ARN": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 256, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "awsAccountId": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 13, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 23 | "awsAccountName": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 256, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "awsRegion": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 20, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object" 43 | }, 44 | "configurationItemCaptureTime": { 45 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 46 | "type": "date" 47 | }, 48 | "errors": { 49 | "type": "object" 50 | }, 51 | "resourceId": { 52 | "fields": { 53 | "keyword": { 54 | "ignore_above": 256, 55 | "type": "keyword" 56 | } 57 | }, 58 | "type": "keyword" 59 | }, 60 | "resourceName": { 61 | "fields": { 62 | 
"keyword": { 63 | "ignore_above": 256, 64 | "type": "keyword" 65 | } 66 | }, 67 | "type": "text" 68 | }, 69 | "resourceType": { 70 | "fields": { 71 | "keyword": { 72 | "ignore_above": 256, 73 | "type": "keyword" 74 | } 75 | }, 76 | "type": "keyword" 77 | }, 78 | "source": { 79 | "fields": { 80 | "keyword": { 81 | "ignore_above": 256, 82 | "type": "keyword" 83 | } 84 | }, 85 | "type": "keyword" 86 | }, 87 | "supplementaryConfiguration": { 88 | "type": "object", 89 | "properties": { 90 | "DXVirtualInterfaces": { 91 | "type": "nested", 92 | "include_in_parent": true, 93 | "properties": { 94 | "virtualInterfaceId": {"type": "keyword"}, 95 | "connectionId": {"type": "keyword"} 96 | } 97 | }, 98 | "VpnConnections": { 99 | "type": "nested", 100 | "include_in_parent": true, 101 | "properties": { 102 | "CustomerGatewayId": {"type": "keyword"}, 103 | "VpnGatewayId": {"type": "keyword"}, 104 | "VpnConnectionId": {"type": "keyword"} 105 | } 106 | }, 107 | "directConnectGatewayAssociations": { 108 | "type": "nested", 109 | "include_in_parent": true, 110 | "properties": { 111 | "virtualGatewayId": {"type": "keyword"}, 112 | "directConnectGatewayId": {"type": "keyword"} 113 | } 114 | } 115 | } 116 | }, 117 | "tags": { 118 | "type": "object", 119 | "enabled": false 120 | } 121 | }, 122 | "_meta": { 123 | "antiope_mapping_version": "2019-03-27" 124 | } 125 | } 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /search-cluster/mappings/resources_lambda_function.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "ARN": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 256, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "awsAccountId": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 13, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 23 | "awsAccountName": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 256, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "awsRegion": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 20, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object", 43 | "properties": { 44 | "Environment": { 45 | "dynamic": false, 46 | "properties": {} 47 | } 48 | } 49 | }, 50 | "configurationItemCaptureTime": { 51 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 52 | "type": "date" 53 | }, 54 | "errors": { 55 | "type": "object" 56 | }, 57 | "resourceId": { 58 | "fields": { 59 | "keyword": { 60 | "ignore_above": 256, 61 | "type": "keyword" 62 | } 63 | }, 64 | "type": "keyword" 65 | }, 66 | "resourceName": { 67 | "fields": { 68 | "keyword": { 69 | "ignore_above": 256, 70 | "type": "keyword" 71 | } 72 | }, 73 | "type": "text" 74 | }, 75 | "resourceType": { 76 | "fields": { 77 | "keyword": { 78 | "ignore_above": 256, 79 | "type": "keyword" 80 | } 81 | }, 82 | "type": "keyword" 83 | }, 84 | "source": { 85 | "fields": { 86 | "keyword": { 87 | "ignore_above": 256, 88 | "type": "keyword" 89 | } 90 | }, 91 | "type": "keyword" 92 | }, 93 | "supplementaryConfiguration": { 94 | "type": "object" 95 | }, 96 | "tags": { 97 | "type": "object", 98 | "enabled": false 99 | } 100 | }, 101 | "_meta": { 102 | "antiope_mapping_version": "2019-03-27" 103 | } 104 | } 105 | }, 106 | "settings": { 107 | "index.mapping.total_fields.limit": 1000, 108 | "index.mapping.ignore_malformed": true 109 | } 110 | } 111 | 
-------------------------------------------------------------------------------- /search-cluster/mappings/resources_route53_hostedzone.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "ARN": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 256, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "awsAccountId": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 13, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 23 | "awsAccountName": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 256, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "awsRegion": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 20, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object" 43 | }, 44 | "configurationItemCaptureTime": { 45 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 46 | "type": "date" 47 | }, 48 | "errors": { 49 | "type": "object" 50 | }, 51 | "resourceId": { 52 | "fields": { 53 | "keyword": { 54 | "ignore_above": 256, 55 | "type": "keyword" 56 | } 57 | }, 58 | "type": "keyword" 59 | }, 60 | "resourceName": { 61 | "fields": { 62 | "keyword": { 63 | "ignore_above": 256, 64 | "type": "keyword" 65 | } 66 | }, 67 | "type": "text" 68 | }, 69 | "resourceType": { 70 | "fields": { 71 | "keyword": { 72 | "ignore_above": 256, 73 | "type": "keyword" 74 | } 75 | }, 76 | "type": "keyword" 77 | }, 78 | "source": { 79 | "fields": { 80 | "keyword": { 81 | "ignore_above": 256, 82 | "type": "keyword" 83 | } 84 | }, 85 | "type": "keyword" 86 | }, 87 | "supplementaryConfiguration": { 88 | "type": "object", 89 | "properties": { 90 | "ResourceRecordSets": { 91 | "type": "nested", 92 | "include_in_parent": true, 93 | "properties": { 94 | "Type": {"type": "keyword"}, 95 | "Name": {"type": "keyword"}, 96 | "AliasTarget": {"type": "object"} 97 | } 98 | } 99 | } 100 | }, 101 | "tags": { 102 | "type": "object", 103 | "enabled": false 104 | } 105 | }, 106 | "_meta": { 107 | "antiope_mapping_version": "2019-03-27" 108 | } 109 | } 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /search-cluster/mappings/resources_secretsmanager_secret.json: -------------------------------------------------------------------------------- 1 | { 2 | "mappings": { 3 | "_doc": { 4 | "properties": { 5 | "ARN": { 6 | "fields": { 7 | "keyword": { 8 | "ignore_above": 256, 9 | "type": "keyword" 10 | } 11 | }, 12 | "type": "keyword" 13 | }, 14 | "awsAccountId": { 15 | "fields": { 16 | "keyword": { 17 | "ignore_above": 13, 18 | "type": "keyword" 19 | } 20 | }, 21 | "type": "keyword" 22 | }, 23 | "awsAccountName": { 24 | "fields": { 25 | "keyword": { 26 | "ignore_above": 256, 27 | "type": "keyword" 28 | } 29 | }, 30 | "type": "keyword" 31 | }, 32 | "awsRegion": { 33 | "fields": { 34 | "keyword": { 35 | "ignore_above": 20, 36 | "type": "keyword" 37 | } 38 | }, 39 | "type": "keyword" 40 | }, 41 | "configuration": { 42 | "type": "object", 43 | "properties": { 44 | "SecretVersionsToStages": { 45 | "dynamic": false, 46 | "properties": {} 47 | } 48 | } 49 | }, 50 | "configurationItemCaptureTime": { 51 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 52 | "type": "date" 53 | }, 54 | "errors": { 55 | "type": "object" 56 | }, 57 | "resourceId": { 58 | "fields": { 59 | "keyword": { 60 | "ignore_above": 256, 61 | "type": "keyword" 62 | } 63 | }, 64 | "type": "keyword" 65 | }, 66 | 
"resourceName": { 67 | "fields": { 68 | "keyword": { 69 | "ignore_above": 256, 70 | "type": "keyword" 71 | } 72 | }, 73 | "type": "text" 74 | }, 75 | "resourceType": { 76 | "fields": { 77 | "keyword": { 78 | "ignore_above": 256, 79 | "type": "keyword" 80 | } 81 | }, 82 | "type": "keyword" 83 | }, 84 | "source": { 85 | "fields": { 86 | "keyword": { 87 | "ignore_above": 256, 88 | "type": "keyword" 89 | } 90 | }, 91 | "type": "keyword" 92 | }, 93 | "supplementaryConfiguration": { 94 | "type": "object" 95 | }, 96 | "tags": { 97 | "type": "object", 98 | "enabled": false 99 | } 100 | }, 101 | "_meta": { 102 | "antiope_mapping_version": "2019-03-27" 103 | } 104 | } 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /search-cluster/mappings/resources_ssm_managedinstance.json: -------------------------------------------------------------------------------- 1 | { 2 | "settings": { 3 | "index.mapping.ignore_malformed": true 4 | }, 5 | "mappings": { 6 | "_doc": { 7 | "properties": { 8 | "ARN": { 9 | "fields": { 10 | "keyword": { 11 | "ignore_above": 256, 12 | "type": "keyword" 13 | } 14 | }, 15 | "type": "keyword" 16 | }, 17 | "awsAccountId": { 18 | "fields": { 19 | "keyword": { 20 | "ignore_above": 13, 21 | "type": "keyword" 22 | } 23 | }, 24 | "type": "keyword" 25 | }, 26 | "awsAccountName": { 27 | "fields": { 28 | "keyword": { 29 | "ignore_above": 256, 30 | "type": "keyword" 31 | } 32 | }, 33 | "type": "keyword" 34 | }, 35 | "awsRegion": { 36 | "fields": { 37 | "keyword": { 38 | "ignore_above": 20, 39 | "type": "keyword" 40 | } 41 | }, 42 | "type": "keyword" 43 | }, 44 | "configuration": { 45 | "type": "object", 46 | "properties": { 47 | "LastPingDateTime": { 48 | "format": "epoch_second", 49 | "type": "date" 50 | }, 51 | "LastSuccessfulAssociationExecutionDate": { 52 | "format": "yyyy-MM-dd HH:mm:ss+00:00", 53 | "type": "date" 54 | }, 55 | "LastAssociationExecutionDate": { 56 | "format": "yyyy-MM-dd HH:mm:ss+00:00", 57 | "type": "date" 58 | } 59 | } 60 | }, 61 | "configurationItemCaptureTime": { 62 | "format": "yyyy-MM-dd HH:mm:ss.SSSSSS", 63 | "type": "date" 64 | }, 65 | "errors": { 66 | "type": "object" 67 | }, 68 | "resourceId": { 69 | "fields": { 70 | "keyword": { 71 | "ignore_above": 256, 72 | "type": "keyword" 73 | } 74 | }, 75 | "type": "keyword" 76 | }, 77 | "resourceName": { 78 | "fields": { 79 | "keyword": { 80 | "ignore_above": 256, 81 | "type": "keyword" 82 | } 83 | }, 84 | "type": "text" 85 | }, 86 | "resourceType": { 87 | "fields": { 88 | "keyword": { 89 | "ignore_above": 256, 90 | "type": "keyword" 91 | } 92 | }, 93 | "type": "keyword" 94 | }, 95 | "source": { 96 | "fields": { 97 | "keyword": { 98 | "ignore_above": 256, 99 | "type": "keyword" 100 | } 101 | }, 102 | "type": "keyword" 103 | }, 104 | "supplementaryConfiguration": { 105 | "type": "object", 106 | "properties": { 107 | "EventSelectors": { 108 | "type": "nested", 109 | "include_in_parent": true 110 | } 111 | } 112 | }, 113 | "tags": { 114 | "type": "object", 115 | "enabled": false 116 | } 117 | }, 118 | "_meta": { 119 | "antiope_mapping_version": "2019-03-27" 120 | } 121 | } 122 | } 123 | } 124 | -------------------------------------------------------------------------------- /search-cluster/scripts/Makefile: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Static, not sure if needed?? 
4 | PYTHON=python3
5 | PIP=pip3
6 |
7 | DEPENDENCIES=requests lib requests_aws4auth certifi chardet idna urllib3 elasticsearch elasticsearch5
8 |
9 | #
10 | # Lambda function management
11 | #
12 |
13 | clean:
14 | 	rm -rf __pycache__ *.zip *.dist-info HISTORY.rst README.rst LICENSE NOTICE bin $(DEPENDENCIES)
15 |
16 | test: $(FILES)
17 | 	for f in $^; do $(PYTHON) -m py_compile $$f; if [ $$? -ne 0 ] ; then echo "$$f FAILS" ; exit 1; fi; done
18 |
19 | deps:
20 | 	$(PIP) install -r requirements.txt -t . --upgrade
--------------------------------------------------------------------------------
/search-cluster/scripts/create_kibana_index.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from dateutil import tz
4 | from elasticsearch import Elasticsearch, RequestsHttpConnection, ElasticsearchException, RequestError, NotFoundError
5 | from requests_aws4auth import AWS4Auth
6 | import boto3
7 | import datetime
8 | import json
9 | import os
10 | import re
11 | import requests
12 | import time
13 |
14 | import logging
15 | logger = logging.getLogger()
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | logging.getLogger('elasticsearch').setLevel(logging.ERROR)
20 |
21 |
22 | # Lambda execution starts here
23 | def main(args, logger):
24 |
25 |     host = get_endpoint(args.domain, args.region)
26 |     if host is None:
27 |         print("Failed to get Endpoint. Aborting....")
28 |         exit(1)
29 |
30 |     region = os.environ['AWS_DEFAULT_REGION']
31 |     service = 'es'
32 |     credentials = boto3.Session().get_credentials()
33 |     awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
34 |     headers = { "Content-Type": "application/json" }
35 |
36 |     es = Elasticsearch(
37 |         hosts=[{'host': host, 'port': 443}],
38 |         http_auth=awsauth,
39 |         use_ssl=True,
40 |         verify_certs=True,
41 |         connection_class=RequestsHttpConnection
42 |     )
43 |     if args.debug:
44 |         logger.debug(es.info())
45 |
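    # Build one Kibana "index-pattern" saved object per matching index and write it
    # directly into the .kibana system index (id "index-pattern:<name>") below, with
    # configurationItemCaptureTime as the default time field.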
parser.add_argument("--domain", help="Elastic Search Domain", required=True) 91 | parser.add_argument("--index", help="Ony dump the mapping for this index") 92 | parser.add_argument("--region", help="AWS Region") 93 | 94 | args = parser.parse_args() 95 | 96 | return(args) 97 | 98 | if __name__ == '__main__': 99 | 100 | args = do_args() 101 | 102 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 103 | # create console handler and set level to debug 104 | ch = logging.StreamHandler() 105 | if args.error: 106 | logger.setLevel(logging.ERROR) 107 | elif args.debug: 108 | logger.setLevel(logging.DEBUG) 109 | else: 110 | logger.setLevel(logging.INFO) 111 | 112 | # create formatter 113 | # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 114 | formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') 115 | # add formatter to ch 116 | ch.setFormatter(formatter) 117 | # add ch to logger 118 | logger.addHandler(ch) 119 | 120 | # Sanity check region 121 | if args.region: 122 | os.environ['AWS_DEFAULT_REGION'] = args.region 123 | 124 | if 'AWS_DEFAULT_REGION' not in os.environ: 125 | logger.error("AWS_DEFAULT_REGION Not set. Aborting...") 126 | exit(1) 127 | 128 | main(args, logger) 129 | 130 | -------------------------------------------------------------------------------- /search-cluster/scripts/delete_es_index.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from dateutil import tz 4 | from elasticsearch import Elasticsearch, RequestsHttpConnection, ElasticsearchException, RequestError, NotFoundError 5 | from requests_aws4auth import AWS4Auth 6 | import boto3 7 | import datetime 8 | import json 9 | import os 10 | import re 11 | import requests 12 | import time 13 | 14 | import logging 15 | logger = logging.getLogger() 16 | logging.getLogger('botocore').setLevel(logging.WARNING) 17 | logging.getLogger('boto3').setLevel(logging.WARNING) 18 | logging.getLogger('urllib3').setLevel(logging.WARNING) 19 | logging.getLogger('elasticsearch').setLevel(logging.WARNING) 20 | 21 | 22 | # Lambda execution starts here 23 | def main(args, logger): 24 | logger.debug("Purging index {} in {}".format(args.index, args.domain)) 25 | 26 | host = get_endpoint(args.domain, args.region) 27 | if host is None: 28 | print("Failed to get Endpoint. Aborting....") 29 | exit(1) 30 | 31 | region = os.environ['AWS_DEFAULT_REGION'] 32 | service = 'es' 33 | credentials = boto3.Session().get_credentials() 34 | awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token) 35 | 36 | es = Elasticsearch( 37 | hosts=[{'host': host, 'port': 443}], 38 | http_auth=awsauth, 39 | use_ssl=True, 40 | verify_certs=True, 41 | connection_class=RequestsHttpConnection 42 | ) 43 | if args.debug: 44 | logger.debug(es.info()) 45 | 46 | es_idx=es.indices 47 | if args.index: 48 | logger.info(f"Deleting {args.index}") 49 | es_idx.delete(index=args.index) 50 | else: 51 | ans = input(f"You are about to delete all the indices in {args.domain}. Are you sure? (type 'yes' to proceed) ").lower().strip() 52 | if ans != "yes": 53 | print("Probably a good choice. Aborting now....") 54 | exit(0) 55 | 56 | # They said yes. 
57 |         for i in es.indices.get('*'):
58 |             logger.info(f"Deleting {i}")
59 |             es_idx.delete(index=i)
60 |
61 |
62 | def get_endpoint(domain, region):
63 |     ''' using the boto3 api, gets the URL endpoint for the cluster '''
64 |     es_client = boto3.client('es', region_name=region)
65 |
66 |     response = es_client.describe_elasticsearch_domain(DomainName=domain)
67 |     if 'DomainStatus' in response:
68 |         if 'Endpoint' in response['DomainStatus']:
69 |             return(response['DomainStatus']['Endpoint'])
70 |
71 |     logger.error("Unable to get ES Endpoint for {}".format(domain))
72 |     return(None)
73 |
74 |
75 | def do_args():
76 |     import argparse
77 |     parser = argparse.ArgumentParser()
78 |     parser.add_argument("--debug", help="print debugging info", action='store_true')
79 |     parser.add_argument("--error", help="print error info only", action='store_true')
80 |
81 |     # parser.add_argument("--env_file", help="Environment File to source", default="config.env")
82 |
83 |     parser.add_argument("--domain", help="Elastic Search Domain", required=True)
84 |     parser.add_argument("--index", help="Index to purge")
85 |     parser.add_argument("--region", help="AWS Region")
86 |
87 |     args = parser.parse_args()
88 |
89 |     return(args)
90 |
91 | if __name__ == '__main__':
92 |
93 |     args = do_args()
94 |
95 |     # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
96 |     # create console handler and set level to debug
97 |     ch = logging.StreamHandler()
98 |     if args.error:
99 |         logger.setLevel(logging.ERROR)
100 |     elif args.debug:
101 |         logger.setLevel(logging.DEBUG)
102 |     else:
103 |         logger.setLevel(logging.INFO)
104 |
105 |     # create formatter
106 |     # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
107 |     formatter = logging.Formatter('%(levelname)s - %(message)s')
108 |     # add formatter to ch
109 |     ch.setFormatter(formatter)
110 |     # add ch to logger
111 |     logger.addHandler(ch)
112 |
113 |     # Sanity check region
114 |     if args.region:
115 |         os.environ['AWS_DEFAULT_REGION'] = args.region
116 |
117 |     if 'AWS_DEFAULT_REGION' not in os.environ:
118 |         logger.error("AWS_DEFAULT_REGION Not set. Aborting...")
119 |         exit(1)
120 |
121 |     main(args, logger)
122 |
123 |
--------------------------------------------------------------------------------
/search-cluster/scripts/list_es_index.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | from dateutil import tz
4 | from elasticsearch import Elasticsearch, RequestsHttpConnection, ElasticsearchException, RequestError, NotFoundError
5 | from requests_aws4auth import AWS4Auth
6 | import boto3
7 | import datetime
8 | import json
9 | import os
10 | import re
11 | import requests
12 | import time
13 |
14 | import logging
15 | logger = logging.getLogger()
16 | logging.getLogger('botocore').setLevel(logging.WARNING)
17 | logging.getLogger('boto3').setLevel(logging.WARNING)
18 | logging.getLogger('urllib3').setLevel(logging.WARNING)
19 | logging.getLogger('elasticsearch').setLevel(logging.WARNING)
20 |
21 |
22 | # Lambda execution starts here
23 | def main(args, logger):
24 |     logger.debug("Listing all indices in {}".format(args.domain))
25 |
26 |     host = get_endpoint(args.domain, args.region)
27 |     if host is None:
28 |         print("Failed to get Endpoint. Aborting....")
29 |         exit(1)
30 |
31 |     region = os.environ['AWS_DEFAULT_REGION']
32 |     service = 'es'
33 |     credentials = boto3.Session().get_credentials()
34 |     awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token)
35 |
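    # AWS4Auth SigV4-signs each request with the caller's credentials, and
    # RequestsHttpConnection makes the elasticsearch client send its traffic
    # through `requests`, which is what AWS4Auth hooks into. All of these scripts
    # use this same pattern to talk to the Amazon ES domain over HTTPS.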
Aborting....") 29 | exit(1) 30 | 31 | region = os.environ['AWS_DEFAULT_REGION'] 32 | service = 'es' 33 | credentials = boto3.Session().get_credentials() 34 | awsauth = AWS4Auth(credentials.access_key, credentials.secret_key, region, service, session_token=credentials.token) 35 | 36 | es = Elasticsearch( 37 | hosts=[{'host': host, 'port': 443}], 38 | http_auth=awsauth, 39 | use_ssl=True, 40 | verify_certs=True, 41 | connection_class=RequestsHttpConnection 42 | ) 43 | if args.debug: 44 | logger.debug(es.info()) 45 | 46 | for i in es.indices.get('*'): 47 | print(i) 48 | 49 | 50 | def get_endpoint(domain, region): 51 | ''' using the boto3 api, gets the URL endpoint for the cluster ''' 52 | es_client = boto3.client('es', region_name=region) 53 | 54 | response = es_client.describe_elasticsearch_domain(DomainName=domain) 55 | if 'DomainStatus' in response: 56 | if 'Endpoint' in response['DomainStatus']: 57 | return(response['DomainStatus']['Endpoint']) 58 | 59 | logger.error("Unable to get ES Endpoint for {}".format(domain)) 60 | return(None) 61 | 62 | 63 | def do_args(): 64 | import argparse 65 | parser = argparse.ArgumentParser() 66 | parser.add_argument("--debug", help="print debugging info", action='store_true') 67 | parser.add_argument("--error", help="print error info only", action='store_true') 68 | parser.add_argument("--domain", help="Elastic Search Domain", required=True) 69 | parser.add_argument("--region", help="AWS Region") 70 | 71 | args = parser.parse_args() 72 | 73 | return(args) 74 | 75 | if __name__ == '__main__': 76 | 77 | args = do_args() 78 | 79 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 80 | # create console handler and set level to debug 81 | ch = logging.StreamHandler() 82 | if args.error: 83 | logger.setLevel(logging.ERROR) 84 | elif args.debug: 85 | logger.setLevel(logging.DEBUG) 86 | else: 87 | logger.setLevel(logging.INFO) 88 | 89 | # create formatter 90 | # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 91 | formatter = logging.Formatter('%(levelname)s - %(message)s') 92 | # add formatter to ch 93 | ch.setFormatter(formatter) 94 | # add ch to logger 95 | logger.addHandler(ch) 96 | 97 | # Sanity check region 98 | if args.region: 99 | os.environ['AWS_DEFAULT_REGION'] = args.region 100 | 101 | if 'AWS_DEFAULT_REGION' not in os.environ: 102 | logger.error("AWS_DEFAULT_REGION Not set. Aborting...") 103 | exit(1) 104 | 105 | main(args, logger) 106 | 107 | -------------------------------------------------------------------------------- /search-cluster/scripts/reindex_resources.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from dateutil import tz 4 | from elasticsearch import Elasticsearch, RequestsHttpConnection, ElasticsearchException 5 | from requests_aws4auth import AWS4Auth 6 | import boto3 7 | import datetime 8 | import json 9 | import os 10 | import re 11 | import requests 12 | import time 13 | 14 | import logging 15 | logger = logging.getLogger() 16 | logging.getLogger('botocore').setLevel(logging.WARNING) 17 | logging.getLogger('boto3').setLevel(logging.WARNING) 18 | logging.getLogger('urllib3').setLevel(logging.WARNING) 19 | logging.getLogger('elasticsearch').setLevel(logging.ERROR) 20 | 21 | 22 | # This number will bang into the Lambda Timeout, so adjust with care. 
23 | BATCH_SIZE = 50
24 |
25 | # Lambda execution starts here
26 | def main(args, logger):
27 |
28 |     stack_info = get_stack(args.stackname)
29 |     bucket = get_bucket_name(stack_info)
30 |     queue_url = get_queue_url(stack_info)
31 |
32 |     sqs_client = boto3.client('sqs')
33 |     s3_client = boto3.client('s3')
34 |
35 |     counter = 0
36 |     file_count = 0
37 |
38 |
39 |     # Start iterating the objects
40 |     response = s3_client.list_objects_v2(Bucket=bucket, MaxKeys=BATCH_SIZE, Prefix=args.prefix)
41 |     while response['IsTruncated']:
42 |
43 |         files = []
44 |         for o in response.get('Contents', []):
45 |             files.append(o['Key'])
46 |         file_count += send_message(sqs_client, queue_url, bucket, files)
47 |         counter += 1
48 |
49 |         response = s3_client.list_objects_v2(Bucket=bucket, MaxKeys=BATCH_SIZE, Prefix=args.prefix, ContinuationToken=response['NextContinuationToken'])
50 |
51 |     # Process the final (or only) page; the loop above exits once IsTruncated is False.
52 |     files = []
53 |     for o in response.get('Contents', []):  # 'Contents' is absent entirely when the prefix matches nothing
54 |         files.append(o['Key'])
55 |     file_count += send_message(sqs_client, queue_url, bucket, files)
56 |     counter += 1
57 |
58 |     print(f"Sent {counter} messages to index {file_count} objects")
59 |
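# send_message() wraps each batch of keys in the same {"Records": [{"s3": ...}]}
# shape as a native S3 event notification, presumably so the search cluster's
# ingest lambda can process messages from this script and from the bucket's real
# notifications identically. A single message body looks roughly like this
# (bucket and key are made-up examples):
#   {"Records": [{"s3": {"bucket": {"name": "my-antiope-bucket"},
#                        "object": {"key": "Resources/route53/hostedzone/Z123.json"}}}]}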
Aborting...") 110 | exit(1) 111 | 112 | 113 | def do_args(): 114 | import argparse 115 | parser = argparse.ArgumentParser() 116 | parser.add_argument("--debug", help="print debugging info", action='store_true') 117 | parser.add_argument("--error", help="print error info only", action='store_true') 118 | 119 | parser.add_argument("--stackname", help="CF Stack with Bucket & SQS", required=True) 120 | parser.add_argument("--prefix", help="Re-Index resources with this prefix", required=True) 121 | 122 | args = parser.parse_args() 123 | 124 | return(args) 125 | 126 | if __name__ == '__main__': 127 | 128 | args = do_args() 129 | 130 | # Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging 131 | # create console handler and set level to debug 132 | ch = logging.StreamHandler() 133 | if args.error: 134 | logger.setLevel(logging.ERROR) 135 | elif args.debug: 136 | logger.setLevel(logging.DEBUG) 137 | else: 138 | logger.setLevel(logging.INFO) 139 | 140 | # create formatter 141 | # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 142 | formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s') 143 | # add formatter to ch 144 | ch.setFormatter(formatter) 145 | # add ch to logger 146 | logger.addHandler(ch) 147 | 148 | main(args, logger) 149 | 150 | -------------------------------------------------------------------------------- /search-cluster/scripts/setup.cfg: -------------------------------------------------------------------------------- 1 | [install] 2 | prefix= 3 | --------------------------------------------------------------------------------