├── requirements.txt ├── flow.png ├── common ├── constants.py └── shared_tools.py ├── remediation ├── codebuild_handler.py ├── cross_handler.py ├── sts_handler.py ├── ssm_handler.py ├── autoscale_handler.py ├── ec2_handler.py ├── lambda_handler.py ├── cf_handler.py ├── sagemaker_handler.py ├── remediation_handler.py └── iam_handler.py ├── Dockerfile ├── LICENSE ├── required_permissions.json ├── bob.json ├── main ├── revert.py ├── apply.py ├── plan.py └── audit.py ├── misc ├── case_insensitive_dict.py └── gathering.py ├── iam_deesc.py └── README.md /requirements.txt: -------------------------------------------------------------------------------- 1 | boto3 > 1.13 2 | packaging 3 | python-dateutil 4 | -------------------------------------------------------------------------------- /flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PaloAltoNetworks/IAM-Deescalate/HEAD/flow.png -------------------------------------------------------------------------------- /common/constants.py: -------------------------------------------------------------------------------- 1 | OUTPUT_DIR = './output' 2 | AUDIT_FILE = 'privesc.json' 3 | POLICY__PREFIX = 'iam_deescalate' 4 | ADMIN_RND_TEST_CNT = 10 5 | BANNER = """ 6 | _____ __ __ _____ 7 | |_ _| /\ | \/ | | __ \ 8 | | | / \ | \ / |______| | | | ___ ___ ___ ___ 9 | | | / /\ \ | |\/| |______| | | |/ _ \/ _ \/ __|/ __| 10 | _| |_ / ____ \| | | | | |__| | __/ __/\__ \ (__ 11 | |_____/_/ \_\_| |_| |_____/ \___|\___||___/\___| 12 | """ -------------------------------------------------------------------------------- /remediation/codebuild_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import logging 3 | 4 | class CodebuildHandler(RemediationHandler): 5 | def __init__(self) -> None: 6 | super().__init__(AWS_SVC_TYPE.CODEBUILD) 7 | 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | if 'can use CodeBuild with an existing project to access' in reason or 'can create a project in CodeBuild to access' in reason or 'can update a project in CodeBuild to access' in reason: 11 | return self._plan_scenario1(src_principal) 12 | else: 13 | logging.error('Unrecognized PMapper reason') 14 | 15 | def _plan_scenario1(self, src_principal): 16 | strategy1 = RemediationStrategy(src_principal, 'codebuild') 17 | action_list = ['codebuild:StartBuild', 'codebuild:StartBuildBatch'] 18 | resource_list = ['*'] 19 | strategy1.add_process('Deny', action_list, resource_list) 20 | return[strategy1] 21 | 22 | -------------------------------------------------------------------------------- /remediation/cross_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import logging 3 | 4 | class CrossHandler(RemediationHandler): 5 | def __init__(self) -> None: 6 | super().__init__(AWS_SVC_TYPE.IAM) 7 | 8 | def get_remediation_plan(self, src_principal, dst_principal, reason): 9 | ''' 10 | condition1: sts:AssumeRole 11 | ''' 12 | if 'can call sts:AssumeRole to access' in reason: 13 | return self._plan_scenario1(src_principal, dst_principal) 14 | else: 15 | logging.error('Unrecognized PMapper reason {}'.format(reason)) 16 | 17 | def _plan_scenario1(self, src_principal, dst_principal): 18 | ''' Deny src_principal 
to perform autoscaling:CreateAutoScalingGroup ''' 19 | plan = RemediationStrategy(src_principal, 'sts') 20 | action_list = ['sts:AssumeRole'] 21 | resource_list = ['{}'.format(dst_principal)] 22 | plan.add_process('Deny', action_list, resource_list) 23 | return [plan] -------------------------------------------------------------------------------- /remediation/sts_handler.py: -------------------------------------------------------------------------------- 1 | 2 | import logging 3 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 4 | 5 | class STSHandler(RemediationHandler): 6 | def __init__(self) -> None: 7 | super().__init__(AWS_SVC_TYPE.STS) 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | ''' 11 | condition1: sts:AssumeRole 12 | condition2: 13 | ''' 14 | if 'can access via sts:AssumeRole' in reason: 15 | return self._plan_scenario1(src_principal, dst_principal) 16 | else: 17 | logging.error('Unrecognized PMapper reason {}'.format(reason)) 18 | 19 | def _plan_scenario1(self, src_principal, dst_principal): 20 | ''' Deny src_principal to perform autoscaling:CreateAutoScalingGroup ''' 21 | plan = RemediationStrategy(src_principal, 'sts') 22 | action_list = ['sts:AssumeRole'] 23 | resource_list = ['{}'.format(dst_principal)] 24 | plan.add_process('Deny', action_list, resource_list) 25 | return [plan] -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # FROM python:3-slim as builder 2 | 3 | FROM python:3.8-slim as builder 4 | 5 | LABEL maintainer="Jay Chen " 6 | 7 | RUN apt-get update && apt-get install git -y && mkdir /app 8 | 9 | WORKDIR /app 10 | 11 | RUN git clone https://github.com/nccgroup/PMapper.git 12 | 13 | COPY ./ ./IAM-Deescalate 14 | 15 | RUN cp IAM-Deescalate/misc/gathering.py PMapper/principalmapper/graphing/gathering.py 16 | 17 | RUN cp IAM-Deescalate/misc/case_insensitive_dict.py PMapper/principalmapper/util/case_insensitive_dict.py 18 | 19 | WORKDIR /app/IAM-Deescalate 20 | 21 | RUN pip3 --disable-pip-version-check install -r requirements.txt 22 | 23 | 24 | FROM gcr.io/distroless/python3 25 | 26 | COPY --from=builder /app /app 27 | 28 | COPY --from=builder /usr/local/lib/python3.8 /usr/local/lib/python3.8 29 | 30 | ENV PYTHONPATH=/usr/local/lib/python3.8/site-packages 31 | 32 | ENV AWS_SHARED_CREDENTIALS_FILE=/.aws/credentials 33 | 34 | WORKDIR /app/IAM-Deescalate 35 | 36 | VOLUME [ "/app/IAM-Deescalate/output/" ] 37 | 38 | ENTRYPOINT [ "python3", "iam_desc.py" ] 39 | 40 | CMD ["-h"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Palo Alto Networks 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /common/shared_tools.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import re 4 | import logging 5 | 6 | def loadFromJson(filepath): 7 | ''' Load a json file to an python object ''' 8 | if not os.path.exists(filepath): 9 | logging.error('File %s does not exist' %filepath) 10 | exit() 11 | with open(filepath, newline='', encoding='utf-8') as fhand: 12 | try: 13 | return json.load(fhand) 14 | except json.decoder.JSONDecodeError as e: 15 | logging.error(e) 16 | return 17 | 18 | def dumpToJson(obj, filePath, indent=None): 19 | ''' dump a python object to Json''' 20 | with open(filePath, 'w') as fp: 21 | json.dump(obj, fp, default=str, indent = indent) 22 | 23 | principal_re = re.compile(r':\d{12}:(.+)') 24 | def parse_principal_from_arn(arn): 25 | ''' parse out the principal part starting from user, role, or group from an input arn''' 26 | search = principal_re.search(arn) 27 | if search: 28 | return search.group(1).strip() 29 | 30 | def check_boto3_response(resp): 31 | ''' check the response of boto3 APIs ''' 32 | return 'ResponseMetadata' in resp and resp['ResponseMetadata']['HTTPStatusCode'] >= 200 and resp['ResponseMetadata']['HTTPStatusCode'] < 300 33 | -------------------------------------------------------------------------------- /required_permissions.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "PMapperPerms", 6 | "Effect": "Allow", 7 | "Action": [ 8 | "iam:List*", 9 | "iam:Get*", 10 | "iam:PutUserPolicy", 11 | "iam:PutRolePolicy", 12 | "iam:PutGroupPolicy", 13 | "iam:DeleteRolePolicy", 14 | "iam:DeleteUserPolicy", 15 | "iam:DeleteGroupPolicy", 16 | "sts:GetCallerIdentity", 17 | "organizations:List*", 18 | "organizations:Describe*", 19 | "s3:ListAllMyBuckets", 20 | "s3:ListBucket", 21 | "s3:GetBucketPolicy", 22 | "kms:ListKeys", 23 | "kms:GetKeyPolicy", 24 | "sns:ListTopics", 25 | "sns:GetTopicAttributes", 26 | "sqs:ListQueues", 27 | "sqs:GetQueueAttributes", 28 | "secretsmanager:ListSecrets", 29 | "secretsmanager:GetResourcePolicy", 30 | "cloudformation:DescribeStacks", 31 | "lambda:ListFunctions", 32 | "codebuild:ListProjects", 33 | "codebuild:BatchGetProjects", 34 | "autoscaling:DescribeLaunchConfigurations" 35 | ], 36 | "Resource": "*" 37 | } 38 | ] 39 | } -------------------------------------------------------------------------------- /remediation/ssm_handler.py: -------------------------------------------------------------------------------- 1 | 2 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 3 | import logging 4 | 5 | class SSMHandler(RemediationHandler): 6 | def __init__(self) -> None: 7 | super().__init__(AWS_SVC_TYPE.SSM) 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | # better if we can identify a list of instance with priviledged 
permissions (https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-sessions-start.html#start-sys-console) 11 | if 'can call ssm:SendCommand to access an EC2' in reason: 12 | return self._plan_scenario1(src_principal) 13 | elif 'can call ssm:StartSession to access an EC2' in reason: 14 | return self._plan_scenario2(src_principal) 15 | else: 16 | logging.error('Unrecognized PMapper reason {}'.format(reason)) 17 | 18 | def _plan_scenario1(self, src_principal): 19 | ''' deny src to perform ssm:SendCommand ''' 20 | strategy1 = RemediationStrategy(src_principal, 'ssm') 21 | action_list = ['ssm:SendCommand'] 22 | resource_list = ['*'] 23 | strategy1.add_process('Deny', action_list, resource_list) 24 | return [strategy1] 25 | 26 | def _plan_scenario2(self, src_principal): 27 | ''' deny src to perform ssm:StartSession ''' 28 | strategy1 = RemediationStrategy(src_principal, 'ssm') 29 | action_list = ['ssm:StartSession'] 30 | resource_list = ['*'] 31 | strategy1.add_process('Deny', action_list, resource_list) 32 | return [strategy1] -------------------------------------------------------------------------------- /remediation/autoscale_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import logging 3 | 4 | class AutoscalHandler(RemediationHandler): 5 | def __init__(self) -> None: 6 | super().__init__(AWS_SVC_TYPE.AUTOSCALING) 7 | 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | if 'can use the EC2 Auto Scaling service role and an existing Launch Configuration to access' in reason or 'can create the EC2 Auto Scaling service role and an existing Launch Configuration to access' in reason: 11 | return self._plan_scenario1(src_principal) 12 | elif 'create a launch configuration to access' in reason: 13 | return self._plan_scenario2(src_principal, dst_principal) 14 | else: 15 | logging.error('Unrecognized PMapper reason') 16 | 17 | def _plan_scenario1(self, src_principal): 18 | ''' Deny src_principal to perform autoscaling:CreateAutoScalingGroup ''' 19 | plan = RemediationStrategy(src_principal, 'autoscaling') 20 | action_list = ['autoscaling:CreateAutoScalingGroup'] 21 | resource_list = ['*'] 22 | plan.add_process('Deny', action_list, resource_list) 23 | return [plan] 24 | 25 | 26 | def _plan_scenario2(self, src_principal, dst_principal): 27 | ''' deny src_principal to perform autoscaling:CreateLaunchConfiguration or deny src_principal to perform iam:PassRole dst on ec2.amazonaws.com ''' 28 | plan1 = RemediationStrategy(src_principal, 'autoscaling') 29 | action_list = ['autoscaling:CreateLaunchConfiguration'] 30 | resource_list = ['*'] 31 | plan1.add_process('Deny', action_list, resource_list) 32 | 33 | plan2 = RemediationStrategy(src_principal, 'autoscaling') 34 | action_list = ['iam:PassRole'] 35 | resource_list = ['{}'.format(dst_principal)] 36 | plan2.add_process('Deny', action_list, resource_list) 37 | 38 | return[plan1, plan2] 39 | 40 | -------------------------------------------------------------------------------- /remediation/ec2_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import logging 3 | 4 | class EC2Handler(RemediationHandler): 5 | def __init__(self) -> None: 6 | super().__init__(AWS_SVC_TYPE.EC2) 7 | 8 | def 
get_remediation_plan(self, src_principal, dst_principal, reason): 9 | if 'can use EC2 to run an instance with' in reason: 10 | return self._plan_scenario1(src_principal, dst_principal) 11 | elif 'can use EC2 to run an instance and then' in reason: 12 | return self._plan_scenario2(src_principal, dst_principal) 13 | else: 14 | logging.error('Unrecognized PMapper reason {}'.format(reason)) 15 | 16 | def _plan_scenario1(self, src_principal, dst_principal): 17 | ''' deny src to perform iam:PassRole on dst or deny src ec2:RunInstances ''' 18 | strategy1 = RemediationStrategy(src_principal, 'ec2') 19 | action_list = ['iam:PassRole'] 20 | resource_list = ['{}'.format(dst_principal)] 21 | strategy1.add_process('Deny', action_list, resource_list) 22 | 23 | strategy2 = RemediationStrategy(src_principal, 'ec2') 24 | action_list = ['ec2:RunInstances'] 25 | resource_list = ['*'] 26 | strategy2.add_process('Deny', action_list, resource_list) 27 | return [strategy1, strategy2] 28 | 29 | 30 | def _plan_scenario2(self, src_principal, dst_principal): 31 | ''' deny src to perform iam:PassRole on dst or deny src ec2:AssociateIamInstanceProfile ''' 32 | strategy1 = RemediationStrategy(src_principal, 'ec2') 33 | action_list = ['iam:PassRole'] 34 | resource_list = ['{}'.format(dst_principal)] 35 | strategy1.add_process('Deny', action_list, resource_list) 36 | 37 | strategy2 = RemediationStrategy(src_principal, 'ec2') 38 | action_list = ['ec2:AssociateIamInstanceProfile'] 39 | resource_list = ['*'] 40 | strategy2.add_process('Deny', action_list, resource_list) 41 | 42 | return[strategy1, strategy2] -------------------------------------------------------------------------------- /remediation/lambda_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import re 3 | import logging 4 | 5 | class LambdaHandler(RemediationHandler): 6 | def __init__(self) -> None: 7 | super().__init__(AWS_SVC_TYPE.LAMBDA) 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | ''' 11 | scenario1: iam:PassRole to lambda.amazonaws.com and lambda:CreateFunction 12 | scenario2: lambda:UpdateFunctionCode 13 | scenario3: iam:PassRole to lambda.amazonaws.com and lambda:UpdateFunctionConfiguration and lambda:UpdateFunctionCode 14 | ''' 15 | if 'can use Lambda to create a new function with arbitrary code' in reason: 16 | return self._plan_scenario1(src_principal, dst_principal) 17 | elif 'can use Lambda to edit an existing function' in reason: 18 | return self._plan_scenario2(src_principal, reason) 19 | else: 20 | logging.error('Unrecognized PMapper reason') 21 | 22 | def _plan_scenario1(self, src_principal, dst_principal): 23 | ''' Deny src_principal to perform iam:PassRole on dst_principal' or 'deny lambda:CreateFunction ''' 24 | plan1 = RemediationStrategy(src_principal, 'lambda') 25 | action_list = ['lambda:CreateFunction'] 26 | resource_list = ['*'] 27 | plan1.add_process('Deny', action_list, resource_list) 28 | 29 | plan2 = RemediationStrategy(src_principal, 'lambda') 30 | action_list = ['iam:PassRole'] 31 | resource_list = ['{}'.format(dst_principal)] 32 | plan2.add_process('Deny', action_list, resource_list) 33 | return [plan1, plan2] 34 | 35 | 36 | def _plan_scenario2(self, src_principal, reason): 37 | ''' deny src to perform lambda:UpdateFunctionCode on specific function ''' 38 | # Parse the function arn 39 | lambda_re = re.compile(r'.+\((arn:aws:lambda:\S+)\).+') 40 
| search = lambda_re.search(reason) 41 | if search: 42 | lambda_arn = search.group(1).strip() 43 | 44 | plan1 = RemediationStrategy(src_principal, 'lambda') 45 | action_list = ['lambda:UpdateFunctionCode'] 46 | resource_list = [lambda_arn] 47 | plan1.add_process('Deny', action_list, resource_list) 48 | 49 | return[plan1] 50 | 51 | 52 | -------------------------------------------------------------------------------- /bob.json: -------------------------------------------------------------------------------- 1 | { 2 | "vulnerable_principal": "arn:aws:iam::123456789012:user/bob", 3 | "reachable_admin_principals": { 4 | "arn:aws:iam::123456789012:user/it_admin": [ 5 | { 6 | "reason": "can create access keys to authenticate as", 7 | "remediations": [ 8 | { 9 | "policy": [ 10 | { 11 | "Effect": "Deny", 12 | "Action": [ 13 | "iam:CreateAccessKey" 14 | ], 15 | "Resource": [ 16 | "arn:aws:iam::123456789012:user/it_admin" 17 | ] 18 | } 19 | ], 20 | "patch_me": 0 21 | } 22 | ] 23 | } 24 | ], 25 | "arn:aws:iam::123456789012:role/it_admin_role": [ 26 | { 27 | "reason": "can use EC2 to run an instance and then associate an existing instance profile to access", 28 | "remediations": [ 29 | { 30 | "policy": [ 31 | { 32 | "Effect": "Deny", 33 | "Action": [ 34 | "iam:PassRole" 35 | ], 36 | "Resource": [ 37 | "arn:aws:iam::123456789012:role/it_admin_role" 38 | ] 39 | } 40 | ], 41 | "patch_me": 0 42 | }, 43 | { 44 | "policy": [ 45 | { 46 | "Effect": "Deny", 47 | "Action": [ 48 | "ec2:AssociateIamInstanceProfile" 49 | ], 50 | "Resource": [ 51 | "*" 52 | ] 53 | } 54 | ], 55 | "patch_me": 0 56 | } 57 | ] 58 | } 59 | ] 60 | } 61 | } 62 | 63 | -------------------------------------------------------------------------------- /remediation/cf_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import re 3 | import logging 4 | 5 | class CFHandler(RemediationHandler): 6 | def __init__(self) -> None: 7 | super().__init__(AWS_SVC_TYPE.CLOUDFORMATION) 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | if 'can create a stack in CloudFormation to access' in reason: 11 | return self._plan_scenario1(src_principal, dst_principal) 12 | elif 'can update the CloudFormation stack' in reason: 13 | return self._plan_scenario2(src_principal, reason) 14 | elif 'can create and execute a changeset in CloudFormation for stack' in reason: 15 | return self._plan_scenario3(src_principal, reason) 16 | else: 17 | logging.error('Unrecognized PMapper reason') 18 | 19 | def _plan_scenario1(self, src_principal, dst_principal): 20 | ''' Deny src_principal to perform cloudformation:CreateStack or deny src_principal to perform iam:PassRole on dst_principal ''' 21 | strategy1 = RemediationStrategy(src_principal, 'cloudformation') 22 | action_list = ['cloudformation:CreateStack'] 23 | resource_list = ['*'] 24 | strategy1.add_process('Deny', action_list, resource_list) 25 | 26 | strategy2 = RemediationStrategy(src_principal, 'cloudformation') 27 | action_list = ['iam:PassRole'] 28 | resource_list = ['{}'.format(dst_principal)] 29 | strategy2.add_process('Deny', action_list, resource_list) 30 | return [strategy1, strategy2] 31 | 32 | 33 | def _plan_scenario2(self, src_principal, reason): 34 | ''' Deny src_principal to perform cloudformation:UpdateStack on a specific stack ''' 35 | # Parse the function arn 36 | cf_re = re.compile(r'.+(arn:aws:cloudformation:\S+).+') 37 | search = 
cf_re.search(reason) 38 | if search: 39 | cf_arn = search.group(1).strip() 40 | 41 | strategy1 = RemediationStrategy(src_principal, 'cloudformation') 42 | action_list = ['cloudformation:UpdateStack'] 43 | resource_list = [cf_arn] 44 | strategy1.add_process('Deny', action_list, resource_list) 45 | 46 | return[strategy1] 47 | 48 | def _plan_scenario3(self, src_principal, reason): 49 | ''' Deny src_principal to perform cloudformation:ExecuteChangeSet on specific stack ''' 50 | # Parse the function arn 51 | cf_re = re.compile(r'.+(arn:aws:cloudformation:\S+).+') 52 | search = cf_re.search(reason) 53 | if search: 54 | cf_arn = search.group(1).strip() 55 | 56 | strategy1 = RemediationStrategy(src_principal, 'cloudformation') 57 | action_list = ['cloudformation:ExecuteChangeSet'] 58 | resource_list = [cf_arn] 59 | strategy1.add_process('Deny', action_list, resource_list) 60 | 61 | return[strategy1] 62 | 63 | 64 | -------------------------------------------------------------------------------- /remediation/sagemaker_handler.py: -------------------------------------------------------------------------------- 1 | 2 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 3 | import logging 4 | 5 | class SagemakerHandler(RemediationHandler): 6 | def __init__(self) -> None: 7 | super().__init__(AWS_SVC_TYPE.SAGEMAKER) 8 | 9 | def get_remediation_plan(self, src_principal, dst_principal, reason): 10 | if 'can use SageMaker to launch a notebook and access' in reason: 11 | return self._plan_scenario1(src_principal, dst_principal) 12 | elif 'can use SageMaker to create a training job and access' in reason: 13 | return self._plan_scenario2(src_principal, dst_principal) 14 | elif 'can use SageMaker to create a processing job and access' in reason: 15 | return self._plan_scenario3(src_principal, dst_principal) 16 | else: 17 | logging.error('Unrecognized PMapper reason {}'.format(reason)) 18 | 19 | def _plan_scenario1(self, src_principal, dst_principal): 20 | ''' deny src to perform iam:PassRole on dst or 'deny sagemaker:CreateNotebookInstance ''' 21 | strategy1 = RemediationStrategy(src_principal, 'sagemaker') 22 | action_list = ['iam:PassRole'] 23 | resource_list = ['{}'.format(dst_principal)] 24 | strategy1.add_process('Deny', action_list, resource_list) 25 | 26 | strategy2 = RemediationStrategy(src_principal, 'sagemaker') 27 | action_list = ['sagemaker:CreateNotebookInstance'] 28 | resource_list = ['*'] 29 | strategy2.add_process('Deny', action_list, resource_list) 30 | return [strategy1, strategy2] 31 | 32 | 33 | def _plan_scenario2(self, src_principal, dst_principal): 34 | ''' deny src to perform iam:PassRole on dst or 'deny sagemaker:CreateTrainingJob ''' 35 | strategy1 = RemediationStrategy(src_principal, 'sagemaker') 36 | action_list = ['iam:PassRole'] 37 | resource_list = ['{}'.format(dst_principal)] 38 | strategy1.add_process('Deny', action_list, resource_list) 39 | 40 | strategy2 = RemediationStrategy(src_principal, 'sagemaker') 41 | action_list = ['sagemaker:CreateTrainingJob'] 42 | resource_list = ['*'] 43 | strategy2.add_process('Deny', action_list, resource_list) 44 | return [strategy1, strategy2] 45 | 46 | def _plan_scenario3(self, src_principal, dst_principal): 47 | ''' deny src to perform iam:PassRole on dst or 'deny sagemaker:CreateProcessingJob ''' 48 | strategy1 = RemediationStrategy(src_principal, 'sagemaker') 49 | action_list = ['iam:PassRole'] 50 | resource_list = ['{}'.format(dst_principal)] 51 | strategy1.add_process('Deny', action_list, 
resource_list) 52 | 53 | strategy2 = RemediationStrategy(src_principal, 'sagemaker') 54 | action_list = ['sagemaker:CreateProcessingJob'] 55 | resource_list = ['*'] 56 | strategy2.add_process('Deny', action_list, resource_list) 57 | return [strategy1, strategy2] -------------------------------------------------------------------------------- /remediation/remediation_handler.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | from enum import Enum 3 | from typing import List 4 | import logging 5 | 6 | class AWS_SVC_TYPE(Enum): 7 | AUTOSCALING = 'autoscaling' 8 | CLOUDFORMATION = 'cloudformation' 9 | CODEBUILD = 'cloudbuild' 10 | IAM = 'iam' 11 | EC2 = 'ec2' 12 | LAMBDA = 'lambda' 13 | SAGEMAKER = 'sagemaker' 14 | SSM = 'ssm' 15 | STS = 'sts' 16 | 17 | class RemediationStrategy(object): 18 | ''' A remediation strategy breaks one edge on a privilege escalation path. It creates a remediation plan based on the reason stored in PMapper's edge object. 19 | Reference: https://github.com/nccgroup/PMapper/tree/master/principalmapper/graphing. ''' 20 | 21 | def __init__(self, principal, target_service) -> None: 22 | self.principal = principal 23 | self.service = target_service 24 | self.process_list = list() # A list of processes that all should be performed to complete a strategy. 25 | 26 | def add_process(self, effect, action_list, resource_list, condition_list = None): 27 | ''' Add one process necessary to break an edge. Multiple processes may be added to each strategy. To break the edge, all the processes need to be taken. ''' 28 | process = dict() 29 | if not effect in ['Allow', 'Deny']: 30 | raise ValueError('Invalid effect. Only Allow or Deny is allowed') 31 | process['Effect'] = effect 32 | process['Action'] = action_list 33 | process['Resource'] = resource_list 34 | if condition_list: 35 | process['Condition'] = condition_list 36 | self.process_list.append(process) 37 | 38 | def describe_strategy(self): 39 | str = '' 40 | for process in self.process_list: 41 | str += 'Effect: {}\n'.format(process['Effect']) 42 | str += 'Action: {}\n'.format(','.join(process['Action'])) 43 | str += 'Resource: {}\n'.format(','.join(process['Resource'])) 44 | if 'Condition' in process: 45 | str += 'Condition: {}\n'.format(','.join(process['Condition'])) 46 | str += '\n' 47 | return str 48 | 49 | def to_policy_obj(self): 50 | ''' Return an AWS policy document ''' 51 | policy = dict() 52 | policy['Version'] = '2012-10-17' 53 | policy['Statement'] = self.process_list 54 | return policy 55 | 56 | class RemediationHandler(ABC): 57 | ''' Each subclass of RemediationHandler is an analyzer that parses the privilege escalation reasons of a specific service. Each PMapper's edge object has a reason property that explains how one node (principal) can transition to another node (principal). RemediationHander outputs a RemediationStrategy after analyzing the reasons. 
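A minimal usage sketch (illustrative only; the ARNs and reason string below are the sample values from bob.json, and main/plan.py normally picks the concrete handler based on PMapper's short_reason):

        from remediation import ec2_handler
        handler = ec2_handler.EC2Handler()
        strategies = handler.get_remediation_plan(
            'arn:aws:iam::123456789012:user/bob',
            'arn:aws:iam::123456789012:role/it_admin_role',
            'can use EC2 to run an instance and then associate an existing instance profile to access')
        for strategy in strategies:
            # each strategy can be rendered for review or turned into a policy document
            print(strategy.describe_strategy())  # or strategy.to_policy_obj()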
''' 58 | def __init__(self, svc_type) -> None: 59 | if not isinstance(svc_type, AWS_SVC_TYPE): 60 | logging.error('svc_type must be of type AWS_SVC_TYPE') 61 | raise(ValueError) 62 | 63 | super().__init__() 64 | 65 | @abstractmethod 66 | def get_remediation_plan(self, src_principal, dst_principal, reason) -> List[RemediationStrategy]: 67 | pass 68 | 69 | 70 | -------------------------------------------------------------------------------- /remediation/iam_handler.py: -------------------------------------------------------------------------------- 1 | from remediation.remediation_handler import RemediationStrategy, RemediationHandler, AWS_SVC_TYPE 2 | import logging 3 | 4 | class IAMHandler(RemediationHandler): 5 | def __init__(self) -> None: 6 | super().__init__(AWS_SVC_TYPE.IAM) 7 | 8 | def get_remediation_plan(self, src_principal, dst_principal, reason): 9 | if 'can create access keys to authenticate as' in reason: 10 | return self._plan_scenario1(src_principal, dst_principal) 11 | elif 'can set the password to authenticate as' in reason: 12 | return self._plan_scenario2(src_principal, dst_principal) 13 | elif 'can update the trust document to access' in reason: 14 | return self._plan_scenario3(src_principal, dst_principal) 15 | elif 'to escalate itself to admin' in reason: 16 | if 'iam:CreateRole and iam:AttachRolePolicy' in reason: 17 | return self._plan_scenarios(src_principal, ['iam:AttachRolePolicy'], ['iam:CreateRole']) 18 | elif 'iam:CreateRole and iam:PutRolePolicy' in reason: 19 | return self._plan_scenarios(src_principal, ['iam:PutRolePolicy'], ['iam:CreateRole']) 20 | elif 'iam:PutUserPolicy' in reason: 21 | return self._plan_scenarios(src_principal, ['iam:PutUserPolicy']) 22 | elif 'iam:PutRolePolicy' in reason: 23 | return self._plan_scenarios(src_principal, ['iam:PutRolePolicy']) 24 | elif 'iam:AttachUserPolicy' in reason: 25 | return self._plan_scenarios(src_principal, ['iam:AttachUserPolicy']) 26 | elif 'iam:AttachRolePolicy' in reason: 27 | return self._plan_scenarios(src_principal, ['iam:AttachRolePolicy']) 28 | elif 'iam:CreatePolicyVersion' in reason: 29 | return self._plan_scenarios(src_principal, ['iam:CreatePolicyVersion']) 30 | elif 'iam:PutGroupPolicy' in reason: 31 | return self._plan_scenarios(src_principal, ['iam:PutGroupPolicy']) 32 | elif 'iam:AttachGroupPolicy' in reason: 33 | return self._plan_scenarios(src_principal, ['iam:AttachGroupPolicy']) 34 | else: 35 | logging.error('Unrecognized self-escalation reason') 36 | else: 37 | logging.error('Unrecognized PMapper reason') 38 | 39 | def _plan_scenario1(self, src_principal, dst_principal): 40 | strategy1 = RemediationStrategy(src_principal, 'iam') 41 | action_list = ['iam:CreateAccessKey'] 42 | resource_list = ['{}'.format(dst_principal)] 43 | strategy1.add_process('Deny', action_list, resource_list) 44 | return [strategy1] 45 | 46 | def _plan_scenario2(self, src_principal, dst_principal): 47 | strategy1 = RemediationStrategy(src_principal, 'iam') 48 | action_list = ['iam:CreateLoginProfile', 'iam:UpdateLoginProfile'] 49 | resource_list = ['{}'.format(dst_principal)] 50 | strategy1.add_process('Deny', action_list, resource_list) 51 | return [strategy1] 52 | 53 | def _plan_scenario3(self, src_principal, dst_principal): 54 | strategy1 = RemediationStrategy(src_principal, 'iam') 55 | action_list = ['iam:UpdateAssumeRolePolicy'] 56 | resource_list = ['{}'.format(dst_principal)] 57 | strategy1.add_process('Deny', action_list, resource_list) 58 | return [strategy1] 59 | 60 | def _plan_scenarios(self, 
src_principal, *action_lists): 61 | ''' each action_list represetns a single strategy to break the edge ''' 62 | strategy_list = list() 63 | for action_list in action_lists: 64 | strategy = RemediationStrategy(src_principal, 'iam') 65 | resource_list = ['*'] 66 | strategy.add_process('Deny', action_list, resource_list) 67 | strategy_list.append(strategy) 68 | return strategy_list 69 | -------------------------------------------------------------------------------- /main/revert.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import botocore 4 | import logging 5 | import common.shared_tools as st 6 | import common.constants as CONST 7 | 8 | 9 | def revert_remediation(plan_path, aws_id, all_principal = False): 10 | ''' Remove the previously injected inline policies attached to vulnerable principals ''' 11 | aws_session = aws_id['session'] 12 | if all_principal: 13 | # Enumerate through all the users and roles in this account 14 | client = aws_session.client('iam') 15 | resp = client.list_users( 16 | MaxItems=1000 17 | ) 18 | for user in resp['Users']: 19 | src_principal = st.parse_principal_from_arn(user['Arn']) 20 | unpatch_principal(src_principal, aws_session) 21 | 22 | resp = client.list_roles( 23 | MaxItems=1000 24 | ) 25 | for role in resp['Roles']: 26 | src_principal = st.parse_principal_from_arn(role['Arn']) 27 | unpatch_principal(src_principal, aws_session) 28 | return 29 | 30 | if not os.path.exists(plan_path): 31 | sys.exit('There is no remediation plan for this AWS account {}. You may try again with the --all flag'.format(aws_id['account_id'])) 32 | 33 | # Walk through every remediation plan 34 | for root, dirs, files in os.walk(plan_path, topdown=False): 35 | for f_name in files: 36 | f_path = os.path.join(root, f_name) 37 | plan = st.loadFromJson(f_path) 38 | if 'vulnerable_principal' in plan: 39 | src_principal = st.parse_principal_from_arn(plan['vulnerable_principal']) 40 | unpatch_principal(src_principal, aws_session) 41 | 42 | def unpatch_principal(src_principal, aws_session): 43 | ''' Retrieve all the inline policies of the principal and delete the policies starting with iamdeescalate prefix ''' 44 | client = aws_session.client('iam') 45 | principal_name = os.path.basename(src_principal) 46 | if src_principal.startswith('user/'): 47 | resp = client.list_user_policies( 48 | UserName=principal_name, 49 | MaxItems=1000 50 | ) 51 | elif src_principal.startswith('role/'): 52 | resp = client.list_role_policies( 53 | RoleName=principal_name, 54 | MaxItems=1000 55 | ) 56 | elif src_principal.startswith('group/'): 57 | resp = client.list_group_policies( 58 | GroupName=principal_name, 59 | MaxItems=1000 60 | ) 61 | if not 'PolicyNames' in resp: 62 | return 63 | for policy_name in resp['PolicyNames']: 64 | if policy_name.startswith('{}@'.format(CONST.POLICY__PREFIX)): 65 | # delete this policy 66 | delete_policy(src_principal, policy_name, aws_session) 67 | 68 | def delete_policy(src_principal, policy_name, aws_session): 69 | client = aws_session.client('iam') 70 | principal_name = os.path.basename(src_principal) 71 | try: 72 | if src_principal.startswith('user/'): 73 | resp = client.delete_user_policy( 74 | UserName=principal_name, 75 | PolicyName=policy_name 76 | ) 77 | elif src_principal.startswith('role/'): 78 | resp = client.delete_role_policy( 79 | RoleName=principal_name, 80 | PolicyName=policy_name 81 | ) 82 | elif src_principal.startswith('group/'): 83 | resp = client.delete_group_policy( 84 | 
GroupName=principal_name, 85 | PolicyName=policy_name 86 | ) 87 | except (botocore.exceptions.ClientError) as error: 88 | logging.error('Fail to delete inline {}\'s inline policy {}. {}'.format(src_principal, policy_name, error)) 89 | return 90 | if st.check_boto3_response(resp): 91 | logging.info('Inline policy for {} has been deleted'.format(src_principal)) 92 | -------------------------------------------------------------------------------- /misc/case_insensitive_dict.py: -------------------------------------------------------------------------------- 1 | """Python code for a case-insensitive dictionary.""" 2 | 3 | # Copyright (c) NCC Group and Erik Steringer 2021. This file is part of Principal Mapper. 4 | # 5 | # Principal Mapper is free software: you can redistribute it and/or modify 6 | # it under the terms of the GNU Affero General Public License as published by 7 | # the Free Software Foundation, either version 3 of the License, or 8 | # (at your option) any later version. 9 | # 10 | # Principal Mapper is distributed in the hope that it will be useful, 11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 | # GNU Affero General Public License for more details. 14 | # 15 | # You should have received a copy of the GNU Affero General Public License 16 | # along with Principal Mapper. If not, see . 17 | # 18 | # This file incorporates work covered by the following copyright and permission notice: 19 | # 20 | # Copyright 2019 Kenneth Reitz 21 | # 22 | # Licensed under the Apache License, Version 2.0 (the "License"); 23 | # you may not use this file except in compliance with the License. 24 | # You may obtain a copy of the License at 25 | # 26 | # http://www.apache.org/licenses/LICENSE-2.0 27 | # 28 | # Unless required by applicable law or agreed to in writing, software 29 | # distributed under the License is distributed on an "AS IS" BASIS, 30 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 31 | # See the License for the specific language governing permissions and 32 | # limitations under the License. 33 | 34 | from collections.abc import Mapping, MutableMapping 35 | from collections import OrderedDict 36 | 37 | 38 | class CaseInsensitiveDict(MutableMapping): 39 | """A case-insensitive ``dict``-like object. 40 | Implements all methods and operations of 41 | ``MutableMapping`` as well as dict's ``copy``. Also 42 | provides ``lower_items``. 43 | 44 | All keys are expected to be strings. The structure remembers the 45 | case of the last key to be set, and ``iter(instance)``, 46 | ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()`` 47 | will contain case-sensitive keys. However, querying and contains 48 | testing is case insensitive:: 49 | cid = CaseInsensitiveDict() 50 | cid['aws:SourceIp'] = '128.223.0.1' 51 | cid['aws:sourceip'] == '128.223.0.1' # True 52 | 53 | For example, ``context['ec2:InstanceType']`` will return the 54 | value of ec2:InstanceType condition context, regardless 55 | of how the name was originally stored. 56 | 57 | If the constructor, ``.update``, or equality comparison 58 | operations are given keys that have equal ``.lower()``s, the 59 | behavior is undefined. 
60 | """ 61 | 62 | def __init__(self, data=None, **kwargs): 63 | self._store = OrderedDict() 64 | if data is None: 65 | data = {} 66 | self.update(data, **kwargs) 67 | 68 | def __setitem__(self, key, value): 69 | # Use the lowercased key for lookups, but store the actual 70 | # key alongside the value. 71 | self._store[key.lower()] = (key, value) 72 | 73 | def __getitem__(self, key): 74 | return self._store[key.lower()][1] 75 | 76 | def __delitem__(self, key): 77 | del self._store[key.lower()] 78 | 79 | def __iter__(self): 80 | return (casedkey for casedkey, mappedvalue in self._store.values()) 81 | 82 | def __len__(self): 83 | return len(self._store) 84 | 85 | def lower_items(self): 86 | """Like iteritems(), but with all lowercase keys.""" 87 | return ( 88 | (lowerkey, keyval[1]) 89 | for (lowerkey, keyval) 90 | in self._store.items() 91 | ) 92 | 93 | def __eq__(self, other): 94 | if isinstance(other, Mapping): 95 | other = CaseInsensitiveDict(other) 96 | else: 97 | return NotImplemented 98 | # Compare insensitively 99 | return dict(self.lower_items()) == dict(other.lower_items()) 100 | 101 | # Copy is required 102 | def copy(self): 103 | return CaseInsensitiveDict(self._store.values()) 104 | 105 | def __repr__(self): 106 | return str(dict(self.items())) 107 | -------------------------------------------------------------------------------- /main/apply.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import json 4 | import hashlib 5 | import botocore 6 | import logging 7 | import common.shared_tools as st 8 | import common.constants as CONST 9 | 10 | 11 | def apply_remediation(plan_dir_path, aws_id): 12 | ''' Go through every plan under the plan_dir_path and apply the remediation policy flaged as patch_me. If is_auto is true, a remediation strategy will be selected automatically ''' 13 | # Read the remediation plan 14 | if not os.path.exists(plan_dir_path): 15 | sys.exit('There is no remediation plan for AWS account {}. Please run the plan command for this account first.'.format(aws_id['account_id'])) 16 | 17 | aws_session = aws_id['session'] 18 | caller_arn = aws_id['arn'] 19 | 20 | # Walk through every plan 21 | for root, dirs, files in os.walk(plan_dir_path, topdown=False): 22 | for f_name in files: 23 | f_path = os.path.join(root, f_name) 24 | patch_principal(st.loadFromJson(f_path), aws_session, caller_arn) 25 | 26 | def patch_principal(plan, aws_session, caller_arn): 27 | ''' Input is a json object of a remediation plan that contains the remediation strategies of a vulnerable principal. The function will find the the strategies marked as "patch_me" and apply it to the aws account as an inline policy. ''' 28 | if not ('vulnerable_principal' in plan and 'reachable_admin_principals' in plan ): 29 | return 30 | src_principal = plan['vulnerable_principal'] 31 | # Skip caller 32 | if src_principal == caller_arn: 33 | return 34 | 35 | src_principal = st.parse_principal_from_arn(src_principal) 36 | 37 | # Combine all the plan into one policy. This policy needs to be futher optimized to reduce the size! 
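    # Illustration (hypothetical, reusing the sample values from bob.json): if the selected
    # strategies deny iam:CreateAccessKey on arn:aws:iam::123456789012:user/it_admin and
    # iam:PassRole on arn:aws:iam::123456789012:role/it_admin_role, _aggregate_policy() merges
    # them into
    #   {'arn:aws:iam::123456789012:user/it_admin': ['iam:CreateAccessKey'],
    #    'arn:aws:iam::123456789012:role/it_admin_role': ['iam:PassRole']}
    # and the loop below then emits one Deny statement per resource.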
38 | policy_dict = dict() 39 | for dst_principal, edge_list in plan['reachable_admin_principals'].items(): 40 | for edge in edge_list: 41 | for patch_strategy in edge['remediations']: 42 | if patch_strategy['patch_me']: 43 | # put_inline_policy(src_principal, patch_strategy['policy'], aws_session) 44 | _aggregate_policy(patch_strategy['policy'], policy_dict) 45 | 46 | if not policy_dict: 47 | return 48 | 49 | policy_list = list() 50 | for rsc, action_list in policy_dict.items(): 51 | policy = {'Effect': 'Deny', 'Action': action_list, 'Resource': [rsc]} 52 | policy_list.append(policy) 53 | put_inline_policy(src_principal, policy_list, aws_session) 54 | 55 | 56 | def _aggregate_policy(policy_list, policy_dict): 57 | ''' Combine policies. Combine common actions for each resource ''' 58 | for policy in policy_list: 59 | for rsc in policy['Resource']: 60 | if not rsc in policy_dict: 61 | policy_dict[rsc] = list() 62 | for act in policy['Action']: 63 | if not act in policy_dict[rsc]: 64 | policy_dict[rsc].append(act) 65 | 66 | 67 | def put_inline_policy(src_principal, policy, aws_session): 68 | ''' Apply the de-escalate policy to vulnerable princiapl specified in the plan ''' 69 | inline_policy = dict() 70 | inline_policy['Version'] = '2012-10-17' 71 | inline_policy['Statement'] = policy 72 | 73 | client = aws_session.client('iam') 74 | principal_name = os.path.basename(src_principal) 75 | rnd_str = _generate_policy_hash(inline_policy) 76 | policy_name = '{}@{}'.format(CONST.POLICY__PREFIX, rnd_str) 77 | try: 78 | if src_principal.startswith('user/'): 79 | # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/iam.html#IAM.Client.put_user_policy 80 | resp = client.put_user_policy( 81 | PolicyDocument=json.dumps(inline_policy), 82 | PolicyName=policy_name, 83 | UserName=principal_name, 84 | ) 85 | elif src_principal.startswith('role/'): 86 | resp = client.put_role_policy( 87 | PolicyDocument=json.dumps(inline_policy), 88 | PolicyName=policy_name, 89 | RoleName=principal_name, 90 | ) 91 | elif src_principal.startswith('group/'): 92 | resp = client.put_group_policy( 93 | PolicyDocument=json.dumps(inline_policy), 94 | PolicyName=policy_name, 95 | GroupName=principal_name, 96 | ) 97 | except (botocore.exceptions.ClientError) as error: 98 | logging.error('Fail to apply inline policy to {}. {}'.format(src_principal, error)) 99 | return 100 | 101 | if st.check_boto3_response(resp): 102 | logging.info('Inline policy has been successfully applied to {}'.format(src_principal)) 103 | 104 | def _generate_policy_hash(policy_obj): 105 | ''' generate a hash of the policy object ''' 106 | 107 | policy_str = json.dumps(policy_obj, default=str) 108 | hash_result = hashlib.md5(policy_str.encode()) 109 | return hash_result.hexdigest() 110 | -------------------------------------------------------------------------------- /main/plan.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import shutil 4 | import logging 5 | import common.shared_tools as st 6 | 7 | from remediation import autoscale_handler, cf_handler, codebuild_handler, cross_handler, ec2_handler, iam_handler, lambda_handler, sagemaker_handler, ssm_handler,sts_handler 8 | 9 | 10 | def plan_remediation(audit_file_path, output_dir_path, aws_id, is_auto = False): 11 | ''' Read the audit output from audit_file_path. For each possible privilege escalation path, create a remediation plan that contains one or multiple strategy. 
If is_auto is set to True, the first strategy's "patch_me" field of every plan will be set to 1. ''' 12 | if not os.path.exists(audit_file_path): 13 | sys.exit('There is no audit result for this AWS account. Please run the audit command on this account first.') 14 | audit_result = st.loadFromJson(audit_file_path) 15 | if not 'privesc_principal' in audit_result: 16 | # https://unicode.org/emoji/charts/full-emoji-list.html 17 | logging.info('No non-admin principal is vulnerable to privilege escalation in AWS account {} \N{slightly smiling face}'.format(audit_result['aws_id'])) 18 | shutil.rmtree(output_dir_path, ignore_errors = True) 19 | sys.exit(0) 20 | plan_dict = dict() 21 | for principal_arn, esc_edge_list in audit_result['privesc_principal'].items(): 22 | if principal_arn == aws_id['arn']: 23 | # skip caller 24 | continue 25 | for esc_edge in esc_edge_list: 26 | src_principal = esc_edge['src_principal'] 27 | short_reason = esc_edge['short_reason'] 28 | reason = esc_edge['reason'] 29 | dst_principal = esc_edge['dst_principal'] 30 | 31 | if not src_principal in plan_dict: 32 | plan_dict[src_principal] = dict() 33 | if not dst_principal in plan_dict[src_principal]: 34 | plan_dict[src_principal][dst_principal] = list() # a list of possible edges from src_principal to dst_principal 35 | 36 | edge_remediation = {'reason': reason, 'remediations': list()} 37 | # get a list of strategy that can break this edge. Any strategy in this list can break the edge 38 | strategy_list = get_remediation_plan(src_principal, dst_principal, short_reason, reason) 39 | if not strategy_list: 40 | logging.warn('No remediation plan for: {} {} {}'.format(src_principal, reason, dst_principal)) 41 | continue 42 | idx = 0 43 | for strategy in strategy_list: 44 | # Pick the first strategy and mark patch_me to 1 45 | patch_me = 1 if (idx == 0 and is_auto) else 0 46 | edge_remediation['remediations'].append({'policy': strategy.process_list, 'patch_me':patch_me}) 47 | idx += 1 48 | 49 | plan_dict[src_principal][dst_principal].append(edge_remediation) 50 | 51 | output_remediation_plan(plan_dict, output_dir_path) 52 | 53 | def get_remediation_plan(src_principal, dst_principal, short_reason, reason): 54 | ''' The four input parameters represent an edge, a lateral movement from one principal to another. 55 | The function return a list of strategies. Any strategy in the list can break the edge (de-escalate). 
56 | :param src_principal: a user or role that can be escalated 57 | :param dst_principal: the escalated user or role that src_principal can eventually achieve 58 | :param short_reason: the short_reason output from PMapper 59 | :param reason: the reason output from PMapper 60 | :return a list of RemediationStrategy 61 | :rtype list[RemediationStrategy] ''' 62 | 63 | if short_reason == 'EC2 Auto Scaling': 64 | handler = autoscale_handler.AutoscalHandler() 65 | elif short_reason == 'Cloudformation': 66 | handler = cf_handler.CFHandler() 67 | elif short_reason == 'CodeBuild': 68 | handler = codebuild_handler.CodebuildHandler() 69 | elif short_reason == 'STS': 70 | handler = cross_handler.CrossHandler() 71 | elif short_reason == 'EC2': 72 | handler = ec2_handler.EC2Handler() 73 | elif short_reason == 'IAM': 74 | handler = iam_handler.IAMHandler() 75 | elif short_reason == 'Lambda': 76 | handler = lambda_handler.LambdaHandler() 77 | elif short_reason == 'SageMaker': 78 | handler = sagemaker_handler.SagemakerHandler() 79 | elif short_reason == 'SSM': 80 | handler = ssm_handler.SSMHandler() 81 | elif short_reason == 'AssumeRole': 82 | handler = sts_handler.STSHandler() 83 | else: 84 | logging.error('Unrecognized PMapper short reason {}'.format(short_reason)) 85 | return 86 | return handler.get_remediation_plan(src_principal, dst_principal, reason) 87 | 88 | def output_remediation_plan(plan_dict, output_dir_path): 89 | ''' Create one file for each vulnerable principal. The file contains the destinations that this principal can escalate to, and the policies to remediate the issues. There may be multiple edges between each src_principal and dst_principal. In each edge, there are multiple ways that this edge can be broken. 90 | Users just need to set one of the "patch_me" to 1 under each remediations block. ''' 91 | # create a directory for this AWS account's plan 92 | shutil.rmtree(output_dir_path, ignore_errors = True) 93 | os.makedirs(output_dir_path, exist_ok=True) 94 | 95 | # create one file for each principal suspectable to privesc attack 96 | for src_principal, dst_list in plan_dict.items(): 97 | # Parse the path of src_principal 98 | principal = st.parse_principal_from_arn(src_principal) 99 | dir_path = os.path.dirname(principal) 100 | os.makedirs(os.path.join(output_dir_path, dir_path), exist_ok=True) 101 | # Add account info to each file 102 | out_dict = { 103 | 'vulnerable_principal':src_principal, 104 | 'reachable_admin_principals':dst_list 105 | } 106 | # Create a file for each vulnerable principal 107 | st.dumpToJson(out_dict, '{}/{}.json'.format(output_dir_path,principal), indent=4) -------------------------------------------------------------------------------- /iam_deesc.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import sys 4 | import boto3 5 | import botocore 6 | import os 7 | import logging 8 | import logging.config 9 | 10 | import common.constants as CONST 11 | import common.shared_tools as st 12 | import main.audit as audit_aws 13 | import main.plan as plan_desc 14 | import main.apply as apply_plan 15 | import main.revert as revert_plan 16 | 17 | AWS = dict() 18 | 19 | def audit(parsed_args): 20 | ''' Use PMapper to identify the principals vulnerable to priviledge escalation attacks. 
The result is output as a json file undert output/account_id/audit/ ''' 21 | logging.info('Auditing AWS account {} ...'.format(AWS['account_id'])) 22 | result = audit_aws.audit_aws(parsed_args, AWS) 23 | # Display 24 | if 'admins' in result: 25 | logging.info('Principals with AdministratorAccess permissions:') 26 | for p in result['admins']: 27 | logging.info(' {}'.format(p)) 28 | logging.info('') 29 | else: 30 | logging.info('There is no principal with AdministratorAccess permissions \N{slightly smiling face}') 31 | if 'privesc_principal' in result: 32 | logging.info('Non-admin principals vulnerable to privilege escalation:') 33 | for principal in result['privesc_principal'].keys(): 34 | msg = ' {}'.format(st.parse_principal_from_arn(principal)) 35 | if principal == AWS['arn']: 36 | msg += ' (Caller will be excluded from the remediaton process!)' 37 | logging.info(msg) 38 | 39 | else: 40 | logging.info('There is no principal with privilege escalation risk \N{slightly smiling face}') 41 | logging.info('\nThe audit output is stored at {}'.format(os.path.join(os.path.abspath(CONST.OUTPUT_DIR), AWS['account_id'], 'audit', CONST.AUDIT_FILE))) 42 | logging.info('You can exclude specific principals from the remediation process by removing them from the \"privesc_principal\" block.\n') 43 | 44 | def plan_remediation(is_auto = False): 45 | ''' Read the audit output and create an output strategy for each vulnerable principal. The proposed remediation strategies are output to output/account_id/plan''' 46 | audit_file_path = os.path.join(CONST.OUTPUT_DIR, AWS['account_id'], 'audit', CONST.AUDIT_FILE) 47 | output_dir_path = os.path.join(CONST.OUTPUT_DIR, AWS['account_id'], 'plan') 48 | logging.info('Creating remediation plans for AWS account {} ...'.format(AWS['account_id'])) 49 | plan_desc.plan_remediation(audit_file_path, output_dir_path, AWS, is_auto = is_auto) 50 | logging.info('The remediation plans have been successfully created under {}/. 
Each file under this directory represents one vulnerable principal.'.format(os.path.join(os.path.abspath(CONST.OUTPUT_DIR), AWS['account_id'], 'plan'))) 51 | logging.info('Please review the plans and mark "patch_me" to 1 for the policies to be applied.\n') 52 | 53 | def apply_remediation(): 54 | ''' Read the remediation plan from output/account_id/plan and apply the policies to the AWS account ''' 55 | logging.info('Applying the remediation strategy for AWS account {} ...'.format(AWS['account_id'])) 56 | plan_path = os.path.join(CONST.OUTPUT_DIR, AWS['account_id'], 'plan') 57 | apply_plan.apply_remediation(plan_path, AWS) 58 | 59 | def revert_remediation(is_all = False): 60 | ''' Read the remediation plan from output/account_id/plan and revert all the changes.''' 61 | logging.info('Reverting the remediation strategy previously applied to AWS account {} ...'.format(AWS['account_id'])) 62 | plan_path = os.path.join(CONST.OUTPUT_DIR, AWS['account_id'], 'plan') 63 | revert_plan.revert_remediation(plan_path, AWS, all_principal=is_all) 64 | 65 | 66 | def retrieve_aws_info(aws_profile): 67 | ''' Populate profile, session, account_id, caller_arn into the global variable AWS ''' 68 | global AWS 69 | AWS['profile'] = aws_profile 70 | try: 71 | AWS['session'] = boto3.Session(profile_name=aws_profile) 72 | caller_id = AWS['session'].client('sts').get_caller_identity() 73 | AWS['account_id'] = caller_id['Account'] 74 | AWS['arn'] = caller_id['Arn'] 75 | except (botocore.exceptions.ClientError, botocore.exceptions.ProfileNotFound) as error: 76 | sys.exit('Invalid AWS profile {}. {}'.format(aws_profile, error)) 77 | 78 | def parseArgs(): 79 | argument_parser = argparse.ArgumentParser() 80 | 81 | argument_parser.add_argument('--profile', help='AWS profile to use. If not provided, the default profile will be used', default=None) 82 | # argument_parser.add_argument('--account', help='AWS account id. If not provided, the default profile\`s account ID will be used', default=None) 83 | 84 | # Create subparser for various subcommands 85 | subparser = argument_parser.add_subparsers( 86 | title='subcommand', 87 | description='The subcommand to use among this suite of tools', 88 | dest='sub_cmd', 89 | help='Select a subcommand to execute' 90 | ) 91 | 92 | # Add a subcommand 93 | audit_cmd_parser = subparser.add_parser( 94 | 'audit', 95 | description='''Search for principals with privilege escalation risks. Under the hood, it models the principals as a graph using PMapper and searches for privilege escalation paths from non-admin principals to admin principals. The --principal parameter restricts the search to only the specified principals ''', 96 | help=''' Pull the IAM information from an AWS account and search for principals with privilege escalation risks. 97 | E.g., python3 iam_deesc.py --profile my_prof audit ''' 98 | ) 99 | # Add arguments for the subcommand 100 | audit_aws.provide_graph_arguments(audit_cmd_parser) 101 | 102 | plan_cmd_parser = subparser.add_parser( 103 | 'plan', 104 | description=''' Create a remediation plan for every principal with privilege escalation risks. This command needs to be run after the audit command finishes. E.g., python3 iam_deesc.py --profile my_prof plan ''', 105 | help='Use the audit result to create a remediation plan.
If the --auto flag is specified, the first "patch_me" field of each plan will be set to 1' 106 | ) 107 | plan_cmd_parser.add_argument('--auto', action='store_true', help='Automatically pick a remediation strategy for each vulnerable principal and mark its patch_me field as 1. This will NOT actually apply the remediation.') 108 | 109 | subparser.add_parser( 110 | 'apply', 111 | description=''' Find the proposed remediation plans stored in ./output/AWS_ID/plan/. Each file under this directory represents a non-admin principal that can be escalated to an admin principal. A non-admin principal may be escalated to multiple admin principals. Between each non-admin principal and admin principal, there may be multiple "escalation edges". For each escalation edge, the remediation plan may have multiple remediation strategies. Applying any remediation strategy can break the attack path and eliminate the risk. To manually pick a remediation strategy, set at least one "patch_me" field to 1 under each remediations block. E.g., python3 iam_deesc.py --profile my_prof apply''', 112 | help='Apply the remediation plan. This command needs to be run after the plan command finishes. E.g., python3 iam_deesc.py --profile my_profile apply' 113 | ) 114 | 115 | revert_cmd_parser = subparser.add_parser( 116 | 'revert', 117 | description=''' Revert the changes made by the apply command. The inline policies attached to the vulnerable principals will be deleted. E.g., python3 iam_deesc.py --profile my_prof revert ''', 118 | help='Revert the changes made by the apply command. If the --all flag is specified, IAM-Deescalate will check EVERY user and role in this account' 119 | ) 120 | revert_cmd_parser.add_argument('--all', action='store_true', help='Enumerate every user and role to remove previously inserted remediation policies') 121 | 122 | main_args = argument_parser.parse_args() 123 | if main_args.profile: 124 | retrieve_aws_info(main_args.profile) 125 | else: 126 | retrieve_aws_info('default') 127 | 128 | if main_args.sub_cmd == 'audit': 129 | logging.info(CONST.BANNER) 130 | audit(main_args) 131 | elif main_args.sub_cmd == 'plan': 132 | plan_remediation(is_auto = main_args.auto) 133 | elif main_args.sub_cmd == 'apply': 134 | apply_remediation() 135 | elif main_args.sub_cmd == 'revert': 136 | revert_remediation(is_all = main_args.all) 137 | 138 | def main(): 139 | logging.config.dictConfig({ 140 | 'version': 1, 141 | 'disable_existing_loggers': True, 142 | }) 143 | logging.basicConfig(level=logging.INFO, format='%(message)s') 144 | parseArgs() 145 | 146 | 147 | if __name__ == '__main__': 148 | sys.exit(main()) 149 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ``` 2 | _____ __ __ _____ 3 | |_ _| /\ | \/ | | __ \ 4 | | | / \ | \ / |______| | | | ___ ___ ___ ___ 5 | | | / /\ \ | |\/| |______| | | |/ _ \/ _ \/ __|/ __| 6 | _| |_ / ____ \| | | | | |__| | __/ __/\__ \ (__ 7 | |_____/_/ \_\_| |_| |_____/ \___|\___||___/\___| 8 | ``` 9 | # IAM-Deescalate 10 | IAM-Deescalate helps mitigate privilege escalation risk in AWS identity and access management (IAM). It identifies the IAM users and roles with privilege escalation risk using NCC Group's [PMapper](https://github.com/nccgroup/PMapper) and creates a set of policies to "deescalate" the risk. IAM-Deescalate was developed when we were working on the [Cloud Threat Report Vol. 6](https://cloudthreat.report).
More information about the tool can be found in this [blog](https://unit42.paloaltonetworks.com/iam-deescalate/).
11 | 
12 | IAM-Deescalate has only four commands, `audit`, `plan`, `apply`, and `revert`.
13 | 
14 | + The **audit** command uses PMapper to model an AWS account as a graph and searches for principals risky for privilege escalation.
15 | 
16 | + The **plan** command analyzes the privilege escalation risk and creates a remediation plan for each risky principal. Users should review the plans and decide which policies to apply.
17 | 
18 | + The **apply** command inserts inline policies into risky principals based on the plans. Note that applying these policies may impact operations if the remediated identities require the revoked permissions.
19 | 
20 | + The **revert** command removes all the inserted policies previously applied.
21 | 
22 | ![Flow](./flow.png)
23 | 
24 | # How does it work?
25 | PMapper models the identities in an AWS account as a directed graph, in which each node represents a principal (user or role) and each edge represents a transition from one principal to another. Node A -> node B indicates that principal A can authenticate as principal B. For example, if user A can assume role B, then there is an edge directed from node A to node B. Similarly, if user C can pass role B to an EC2 instance that user C controls, then there is an edge directed from user C to role B. With this graph, one can find the possible paths between any two nodes.
26 | 
27 | A node (principal) is risky for privilege escalation if this node is NOT an admin principal but has a path to an admin principal.
28 | 
29 | IAM-Deescalate calculates all the possible edges from non-admin principals to admin principals. These are the edges that allow privilege escalation. IAM-Deescalate attempts to break these edges to eliminate the privilege escalation risk. To break an edge from node A to node B, IAM-Deescalate revokes a few permissions on principal A so that principal A can no longer authenticate as principal B. IAM-Deescalate revokes these permissions by inserting an [inline policy](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html#inline-policies) with an explicit [deny](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html).
30 | 
31 | To understand why one principal can authenticate as another principal, IAM-Deescalate analyzes the `reason` property of each PMapper [Edge](https://github.com/nccgroup/PMapper/blob/master/principalmapper/common/edges.py) instance. The `reason` property stores human-readable text explaining the relationship between the two connected principals. Note that if PMapper updates these descriptions, IAM-Deescalate will likely fail to interpret the relationship correctly.
32 | 
33 | # Let's get started
34 | To get started, you need a [Python3](https://www.python.org/downloads/) environment and a credential with sufficient permissions to access the targeted AWS account. The credential needs to be stored in a [credential file](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) on the host. By default, AWS's client tools and libraries look for the credential file at `~/.aws/credentials`. However, this default path can be changed by setting the environment variable `AWS_SHARED_CREDENTIALS_FILE`.
35 | 
36 | ## Required permissions
37 | [required_permissions.json](required_permissions.json) lists the minimal set of AWS permissions required for both IAM-Deescalate and PMapper (v1.1.5).
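
As a rough illustration of the deny-based remediation described in the "How does it work?" section, the sketch below shows how an explicit-deny inline policy could be attached to a user with boto3. This is a minimal, hypothetical example: the user name, policy name, denied action, and role ARN are placeholders, not output produced by IAM-Deescalate.

```python
import json
import boto3

# Hypothetical deny policy: block a user from assuming a specific admin role.
# IAM-Deescalate derives the actual actions and resources from the PMapper
# edges it finds; the values here are placeholders for illustration only.
deny_policy = {
    'Version': '2012-10-17',
    'Statement': [{
        'Effect': 'Deny',
        'Action': 'sts:AssumeRole',
        'Resource': 'arn:aws:iam::123456789012:role/example_admin_role'
    }]
}

iam = boto3.Session(profile_name='my_prof').client('iam')
iam.put_user_policy(
    UserName='example_user',
    PolicyName='iam_deescalate_example',  # illustrative name only
    PolicyDocument=json.dumps(deny_policy)
)
```

Reverting such a change is conceptually the opposite call, e.g. `iam.delete_user_policy(UserName='example_user', PolicyName='iam_deescalate_example')`, which matches what the `revert` command does for the inline policies it previously inserted.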
38 | 
39 | 
40 | ## Installation
41 | [requirements.txt](requirements.txt) lists the three dependencies for both IAM-Deescalate and PMapper (v1.1.5).
42 | 
43 | 
44 | ### Host installation:
45 | ```bash
46 | mkdir iam && cd iam
47 | git clone https://github.com/nccgroup/PMapper.git
48 | git clone https://github.com/PaloAltoNetworks/IAM-Deescalate.git
49 | cp IAM-Deescalate/misc/gathering.py PMapper/principalmapper/graphing/gathering.py
50 | cp IAM-Deescalate/misc/case_insensitive_dict.py PMapper/principalmapper/util/case_insensitive_dict.py
51 | cd IAM-Deescalate
52 | pip3 install -r requirements.txt
53 | ```
54 | 
55 | ### Build a Docker image
56 | ```bash
57 | git clone https://github.com/PaloAltoNetworks/IAM-Deescalate.git
58 | cd IAM-Deescalate
59 | docker build -t iam-deesc .
60 | ```
61 | 
62 | 
63 | 
64 | 
65 | # Usage
66 | ```
67 | usage: iam_deesc.py [-h] [--profile PROFILE] {audit,plan,apply,revert} ...
68 | 
69 | optional arguments:
70 |   -h, --help            show this help message and exit
71 |   --profile PROFILE     AWS profile to use. If not provided, the default profile will be used
72 | 
73 | subcommand:
74 |   The subcommand to use among this suite of tools
75 | 
76 |   {audit,plan,apply,revert}
77 |                         Select a subcommand to execute
78 |     audit               Pull the IAM information from an AWS account and search for principals with privilege escalation risks. E.g., python3 iam_deesc.py --profile my_prof audit
79 |     plan                Use the audit result to create a remediation plan. If --auto flag is specified, the "patch_me" fields in each plan will be set to 1
80 |     apply               Apply the remediation plan. This command needs to be run after the plan command finishes. E.g., python3 iam_deesc.py --profile my_profile apply
81 |     revert              Revert the changes made by the apply command. If --all flag is specified, IAM-Deescalate will check EVERY user and role in this account
82 | ```
83 | 
84 | ## audit
85 | The `audit` command models the principals as a graph using [PMapper](https://github.com/nccgroup/PMapper/wiki/CLI-Reference#graph-create) and identifies the principals with privilege escalation risks.
86 | ```bash
87 | python3 iam_deesc.py --profile my_prof audit
88 | ```
89 | ## plan
90 | The `plan` command creates remediation plans based on the audit result. The plans are stored under `./output/AWS_ID/plan/`. By default, all the "patch_me" fields are marked as 0, meaning that no actions will be taken when running the `apply` command. Users need to go through each plan and manually set "patch_me" to 1. Read the [Sample Remediation Plan](#sample-remediation-plan) section for more details.
91 | ```bash
92 | python3 iam_deesc.py --profile my_prof plan
93 | ```
94 | If the `--auto` flag is specified, the `plan` command will automatically select a remediation strategy for each principal.
95 | ```bash
96 | python3 iam_deesc.py --profile my_prof plan --auto
97 | ```
98 | ## apply
99 | The `apply` command takes the policies with "patch_me" marked as 1 and attaches them to the risky principals.
100 | ```bash
101 | python3 iam_deesc.py --profile my_prof apply
102 | ```
103 | ## revert
104 | The `revert` command removes all the attached remediation policies.
105 | ```bash
106 | python3 iam_deesc.py --profile my_prof revert
107 | ```
108 | 
109 | 
110 | # Using Docker
111 | First build an image following the [Build a Docker image](#build-a-docker-image) instructions.
112 | Use Docker to execute every command.
113 | ```bash 114 | docker run --rm -it -v [path to aws credential file]:/.aws/credentials -v [path to output directory]:/app/IAM-Deescalate/output iam-deesc [command] 115 | ``` 116 | Examples: 117 | ```bash 118 | docker run --rm -it -v /home/ec2-user/.aws/credentials:/.aws/credentials -v /home/ec2-user/iam/IAM-Deescalate/output:/app/IAM-Deescalate/output iam-deesc --profile my_prof audit 119 | 120 | docker run --rm -it -v /home/ec2-user/.aws/credentials:/.aws/credentials -v /home/ec2-user/iam/IAM-Deescalate/output:/app/IAM-Deescalate/output iam-deesc --profile my_prof plan --auto 121 | ``` 122 | 123 | # Test 124 | BishopFox's Seth Art created a cool project called [IAM Vulnerable](https://github.com/BishopFox/iam-vulnerable) that can quickly build 31 different AWS IAM privilege escalation scenarios. It uses [Terraform](https://learn.hashicorp.com/collections/terraform/aws-get-started) to easily provision and deprovision each scenario. IAM-Deescalate used IAM Vulnerable extensively during development. 125 | 126 | # Sample Remediation Plan 127 | [Here](bob.json) is a sample remediation plan. In this plan, user bob is risky for privilege escalation. Two possible privilege escalation paths are identified. The first path allows bob to escalate to user it_admin, and the second path allows bob to escalate to role it_admin_role. 128 | 129 | To remediate the first attack path, we need to prevent bob from creating an access key for it_admin. If you agree to apply this policy, set the "patch_me" field to 1. 130 | 131 | There are two options to remediate the second attack path. Each option is represented as a policy, and applying one of these two policies can remediate the risk. The first policy prevents bob from passing the it_admin_role to EC2 instances. The second policy prevents bob from associating an instance profile to EC2 instances. If you agree with any of the policies, set its "patch_me" field to 1. -------------------------------------------------------------------------------- /main/audit.py: -------------------------------------------------------------------------------- 1 | """ 2 | Use PMapper to identify principals vulnerable to privilege escalation attacks. 
3 | """ 4 | 5 | import logging 6 | import sys 7 | import contextlib 8 | import io 9 | import os 10 | import shutil 11 | from os import path 12 | from typing import Optional, List 13 | from argparse import ArgumentParser, Namespace 14 | from common import constants as CONST 15 | from common import shared_tools as st 16 | 17 | sys.path.insert(0, path.abspath('../PMapper/')) 18 | from principalmapper.util import botocore_tools, arns 19 | from principalmapper.common import Graph 20 | from principalmapper.graphing import graph_actions, graph_cli 21 | from principalmapper.querying.presets import privesc 22 | from principalmapper.querying import query_interface 23 | from principalmapper.common import Node, Graph 24 | 25 | 26 | logger = logging.getLogger(__name__) 27 | 28 | def provide_graph_arguments(create_parser: ArgumentParser): 29 | ''' Mostly copy from the graph_cli.py in PMapper''' 30 | # create_parser.add_argument( 31 | # '--ignore-orgs', 32 | # action='store_true', 33 | # help='If specified, skips the check for stored AWS Organizations data and ignores any potentially applicable SCPs during the graph creation process' 34 | # ) 35 | # Specify the principals that we want to search for potential privilege escalation paths 36 | create_parser.add_argument( 37 | '--principal', 38 | default='*', 39 | help='A string matching one or more IAM users or roles in the account, or use * (the default) to include all' 40 | ) 41 | 42 | alt_data_source_group = create_parser.add_mutually_exclusive_group() 43 | alt_data_source_group.add_argument( 44 | '--localstack-endpoint', 45 | help='The HTTP(S) endpoint for a running instance of LocalStack' 46 | ) 47 | 48 | 49 | def create_graph(parsed_args: Namespace): 50 | ''' Use PMapper's graph_cli to parse the argument and create a graph ''' 51 | # parsed_args = Namespace(account=None, profile='aws_prof', picked_graph_cmd = 'create', include_services=None, exclude_services=None, 52 | # localstack_endpoint=None, ignore_orgs = True, include_regions = 'us-east-1', exclude_regions=None) 53 | 54 | # Manually added nenessary parameters for PMapper 55 | parsed_args.picked_graph_cmd = 'create' 56 | parsed_args.account = None # Force to recreate the graph 57 | parsed_args.include_services = None 58 | parsed_args.exclude_services = None 59 | parsed_args.ignore_orgs = True 60 | parsed_args.include_regions = 'us-east-1' # Since IAM is global service, it doesn't matter which region we use 61 | parsed_args.exclude_regions = None 62 | 63 | with contextlib.redirect_stdout(io.StringIO()): 64 | # surpress stdout from pmapper 65 | graph_cli.process_arguments(parsed_args) 66 | 67 | def query_privesc(aws_profile:str, account_num: str, principal_param: Optional[str]) -> dict: 68 | ''' 69 | Return a dictionary that contain 1. all the admin nodes, 2. nodes and their edges that lead to admin nodes 70 | Specify the --principal parameter if you only want to focus on a few principals. 
71 | ''' 72 | if account_num is None and aws_profile is None: 73 | raise ValueError('One of the parameters `account` or `session` must not be None') 74 | 75 | if account_num is None: 76 | session = botocore_tools.get_session(aws_profile) 77 | else: 78 | session = None 79 | graph = graph_actions.get_existing_graph(session, account_num) 80 | logger.debug('Querying against graph {}'.format(graph.metadata['account_id'])) 81 | 82 | nodes = [] 83 | if principal_param is None or principal_param == '*': 84 | nodes.extend(graph.nodes) 85 | else: 86 | nodes.append(graph.get_node_by_searchable_name(principal_param)) 87 | if nodes: 88 | return get_privesc_info(graph, nodes) 89 | 90 | 91 | def get_privesc_info(graph: Graph, nodes: List[Node]) -> dict: 92 | ''' Return a dictionary that contain 1. all the identified admin nodes, 2. nodes and their edges that lead to admin nodes ''' 93 | result = dict() 94 | 95 | for node in nodes: 96 | # ignore admin nodes 97 | if node.is_admin: 98 | if not 'admins' in result: 99 | result['admins'] = list() 100 | result['admins'].append(node.searchable_name()) 101 | continue 102 | 103 | # ignore aws-service role 104 | if st.parse_principal_from_arn(node.arn).startswith('role/aws-service-role/'): 105 | continue 106 | 107 | esc_edge_list = check_self_escalate(node, result) 108 | esc_edge_list.extend(check_lateral_escalate(graph, node)) 109 | 110 | if not esc_edge_list: 111 | continue 112 | 113 | if not 'privesc_principal' in result: 114 | result['privesc_principal'] = dict() 115 | result['privesc_principal'][node.arn] = esc_edge_list 116 | 117 | return result 118 | 119 | def check_lateral_escalate(graph: Graph, node: Node) -> List: 120 | ''' Return a list of edges that directly link the node to an admin node. 121 | Return None if no privilege escalation edge is found. 122 | Each edge is represented as a dictionary of (src, short_reason, reason, dst) 123 | ''' 124 | priv_edge_list = list() 125 | path_list = privesc.get_search_list(graph, node) # return a list of edge lists. Each edge list represents a path to a new unique node. 126 | priv_edge_set = set() # use set to eliminate duplication 127 | for path in path_list: 128 | edge = path[0] # we care only the neighbor edge directly connected to the node 129 | if edge.destination.is_admin: # successful reach an admin node 130 | priv_edge_set.add((edge.source.arn, edge.short_reason, edge.reason, edge.destination.arn)) 131 | # priv_edge_set.add((edge.source.searchable_name(), edge.short_reason, edge.reason, edge.destination.searchable_name())) 132 | 133 | # convert each edge tuple to dict 134 | if priv_edge_set: 135 | for edge in priv_edge_set: 136 | priv_edge_list.append({ 137 | 'src_principal': edge[0], 138 | 'short_reason': edge[1], 139 | 'reason': edge[2], 140 | 'dst_principal': edge[3] 141 | }) 142 | return priv_edge_list 143 | 144 | def check_self_escalate(node, result): 145 | ''' 146 | This function will update the node.is_admin property and the result dictionary. 147 | 148 | Check if the node is a real admin. PMapper treats principals with any risky action that can escalate itself to the admin as admins. As a result, a princial may be seen as admin even if it has only 1 risk permission like iam:PutUserPolicy. We want to identify thess types of principals and try to "deescalate" them. It is time-consuming to truly verify that a node is a true admin and can accesss every possible actions. 
In our new definition, we see a node as an admin only if it can access all risky IAM actions and a randomly selected X actions from all AWS's actions. 149 | 150 | Ref: update_admin_status() in https://github.com/nccgroup/PMapper/blob/master/principalmapper/graphing/gathering.py. 151 | ''' 152 | esc_path = list() 153 | # The minimal number of escalation paths that a true admin user or role must have 154 | # A principal with admin policy may not be a real admin. It may be restricted by other policies or SCPs. 155 | admin_policy_condition = {'iam:PolicyARN': 'arn:aws:iam::aws:policy/AdministratorAccess'} 156 | node_type = arns.get_resource(node.arn).split('/')[0] 157 | if node_type == 'user': 158 | action = 'iam:PutUserPolicy' 159 | else: 160 | action = 'iam:PutRolePolicy' 161 | if query_interface.local_check_authorization_handling_mfa(node, action, node.arn, {})[0]: 162 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} to escalate itself to admin'.format(action)) 163 | esc_path.append(edge) 164 | 165 | if node_type == 'user': 166 | action = 'iam:AttachUserPolicy' 167 | else: 168 | action = 'iam:AttachRolePolicy' 169 | if query_interface.local_check_authorization_handling_mfa(node, action, node.arn, admin_policy_condition)[0]: 170 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} to escalate itself to admin'.format(action)) 171 | esc_path.append(edge) 172 | 173 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:CreateRole', '*', {})[0]: 174 | # iam:PutRolePolicy is for inline policy, iam:AttachRolePolicy is for managed policy 175 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:AttachRolePolicy', '*', admin_policy_condition)[0]: 176 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} and {} to escalate itself to admin'.format('iam:CreateRole', 'iam:AttachRolePolicy')) 177 | esc_path.append(edge) 178 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:PutRolePolicy', '*', admin_policy_condition)[0]: 179 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} and {} to escalate itself to admin'.format('iam:CreateRole', 'iam:PutRolePolicy')) 180 | esc_path.append(edge) 181 | 182 | for attached_policy in node.attached_policies: 183 | if attached_policy.arn != node.arn and ':aws:policy/' not in attached_policy.arn: 184 | # Check if the principal can create a new policy version for custom-managed policy. Not all principals may have custom policies attached 185 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:CreatePolicyVersion', attached_policy.arn, {})[0]: 186 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} to escalate itself to admin'.format('iam:CreatePolicyVersion')) 187 | esc_path.append(edge) 188 | break 189 | 190 | if node_type == 'user': 191 | # Not every user belongs to a group. 
No need to update action_cnt 192 | for group in node.group_memberships: 193 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:PutGroupPolicy', group.arn, {})[0]: 194 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} to escalate itself to admin'.format('iam:PutGroupPolicy')) 195 | esc_path.append(edge) 196 | break 197 | for group in node.group_memberships: 198 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:AttachGroupPolicy', group.arn, admin_policy_condition)[0]: 199 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} to escalate itself to admin'.format('iam:AttachGroupPolicy')) 200 | esc_path.append(edge) 201 | break 202 | for group in node.group_memberships: 203 | keep_checking = True 204 | for attached_policy in group.attached_policies: 205 | if attached_policy.arn != group.arn and ':aws:policy/' not in attached_policy.arn: 206 | if query_interface.local_check_authorization_handling_mfa(node, 'iam:CreatePolicyVersion', attached_policy.arn, {})[0]: 207 | edge = _build_self_esc_edge(node.arn, 'IAM', 'can use {} to escalate itself to admin'.format('iam:CreatePolicyVersion')) 208 | esc_path.append(edge) 209 | keep_checking = False 210 | break 211 | if not keep_checking: # break out group loop 212 | break 213 | return esc_path 214 | 215 | def _build_self_esc_edge(principal_arn, short_reason, reason): 216 | edge = { 217 | 'src_principal': principal_arn, 218 | 'short_reason': short_reason, 219 | 'reason': reason, 220 | 'dst_principal': principal_arn 221 | } 222 | return edge 223 | 224 | def audit_aws(parsed_args, aws_id): 225 | ''' Use PMapper to create a graph, identify all the admins, and identify principals vulnerable to privilege escalation. 226 | Input parse_args use the same syntax as PMapper's argquery. aws_id is a dict containing the callerid information. ''' 227 | account_id = aws_id['account_id'] 228 | aws_profile = aws_id['profile'] 229 | 230 | create_graph(parsed_args) 231 | parsed_args.account = aws_id['account_id'] # added account_id here so that query_privesc dosn't need to query again 232 | result = query_privesc(parsed_args.profile, parsed_args.account, parsed_args.principal) 233 | 234 | dir_path = '{}/{}/audit'.format(CONST.OUTPUT_DIR, account_id) 235 | # delete the existing audit results 236 | shutil.rmtree(dir_path, ignore_errors = True) 237 | if result: 238 | # Add AWS account info 239 | result['aws_profile'] = aws_profile 240 | result['aws_id'] = account_id 241 | os.makedirs(dir_path, exist_ok=True) 242 | st.dumpToJson(result,'{}/{}'.format(dir_path, CONST.AUDIT_FILE), indent = 4) 243 | else: 244 | logging.info('The audit did not find any admin principals or principals vulnerable to privilege escalation !') 245 | st.dumpToJson('{}','{}/{}'.format(dir_path, CONST.AUDIT_FILE), indent = 4) # write an empty file 246 | 247 | return result 248 | -------------------------------------------------------------------------------- /misc/gathering.py: -------------------------------------------------------------------------------- 1 | """ 2 | This is a slightly updated version of gathering.py (https://github.com/nccgroup/PMapper/blob/master/principalmapper/graphing/gathering.py). 3 | update_admin_status() is the only function got updated. 4 | The script will replace the original gathering.py in PMapper 5 | Python code for gathering IAM-related information from an AWS account""" 6 | 7 | # Copyright (c) NCC Group and Erik Steringer 2019. This file is part of Principal Mapper. 
8 | # 9 | # Principal Mapper is free software: you can redistribute it and/or modify 10 | # it under the terms of the GNU Affero General Public License as published by 11 | # the Free Software Foundation, either version 3 of the License, or 12 | # (at your option) any later version. 13 | # 14 | # Principal Mapper is distributed in the hope that it will be useful, 15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 | # GNU Affero General Public License for more details. 18 | # 19 | # You should have received a copy of the GNU Affero General Public License 20 | # along with Principal Mapper. If not, see . 21 | 22 | import io 23 | import json 24 | import logging 25 | import os 26 | import random 27 | from xmlrpc.client import Boolean 28 | 29 | import botocore.session 30 | import botocore.exceptions 31 | import principalmapper 32 | from principalmapper.common import Node, Group, Policy, Graph, OrganizationTree, OrganizationNode, OrganizationAccount 33 | from principalmapper.graphing import edge_identification 34 | from principalmapper.querying import query_interface 35 | from principalmapper.util import arns 36 | from principalmapper.util.botocore_tools import get_regions_to_search 37 | from typing import List, Optional, Tuple 38 | 39 | logger = logging.getLogger(__name__) 40 | 41 | 42 | def create_graph(session: botocore.session.Session, service_list: list, region_allow_list: Optional[List[str]] = None, 43 | region_deny_list: Optional[List[str]] = None, scps: Optional[List[List[dict]]] = None, 44 | client_args_map: Optional[dict] = None) -> Graph: 45 | """Constructs a Graph object. 46 | 47 | Information about the graph as it's built will be written to the IO parameter `output`. 48 | 49 | The region allow/deny lists are mutually-exclusive (i.e. at least one of which has the value None) lists of 50 | allowed/denied regions to pull data from. Note that we don't do the same allow/deny list parameters for the 51 | service list, because that is a fixed property of what pmapper supports as opposed to an unknown/uncontrolled 52 | list of regions that AWS supports. 53 | 54 | The `client_args_map` is either None (default) or a dictionary containing a mapping of service -> keyword args for 55 | when the client is created for the service. For example, if you want to specify a different endpoint URL 56 | when calling IAM, your map should look like: 57 | 58 | ``` 59 | client_args_map = {'iam': {'endpoint_url': 'http://localhost:4456'}} 60 | ``` 61 | 62 | Later on, when calling create_client('iam', ...) 
the map will be added via kwargs 63 | """ 64 | 65 | if client_args_map is None: 66 | client_args_map = {} 67 | 68 | stsargs = client_args_map.get('sts', {}) 69 | stsclient = session.create_client('sts', **stsargs) 70 | logger.debug(stsclient.meta.endpoint_url) 71 | caller_identity = stsclient.get_caller_identity() 72 | logger.debug("Caller Identity: {}".format(caller_identity['Arn'])) 73 | metadata = { 74 | 'account_id': caller_identity['Account'], 75 | 'pmapper_version': principalmapper.__version__ 76 | } 77 | 78 | iamargs = client_args_map.get('iam', {}) 79 | iamclient = session.create_client('iam', **iamargs) 80 | 81 | results = get_nodes_groups_and_policies(iamclient) 82 | nodes_result = results['nodes'] 83 | groups_result = results['groups'] 84 | policies_result = results['policies'] 85 | 86 | # Determine which nodes are admins and update node objects 87 | update_admin_status(nodes_result, scps) 88 | 89 | # Generate edges, generate Edge objects 90 | edges_result = edge_identification.obtain_edges( 91 | session, 92 | service_list, 93 | nodes_result, 94 | region_allow_list, 95 | region_deny_list, 96 | scps, 97 | client_args_map 98 | ) 99 | 100 | # Pull S3, SNS, SQS, KMS, and Secrets Manager resource policies 101 | try: 102 | policies_result.extend(get_s3_bucket_policies(session, client_args_map)) 103 | policies_result.extend(get_sns_topic_policies(session, region_allow_list, region_deny_list, client_args_map)) 104 | policies_result.extend(get_sqs_queue_policies(session, caller_identity['Account'], region_allow_list, region_deny_list, client_args_map)) 105 | policies_result.extend(get_kms_key_policies(session, region_allow_list, region_deny_list, client_args_map)) 106 | policies_result.extend(get_secrets_manager_policies(session, region_allow_list, region_deny_list, client_args_map)) 107 | except: 108 | pass 109 | 110 | return Graph(nodes_result, edges_result, policies_result, groups_result, metadata) 111 | 112 | 113 | def get_nodes_groups_and_policies(iamclient) -> dict: 114 | """Using an IAM.Client object, return a dictionary containing nodes, groups, and policies to be 115 | added to a Graph object. Admin status for the nodes are not updated. 116 | 117 | Writes high-level information on progress to the output stream. 
118 | """ 119 | logger.info('Obtaining IAM Users/Roles/Groups/Policies in the account.') 120 | result_paginator = iamclient.get_paginator('get_account_authorization_details') 121 | user_results = [] 122 | group_results = [] 123 | role_results = [] 124 | policy_results = [] 125 | for page in result_paginator.paginate(): 126 | user_results += page['UserDetailList'] 127 | group_results += page['GroupDetailList'] 128 | role_results += page['RoleDetailList'] 129 | policy_results += page['Policies'] 130 | 131 | logger.info('Sorting users, roles, groups, policies, and their relationships.') 132 | 133 | # this is the result we return: dictionary with nodes/groups/users all filled out 134 | result = { 135 | 'nodes': [], 136 | 'groups': [], 137 | 'policies': [] 138 | } 139 | 140 | for p in policy_results: 141 | # go through each policy and update policy_results 142 | doc = [x['Document'] for x in p['PolicyVersionList'] if x['IsDefaultVersion']][0] 143 | result['policies'].append( 144 | Policy( 145 | p['Arn'], 146 | p['PolicyName'], 147 | doc 148 | ) 149 | ) 150 | 151 | for g in group_results: 152 | # go through all inline policies and update policy_results 153 | group_policies = [] 154 | if 'GroupPolicyList' in g: # have to key-check these 155 | for p in g['GroupPolicyList']: 156 | group_policies.append( 157 | Policy( 158 | g['Arn'], # inline policies get the same Arn as their principal 159 | p['PolicyName'], 160 | p['PolicyDocument'] 161 | ) 162 | ) 163 | result['policies'] += group_policies # this is just adding the inline policies for the group 164 | 165 | for p in g['AttachedManagedPolicies']: 166 | group_policies.append(_get_policy_by_arn_or_raise(p['PolicyArn'], result['policies'])) 167 | 168 | result['groups'].append( 169 | Group( 170 | g['Arn'], 171 | group_policies 172 | ) 173 | ) 174 | 175 | for u in user_results: 176 | # go through all inline policies and update policy_results 177 | user_policies = [] 178 | if 'UserPolicyList' in u: # have to key-check these 179 | for p in u['UserPolicyList']: 180 | user_policies.append( 181 | Policy( 182 | u['Arn'], # inline policies inherit the Arn of their principal for the purposes of tracking 183 | p['PolicyName'], 184 | p['PolicyDocument'] 185 | ) 186 | ) 187 | result['policies'] += user_policies 188 | 189 | for p in u['AttachedManagedPolicies']: 190 | user_policies.append(_get_policy_by_arn_or_raise(p['PolicyArn'], result['policies'])) 191 | 192 | if 'PermissionsBoundary' in u: 193 | boundary_policy = _get_policy_by_arn_or_raise(u['PermissionsBoundary']['PermissionsBoundaryArn'], 194 | result['policies']) 195 | else: 196 | boundary_policy = None 197 | 198 | group_list = [] 199 | for group_name in u['GroupList']: 200 | for group in result['groups']: 201 | if arns.get_resource(group.arn).split('/')[-1] == group_name: 202 | group_list.append(group) 203 | break 204 | 205 | _tags = {} 206 | if 'Tags' in u: 207 | for tag in u['Tags']: 208 | _tags[tag['Key']] = tag['Value'] 209 | 210 | # still need to figure out access keys 211 | result['nodes'].append( 212 | Node( 213 | u['Arn'], u['UserId'], user_policies, group_list, None, None, 0, 'PasswordLastUsed' in u, False, 214 | boundary_policy, False, _tags 215 | ) 216 | ) 217 | 218 | for r in role_results: 219 | # go through all inline policies and update policy_results 220 | role_policies = [] 221 | for p in r['RolePolicyList']: 222 | role_policies.append( 223 | Policy( 224 | r['Arn'], # inline policies inherit the Arn of their principal for the purposes of tracking 225 | p['PolicyName'], 226 | 
p['PolicyDocument'] 227 | ) 228 | ) 229 | result['policies'] += role_policies 230 | 231 | for p in r['AttachedManagedPolicies']: 232 | role_policies.append(_get_policy_by_arn_or_raise(p['PolicyArn'], result['policies'])) 233 | 234 | _tags = {} 235 | if 'Tags' in r: 236 | for tag in r['Tags']: 237 | _tags[tag['Key']] = tag['Value'] 238 | 239 | result['nodes'].append( 240 | Node( 241 | r['Arn'], r['RoleId'], role_policies, None, r['AssumeRolePolicyDocument'], 242 | [x['Arn'] for x in r['InstanceProfileList']], 0, False, False, 243 | None, False, _tags 244 | ) 245 | ) 246 | 247 | logger.info("Obtaining Access Keys data for IAM users") 248 | for node in result['nodes']: 249 | if arns.get_resource(node.arn).startswith('user/'): 250 | # Grab access-key count and update node 251 | user_name = arns.get_resource(node.arn)[5:] 252 | if '/' in user_name: 253 | user_name = user_name.split('/')[-1] 254 | access_keys_data = iamclient.list_access_keys(UserName=user_name) 255 | node.access_keys = len(access_keys_data['AccessKeyMetadata']) 256 | # logger.debug('Access Key Count for {}: {}'.format(user_name, len(access_keys_data['AccessKeyMetadata']))) 257 | # Grab password data and update node 258 | try: 259 | login_profile_data = iamclient.get_login_profile(UserName=user_name) 260 | if 'LoginProfile' in login_profile_data: 261 | node.active_password = True 262 | except Exception as ex: 263 | if 'NoSuchEntity' in str(ex): 264 | node.active_password = False # expecting this 265 | else: 266 | raise ex 267 | 268 | logger.info('Gathering MFA virtual device information') 269 | mfa_paginator = iamclient.get_paginator('list_virtual_mfa_devices') 270 | for page in mfa_paginator.paginate(AssignmentStatus='Assigned'): 271 | for device in page['VirtualMFADevices']: 272 | user_arn = device['User']['Arn'] 273 | logger.debug('Found virtual MFA device for {}'.format(user_arn)) 274 | for node in result['nodes']: 275 | if node.arn == user_arn: 276 | node.has_mfa = True 277 | break 278 | 279 | logger.info('Gathering MFA physical device information') 280 | for node in result['nodes']: 281 | node_resource_name = arns.get_resource(node.arn) 282 | if node_resource_name.startswith('user/'): 283 | user_name = node_resource_name.split('/')[-1] 284 | mfa_devices_response = iamclient.list_mfa_devices(UserName=user_name) 285 | if len(mfa_devices_response['MFADevices']) > 0: 286 | node.has_mfa = True 287 | 288 | return result 289 | 290 | 291 | def get_s3_bucket_policies(session: botocore.session.Session, client_args_map: Optional[dict] = None) -> List[Policy]: 292 | """Using a botocore Session object, return a list of Policy objects representing the bucket policies of each 293 | S3 bucket in this account. 
294 | """ 295 | result = [] 296 | s3args = client_args_map.get('s3', {}) 297 | s3client = session.create_client('s3', **s3args) 298 | buckets = [x['Name'] for x in s3client.list_buckets()['Buckets']] 299 | for bucket in buckets: 300 | bucket_arn = 'arn:aws:s3:::{}'.format(bucket) # TODO: allow different partition 301 | try: 302 | bucket_policy = json.loads(s3client.get_bucket_policy(Bucket=bucket)['Policy']) 303 | result.append(Policy( 304 | bucket_arn, 305 | bucket, 306 | bucket_policy 307 | )) 308 | logger.info('Caching policy for {}'.format(bucket_arn)) 309 | except botocore.exceptions.ClientError as ex: 310 | if 'NoSuchBucketPolicy' in str(ex): 311 | logger.info('Bucket {} does not have a bucket policy, adding a "stub" policy instead.'.format( 312 | bucket 313 | )) 314 | result.append(Policy( 315 | bucket_arn, 316 | bucket, 317 | { 318 | "Statement": [], 319 | "Version": "2012-10-17" 320 | } 321 | )) 322 | else: 323 | logger.info('Unable to retrieve bucket policy for {}. You should add this manually. Continuing.'.format(bucket)) 324 | logger.debug('Exception was: {}'.format(ex)) 325 | 326 | return result 327 | 328 | 329 | def get_kms_key_policies(session: botocore.session.Session, region_allow_list: Optional[List[str]] = None, 330 | region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None) -> List[Policy]: 331 | """Using a botocore Session object, return a list of Policy objects representing the key policies of each 332 | KMS key in this account. 333 | 334 | The region allow/deny lists are mutually-exclusive (i.e. at least one of which has the value None) lists of 335 | allowed/denied regions to pull data from. 336 | """ 337 | result = [] 338 | 339 | kmsargs = client_args_map.get('kms', {}) 340 | 341 | # Iterate through all regions of KMS where possible 342 | for kms_region in get_regions_to_search(session, 'kms', region_allow_list, region_deny_list): 343 | try: 344 | # Grab the keys 345 | cmks = [] 346 | kmsclient = session.create_client('kms', region_name=kms_region, **kmsargs) 347 | kms_paginator = kmsclient.get_paginator('list_keys') 348 | for page in kms_paginator.paginate(): 349 | cmks.extend([x['KeyArn'] for x in page['Keys']]) 350 | 351 | # Grab the key policies 352 | for cmk in cmks: 353 | policy_str = kmsclient.get_key_policy(KeyId=cmk, PolicyName='default')['Policy'] 354 | result.append(Policy( 355 | cmk, 356 | cmk.split('/')[-1], # CMK ARN Format: arn::kms:::key/ 357 | json.loads(policy_str) 358 | )) 359 | logger.info('Caching policy for {}'.format(cmk)) 360 | except botocore.exceptions.ClientError as ex: 361 | logger.info('Unable to search KMS in region {} for key policies. The region may be disabled, or the current principal may not be authorized to access the service. Continuing.'.format(kms_region)) 362 | logger.debug('Exception was: {}'.format(ex)) 363 | continue 364 | 365 | return result 366 | 367 | 368 | def get_sns_topic_policies(session: botocore.session.Session, region_allow_list: Optional[List[str]] = None, 369 | region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None) -> List[Policy]: 370 | """Using a botocore Session object, return a list of Policy objects representing the topic policies of each 371 | SNS topic in this account. 372 | 373 | The region allow/deny lists are mutually-exclusive (i.e. at least one of which has the value None) lists of 374 | allowed/denied regions to pull data from. 
375 | """ 376 | result = [] 377 | 378 | snsargs = client_args_map.get('sns', {}) 379 | 380 | # Iterate through all regions of SNS where possible 381 | for sns_region in get_regions_to_search(session, 'sns', region_allow_list, region_deny_list): 382 | try: 383 | # Grab the topics 384 | topics = [] 385 | snsclient = session.create_client('sns', region_name=sns_region, **snsargs) 386 | sns_paginator = snsclient.get_paginator('list_topics') 387 | for page in sns_paginator.paginate(): 388 | topics.extend([x['TopicArn'] for x in page['Topics']]) 389 | 390 | # Grab the topic policies 391 | for topic in topics: 392 | policy_str = snsclient.get_topic_attributes(TopicArn=topic)['Attributes']['Policy'] 393 | result.append(Policy( 394 | topic, 395 | topic.split(':')[-1], # SNS Topic ARN Format: arn::sns::: 396 | json.loads(policy_str) 397 | )) 398 | logger.info('Caching policy for {}'.format(topic)) 399 | except botocore.exceptions.ClientError as ex: 400 | logger.info('Unable to search SNS in region {} for topic policies. The region may be disabled, or the current principal may not be authorized to access the service. Continuing.'.format(sns_region)) 401 | logger.debug('Exception was: {}'.format(ex)) 402 | continue 403 | 404 | return result 405 | 406 | 407 | def get_sqs_queue_policies(session: botocore.session.Session, account_id: str, 408 | region_allow_list: Optional[List[str]] = None, region_deny_list: Optional[List[str]] = None, 409 | client_args_map: Optional[dict] = None) -> List[Policy]: 410 | """Using a botocore Session object, return a list of Policy objects representing the queue policies of each 411 | SQS queue in this account. 412 | 413 | The region allow/deny lists are mutually-exclusive (i.e. at least one of which has the value None) lists of 414 | allowed/denied regions to pull data from. 415 | """ 416 | result = [] 417 | 418 | sqsargs = client_args_map.get('sqs', {}) 419 | 420 | # Iterate through all regions of SQS where possible 421 | for sqs_region in get_regions_to_search(session, 'sqs', region_allow_list, region_deny_list): 422 | try: 423 | # Grab the queue names 424 | queue_urls = [] 425 | sqsclient = session.create_client('sqs', region_name=sqs_region, **sqsargs) 426 | response = sqsclient.list_queues() 427 | if 'QueueUrls' in response: 428 | queue_urls.extend(response['QueueUrls']) 429 | else: 430 | continue 431 | 432 | # Grab the queue policies 433 | for queue_url in queue_urls: 434 | queue_name = queue_url.split('/')[-1] 435 | sqs_policy_response = sqsclient.get_queue_attributes(QueueUrl=queue_url, AttributeNames=['Policy']) 436 | if 'Policy' in sqs_policy_response: 437 | sqs_policy_doc = json.loads(sqs_policy_response['Policy']) 438 | result.append(Policy( 439 | 'arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id, queue_name), 440 | queue_name, 441 | json.loads(sqs_policy_doc) 442 | )) 443 | logger.info('Caching policy for {}'.format('arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id, queue_name))) 444 | else: 445 | result.append(Policy( 446 | 'arn:aws:sqs:{}:{}:{}'.format(sqs_region, account_id, queue_name), 447 | queue_name, 448 | { 449 | "Statement": [], 450 | "Version": "2012-10-17" 451 | } 452 | )) 453 | logger.info('Queue {} does not have a queue policy, adding a "stub" policy instead.'.format(queue_name)) 454 | except botocore.exceptions.ClientError as ex: 455 | logger.info('Unable to search SQS in region {} for queues. The region may be disabled, or the current principal may not be authorized to access the service. 
Continuing.'.format(sqs_region)) 456 | logger.debug('Exception was: {}'.format(ex)) 457 | 458 | return result 459 | 460 | 461 | def get_secrets_manager_policies(session: botocore.session.Session, region_allow_list: Optional[List[str]] = None, 462 | region_deny_list: Optional[List[str]] = None, client_args_map: Optional[dict] = None) -> List[Policy]: 463 | """Using a botocore Session object, return a list of Policy objects representing the resource policies 464 | of the secrets in AWS Secrets Manager. 465 | 466 | The region allow/deny lists are mutually-exclusive (i.e. at least one of which has the value None) lists of 467 | allowed/denied regions to pull data from. 468 | """ 469 | result = [] 470 | 471 | smargs = client_args_map.get('secretsmanager', {}) 472 | 473 | # Iterate through all regions of Secrets Manager where possible 474 | for sm_region in get_regions_to_search(session, 'secretsmanager', region_allow_list, region_deny_list): 475 | try: 476 | # Grab the ARNs of the secrets in this region 477 | secret_arns = [] 478 | smclient = session.create_client('secretsmanager', region_name=sm_region, **smargs) 479 | list_secrets_paginator = smclient.get_paginator('list_secrets') 480 | for page in list_secrets_paginator.paginate(): 481 | if 'SecretList' in page: 482 | for entry in page['SecretList']: 483 | if 'PrimaryRegion' in entry and entry['PrimaryRegion'] != sm_region: 484 | continue # skip things we're supposed to find in other regions 485 | secret_arns.append(entry['ARN']) 486 | 487 | # Grab resource policies for each secret 488 | for secret_arn in secret_arns: 489 | sm_response = smclient.get_resource_policy(SecretId=secret_arn) 490 | 491 | # verify that it is in the response and not None/empty 492 | if 'ResourcePolicy' in sm_response and sm_response['ResourcePolicy']: 493 | sm_policy_doc = json.loads(sm_response['ResourcePolicy']) 494 | result.append(Policy( 495 | secret_arn, 496 | sm_response['Name'], 497 | sm_policy_doc 498 | )) 499 | logger.info('Storing the resource policy for secret {}'.format(secret_arn)) 500 | else: 501 | result.append(Policy( 502 | secret_arn, 503 | sm_response['Name'], 504 | { 505 | "Statement": [], 506 | "Version": "2012-10-17" 507 | } 508 | )) 509 | logger.info('Secret {} does not have a resource policy, inserting a "stub" policy instead'.format(secret_arn)) 510 | 511 | except botocore.exceptions.ClientError as ex: 512 | logger.info('Unable to search Secrets Manager in region {} for secrets. The region may be disabled, or ' 513 | 'the current principal may not be authorized to access the service. ' 514 | 'Continuing.'.format(sm_region)) 515 | logger.debug('Exception was: {}'.format(ex)) 516 | 517 | return result 518 | 519 | 520 | def get_unfilled_nodes(iamclient) -> List[Node]: 521 | """Using an IAM.Client object, return a list of Node object for each IAM user and role in an account. 522 | 523 | Does not set Group or Policy objects, does not set permissions boundary attr. Those have to be filled in later. 
524 | 525 | Writes high-level information on progress to the output file 526 | """ 527 | result = [] 528 | # Get users, paginating results, still need to handle policies + group memberships + is_admin 529 | logger.info("Obtaining IAM users in account") 530 | user_paginator = iamclient.get_paginator('list_users') 531 | for page in user_paginator.paginate(PaginationConfig={'PageSize': 25}): 532 | logger.debug('list_users page: {}'.format(page)) 533 | for user in page['Users']: 534 | # grab permission boundary ARN if applicable 535 | # TODO: iam:ListUsers does not return boundary information. may need to wait for a fix. 536 | if 'PermissionsBoundary' in user: 537 | _pb = user['PermissionsBoundary']['PermissionsBoundaryArn'] 538 | else: 539 | _pb = None 540 | result.append(Node( 541 | arn=user['Arn'], 542 | id_value=user['UserId'], 543 | attached_policies=[], 544 | group_memberships=[], 545 | trust_policy=None, 546 | instance_profile=None, 547 | num_access_keys=0, 548 | active_password='PasswordLastUsed' in user, 549 | is_admin=False, 550 | permissions_boundary=_pb, 551 | has_mfa=False, 552 | tags=None # TODO: fix tags for old user-gathering method 553 | )) 554 | logger.debug('Adding Node for user ' + user['Arn']) 555 | 556 | # Get roles, paginating results, still need to handle policies + is_admin 557 | logger.info("Obtaining IAM roles in account") 558 | role_paginator = iamclient.get_paginator('list_roles') 559 | for page in role_paginator.paginate(PaginationConfig={'PageSize': 25}): 560 | logger.debug('list_roles page: {}'.format(page)) 561 | for role in page['Roles']: 562 | # grab permission boundary ARN if applicable 563 | if 'PermissionsBoundary' in role: 564 | _pb = role['PermissionsBoundary']['PermissionsBoundaryArn'] 565 | else: 566 | _pb = None 567 | result.append(Node( 568 | arn=role['Arn'], 569 | id_value=role['RoleId'], 570 | attached_policies=[], 571 | group_memberships=[], 572 | trust_policy=role['AssumeRolePolicyDocument'], 573 | instance_profile=None, 574 | num_access_keys=0, 575 | active_password=False, 576 | is_admin=False, 577 | permissions_boundary=_pb, 578 | has_mfa=False, 579 | tags=None # TODO: fix tags for old role-gathering method 580 | )) 581 | 582 | # Get instance profiles, paginating results, and attach to roles as appropriate 583 | logger.info("Obtaining EC2 instance profiles in account") 584 | ip_paginator = iamclient.get_paginator('list_instance_profiles') 585 | for page in ip_paginator.paginate(PaginationConfig={'PageSize': 25}): 586 | logger.debug('list_instance_profiles page: {}'.format(page)) 587 | for iprofile in page['InstanceProfiles']: 588 | iprofile_arn = iprofile['Arn'] 589 | role_arns = [] 590 | for role in iprofile['Roles']: 591 | role_arns.append(role['Arn']) 592 | for node in result: 593 | if ':role/' in node.arn and node.arn in role_arns: 594 | node.instance_profile = iprofile_arn 595 | 596 | # Handle access keys 597 | logger.info("Obtaining Access Keys data for IAM users") 598 | for node in result: 599 | if arns.get_resource(node.arn).startswith('user/'): 600 | # Grab access-key count and update node 601 | user_name = arns.get_resource(node.arn)[5:] 602 | if '/' in user_name: 603 | user_name = user_name.split('/')[-1] 604 | logger.debug('removed path from username {}'.format(user_name)) 605 | access_keys_data = iamclient.list_access_keys(UserName=user_name) 606 | node.access_keys = len(access_keys_data['AccessKeyMetadata']) 607 | logger.debug('Access Key Count for {}: {}'.format(user_name, len(access_keys_data['AccessKeyMetadata']))) 608 | 609 | 
return result 610 | 611 | 612 | def get_unfilled_groups(iamclient, nodes: List[Node]) -> List[Group]: 613 | """Using an IAM.Client object, returns a list of Group objects. Adds to each passed Node's group_memberships 614 | property. 615 | 616 | Does not set Policy objects. Those have to be filled in later. 617 | 618 | Writes high-level progress information to parameter output 619 | """ 620 | result = [] 621 | 622 | # paginate through groups and build result 623 | logger.info("Obtaining IAM groups in the account.") 624 | group_paginator = iamclient.get_paginator('list_groups') 625 | for page in group_paginator.paginate(PaginationConfig={'PageSize': 25}): 626 | logger.debug('list_groups page: {}'.format(page)) 627 | for group in page['Groups']: 628 | result.append(Group( 629 | arn=group['Arn'], 630 | attached_policies=[] 631 | )) 632 | 633 | # loop through group memberships 634 | logger.info("Connecting IAM users to their groups.") 635 | for node in nodes: 636 | if not arns.get_resource(node.arn).startswith('user/'): 637 | continue # skip when not an IAM user 638 | logger.debug('finding groups for user {}'.format(node.arn)) 639 | user_name = arns.get_resource(node.arn)[5:] 640 | if '/' in user_name: 641 | user_name = user_name.split('/')[-1] 642 | logger.debug('removed path from username {}'.format(user_name)) 643 | group_list = iamclient.list_groups_for_user(UserName=user_name) 644 | for group in group_list['Groups']: 645 | for group_obj in result: 646 | if group['Arn'] == group_obj.arn: 647 | node.group_memberships.append(group_obj) 648 | 649 | return result 650 | 651 | 652 | def get_policies_and_fill_out(iamclient, nodes: List[Node], groups: List[Group]) -> List[Policy]: 653 | """Using an IAM.Client object, return a list of Policy objects. Adds references to each passed Node and 654 | Group object where applicable. Updates boundary policies. 655 | 656 | Writes high-level progress information to parameter output. 
657 | """ 658 | result = [] 659 | 660 | # navigate through nodes and add policy objects if they do not already exist in result 661 | logger.info("Obtaining policies used by all IAM users and roles") 662 | for node in nodes: 663 | node_name_components = arns.get_resource(node.arn).split('/') 664 | node_type, node_name = node_name_components[0], node_name_components[-1] 665 | logger.debug('Grabbing inline policies for {}'.format(node.arn)) 666 | # get inline policies 667 | if node_type == 'user': 668 | inline_policy_arns = iamclient.list_user_policies(UserName=node_name) 669 | # get each inline policy, append it to node's policies and result list 670 | for policy_name in inline_policy_arns['PolicyNames']: 671 | logger.debug('Grabbing inline policy: {}'.format(policy_name)) 672 | inline_policy = iamclient.get_user_policy(UserName=node_name, PolicyName=policy_name) 673 | policy_object = Policy(arn=node.arn, name=policy_name, policy_doc=inline_policy['PolicyDocument']) 674 | node.attached_policies.append(policy_object) 675 | result.append(policy_object) 676 | elif node_type == 'role': 677 | inline_policy_arns = iamclient.list_role_policies(RoleName=node_name) 678 | # get each inline policy, append it to the node's policies and result list 679 | # in hindsight, it's possible this could be folded with the above code, assuming the API doesn't change 680 | for policy_name in inline_policy_arns['PolicyNames']: 681 | logger.debug('Grabbing inline policy: {}'.format(policy_name)) 682 | inline_policy = iamclient.get_role_policy(RoleName=node_name, PolicyName=policy_name) 683 | policy_object = Policy(arn=node.arn, name=policy_name, policy_doc=inline_policy['PolicyDocument']) 684 | node.attached_policies.append(policy_object) 685 | result.append(policy_object) 686 | 687 | # get attached policies for users and roles 688 | if node_type == 'user': 689 | attached_policies = iamclient.list_attached_user_policies(UserName=node_name) 690 | else: # node_type == 'role': 691 | attached_policies = iamclient.list_attached_role_policies(RoleName=node_name) 692 | for attached_policy in attached_policies['AttachedPolicies']: 693 | policy_arn = attached_policy['PolicyArn'] 694 | logger.debug('Grabbing managed policy: {}'.format(policy_arn)) 695 | # reduce API calls, search existing policies for matching arns 696 | policy_object = _get_policy_by_arn(policy_arn, result) 697 | if policy_object is None: 698 | # Gotta retrieve the policy's current default version 699 | logger.debug('Policy cache miss, calling API') 700 | policy_response = iamclient.get_policy(PolicyArn=policy_arn) 701 | logger.debug('Policy version: {}'.format(policy_response['Policy']['DefaultVersionId'])) 702 | policy_version_response = iamclient.get_policy_version( 703 | PolicyArn=policy_arn, 704 | VersionId=policy_response['Policy']['DefaultVersionId'] 705 | ) 706 | policy_object = Policy( 707 | arn=policy_arn, 708 | name=policy_response['Policy']['PolicyName'], 709 | policy_doc=policy_version_response['PolicyVersion']['Document'] 710 | ) 711 | result.append(policy_object) 712 | node.attached_policies.append(policy_object) 713 | 714 | # get permission boundaries for users/roles 715 | logger.debug("perm boundary of {}: {}".format(node.searchable_name(), node.permissions_boundary)) 716 | if node.permissions_boundary is not None and isinstance(node.permissions_boundary, str): 717 | logger.debug('Getting boundary policy: {}'.format(node.permissions_boundary)) 718 | # reduce API calls, search existing policies for matching ARNs 719 | policy_object = 
_get_policy_by_arn(node.permissions_boundary, result) 720 | if policy_object is None: 721 | # Retrieve the policy's current default version 722 | logger.debug('Policy cache miss, calling API') 723 | policy_response = iamclient.get_policy(PolicyArn=node.permissions_boundary) 724 | logger.debug('Policy version: {}'.format(policy_response['Policy']['DefaultVersionId'])) 725 | policy_version_response = iamclient.get_policy_version( 726 | PolicyArn=node.permissions_boundary, 727 | VersionId=policy_response['Policy']['DefaultVersionId'] 728 | ) 729 | policy_object = Policy( 730 | arn=node.permissions_boundary, 731 | name=policy_response['Policy']['PolicyName'], 732 | policy_doc=policy_version_response['PolicyVersion']['Document'] 733 | ) 734 | result.append(policy_object) 735 | node.permissions_boundary = policy_object 736 | 737 | logger.info("Obtaining policies used by IAM groups") 738 | for group in groups: 739 | group_name = arns.get_resource(group.arn).split('/')[-1] # split by slashes and take the final item 740 | logger.debug('Getting policies for: {}'.format(group.arn)) 741 | # get inline policies 742 | inline_policies = iamclient.list_group_policies(GroupName=group_name) 743 | for policy_name in inline_policies['PolicyNames']: 744 | logger.debug('Grabbing inline policy: {}'.format(policy_name)) 745 | inline_policy = iamclient.get_group_policy(GroupName=group_name, PolicyName=policy_name) 746 | policy_object = Policy(arn=group.arn, name=policy_name, policy_doc=inline_policy['PolicyDocument']) 747 | group.attached_policies.append(policy_object) 748 | result.append(policy_object) 749 | 750 | # get attached policies 751 | attached_policies = iamclient.list_attached_group_policies(GroupName=group_name) 752 | for attached_policy in attached_policies['AttachedPolicies']: 753 | policy_arn = attached_policy['PolicyArn'] 754 | logger.debug('Grabbing managed policy: {}'.format(policy_arn)) 755 | # check cached policies first 756 | policy_object = _get_policy_by_arn(policy_arn, result) 757 | if policy_object is None: 758 | logger.debug('Policy cache miss, calling API') 759 | policy_response = iamclient.get_policy(PolicyArn=policy_arn) 760 | logger.debug('Policy version: {}'.format(policy_response['Policy']['DefaultVersionId'])) 761 | policy_version_response = iamclient.get_policy_version( 762 | PolicyArn=policy_arn, 763 | VersionId=policy_response['Policy']['DefaultVersionId'] 764 | ) 765 | policy_object = Policy( 766 | arn=policy_arn, 767 | name=policy_response['Policy']['PolicyName'], 768 | policy_doc=policy_version_response['PolicyVersion']['Document'] 769 | ) 770 | result.append(policy_object) 771 | group.attached_policies.append(policy_object) 772 | 773 | return result 774 | 775 | 776 | def update_admin_status(nodes: List[Node], scps: Optional[List[List[dict]]] = None) -> None: 777 | """Given a list of nodes, goes through and updates each node's is_admin data.""" 778 | logger.info('Determining which principals have administrative privileges') 779 | for node in nodes: 780 | node.is_admin = is_admin(node, scps) 781 | 782 | def is_admin(node: Node, scps: Optional[List[List[dict]]] = None) -> Boolean: 783 | ''' An admin needs to pass all the tests ''' 784 | node_type = arns.get_resource(node.arn).split('/')[0] 785 | # check if node can modify its own inline policies 786 | if node_type == 'user': 787 | action = 'iam:PutUserPolicy' 788 | else: # node_type == 'role' 789 | action = 'iam:PutRolePolicy' 790 | if not query_interface.local_check_authorization_handling_mfa(node, action, node.arn, {}, 791 | 
service_control_policy_groups=scps)[0]: 792 | return False 793 | 794 | # check if node can attach the AdministratorAccess policy to itself 795 | if node_type == 'user': 796 | action = 'iam:AttachUserPolicy' 797 | else: 798 | action = 'iam:AttachRolePolicy' 799 | condition_keys = {'iam:PolicyARN': 'arn:aws:iam::aws:policy/AdministratorAccess'} 800 | if not query_interface.local_check_authorization_handling_mfa(node, action, node.arn, condition_keys, 801 | service_control_policy_groups=scps)[0]: 802 | return False 803 | 804 | # check if node can create a role and attach the AdministratorAccess policy or an inline policy 805 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:CreateRole', '*', {})[0]: 806 | return False 807 | else: 808 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:AttachRolePolicy', '*', 809 | condition_keys, 810 | service_control_policy_groups=scps)[0]: 811 | return False 812 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:PutRolePolicy', '*', condition_keys, 813 | service_control_policy_groups=scps)[0]: 814 | return False 815 | 816 | # check if node can update an attached customer-managed policy (assumes SetAsDefault is set to True) 817 | for attached_policy in node.attached_policies: 818 | if attached_policy.arn != node.arn and ':aws:policy/' not in attached_policy.arn: 819 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:CreatePolicyVersion', 820 | attached_policy.arn, {}, 821 | service_control_policy_groups=scps)[0]: 822 | return False 823 | 824 | # check if node is a user, and if it can attach or modify any of its groups's policies 825 | if node_type == 'user': 826 | for group in node.group_memberships: 827 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:PutGroupPolicy', group.arn, {}, 828 | service_control_policy_groups=scps)[0]: 829 | return False 830 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:AttachGroupPolicy', group.arn, 831 | condition_keys, 832 | service_control_policy_groups=scps)[0]: 833 | return False 834 | for attached_policy in group.attached_policies: 835 | if attached_policy.arn != group.arn and ':aws:policy/' not in attached_policy.arn: 836 | if not query_interface.local_check_authorization_handling_mfa(node, 'iam:CreatePolicyVersion', 837 | attached_policy.arn, {}, 838 | service_control_policy_groups=scps)[0]: 839 | return False 840 | 841 | return _rnd_action_check(node) 842 | 843 | def _rnd_action_check(node: Node) -> Boolean: 844 | ''' Check if node can access a set of randomly chosen actions. 845 | Return the True if the node passes all the test ''' 846 | test_cnt = 10 847 | pass_cnt = 0 848 | with open('./misc/aws_actions.txt') as fp: 849 | content = fp.readlines() 850 | length = len(content) 851 | for _ in range(0,test_cnt): 852 | action = content[random.randint(0, length-1)] 853 | if query_interface.local_check_authorization_handling_mfa(node, action, node.arn, {})[0]: 854 | pass_cnt += 1 855 | return pass_cnt == test_cnt 856 | 857 | def get_organizations_data(session: botocore.session.Session) -> OrganizationTree: 858 | """Given a botocore Session object, generate an OrganizationTree object. This throws a RuntimeError if the session 859 | is for an Account that is not able to gather Organizations data, along with the reason why. 860 | 861 | The edge_list field of the OrganizationTree object is not populated. 
""" 862 | 863 | # grab account data 864 | stsclient = session.create_client('sts') 865 | account_data = stsclient.get_caller_identity() 866 | 867 | # try to grab org data, raising RuntimeError if appropriate 868 | try: 869 | orgsclient = session.create_client('organizations') 870 | organization_data = orgsclient.describe_organization() 871 | except botocore.exceptions.ClientError as ex: 872 | if 'AccessDeniedException' in str(ex): 873 | raise RuntimeError('Encountered a permission error. Either the current principal ({}) is not authorized to ' 874 | 'interact with AWS Organizations, or the current account ({}) is not the ' 875 | 'management account'.format(account_data['Arn'], account_data['Account'])) 876 | else: 877 | raise ex 878 | 879 | # compose the OrganizationTree object 880 | logger.info('Generating data for organization {} through management account {}'.format( 881 | organization_data['Organization']['Id'], 882 | organization_data['Organization']['MasterAccountId'] 883 | )) 884 | result = OrganizationTree( 885 | organization_data['Organization']['Id'], 886 | organization_data['Organization']['MasterAccountId'], 887 | None, # fill in `root_ous` later 888 | None, # get SCPs later 889 | None, # get account list later 890 | [], # caller is responsible for creating and setting the edge list 891 | {'pmapper_version': principalmapper.__version__} 892 | ) 893 | 894 | scp_list = [] 895 | root_ous = [] 896 | account_ids = [] 897 | 898 | # get root IDs to start 899 | logger.info('Going through roots of organization') 900 | root_ids_and_names = [] 901 | list_roots_paginator = orgsclient.get_paginator('list_roots') 902 | for page in list_roots_paginator.paginate(): 903 | root_ids_and_names.extend([(x['Id'], x['Name']) for x in page['Roots']]) 904 | 905 | def _get_scps_for_target(target_id: str) -> List[Policy]: 906 | """This method takes an ID for a target (root, OU, or account), then composes and returns a list of Policy 907 | objects for that target.""" 908 | scps_result = [] 909 | policy_name_arn_list = [] 910 | list_policies_paginator = orgsclient.get_paginator('list_policies_for_target') 911 | for lpp_page in list_policies_paginator.paginate(TargetId=target_id, Filter='SERVICE_CONTROL_POLICY'): 912 | for policy in lpp_page['Policies']: 913 | policy_name_arn_list.append((policy['Name'], policy['Arn'])) 914 | 915 | for name_arn_pair in policy_name_arn_list: 916 | policy_name, policy_arn = name_arn_pair 917 | desc_policy_resp = orgsclient.describe_policy(PolicyId=policy_arn.split('/')[-1]) 918 | scps_result.append(Policy(policy_arn, policy_name, json.loads(desc_policy_resp['Policy']['Content']))) 919 | 920 | logger.debug('SCPs of {}: {}'.format(target_id, [x.arn for x in scps_result])) 921 | 922 | scp_list.extend(scps_result) 923 | return scps_result 924 | 925 | def _get_tags_for_target(target_id: str) -> dict: 926 | """This method takes an ID for a target (root/OU/account) then composes and returns a dictionary for the 927 | tags for that target""" 928 | target_tags = {} 929 | list_tags_paginator = orgsclient.get_paginator('list_tags_for_resource') 930 | for ltp_page in list_tags_paginator.paginate(ResourceId=target_id): 931 | for tag in ltp_page['Tags']: 932 | target_tags[tag['Key']] = tag['Value'] 933 | 934 | logger.debug('Tags for {}: {}'.format(target_id, target_tags)) 935 | return target_tags 936 | 937 | # for each root, recursively grab child OUs while filling out OrganizationNode/OrganizationAccount objects 938 | # need to get tags, SCPs too 939 | def _compose_ou(parent_id: str, 
940 |         """This method takes an OU's ID and Name to compose and return an OrganizationNode object for that OU. This
941 |         grabs the accounts in the OU, tags for the OU, SCPs for the OU, and then gets the child OUs to recursively
942 |         compose those OrganizationNode objects."""
943 | 
944 |         logger.info('Composing data for "{}" ({})'.format(parent_name, parent_id))
945 | 
946 |         # Get tags for the OU
947 |         ou_tags = _get_tags_for_target(parent_id)
948 | 
949 |         # Get SCPs for the OU
950 |         ou_scps = _get_scps_for_target(parent_id)
951 | 
952 |         # Get accounts under the OU
953 |         org_account_objs = []  # type: List[OrganizationAccount]
954 |         list_accounts_paginator = orgsclient.get_paginator('list_accounts_for_parent')
955 |         ou_child_account_list = []
956 |         for lap_page in list_accounts_paginator.paginate(ParentId=parent_id):
957 |             for child_account_data in lap_page['Accounts']:
958 |                 ou_child_account_list.append(child_account_data['Id'])
959 |         logger.debug('Accounts: {}'.format(ou_child_account_list))
960 | 
961 |         account_ids.extend(ou_child_account_list)
962 |         for ou_child_account_id in ou_child_account_list:
963 |             child_account_tags = _get_tags_for_target(ou_child_account_id)
964 |             child_account_scps = _get_scps_for_target(ou_child_account_id)
965 |             org_account_objs.append(OrganizationAccount(ou_child_account_id, child_account_scps, child_account_tags))
966 | 
967 |         # get child OUs (pairs of Ids and Names)
968 |         child_ou_ids = []
969 |         list_children_paginator = orgsclient.get_paginator('list_children')
970 |         for lcp_page in list_children_paginator.paginate(ParentId=parent_id, ChildType='ORGANIZATIONAL_UNIT'):
971 |             for child in lcp_page['Children']:
972 |                 child_ou_ids.append(child['Id'])
973 | 
974 |         child_ous = []  # type: List[OrganizationNode]
975 |         for child_ou_id in child_ou_ids:
976 |             desc_ou_resp = orgsclient.describe_organizational_unit(OrganizationalUnitId=child_ou_id)
977 |             child_ous.append(_compose_ou(child_ou_id, desc_ou_resp['OrganizationalUnit']['Name']))
978 | 
979 |         return OrganizationNode(parent_id, parent_name, org_account_objs, child_ous, ou_scps, ou_tags)
980 | 
981 |     for root_id_and_name in root_ids_and_names:
982 |         root_ou_id, root_ou_name = root_id_and_name
983 |         root_ous.append(_compose_ou(root_ou_id, root_ou_name))
984 | 
985 |     # apply root OUs to result
986 |     result.root_ous = root_ous
987 | 
988 |     # apply collected SCPs to result
989 |     filtered_scp_list = []
990 |     filtered_arns = []
991 |     for scp in scp_list:
992 |         if scp.arn in filtered_arns:
993 |             continue
994 |         filtered_scp_list.append(scp)
995 |         filtered_arns.append(scp.arn)
996 | 
997 |     result.all_scps = filtered_scp_list
998 | 
999 |     # apply collected account IDs to result
1000 |     result.accounts = account_ids
1001 | 
1002 |     return result
1003 | 
1004 | 
1005 | def _get_policy_by_arn(arn: str, policies: List[Policy]) -> Optional[Policy]:
1006 |     """Helper function: pull a Policy object with the same ARN from a list or return None"""
1007 |     for policy in policies:
1008 |         if arn == policy.arn:
1009 |             return policy
1010 |     return None
1011 | 
1012 | 
1013 | def _get_policy_by_arn_or_raise(arn: str, policies: List[Policy]) -> Policy:
1014 |     """Helper function: pull a Policy object with the same ARN from a List, or raise a ValueError"""
1015 |     for policy in policies:
1016 |         if arn == policy.arn:
1017 |             return policy
1018 |     raise ValueError('Could not locate policy {}.'.format(arn))
1019 | 
--------------------------------------------------------------------------------
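Note: the snippet below is a minimal usage sketch, not a file in the repository. It assumes the gathering module listed above (misc/gathering.py) is importable as `misc.gathering`, that the caller holds credentials for the organization's management account, and that in practice `nodes` would be a list of fully populated principalmapper Node objects taken from a previously built PMapper graph.

    import botocore.session
    from misc import gathering  # assumed import path for the module listed above

    session = botocore.session.get_session()

    # Collect the AWS Organizations layout plus its service control policies.
    org_tree = gathering.get_organizations_data(session)
    print('{} accounts, {} SCPs'.format(len(org_tree.accounts), len(org_tree.all_scps)))

    # Re-evaluate admin status: is_admin() only returns True when every policy-edit
    # check and the random-action sample in _rnd_action_check() pass.
    nodes = []  # placeholder: in practice, Node objects from a built PMapper graph
    gathering.update_admin_status(nodes)
    admins = [n.arn for n in nodes if n.is_admin]
    print('Admin principals: {}'.format(admins))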