├── .env.example ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── aws_security_mcp ├── __init__.py ├── config.py ├── formatters │ ├── README.md │ ├── __init__.py │ ├── cloudfront.py │ ├── ecr.py │ ├── ecs_formatter.py │ ├── guardduty.py │ ├── iam_formatter.py │ ├── lambda_formatter.py │ ├── load_balancer.py │ ├── org_formatter.py │ ├── resource_tagging.py │ ├── route53.py │ ├── s3_formatter.py │ ├── securityhub.py │ ├── shield.py │ └── waf.py ├── main.py ├── services │ ├── README.md │ ├── __init__.py │ ├── access_analyzer.py │ ├── base.py │ ├── cloudfront.py │ ├── ec2.py │ ├── ecr.py │ ├── ecs.py │ ├── guardduty.py │ ├── iam.py │ ├── lambda_service.py │ ├── load_balancer.py │ ├── organizations.py │ ├── resource_tagging.py │ ├── route53.py │ ├── s3.py │ ├── securityhub.py │ ├── shield.py │ ├── trusted_advisor.py │ └── waf.py ├── tools │ ├── __init__.py │ ├── access_analyzer_tools.py │ ├── cloudfront_tools.py │ ├── ec2_tools.py │ ├── ecr_tools.py │ ├── ecs_tools.py │ ├── guardduty_tools.py │ ├── iam_tools.py │ ├── lambda_tools.py │ ├── load_balancer_tools.py │ ├── org_tools.py │ ├── registry.py │ ├── resource_tagging_tools.py │ ├── route53_tools.py │ ├── s3_tools.py │ ├── securityhub_tools.py │ ├── shield_tools.py │ ├── trusted_advisor_tools.py │ ├── waf_tools.py │ └── wrappers │ │ ├── __init__.py │ │ ├── access_analyzer_wrapper.py │ │ ├── cloudfront_wrapper.py │ │ ├── ec2_wrapper.py │ │ ├── ecr_wrapper.py │ │ ├── ecs_wrapper.py │ │ ├── guardduty_wrapper.py │ │ ├── iam_wrapper.py │ │ ├── lambda_wrapper.py │ │ ├── load_balancer_wrapper.py │ │ ├── org_wrapper.py │ │ ├── resource_tagging_wrapper.py │ │ ├── route53_wrapper.py │ │ ├── s3_wrapper.py │ │ ├── securityhub_wrapper.py │ │ ├── shield_wrapper.py │ │ ├── trusted_advisor_wrapper.py │ │ └── waf_wrapper.py └── utils │ ├── __init__.py │ ├── helpers.py │ └── policy_evaluator.py ├── images └── README.txt ├── pyproject.toml ├── requirements.txt └── run_aws_security.sh /.env.example: -------------------------------------------------------------------------------- 1 | # AWS Credentials Configuration 2 | # Option 1: Use AWS credentials directly 3 | AWS_ACCESS_KEY_ID=YOUR_ACCESS_KEY_ID 4 | AWS_SECRET_ACCESS_KEY=YOUR_SECRET_ACCESS_KEY 5 | AWS_SESSION_TOKEN=YOUR_SESSION_TOKEN 6 | 7 | # Option 2: Use AWS profile 8 | AWS_PROFILE=your-profile-name 9 | 10 | # AWS Region Configuration 11 | AWS_REGION=us-east-1 12 | AWS_ENDPOINT_URL= 13 | AWS_RETRY_MAX_ATTEMPTS=5 14 | AWS_RETRY_MODE=standard 15 | 16 | # Server Configuration 17 | SERVER_HOST=0.0.0.0 18 | SERVER_PORT=8000 19 | SERVER_WORKERS=1 20 | ALLOW_CREDENTIALS=false 21 | ALLOW_ORIGINS=* 22 | ENABLE_SSE_TRANSPORT=true 23 | ENABLE_STDIO_TRANSPORT=true 24 | 25 | # Logging Configuration 26 | LOG_LEVEL=INFO 27 | LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s 28 | LOG_JSON=false 29 | LOG_TO_FILE=false 30 | LOG_FILE_PATH= 31 | LOG_ROTATION=true 32 | MAX_LOG_SIZE_MB=10 33 | LOG_BACKUP_COUNT=5 34 | 35 | # Application Configuration 36 | ENVIRONMENT=development 37 | DEBUG=false 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # AWS Security MCP .gitignore 2 | 3 | # Environment and credentials 4 | .env 5 | .aws/ 6 | aws_config.json 7 | aws_settings.json 8 | *credentials* 9 | *.pem 10 | *.key 11 | *.crt 12 | 13 | # Python 14 | __pycache__/ 15 | *.py[cod] 16 | *$py.class 17 | *.so 18 | .Python 19 | env/ 20 | build/ 21 | develop-eggs/ 22 | dist/ 23 | 
downloads/ 24 | eggs/ 25 | .eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | .pytest_cache/ 35 | 36 | # Virtual environments 37 | venv/ 38 | env/ 39 | ENV/ 40 | .venv/ 41 | .virtualenv/ 42 | 43 | # IDE files 44 | .idea/ 45 | .vscode/ 46 | *.sublime-* 47 | .project 48 | .pydevproject 49 | 50 | # OS generated files 51 | .DS_Store 52 | .DS_Store? 53 | ._* 54 | .Spotlight-V100 55 | .Trashes 56 | ehthumbs.db 57 | Thumbs.db 58 | 59 | # Backups and logs 60 | *.log 61 | *.bak 62 | *.backup 63 | *.swp 64 | *~ 65 | 66 | # BFG report files 67 | bfg-report/ 68 | 69 | # Python-specific 70 | *.egg-info/ 71 | *.egg 72 | MANIFEST 73 | 74 | # Virtual environments 75 | env.bak/ 76 | venv.bak/ 77 | 78 | # IDE and editor-specific files 79 | *.swp 80 | *.swo 81 | *~ 82 | 83 | # Log files 84 | logs/ 85 | 86 | # Temporary files 87 | .coverage 88 | htmlcov/ 89 | .tox/ 90 | .nox/ 91 | .hypothesis/ 92 | .pytest_cache/ 93 | nosetests.xml 94 | coverage.xml 95 | *.cover 96 | *.py,cover 97 | 98 | # Runtime data 99 | pids 100 | *.pid 101 | *.seed 102 | *.pid.lock 103 | 104 | # Secret files 105 | .env.local 106 | .env.development.local 107 | .env.test.local 108 | .env.production.local 109 | *config.json 110 | *-config.json 111 | *_config.json 112 | secrets.json 113 | *secret*.json 114 | *key*.json 115 | 116 | # Package specific 117 | .pyenv 118 | .python-version 119 | uv.lock 120 | .cursor -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Create this file: Dockerfile 2 | FROM python:3.11-slim 3 | 4 | # Set working directory 5 | WORKDIR /app 6 | 7 | # Install system dependencies 8 | RUN apt-get update && apt-get install -y \ 9 | curl \ 10 | && rm -rf /var/lib/apt/lists/* 11 | 12 | # Copy requirements first for better caching 13 | COPY requirements.txt . 14 | 15 | # Install Python dependencies 16 | RUN pip install --no-cache-dir -r requirements.txt 17 | 18 | # Copy application code 19 | COPY aws_security_mcp/ ./aws_security_mcp/ 20 | COPY run_aws_security.sh . 21 | 22 | # Make the script executable 23 | RUN chmod +x run_aws_security.sh 24 | 25 | # Create non-root user for security 26 | RUN useradd -m -u 1000 mcpuser && chown -R mcpuser:mcpuser /app 27 | USER mcpuser 28 | 29 | # Set Python path so the aws_security_mcp module can be found 30 | ENV PYTHONPATH=/app 31 | 32 | # Expose the SSE port 33 | EXPOSE 8000 34 | 35 | # Health check 36 | HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ 37 | CMD curl -f http://localhost:8000/health || exit 1 38 | 39 | # Run the SSE server - Execute main.py with proper PYTHONPATH 40 | CMD ["python", "aws_security_mcp/main.py", "sse"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 
135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [2025] [Saransh Rana] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 
194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /aws_security_mcp/__init__.py: -------------------------------------------------------------------------------- 1 | """AWS Security MCP - A comprehensive AWS security inspection and remediation tool.""" 2 | 3 | __version__ = "0.1.0" -------------------------------------------------------------------------------- /aws_security_mcp/config.py: -------------------------------------------------------------------------------- 1 | """Configuration management for AWS Security MCP.""" 2 | 3 | import os 4 | from pathlib import Path 5 | from typing import Any, Dict, Optional, Union 6 | import logging 7 | 8 | from dotenv import load_dotenv 9 | from pydantic import BaseModel, Field, validator 10 | 11 | # Load environment variables from .env file if present 12 | load_dotenv() 13 | 14 | class AWSConfig(BaseModel): 15 | """AWS configuration settings.""" 16 | 17 | aws_access_key_id: Optional[str] = Field( 18 | default=None, 19 | description="AWS access key ID" 20 | ) 21 | aws_secret_access_key: Optional[str] = Field( 22 | default=None, 23 | description="AWS secret access key" 24 | ) 25 | aws_session_token: Optional[str] = Field( 26 | default=None, 27 | description="AWS session token for temporary credentials" 28 | ) 29 | aws_region: str = Field( 30 | default="ap-south-1", 31 | description="AWS region for API calls" 32 | ) 33 | aws_profile: Optional[str] = Field( 34 | default=None, 35 | description="AWS profile name to use" 36 | ) 37 | 38 | @validator('aws_region') 39 | def validate_region(cls, v: str) -> str: 40 | """Validate AWS region format.""" 41 | if not v: 42 | return "us-east-1" 43 | 44 | # Basic format validation for common region prefixes 45 | valid_prefixes = ["us-", "eu-", "ap-", "ca-", "sa-", "af-", "me-"] 46 | if not any(v.startswith(prefix) for prefix in valid_prefixes): 47 | raise ValueError(f"Invalid AWS region format: {v}. Must start with one of {valid_prefixes}") 48 | 49 | return v 50 | 51 | @property 52 | def has_iam_credentials(self) -> bool: 53 | """Check if IAM access key credentials are set.""" 54 | return bool(self.aws_access_key_id and self.aws_secret_access_key) 55 | 56 | @property 57 | def has_sts_credentials(self) -> bool: 58 | """Check if STS temporary credentials are set.""" 59 | return bool(self.aws_access_key_id and self.aws_secret_access_key and self.aws_session_token) 60 | 61 | @property 62 | def has_profile(self) -> bool: 63 | """Check if an AWS profile is set.""" 64 | return bool(self.aws_profile) 65 | 66 | @property 67 | def credentials_source(self) -> str: 68 | """Determine the source of credentials to use.""" 69 | if self.has_profile: 70 | return "profile" 71 | elif self.has_sts_credentials: 72 | return "sts" 73 | elif self.has_iam_credentials: 74 | return "iam" 75 | else: 76 | return "auto" # Let boto3 handle credential resolution (ECS task role, instance profile, etc.) 
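    # --- Illustrative sketch (hypothetical helper, not part of the original config.py) ---
    # Shows one way the credentials_source resolution above could be turned into a
    # boto3 Session. boto3.Session and its profile_name / access-key / region_name
    # keyword arguments are real boto3 APIs; the method name itself is an assumption.
    def build_boto3_session_sketch(self) -> "boto3.Session":
        """Build a boto3 Session mirroring credentials_source (illustrative only)."""
        import boto3  # local import, matching the style used elsewhere in this module

        source = self.credentials_source
        if source == "profile":
            return boto3.Session(profile_name=self.aws_profile, region_name=self.aws_region)
        if source in ("sts", "iam"):
            return boto3.Session(
                aws_access_key_id=self.aws_access_key_id,
                aws_secret_access_key=self.aws_secret_access_key,
                aws_session_token=self.aws_session_token,  # None for plain IAM keys
                region_name=self.aws_region,
            )
        # "auto": defer to boto3's default credential chain (env vars, shared
        # config/credentials files, ECS task role, or EC2 instance profile).
        return boto3.Session(region_name=self.aws_region)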
77 | 78 | @property 79 | def is_ecs_environment(self) -> bool: 80 | """Check if running in ECS environment.""" 81 | import os 82 | # ECS provides these environment variables 83 | return bool( 84 | os.getenv("AWS_EXECUTION_ENV") or 85 | os.getenv("ECS_CONTAINER_METADATA_URI") or 86 | os.getenv("ECS_CONTAINER_METADATA_URI_V4") or 87 | os.getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") 88 | ) 89 | 90 | @property 91 | def is_ec2_environment(self) -> bool: 92 | """Check if running in EC2 environment with instance profile.""" 93 | import os 94 | # EC2 instance metadata service availability (simplified check) 95 | return bool(os.getenv("AWS_EXECUTION_ENV") == "EC2-Instance") 96 | 97 | def validate_ecs_credentials(self) -> bool: 98 | """Validate that ECS task role credentials are accessible. 99 | 100 | Returns: 101 | True if ECS credentials are accessible, False otherwise 102 | """ 103 | if not self.is_ecs_environment: 104 | return False 105 | 106 | try: 107 | import boto3 108 | # Try to create a session and get caller identity 109 | session = boto3.Session(region_name=self.aws_region) 110 | sts_client = session.client('sts') 111 | identity = sts_client.get_caller_identity() 112 | 113 | # If we get here, credentials are working 114 | logging.getLogger(__name__).info(f"✅ ECS task role validated: {identity.get('Arn', 'Unknown ARN')}") 115 | return True 116 | 117 | except Exception as e: 118 | logging.getLogger(__name__).error(f"❌ ECS task role validation failed: {e}") 119 | return False 120 | 121 | class CrossAccountConfig(BaseModel): 122 | """Cross-account credential configuration settings.""" 123 | 124 | role_name: str = Field( 125 | default="aws-security-mcp-cross-account-access", 126 | description="Name of the role to assume in target accounts" 127 | ) 128 | session_name: str = Field( 129 | default="aws-security-mcp-session", 130 | description="Session name for assumed roles" 131 | ) 132 | session_duration_seconds: int = Field( 133 | default=3600, 134 | description="Duration of assumed role sessions in seconds" 135 | ) 136 | refresh_threshold_minutes: int = Field( 137 | default=10, 138 | description="Refresh sessions when they expire within this many minutes" 139 | ) 140 | auto_setup_on_startup: bool = Field( 141 | default=True, 142 | description="Automatically set up cross-account sessions on server startup" 143 | ) 144 | auto_refresh_enabled: bool = Field( 145 | default=True, 146 | description="Automatically refresh expiring sessions" 147 | ) 148 | max_concurrent_assumptions: int = Field( 149 | default=5, 150 | description="Maximum number of concurrent role assumptions" 151 | ) 152 | 153 | class MCPServerConfig(BaseModel): 154 | """MCP server configuration settings.""" 155 | 156 | host: str = Field( 157 | default="127.0.0.1", 158 | description="Host address to bind the server" 159 | ) 160 | port: int = Field( 161 | default=8000, 162 | description="Port to run the server on" 163 | ) 164 | debug: bool = Field( 165 | default=False, 166 | description="Enable debug mode" 167 | ) 168 | log_level: str = Field( 169 | default="info", 170 | description="Logging level" 171 | ) 172 | max_concurrent_requests: int = Field( 173 | default=10, 174 | description="Maximum number of concurrent AWS API requests" 175 | ) 176 | client_cache_ttl: int = Field( 177 | default=3600, 178 | description="Time to live for cached AWS clients in seconds" 179 | ) 180 | 181 | @validator('log_level') 182 | def validate_log_level(cls, v: str) -> str: 183 | """Validate log level.""" 184 | valid_levels = ["debug", "info", 
"warning", "error", "critical"] 185 | if v.lower() not in valid_levels: 186 | raise ValueError(f"Invalid log level: {v}. Must be one of {valid_levels}") 187 | return v.lower() 188 | 189 | class AppConfig(BaseModel): 190 | """Main application configuration.""" 191 | 192 | aws: AWSConfig = Field(default_factory=AWSConfig) 193 | server: MCPServerConfig = Field(default_factory=MCPServerConfig) 194 | cross_account: CrossAccountConfig = Field(default_factory=CrossAccountConfig) 195 | 196 | class Config: 197 | """Pydantic config options.""" 198 | extra = "ignore" 199 | 200 | def load_config() -> AppConfig: 201 | """Load configuration from environment variables. 202 | 203 | Returns: 204 | AppConfig instance with loaded configuration 205 | """ 206 | # Extract AWS configuration from environment 207 | # For ECS tasks, AWS_DEFAULT_REGION is more commonly used than AWS_REGION 208 | aws_region = os.getenv("AWS_REGION") or os.getenv("AWS_DEFAULT_REGION", "ap-south-1") 209 | 210 | aws_config = { 211 | "aws_access_key_id": os.getenv("AWS_ACCESS_KEY_ID"), 212 | "aws_secret_access_key": os.getenv("AWS_SECRET_ACCESS_KEY"), 213 | "aws_session_token": os.getenv("AWS_SESSION_TOKEN"), 214 | "aws_region": aws_region, 215 | "aws_profile": os.getenv("AWS_PROFILE"), 216 | } 217 | 218 | # Extract server configuration from environment 219 | server_config = { 220 | "host": os.getenv("MCP_HOST", "127.0.0.1"), 221 | "port": int(os.getenv("MCP_PORT", "8000")), 222 | "debug": os.getenv("MCP_DEBUG", "False").lower() in ("true", "1", "yes"), 223 | "log_level": os.getenv("MCP_LOG_LEVEL", "info"), 224 | "max_concurrent_requests": int(os.getenv("MCP_MAX_CONCURRENT_REQUESTS", "10")), 225 | "client_cache_ttl": int(os.getenv("MCP_CLIENT_CACHE_TTL", "3600")), 226 | } 227 | 228 | # Extract cross-account configuration from environment 229 | cross_account_config = { 230 | "role_name": os.getenv("MCP_CROSS_ACCOUNT_ROLE_NAME", "aws-security-mcp-cross-account-access"), 231 | "session_name": os.getenv("MCP_CROSS_ACCOUNT_SESSION_NAME", "aws-security-mcp-session"), 232 | "session_duration_seconds": int(os.getenv("MCP_SESSION_DURATION_SECONDS", "3600")), 233 | "refresh_threshold_minutes": int(os.getenv("MCP_REFRESH_THRESHOLD_MINUTES", "10")), 234 | "auto_setup_on_startup": os.getenv("MCP_AUTO_SETUP_SESSIONS", "True").lower() in ("true", "1", "yes"), 235 | "auto_refresh_enabled": os.getenv("MCP_AUTO_REFRESH_ENABLED", "True").lower() in ("true", "1", "yes"), 236 | "max_concurrent_assumptions": int(os.getenv("MCP_MAX_CONCURRENT_ASSUMPTIONS", "5")), 237 | } 238 | 239 | # Create the config object 240 | app_config = AppConfig( 241 | aws=AWSConfig(**aws_config), 242 | server=MCPServerConfig(**server_config), 243 | cross_account=CrossAccountConfig(**cross_account_config), 244 | ) 245 | 246 | # Verify AWS credential configuration and log information 247 | logging.getLogger(__name__).info(f"AWS Region: {app_config.aws.aws_region}") 248 | 249 | if app_config.aws.has_profile: 250 | logging.getLogger(__name__).info(f"AWS credentials source: Profile ({app_config.aws.aws_profile})") 251 | elif app_config.aws.has_sts_credentials: 252 | logging.getLogger(__name__).info("AWS credentials source: STS temporary credentials") 253 | elif app_config.aws.has_iam_credentials: 254 | logging.getLogger(__name__).info("AWS credentials source: IAM access key credentials") 255 | else: 256 | # Provide more specific logging for container environments 257 | if app_config.aws.is_ecs_environment: 258 | logging.getLogger(__name__).info("AWS credentials source: ECS Task Role 
(auto-resolution)") 259 | elif app_config.aws.is_ec2_environment: 260 | logging.getLogger(__name__).info("AWS credentials source: EC2 Instance Profile (auto-resolution)") 261 | else: 262 | logging.getLogger(__name__).info( 263 | "AWS credentials source: Auto-resolution (environment variables, ~/.aws/credentials, ECS task role, or instance profile)" 264 | ) 265 | 266 | return app_config 267 | 268 | # Global config instance 269 | config = load_config() -------------------------------------------------------------------------------- /aws_security_mcp/formatters/README.md: -------------------------------------------------------------------------------- 1 | # AWS Security MCP - Formatter Modules 2 | 3 | This directory contains formatter modules that transform the responses from AWS service APIs into standardized, user-friendly formats. These formatters ensure consistent response structures across all MCP tools. 4 | 5 | ## Formatter Structure 6 | 7 | Each formatter module contains functions that format responses from a specific AWS service. These functions: 8 | 9 | 1. Extract relevant information from AWS API responses 10 | 2. Transform complex data structures into simpler, more readable formats 11 | 3. Add additional context or derived information where useful 12 | 4. Standardize error responses 13 | 14 | ## Formatter Modules 15 | 16 | ### Resource Tagging Formatters (`resource_tagging.py`) 17 | 18 | The Resource Tagging Formatters module provides functions to format responses from the AWS Resource Groups Tagging API. 19 | 20 | #### Key Functions: 21 | 22 | 1. **`format_resource_details`**: Formats a single resource's details, extracting information from the ARN and tags. 23 | 2. **`format_resources_response`**: Formats the response from `get_resources_by_tags`, transforming resource mappings into a user-friendly format. 24 | 3. **`format_tag_keys_response`**: Formats the response from `get_tag_keys`, providing tag keys and pagination information. 25 | 4. **`format_tag_values_response`**: Formats the response from `get_tag_values`, providing tag key, values, and pagination information. 26 | 27 | ### Load Balancer Formatters (`load_balancer.py`) 28 | 29 | The Load Balancer Formatters module provides functions to format responses from the AWS Elastic Load Balancing API. 30 | 31 | #### Key Functions: 32 | 33 | 1. **`format_load_balancer_response`**: Formats the response from `get_all_load_balancers_v2`, providing load balancer details in a standardized format. 34 | 2. **`format_target_group_response`**: Formats the response from `get_all_target_groups`, providing target group details in a standardized format. 35 | 3. **`format_listener_response`**: Formats the response from `get_all_listeners`, providing listener details in a standardized format. 36 | 37 | ## Standardized Response Format 38 | 39 | All formatter functions follow a standardized response format: 40 | 41 | ```json 42 | { 43 | "resource_type": [ 44 | { 45 | "id": "resource-id", 46 | "name": "resource-name", 47 | "arn": "resource-arn", 48 | ... additional resource-specific fields ... 
49 | } 50 | ], 51 | "resource_count": 1, 52 | "next_token": "pagination-token" 53 | } 54 | ``` 55 | 56 | In case of errors, the response includes an `error` field: 57 | 58 | ```json 59 | { 60 | "resource_type": [], 61 | "resource_count": 0, 62 | "error": "Error message" 63 | } 64 | ``` 65 | 66 | ## Usage Example 67 | 68 | ```python 69 | from aws_security_mcp.services.resource_tagging import get_resources_by_tags 70 | from aws_security_mcp.formatters.resource_tagging import format_resources_response 71 | 72 | # Get raw response from service 73 | raw_response = await get_resources_by_tags( 74 | tag_key="Environment", 75 | tag_value="Production" 76 | ) 77 | 78 | # Format the response 79 | formatted_response = format_resources_response(raw_response) 80 | 81 | # Convert to JSON 82 | import json 83 | json_response = json.dumps(formatted_response) 84 | ``` -------------------------------------------------------------------------------- /aws_security_mcp/formatters/__init__.py: -------------------------------------------------------------------------------- 1 | """Formatters for AWS resources in the AWS Security MCP.""" 2 | 3 | from typing import Any, Dict, List, Optional, Union 4 | 5 | # Common text formatting utilities 6 | def truncate_text(text: str, max_length: int = 100) -> str: 7 | """Truncate text to a maximum length with ellipsis if needed. 8 | 9 | Args: 10 | text: Text to truncate 11 | max_length: Maximum length before truncation 12 | 13 | Returns: 14 | Truncated text with ellipsis if needed 15 | """ 16 | if not text or len(text) <= max_length: 17 | return text 18 | return text[:max_length - 3] + "..." 19 | 20 | def format_key_value(key: str, value: Any, indent: int = 0) -> str: 21 | """Format a key-value pair for display. 22 | 23 | Args: 24 | key: The label or key 25 | value: The value to display 26 | indent: Number of spaces to indent 27 | 28 | Returns: 29 | Formatted key-value string 30 | """ 31 | indent_str = " " * indent 32 | return f"{indent_str}{key}: {value}" 33 | 34 | def format_list(items: List[Any], indent: int = 0) -> str: 35 | """Format a list of items with bullet points. 
36 | 37 | Args: 38 | items: List of items to format 39 | indent: Number of spaces to indent 40 | 41 | Returns: 42 | Formatted list as a string 43 | """ 44 | if not items: 45 | return "" 46 | 47 | indent_str = " " * indent 48 | return "\n".join(f"{indent_str}- {item}" for item in items) 49 | 50 | # Export formatters for easier imports 51 | # Removed IAM formatters - to be rebuilt 52 | # from aws_security_mcp.formatters.iam import ( 53 | # format_iam_role_json, 54 | # format_iam_policy_json, 55 | # format_iam_user_json, 56 | # format_iam_access_key_json, 57 | # format_iam_permission_set_json, 58 | # format_iam_group_json, 59 | # format_iam_policy_document_json, 60 | # format_iam_mfa_device_json, 61 | # format_iam_user_detail_json, 62 | # format_iam_role_detail_json, 63 | # format_iam_summary_json, 64 | # format_iam_credential_report_json, 65 | # format_iam_permissions_boundary_json, 66 | # ) 67 | 68 | from aws_security_mcp.formatters.s3_formatter import ( 69 | format_bucket_simple, 70 | format_bucket_details, 71 | format_public_buckets_assessment, 72 | calculate_security_rating, 73 | format_acl_grants 74 | ) 75 | 76 | from aws_security_mcp.formatters.org_formatter import ( 77 | format_organization_simple, 78 | format_account_simple, 79 | format_policy_simple, 80 | format_policy_detail, 81 | format_policy_target, 82 | format_policy_with_targets, 83 | format_org_hierarchy, 84 | format_effective_policies 85 | ) 86 | 87 | # EC2 formatters removed as they're now replaced with direct JSON formatting in the tools 88 | 89 | from aws_security_mcp.formatters.guardduty import ( 90 | format_guardduty_detector_json, 91 | format_guardduty_finding_json, 92 | format_guardduty_findings_statistics_json, 93 | format_guardduty_ip_set_json, 94 | format_guardduty_threat_intel_set_json, 95 | format_guardduty_filter_json, 96 | format_guardduty_detectors_summary_json, 97 | ) 98 | 99 | from aws_security_mcp.formatters.lambda_formatter import ( 100 | format_lambda_function_json, 101 | format_lambda_layer_json, 102 | format_lambda_functions_summary_json, 103 | format_lambda_alias_json, 104 | format_lambda_event_source_mapping_json, 105 | ) 106 | 107 | # Legacy formatters - will be migrated in future updates 108 | from aws_security_mcp.formatters.load_balancer import ( 109 | format_load_balancer, 110 | format_target_group, 111 | format_listener, 112 | ) 113 | 114 | from aws_security_mcp.formatters.cloudfront import ( 115 | format_distribution, 116 | format_cache_policy, 117 | format_origin_request_policy, 118 | ) 119 | 120 | from aws_security_mcp.formatters.route53 import ( 121 | format_hosted_zone, 122 | format_record_set, 123 | format_health_check, 124 | ) 125 | 126 | from aws_security_mcp.formatters.securityhub import ( 127 | format_finding, 128 | format_securityhub_finding, 129 | format_finding_resources, 130 | format_finding_summary, 131 | format_insight, 132 | format_standard, 133 | format_control, 134 | ) -------------------------------------------------------------------------------- /aws_security_mcp/formatters/cloudfront.py: -------------------------------------------------------------------------------- 1 | """Formatters for CloudFront resources.""" 2 | 3 | import json 4 | from typing import Any, Dict, List, Optional, Union 5 | 6 | 7 | def format_distribution(distribution: Dict[str, Any]) -> str: 8 | """Format a CloudFront distribution into a readable string. 
9 | 10 | Args: 11 | distribution: CloudFront distribution data dictionary 12 | 13 | Returns: 14 | Formatted string representation of the distribution 15 | """ 16 | dist_config = distribution.get('DistributionConfig', {}) 17 | origins = dist_config.get('Origins', {}).get('Items', []) 18 | 19 | origins_str = "\n ".join([f"{o.get('Id', 'Unknown')}: {o.get('DomainName', 'Unknown')}" for o in origins]) 20 | if not origins_str: 21 | origins_str = "None" 22 | 23 | return f""" 24 | Distribution ID: {distribution.get('Id', 'Unknown')} 25 | Domain Name: {distribution.get('DomainName', 'Unknown')} 26 | Status: {distribution.get('Status', 'Unknown')} 27 | Enabled: {'Yes' if dist_config.get('Enabled') else 'No'} 28 | Price Class: {dist_config.get('PriceClass', 'Unknown')} 29 | HTTP Version: {dist_config.get('HttpVersion', 'Unknown')} 30 | Default Root Object: {dist_config.get('DefaultRootObject', 'None')} 31 | Origins: 32 | {origins_str} 33 | SSL Certificate: {dist_config.get('ViewerCertificate', {}).get('CertificateSource', 'Unknown')} 34 | """ 35 | 36 | 37 | def format_cache_policy(policy: Dict[str, Any]) -> str: 38 | """Format a CloudFront cache policy into a readable string. 39 | 40 | Args: 41 | policy: CloudFront cache policy data dictionary 42 | 43 | Returns: 44 | Formatted string representation of the cache policy 45 | """ 46 | cache_policy_config = policy.get('CachePolicyConfig', {}) 47 | 48 | return f""" 49 | Cache Policy: {cache_policy_config.get('Name', 'Unknown')} 50 | ID: {policy.get('Id', 'Unknown')} 51 | Min TTL: {cache_policy_config.get('MinTTL', 'Unknown')} 52 | Max TTL: {cache_policy_config.get('MaxTTL', 'Unknown')} 53 | Default TTL: {cache_policy_config.get('DefaultTTL', 'Unknown')} 54 | """ 55 | 56 | 57 | def format_origin_request_policy(policy: Dict[str, Any]) -> str: 58 | """Format a CloudFront origin request policy into a readable string. 59 | 60 | Args: 61 | policy: CloudFront origin request policy data dictionary 62 | 63 | Returns: 64 | Formatted string representation of the origin request policy 65 | """ 66 | policy_config = policy.get('OriginRequestPolicyConfig', {}) 67 | 68 | return f""" 69 | Origin Request Policy: {policy_config.get('Name', 'Unknown')} 70 | ID: {policy.get('Id', 'Unknown')} 71 | Headers Behavior: {policy_config.get('HeadersConfig', {}).get('HeaderBehavior', 'Unknown')} 72 | Cookies Behavior: {policy_config.get('CookiesConfig', {}).get('CookieBehavior', 'Unknown')} 73 | Query Strings Behavior: {policy_config.get('QueryStringsConfig', {}).get('QueryStringBehavior', 'Unknown')} 74 | """ -------------------------------------------------------------------------------- /aws_security_mcp/formatters/ecr.py: -------------------------------------------------------------------------------- 1 | """ECR formatter module for AWS Security MCP. 2 | 3 | This module provides functions to format ECR information 4 | for better readability and security assessment. 5 | """ 6 | 7 | import logging 8 | from typing import Any, Dict, List 9 | import json 10 | from datetime import datetime 11 | 12 | # Configure logging 13 | logger = logging.getLogger(__name__) 14 | 15 | def format_repository_simple(repository: Dict[str, Any]) -> Dict[str, Any]: 16 | """Format a repository into a simplified representation. 
17 | 18 | Args: 19 | repository: Raw repository data from AWS 20 | 21 | Returns: 22 | Dict containing simplified repository representation 23 | """ 24 | try: 25 | return { 26 | 'name': repository.get('repositoryName'), 27 | 'uri': repository.get('repositoryUri'), 28 | 'arn': repository.get('repositoryArn'), 29 | 'created_at': repository.get('createdAt').isoformat() if repository.get('createdAt') else None 30 | } 31 | except Exception as e: 32 | logger.error(f"Error formatting repository info: {str(e)}") 33 | return repository # Return original data if formatting fails 34 | 35 | def extract_repository_uris(repositories: List[Dict[str, Any]]) -> List[str]: 36 | """Extract only repository URIs from repository data. 37 | 38 | Args: 39 | repositories: List of repository data from AWS 40 | 41 | Returns: 42 | List containing only repository URIs 43 | """ 44 | try: 45 | return [repo.get('repositoryUri', '') for repo in repositories if repo.get('repositoryUri')] 46 | except Exception as e: 47 | logger.error(f"Error extracting repository URIs: {str(e)}") 48 | return [] # Return empty list if extraction fails 49 | 50 | def extract_repository_names(repositories: List[Dict[str, Any]]) -> List[str]: 51 | """Extract only repository names from repository data. 52 | 53 | Args: 54 | repositories: List of repository data from AWS 55 | 56 | Returns: 57 | List containing only repository names 58 | """ 59 | try: 60 | return [repo.get('repositoryName', '') for repo in repositories if repo.get('repositoryName')] 61 | except Exception as e: 62 | logger.error(f"Error extracting repository names: {str(e)}") 63 | return [] # Return empty list if extraction fails 64 | 65 | def format_repository_detail(repository: Dict[str, Any]) -> Dict[str, Any]: 66 | """Format a repository with detailed information. 
67 | 68 | Args: 69 | repository: Raw repository data with extended details 70 | 71 | Returns: 72 | Dict containing formatted repository details 73 | """ 74 | try: 75 | # Format basic repository information 76 | formatted = { 77 | 'name': repository.get('repositoryName'), 78 | 'uri': repository.get('repositoryUri'), 79 | 'arn': repository.get('repositoryArn'), 80 | 'created_at': repository.get('createdAt').isoformat() if repository.get('createdAt') else None, 81 | 'images_count': repository.get('images_count', 0) 82 | } 83 | 84 | # Format policy information 85 | policy = repository.get('policy') 86 | if policy: 87 | try: 88 | # Policy might be a string or already parsed 89 | if isinstance(policy, str): 90 | policy_json = json.loads(policy) 91 | else: 92 | policy_json = policy 93 | 94 | formatted['policy'] = { 95 | 'version': policy_json.get('Version'), 96 | 'statements': policy_json.get('Statement', []) 97 | } 98 | except Exception as e: 99 | logger.warning(f"Error parsing repository policy: {str(e)}") 100 | formatted['policy'] = {'raw': policy} 101 | else: 102 | formatted['policy'] = None 103 | 104 | # Format image information 105 | latest_images = repository.get('latest_images', []) 106 | formatted_images = [] 107 | 108 | for image in latest_images: 109 | try: 110 | image_tags = image.get('imageTags', []) 111 | formatted_image = { 112 | 'digest': image.get('imageDigest'), 113 | 'tags': image_tags, 114 | 'pushed_at': image.get('imagePushedAt').isoformat() if image.get('imagePushedAt') else None, 115 | 'size_in_mb': round(image.get('imageSizeInBytes', 0) / (1024 * 1024), 2) if image.get('imageSizeInBytes') else 0, 116 | 'scan_status': image.get('imageScanStatus', {}).get('status', 'UNKNOWN'), 117 | 'scan_findings': image.get('imageScanFindingsSummary', {}).get('findingSeverityCounts', {}) 118 | } 119 | formatted_images.append(formatted_image) 120 | except Exception as e: 121 | logger.error(f"Error formatting image: {str(e)}") 122 | 123 | formatted['latest_images'] = formatted_images 124 | 125 | return formatted 126 | except Exception as e: 127 | logger.error(f"Error formatting repository details: {str(e)}") 128 | return repository # Return original data if formatting fails 129 | 130 | def format_repository_search_results(search_results: Dict[str, Any]) -> Dict[str, Any]: 131 | """Format repository search results. 132 | 133 | Args: 134 | search_results: Raw search results from ECR service 135 | 136 | Returns: 137 | Dict containing formatted search results 138 | """ 139 | try: 140 | repositories = search_results.get('repositories', []) 141 | formatted_repositories = [format_repository_detail(repo) for repo in repositories] 142 | 143 | return { 144 | 'search_term': search_results.get('search_term'), 145 | 'repositories': formatted_repositories, 146 | 'count': len(formatted_repositories), 147 | 'scan_timestamp': datetime.utcnow().isoformat() 148 | } 149 | except Exception as e: 150 | logger.error(f"Error formatting repository search results: {str(e)}") 151 | return search_results # Return original data if formatting fails -------------------------------------------------------------------------------- /aws_security_mcp/formatters/load_balancer.py: -------------------------------------------------------------------------------- 1 | """Formatters for Load Balancer resources.""" 2 | 3 | import json 4 | from typing import Any, Dict, List, Optional, Union 5 | 6 | 7 | def format_load_balancer(lb: Dict[str, Any]) -> str: 8 | """Format a load balancer into a readable string. 
9 | 10 | Args: 11 | lb: Load balancer data dictionary 12 | 13 | Returns: 14 | Formatted string representation of the load balancer 15 | """ 16 | # Determine load balancer type 17 | lb_type = lb.get('Type', 'Unknown').lower() 18 | 19 | # Common fields 20 | formatted = f""" 21 | Load Balancer: {lb.get('LoadBalancerName', 'Unknown')} 22 | DNS Name: {lb.get('DNSName', 'Unknown')} 23 | Type: {lb_type.title()} 24 | Scheme: {lb.get('Scheme', 'Unknown')} 25 | """ 26 | 27 | # Add VPC info if available 28 | if 'VpcId' in lb: 29 | formatted += f"VPC: {lb.get('VpcId', 'Unknown')}\n" 30 | 31 | # Add state if available 32 | if isinstance(lb.get('State'), dict): 33 | formatted += f"State: {lb.get('State', {}).get('Code', 'Unknown')}\n" 34 | else: 35 | formatted += f"State: {lb.get('State', 'Unknown')}\n" 36 | 37 | # Format AZs based on load balancer type 38 | if lb_type == 'classic': 39 | # Classic load balancer AZ format 40 | if 'AvailabilityZones' in lb and isinstance(lb['AvailabilityZones'], list): 41 | formatted += f"AZs: {', '.join(lb.get('AvailabilityZones', []))}\n" 42 | else: 43 | # ELBv2 AZ format (more complex object) 44 | if 'AvailabilityZones' in lb and isinstance(lb['AvailabilityZones'], list): 45 | az_names = [az.get('ZoneName', 'Unknown') for az in lb.get('AvailabilityZones', [])] 46 | formatted += f"AZs: {', '.join(az_names)}\n" 47 | 48 | # Add security groups 49 | formatted += f"Security Groups: {', '.join(lb.get('SecurityGroups', []))}\n" 50 | 51 | # Add creation time 52 | if 'CreatedTime' in lb: 53 | formatted += f"Created: {lb.get('CreatedTime', 'Unknown')}\n" 54 | 55 | # Add type-specific details 56 | if lb_type == 'application': 57 | formatted += f"IP Address Type: {lb.get('IpAddressType', 'Unknown')}\n" 58 | if 'LoadBalancerAttributes' in lb: 59 | attrs = lb.get('LoadBalancerAttributes', []) 60 | for attr in attrs: 61 | if attr.get('Key') == 'idle_timeout.timeout_seconds': 62 | formatted += f"Idle Timeout: {attr.get('Value', 'Unknown')} seconds\n" 63 | if attr.get('Key') == 'routing.http2.enabled': 64 | formatted += f"HTTP/2 Enabled: {attr.get('Value', 'Unknown')}\n" 65 | 66 | elif lb_type == 'network': 67 | formatted += f"IP Address Type: {lb.get('IpAddressType', 'Unknown')}\n" 68 | 69 | elif lb_type == 'classic': 70 | if 'ListenerDescriptions' in lb: 71 | listeners = lb.get('ListenerDescriptions', []) 72 | if listeners: 73 | formatted += "\nListeners:\n" 74 | for listener in listeners: 75 | l_conf = listener.get('Listener', {}) 76 | formatted += f" {l_conf.get('Protocol', 'Unknown')}:{l_conf.get('LoadBalancerPort', 'Unknown')} -> {l_conf.get('InstanceProtocol', 'Unknown')}:{l_conf.get('InstancePort', 'Unknown')}\n" 77 | 78 | # Add ARN if available (ELBv2 only) 79 | if 'LoadBalancerArn' in lb: 80 | formatted += f"\nARN: {lb.get('LoadBalancerArn', 'Unknown')}\n" 81 | 82 | return formatted 83 | 84 | 85 | def format_target_group(tg: Dict[str, Any]) -> str: 86 | """Format a target group into a readable string. 
87 | 88 | Args: 89 | tg: Target group data dictionary 90 | 91 | Returns: 92 | Formatted string representation of the target group 93 | """ 94 | formatted = f""" 95 | Target Group: {tg.get('TargetGroupName', 'Unknown')} 96 | ARN: {tg.get('TargetGroupArn', 'Unknown')} 97 | Protocol: {tg.get('Protocol', 'Unknown')} 98 | Port: {tg.get('Port', 'Unknown')} 99 | Target Type: {tg.get('TargetType', 'Unknown')} 100 | VPC: {tg.get('VpcId', 'Unknown')} 101 | Health Check: 102 | Protocol: {tg.get('HealthCheckProtocol', 'Unknown')} 103 | Port: {tg.get('HealthCheckPort', 'Unknown')} 104 | Path: {tg.get('HealthCheckPath', 'Unknown')} 105 | Interval: {tg.get('HealthCheckIntervalSeconds', 'Unknown')} 106 | Timeout: {tg.get('HealthCheckTimeoutSeconds', 'Unknown')} 107 | Healthy Threshold: {tg.get('HealthyThresholdCount', 'Unknown')} 108 | Unhealthy Threshold: {tg.get('UnhealthyThresholdCount', 'Unknown')} 109 | """ 110 | 111 | # Add load balancer ARNs if available 112 | if 'LoadBalancerArns' in tg and tg['LoadBalancerArns']: 113 | formatted += "\nAssociated Load Balancers:\n" 114 | for arn in tg['LoadBalancerArns']: 115 | formatted += f" {arn}\n" 116 | 117 | return formatted 118 | 119 | 120 | def format_listener(listener: Dict[str, Any]) -> str: 121 | """Format a listener into a readable string. 122 | 123 | Args: 124 | listener: Listener data dictionary 125 | 126 | Returns: 127 | Formatted string representation of the listener 128 | """ 129 | formatted = f""" 130 | Listener ARN: {listener.get('ListenerArn', 'Unknown')} 131 | Protocol: {listener.get('Protocol', 'Unknown')} 132 | Port: {listener.get('Port', 'Unknown')} 133 | """ 134 | 135 | # Add default actions 136 | default_actions = listener.get('DefaultActions', []) 137 | if default_actions: 138 | formatted += "Default Actions:\n" 139 | for action in default_actions: 140 | action_type = action.get('Type', 'Unknown') 141 | formatted += f" Type: {action_type}\n" 142 | 143 | if action_type == 'forward': 144 | if 'TargetGroupArn' in action: 145 | formatted += f" Target Group: {action.get('TargetGroupArn', 'Unknown')}\n" 146 | elif 'ForwardConfig' in action: 147 | tg_configs = action.get('ForwardConfig', {}).get('TargetGroups', []) 148 | for tg_config in tg_configs: 149 | weight = tg_config.get('Weight', 1) 150 | formatted += f" Target Group: {tg_config.get('TargetGroupArn', 'Unknown')} (Weight: {weight})\n" 151 | 152 | elif action_type == 'redirect': 153 | redirect = action.get('RedirectConfig', {}) 154 | formatted += f" Redirect: {redirect.get('Protocol', 'Unknown')}://{redirect.get('Host', 'Unknown')}:{redirect.get('Port', 'Unknown')}{redirect.get('Path', 'Unknown')}\n" 155 | formatted += f" Status Code: {redirect.get('StatusCode', 'Unknown')}\n" 156 | 157 | elif action_type == 'fixed-response': 158 | fixed = action.get('FixedResponseConfig', {}) 159 | formatted += f" Status Code: {fixed.get('StatusCode', 'Unknown')}\n" 160 | formatted += f" Content Type: {fixed.get('ContentType', 'Unknown')}\n" 161 | 162 | # Add SSL policy if present 163 | if 'SslPolicy' in listener: 164 | formatted += f"SSL Policy: {listener.get('SslPolicy', 'N/A')}\n" 165 | 166 | # Add certificates if present 167 | certificates = listener.get('Certificates', []) 168 | if certificates: 169 | formatted += "Certificates:\n" 170 | for cert in certificates: 171 | formatted += f" {cert.get('CertificateArn', 'Unknown')}\n" 172 | 173 | return formatted 174 | 175 | 176 | def format_target_health(target_health: List[Dict[str, Any]]) -> str: 177 | """Format target health descriptions into a readable 
string. 178 | 179 | Args: 180 | target_health: List of target health descriptions 181 | 182 | Returns: 183 | Formatted string representation of target health 184 | """ 185 | if not target_health: 186 | return "No targets registered" 187 | 188 | formatted = "\nTarget Health:\n" 189 | 190 | for th in target_health: 191 | target = th.get('Target', {}) 192 | health = th.get('TargetHealth', {}) 193 | 194 | target_id = target.get('Id', 'Unknown') 195 | port = target.get('Port', 'Unknown') 196 | health_state = health.get('State', 'Unknown') 197 | reason = health.get('Reason', '') 198 | description = health.get('Description', '') 199 | 200 | formatted += f" {target_id}:{port} - {health_state}" 201 | if reason: 202 | formatted += f" ({reason})" 203 | if description: 204 | formatted += f": {description}" 205 | formatted += "\n" 206 | 207 | return formatted 208 | 209 | 210 | def format_load_balancer_summary(lb: Dict[str, Any]) -> str: 211 | """Format a load balancer into a concise summary string. 212 | 213 | Args: 214 | lb: Load balancer data dictionary 215 | 216 | Returns: 217 | Formatted summary string representation of the load balancer 218 | """ 219 | lb_type = lb.get('Type', 'Unknown').lower() 220 | 221 | formatted = f"{lb.get('LoadBalancerName', 'Unknown')} " 222 | formatted += f"({lb_type.upper()}) - " 223 | formatted += f"{lb.get('DNSName', 'Unknown')} - " 224 | 225 | # Add state 226 | if isinstance(lb.get('State'), dict): 227 | formatted += f"{lb.get('State', {}).get('Code', 'Unknown')} - " 228 | else: 229 | formatted += f"{lb.get('State', 'Unknown')} - " 230 | 231 | # Add VPC if available 232 | if 'VpcId' in lb: 233 | formatted += f"VPC: {lb.get('VpcId', 'Unknown')}" 234 | 235 | return formatted -------------------------------------------------------------------------------- /aws_security_mcp/formatters/org_formatter.py: -------------------------------------------------------------------------------- 1 | """Organizations formatter module for AWS Security MCP. 2 | 3 | This module provides functions to format AWS Organizations information 4 | for better readability and security assessment. 5 | """ 6 | 7 | import logging 8 | from typing import Any, Dict, List, Optional 9 | from datetime import datetime 10 | 11 | # Configure logging 12 | logger = logging.getLogger(__name__) 13 | 14 | def format_organization_simple(org_info: Dict[str, Any]) -> Dict[str, Any]: 15 | """Format organization information into a simplified representation. 16 | 17 | Args: 18 | org_info: Raw organization data from AWS 19 | 20 | Returns: 21 | Dict containing simplified organization representation 22 | """ 23 | try: 24 | return { 25 | 'id': org_info.get('Organization', {}).get('Id'), 26 | 'arn': org_info.get('Organization', {}).get('Arn'), 27 | 'feature_set': org_info.get('Organization', {}).get('FeatureSet'), 28 | 'master_account_id': org_info.get('Organization', {}).get('MasterAccountId'), 29 | 'master_account_email': org_info.get('Organization', {}).get('MasterAccountEmail'), 30 | 'available_policy_types': [ 31 | { 32 | 'type': policy_type.get('Type'), 33 | 'status': policy_type.get('Status') 34 | } 35 | for policy_type in org_info.get('Organization', {}).get('AvailablePolicyTypes', []) 36 | ] 37 | } 38 | except Exception as e: 39 | logger.error(f"Error formatting organization info: {str(e)}") 40 | return org_info # Return original data if formatting fails 41 | 42 | def format_account_simple(account: Dict[str, Any]) -> Dict[str, Any]: 43 | """Format an account into a simplified representation. 
44 | 45 | Args: 46 | account: Raw account data from AWS 47 | 48 | Returns: 49 | Dict containing simplified account representation 50 | """ 51 | try: 52 | return { 53 | 'id': account.get('Id'), 54 | 'arn': account.get('Arn'), 55 | 'name': account.get('Name'), 56 | 'email': account.get('Email'), 57 | 'status': account.get('Status'), 58 | 'joined_method': account.get('JoinedMethod'), 59 | 'joined_timestamp': account.get('JoinedTimestamp').isoformat() if account.get('JoinedTimestamp') else None 60 | } 61 | except Exception as e: 62 | logger.error(f"Error formatting account info: {str(e)}") 63 | return account # Return original data if formatting fails 64 | 65 | def format_policy_simple(policy: Dict[str, Any]) -> Dict[str, Any]: 66 | """Format a policy into a simplified representation. 67 | 68 | Args: 69 | policy: Raw policy data from AWS 70 | 71 | Returns: 72 | Dict containing simplified policy representation 73 | """ 74 | try: 75 | return { 76 | 'id': policy.get('Id'), 77 | 'arn': policy.get('Arn'), 78 | 'name': policy.get('Name'), 79 | 'description': policy.get('Description'), 80 | 'type': policy.get('Type'), 81 | 'aws_managed': policy.get('AwsManaged', False) 82 | } 83 | except Exception as e: 84 | logger.error(f"Error formatting policy info: {str(e)}") 85 | return policy # Return original data if formatting fails 86 | 87 | def format_policy_detail(policy: Dict[str, Any]) -> Dict[str, Any]: 88 | """Format detailed policy information. 89 | 90 | Args: 91 | policy: Raw policy data from AWS 92 | 93 | Returns: 94 | Dict containing formatted policy details 95 | """ 96 | try: 97 | formatted = format_policy_simple(policy) 98 | 99 | # Add content if available 100 | content = policy.get('Content') 101 | if content: 102 | try: 103 | # Content is stored as a JSON string 104 | import json 105 | formatted['content'] = json.loads(content) 106 | except Exception as e: 107 | logger.warning(f"Error parsing policy content as JSON: {str(e)}") 108 | formatted['content'] = content 109 | 110 | return formatted 111 | except Exception as e: 112 | logger.error(f"Error formatting policy details: {str(e)}") 113 | return policy # Return original data if formatting fails 114 | 115 | def format_policy_target(target: Dict[str, Any]) -> Dict[str, Any]: 116 | """Format a policy target into a simplified representation. 117 | 118 | Args: 119 | target: Raw target data from AWS 120 | 121 | Returns: 122 | Dict containing simplified target representation 123 | """ 124 | try: 125 | return { 126 | 'target_id': target.get('TargetId'), 127 | 'arn': target.get('Arn'), 128 | 'name': target.get('Name'), 129 | 'type': target.get('Type') 130 | } 131 | except Exception as e: 132 | logger.error(f"Error formatting policy target: {str(e)}") 133 | return target # Return original data if formatting fails 134 | 135 | def format_policy_with_targets(policy: Dict[str, Any], targets: List[Dict[str, Any]]) -> Dict[str, Any]: 136 | """Format a policy with its targets. 
137 | 138 | Args: 139 | policy: Raw policy data from AWS 140 | targets: List of targets the policy is attached to 141 | 142 | Returns: 143 | Dict containing policy with targets 144 | """ 145 | try: 146 | formatted_policy = format_policy_detail(policy) 147 | formatted_policy['targets'] = [format_policy_target(target) for target in targets] 148 | return formatted_policy 149 | except Exception as e: 150 | logger.error(f"Error formatting policy with targets: {str(e)}") 151 | return policy # Return original data if formatting fails 152 | 153 | def format_org_hierarchy(hierarchy: Dict[str, Any]) -> Dict[str, Any]: 154 | """Format the organizational hierarchy for better readability. 155 | 156 | Args: 157 | hierarchy: Raw hierarchy data 158 | 159 | Returns: 160 | Dict containing formatted hierarchy 161 | """ 162 | try: 163 | if not hierarchy: 164 | return {} 165 | 166 | formatted = { 167 | 'id': hierarchy.get('Id'), 168 | 'name': hierarchy.get('Name'), 169 | 'type': hierarchy.get('Type'), 170 | 'accounts': [format_account_simple(account) for account in hierarchy.get('Accounts', [])], 171 | 'organizational_units': [] 172 | } 173 | 174 | # Format child OUs recursively 175 | for ou in hierarchy.get('OrganizationalUnits', []): 176 | formatted_ou = format_org_hierarchy(ou) 177 | formatted['organizational_units'].append(formatted_ou) 178 | 179 | return formatted 180 | except Exception as e: 181 | logger.error(f"Error formatting organizational hierarchy: {str(e)}") 182 | return hierarchy # Return original data if formatting fails 183 | 184 | def format_effective_policies(effective_policies: Dict[str, List[Dict[str, Any]]]) -> Dict[str, Any]: 185 | """Format effective policies for better readability. 186 | 187 | Args: 188 | effective_policies: Raw effective policies data 189 | 190 | Returns: 191 | Dict containing formatted effective policies 192 | """ 193 | try: 194 | formatted = {} 195 | 196 | for policy_type, policies in effective_policies.items(): 197 | formatted_type = policy_type.replace('_', ' ').title() 198 | formatted[formatted_type] = [] 199 | 200 | for policy in policies: 201 | formatted_policy = { 202 | 'id': policy.get('Id'), 203 | 'arn': policy.get('Arn'), 204 | 'name': policy.get('Name', 'Unknown') 205 | } 206 | 207 | # Parse policy content if available 208 | content = policy.get('Content') 209 | if content: 210 | try: 211 | import json 212 | formatted_policy['content'] = json.loads(content) 213 | except Exception as e: 214 | logger.warning(f"Error parsing policy content as JSON: {str(e)}") 215 | formatted_policy['content'] = content 216 | 217 | formatted[formatted_type].append(formatted_policy) 218 | 219 | return formatted 220 | except Exception as e: 221 | logger.error(f"Error formatting effective policies: {str(e)}") 222 | return effective_policies # Return original data if formatting fails -------------------------------------------------------------------------------- /aws_security_mcp/formatters/resource_tagging.py: -------------------------------------------------------------------------------- 1 | """ 2 | Formatter module for AWS Resource Groups Tagging API responses. 3 | 4 | This module provides functions to format the responses from 5 | ResourceGroupsTaggingAPI into a more readable format. 6 | """ 7 | from typing import Dict, List, Any, Optional 8 | from collections import defaultdict 9 | 10 | 11 | def format_resource_details(resource: Dict[str, Any]) -> Dict[str, Any]: 12 | """ 13 | Format a single resource from ResourceGroupsTaggingAPI. 
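# --- Usage example: format_org_hierarchy ---------------------------------------
# A small sketch of the recursive hierarchy formatter, assuming the package is
# importable; the nested root/OU structure below is hypothetical sample data.
from aws_security_mcp.formatters.org_formatter import format_org_hierarchy

sample_hierarchy = {
    "Id": "r-exampleroot",
    "Name": "Root",
    "Type": "ROOT",
    "Accounts": [{"Id": "111111111111", "Name": "security-tooling", "Status": "ACTIVE"}],
    "OrganizationalUnits": [
        {
            "Id": "ou-example-workloads",
            "Name": "Workloads",
            "Type": "ORGANIZATIONAL_UNIT",
            "Accounts": [{"Id": "222222222222", "Name": "prod", "Status": "ACTIVE"}],
            "OrganizationalUnits": [],
        }
    ],
}

tree = format_org_hierarchy(sample_hierarchy)
print(tree["organizational_units"][0]["accounts"][0]["id"])  # 222222222222
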
14 | 15 | Args: 16 | resource: Resource information from ResourceGroupsTaggingAPI. 17 | 18 | Returns: 19 | Formatted resource information. 20 | """ 21 | arn = resource.get('ResourceARN', '') 22 | tags = resource.get('Tags', []) 23 | 24 | # Extract resource type and name from ARN 25 | resource_parts = arn.split(':') 26 | resource_type = None 27 | resource_name = None 28 | 29 | if len(resource_parts) >= 6: 30 | # Example ARN: arn:aws:ec2:us-west-2:123456789012:instance/i-1234567890abcdef0 31 | service = resource_parts[2] 32 | 33 | # Handle special cases where the resource type information is in a different format 34 | if service == 'ec2': 35 | if '/subnet/' in arn: 36 | resource_type = 'subnet' 37 | elif '/vpc/' in arn: 38 | resource_type = 'vpc' 39 | elif '/security-group/' in arn: 40 | resource_type = 'security-group' 41 | elif '/instance/' in arn: 42 | resource_type = 'instance' 43 | elif '/volume/' in arn: 44 | resource_type = 'volume' 45 | else: 46 | resource_type = resource_parts[5].split('/')[0] if '/' in resource_parts[5] else resource_parts[5] 47 | 48 | resource_name = arn.split('/')[-1] if '/' in arn else None 49 | elif service == 's3': 50 | resource_type = 'bucket' 51 | resource_name = resource_parts[5] 52 | elif service == 'dynamodb': 53 | resource_type = resource_parts[5].split('/')[0] if '/' in resource_parts[5] else resource_parts[5] 54 | resource_name = arn.split('/')[-1] if '/' in arn else None 55 | elif service == 'lambda': 56 | resource_type = 'function' 57 | resource_name = arn.split(':')[-1] if ':' in arn else None 58 | else: 59 | resource_type = resource_parts[5].split('/')[0] if '/' in resource_parts[5] else resource_parts[5] 60 | resource_name = arn.split('/')[-1] if '/' in arn else None 61 | 62 | # Format tags as a dictionary 63 | tags_dict = {tag.get('Key', ''): tag.get('Value', '') for tag in tags} 64 | 65 | return { 66 | 'ResourceARN': arn, 67 | 'ResourceType': resource_type, 68 | 'ResourceName': resource_name, 69 | 'Service': resource_parts[2] if len(resource_parts) >= 3 else None, 70 | 'Region': resource_parts[3] if len(resource_parts) >= 4 else None, 71 | 'AccountId': resource_parts[4] if len(resource_parts) >= 5 else None, 72 | 'Tags': tags_dict 73 | } 74 | 75 | 76 | def format_resources_response(response: Dict[str, Any]) -> Dict[str, Any]: 77 | """ 78 | Format the response from get_resources_by_tags into a standardized format. 
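# --- Usage example: format_resource_details ------------------------------------
# A minimal sketch of the ARN parsing done by format_resource_details(),
# assuming the package is importable; the ARN and tags are hypothetical.
from aws_security_mcp.formatters.resource_tagging import format_resource_details

sample_mapping = {
    "ResourceARN": "arn:aws:ec2:us-west-2:123456789012:instance/i-1234567890abcdef0",
    "Tags": [{"Key": "Environment", "Value": "Production"}],
}

details = format_resource_details(sample_mapping)
# ResourceType and ResourceName are derived from the ARN; tags become a plain dict.
print(details["ResourceType"], details["ResourceName"], details["Tags"]["Environment"])
# instance i-1234567890abcdef0 Production
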
79 | 80 | Args: 81 | response: The response from the get_resources_by_tags service function 82 | 83 | Returns: 84 | A dictionary with the resources and pagination information 85 | """ 86 | # Extract resource mappings and transform them into a more user-friendly format 87 | resources = [] 88 | 89 | for resource in response.get("resources", []): 90 | formatted_resource = format_resource_details(resource) 91 | 92 | resources.append({ 93 | "arn": formatted_resource["ResourceARN"], 94 | "resource_type": f"{formatted_resource['Service']}:{formatted_resource['ResourceType']}" 95 | if formatted_resource['Service'] and formatted_resource['ResourceType'] else "", 96 | "resource_name": formatted_resource["ResourceName"], 97 | "region": formatted_resource["Region"], 98 | "account_id": formatted_resource["AccountId"], 99 | "tags": formatted_resource["Tags"] 100 | }) 101 | 102 | result = { 103 | "resources": resources, 104 | "resource_count": len(resources), 105 | "next_token": response.get("next_token") 106 | } 107 | 108 | # Include any error information 109 | if "error" in response: 110 | result["error"] = response["error"] 111 | 112 | return result 113 | 114 | 115 | def format_resources_by_type(response: Dict[str, Any], tag_key: str, tag_value: Optional[str] = None) -> Dict[str, Any]: 116 | """ 117 | Format the response from get_resources_by_tags into a grouped by resource type format. 118 | 119 | Args: 120 | response: The response from the get_resources_by_tags service function 121 | tag_key: The tag key used in the search 122 | tag_value: The optional tag value used in the search 123 | 124 | Returns: 125 | A dictionary with resources grouped by their type, with the format: 126 | {"key": "", "value": "", "resources": {"": []}} 127 | """ 128 | # Group resources by service/type (limiting to 25 resources per service) 129 | grouped_resources = defaultdict(list) 130 | 131 | # Keep count of all resources by service type 132 | resource_counts = defaultdict(int) 133 | 134 | # Services for which we'll only provide counts, not full ARN lists 135 | count_only_services = { 136 | 'cloudwatch', # CloudWatch alarms aren't relevant for security 137 | 'ecs', # ECS tasks aren't needed 138 | 'application-autoscaling', # Auto-scaling resources aren't needed 139 | 'batch' # Batch jobs aren't needed 140 | } 141 | 142 | # Maximum number of ARNs to include per service 143 | MAX_RESOURCES_PER_SERVICE = 25 144 | 145 | # Process the raw response from AWS 146 | for resource in response.get("resources", []): 147 | # The ResourceTagMappingList contains the ARN directly 148 | arn = resource.get('ResourceARN', '') 149 | if not arn: 150 | continue 151 | 152 | arn_parts = arn.split(':') 153 | if len(arn_parts) >= 3: 154 | # Get the service name (application-autoscaling, ec2, etc.) 
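# --- Usage example: format_resources_by_type -----------------------------------
# A minimal sketch of the grouped formatter, assuming the package is importable;
# the tagging-API style response below is hypothetical sample data.
from aws_security_mcp.formatters.resource_tagging import format_resources_by_type

raw_response = {
    "resources": [
        {"ResourceARN": "arn:aws:ec2:us-east-1:123456789012:instance/i-0abc"},
        {"ResourceARN": "arn:aws:s3:::prod-logs"},
        {"ResourceARN": "arn:aws:cloudwatch:us-east-1:123456789012:alarm:HighCPU"},
    ],
    "next_token": None,
}

grouped = format_resources_by_type(raw_response, tag_key="Environment", tag_value="Production")
# cloudwatch is a count-only service, so it shows up in resource_counts but not
# in the per-service ARN lists.
print(sorted(grouped["resources"]))              # ['ec2', 's3']
print(grouped["resource_counts"]["cloudwatch"])  # 1
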
155 | service = arn_parts[2] 156 | 157 | # Increment the count for this service 158 | resource_counts[service] += 1 159 | 160 | # If this is a service where we only want to show counts, don't add to ARN list 161 | if service.lower() in count_only_services: 162 | continue 163 | 164 | # Only add the ARN if we haven't reached the limit for this service 165 | if len(grouped_resources[service]) < MAX_RESOURCES_PER_SERVICE: 166 | grouped_resources[service].append(arn) 167 | 168 | # Build the final simplified result structure 169 | result = { 170 | "key": tag_key, 171 | "value": tag_value, 172 | "resources": dict(grouped_resources), 173 | "resource_count": len(response.get("resources", [])), 174 | } 175 | 176 | # Add count information for all services 177 | result["resource_counts"] = { 178 | service: count for service, count in resource_counts.items() 179 | if count > 0 180 | } 181 | 182 | # Add next_token only if present 183 | if response.get("next_token"): 184 | result["next_token"] = response.get("next_token") 185 | 186 | # Include any error information 187 | if "error" in response: 188 | result["error"] = response["error"] 189 | 190 | return result 191 | 192 | 193 | def format_tag_keys_response(response: Dict[str, Any]) -> Dict[str, Any]: 194 | """ 195 | Format the response from get_tag_keys into a standardized format. 196 | 197 | Args: 198 | response: The response from the get_tag_keys service function 199 | 200 | Returns: 201 | A dictionary with the tag keys and pagination information 202 | """ 203 | result = { 204 | "tag_keys": response.get("tag_keys", []), 205 | "tag_key_count": response.get("tag_key_count", 0), 206 | "next_token": response.get("next_token") 207 | } 208 | 209 | # Include any error information 210 | if "error" in response: 211 | result["error"] = response["error"] 212 | 213 | return result 214 | 215 | 216 | def format_tag_values_response(response: Dict[str, Any]) -> Dict[str, Any]: 217 | """ 218 | Format the response from get_tag_values into a standardized format. 219 | 220 | Args: 221 | response: The response from the get_tag_values service function 222 | 223 | Returns: 224 | A dictionary with the tag key, values, and pagination information 225 | """ 226 | result = { 227 | "tag_key": response.get("tag_key", ""), 228 | "tag_values": response.get("tag_values", []), 229 | "tag_value_count": response.get("tag_value_count", 0), 230 | "next_token": response.get("next_token") 231 | } 232 | 233 | # Include any error information 234 | if "error" in response: 235 | result["error"] = response["error"] 236 | 237 | return result -------------------------------------------------------------------------------- /aws_security_mcp/formatters/route53.py: -------------------------------------------------------------------------------- 1 | """Formatters for Route53 resources.""" 2 | 3 | import json 4 | from typing import Any, Dict, List, Optional, Union 5 | 6 | 7 | def format_hosted_zone(zone: Dict[str, Any]) -> str: 8 | """Format a Route53 hosted zone into a readable string. 
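# --- Usage example: tag key/value response formatters --------------------------
# A minimal sketch of the tag-key and tag-value formatters, assuming the package
# is importable; the service-layer payloads below are hypothetical.
from aws_security_mcp.formatters.resource_tagging import (
    format_tag_keys_response,
    format_tag_values_response,
)

keys_payload = {"tag_keys": ["Environment", "Owner"], "tag_key_count": 2, "next_token": None}
values_payload = {"tag_key": "Environment", "tag_values": ["Production", "Staging"], "tag_value_count": 2}

print(format_tag_keys_response(keys_payload)["tag_key_count"])    # 2
print(format_tag_values_response(values_payload)["tag_values"])   # ['Production', 'Staging']
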
9 | 10 | Args: 11 | zone: Route53 hosted zone data dictionary 12 | 13 | Returns: 14 | Formatted string representation of the hosted zone 15 | """ 16 | return f""" 17 | Hosted Zone ID: {zone.get('Id', 'Unknown').replace('/hostedzone/', '')} 18 | Name: {zone.get('Name', 'Unknown')} 19 | Record Count: {zone.get('ResourceRecordSetCount', 'Unknown')} 20 | Private Zone: {'Yes' if zone.get('Config', {}).get('PrivateZone') else 'No'} 21 | """ 22 | 23 | 24 | def format_record_set(record: Dict[str, Any]) -> str: 25 | """Format a Route53 record set into a readable string. 26 | 27 | Args: 28 | record: Route53 record set data dictionary 29 | 30 | Returns: 31 | Formatted string representation of the record set 32 | """ 33 | resource_records = record.get('ResourceRecords', []) 34 | records_str = "\n ".join([f"{r.get('Value', 'Unknown')}" for r in resource_records]) 35 | if not records_str: 36 | if record.get('AliasTarget'): 37 | records_str = f"ALIAS -> {record.get('AliasTarget', {}).get('DNSName', 'Unknown')}" 38 | else: 39 | records_str = "None" 40 | 41 | return f""" 42 | Name: {record.get('Name', 'Unknown')} 43 | Type: {record.get('Type', 'Unknown')} 44 | TTL: {record.get('TTL', 'N/A')} 45 | Records: 46 | {records_str} 47 | """ 48 | 49 | 50 | def format_health_check(health_check: Dict[str, Any]) -> str: 51 | """Format a Route53 health check into a readable string. 52 | 53 | Args: 54 | health_check: Route53 health check data dictionary 55 | 56 | Returns: 57 | Formatted string representation of the health check 58 | """ 59 | config = health_check.get('HealthCheckConfig', {}) 60 | 61 | return f""" 62 | Health Check ID: {health_check.get('Id', 'Unknown')} 63 | Type: {config.get('Type', 'Unknown')} 64 | Target: {config.get('IPAddress', config.get('FullyQualifiedDomainName', 'Unknown'))} 65 | Port: {config.get('Port', 'Unknown')} 66 | Resource Path: {config.get('ResourcePath', 'N/A')} 67 | Interval: {config.get('RequestInterval', 'Unknown')} seconds 68 | Failure Threshold: {config.get('FailureThreshold', 'Unknown')} 69 | """ -------------------------------------------------------------------------------- /aws_security_mcp/formatters/shield.py: -------------------------------------------------------------------------------- 1 | """Formatters for AWS Shield resources. 2 | 3 | This module provides JSON-based formatting functions for AWS Shield resources 4 | to make them more suitable for API responses and LLM consumption. 5 | """ 6 | 7 | from typing import Any, Dict, List, Optional, Union 8 | from datetime import datetime 9 | 10 | 11 | def format_shield_subscription_json(subscription: Dict[str, Any]) -> Dict[str, Any]: 12 | """Format a Shield Advanced subscription into structured data for JSON output. 
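# --- Usage example: format_record_set ------------------------------------------
# A minimal sketch of the Route53 record formatter, assuming the package is
# importable; the record set below is hypothetical sample data.
from aws_security_mcp.formatters.route53 import format_record_set

sample_record = {
    "Name": "app.example.com.",
    "Type": "A",
    "TTL": 300,
    "ResourceRecords": [{"Value": "203.0.113.10"}, {"Value": "203.0.113.11"}],
}

# Alias records have no ResourceRecords; the formatter falls back to
# "ALIAS -> <target DNS name>" when an AliasTarget block is present.
print(format_record_set(sample_record))
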
13 | 14 | Args: 15 | subscription: Shield subscription data dictionary 16 | 17 | Returns: 18 | Dictionary with formatted Shield subscription data 19 | """ 20 | # Check if there is a subscription 21 | has_subscription = bool(subscription) 22 | 23 | # Format start time if present 24 | start_time = subscription.get('StartTime') 25 | if isinstance(start_time, datetime): 26 | start_time = start_time.isoformat() 27 | 28 | # Format end time if present 29 | end_time = subscription.get('EndTime') 30 | if isinstance(end_time, datetime): 31 | end_time = end_time.isoformat() 32 | 33 | return { 34 | "has_subscription": has_subscription, 35 | "subscription_active": subscription.get('SubscriptionArn') is not None, 36 | "time_commitment_in_seconds": subscription.get('TimeCommitmentInSeconds'), 37 | "auto_renew": subscription.get('AutoRenew', False), 38 | "start_time": start_time, 39 | "end_time": end_time, 40 | "limits": subscription.get('Limits', []) 41 | } 42 | 43 | 44 | def format_shield_protection_json(protection: Dict[str, Any]) -> Dict[str, Any]: 45 | """Format a Shield protection into structured data for JSON output. 46 | 47 | Args: 48 | protection: Shield protection data dictionary 49 | 50 | Returns: 51 | Dictionary with formatted Shield protection data 52 | """ 53 | # Extract resource information from ARN if present 54 | resource_arn = protection.get('ResourceArn', '') 55 | resource_info = parse_resource_arn(resource_arn) 56 | 57 | return { 58 | "id": protection.get('Id', 'Unknown'), 59 | "name": protection.get('Name', 'Unknown'), 60 | "resource_arn": resource_arn, 61 | "protection_arn": protection.get('ProtectionArn', ''), 62 | "resource": resource_info, 63 | "application_layer_automatic_response": protection.get('ApplicationLayerAutomaticResponseConfiguration', {}).get('Status', 'DISABLED'), 64 | "health_check_ids": protection.get('HealthCheckIds', []) 65 | } 66 | 67 | 68 | def format_shield_protected_resource_json(resource: Dict[str, Any]) -> Dict[str, Any]: 69 | """Format a Shield protected resource into structured data for JSON output. 70 | 71 | Args: 72 | resource: Shield protected resource data dictionary 73 | 74 | Returns: 75 | Dictionary with formatted Shield protected resource data 76 | """ 77 | # Extract resource information from ARN 78 | resource_arn = resource.get('ResourceArn', '') 79 | resource_info = parse_resource_arn(resource_arn) 80 | 81 | return { 82 | "resource_arn": resource_arn, 83 | "protection_id": resource.get('ProtectionId'), 84 | "resource_type": resource_info.get('service'), 85 | "resource_id": resource_info.get('resource_id'), 86 | "region": resource_info.get('region'), 87 | "account_id": resource_info.get('account_id') 88 | } 89 | 90 | 91 | def format_shield_attack_json(attack: Dict[str, Any]) -> Dict[str, Any]: 92 | """Format a Shield attack into structured data for JSON output. 
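# --- Usage example: format_shield_protected_resource_json ----------------------
# A minimal sketch, assuming the package is importable; the protected-resource
# record below is hypothetical sample data.
from aws_security_mcp.formatters.shield import format_shield_protected_resource_json

protected = {
    "ResourceArn": "arn:aws:cloudfront::123456789012:distribution/E2EXAMPLE",
    "ProtectionId": "0123abcd-4567-89ef-0123-456789abcdef",
}

summary = format_shield_protected_resource_json(protected)
print(summary["resource_type"], summary["resource_id"])  # cloudfront E2EXAMPLE
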
93 | 94 | Args: 95 | attack: Shield attack data dictionary 96 | 97 | Returns: 98 | Dictionary with formatted Shield attack data 99 | """ 100 | # Format start time 101 | start_time = attack.get('StartTime') 102 | if isinstance(start_time, datetime): 103 | start_time = start_time.isoformat() 104 | 105 | # Format end time if present 106 | end_time = attack.get('EndTime') 107 | if isinstance(end_time, datetime): 108 | end_time = end_time.isoformat() 109 | 110 | # Extract resource information 111 | resource_arn = attack.get('ResourceArn', '') 112 | resource_info = parse_resource_arn(resource_arn) 113 | 114 | # Format attack vectors 115 | attack_vectors = [] 116 | for vector in attack.get('AttackVectors', []): 117 | attack_vectors.append({ 118 | "vector_type": vector.get('VectorType', 'Unknown'), 119 | "vector_counters": vector.get('VectorCounters', []) 120 | }) 121 | 122 | # Format mitigations 123 | mitigations = [] 124 | for mitigation in attack.get('Mitigations', []): 125 | mitigations.append({ 126 | "mitigation_name": mitigation.get('MitigationName', 'Unknown') 127 | }) 128 | 129 | return { 130 | "id": attack.get('Id', 'Unknown'), 131 | "resource_arn": resource_arn, 132 | "resource": resource_info, 133 | "sub_resources": attack.get('SubResources', []), 134 | "start_time": start_time, 135 | "end_time": end_time, 136 | "attack_vectors": attack_vectors, 137 | "attack_vectors_count": len(attack_vectors), 138 | "mitigations": mitigations, 139 | "mitigations_count": len(mitigations) 140 | } 141 | 142 | 143 | def format_shield_attack_summary_json(attack: Dict[str, Any]) -> Dict[str, Any]: 144 | """Format a Shield attack summary into structured data for JSON output. 145 | 146 | Args: 147 | attack: Shield attack summary data dictionary 148 | 149 | Returns: 150 | Dictionary with formatted Shield attack summary data 151 | """ 152 | # Format start time 153 | start_time = attack.get('StartTime') 154 | if isinstance(start_time, datetime): 155 | start_time = start_time.isoformat() 156 | 157 | # Format end time if present 158 | end_time = attack.get('EndTime') 159 | if isinstance(end_time, datetime): 160 | end_time = end_time.isoformat() 161 | 162 | # Extract resource information 163 | resource_arn = attack.get('ResourceArn', '') 164 | resource_info = parse_resource_arn(resource_arn) 165 | 166 | # Determine attack status 167 | attack_status = 'Stopped' if end_time else 'In Progress' 168 | 169 | return { 170 | "id": attack.get('AttackId', 'Unknown'), 171 | "resource_arn": resource_arn, 172 | "resource_type": resource_info.get('service'), 173 | "resource_id": resource_info.get('resource_id'), 174 | "start_time": start_time, 175 | "end_time": end_time, 176 | "attack_vectors": attack.get('AttackVectors', []), 177 | "attack_status": attack_status 178 | } 179 | 180 | 181 | def format_shield_drt_access_json(drt_access: Dict[str, Any]) -> Dict[str, Any]: 182 | """Format Shield DRT access information into structured data for JSON output. 
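# --- Usage example: format_shield_attack_summary_json --------------------------
# A minimal sketch of the attack summary formatter, assuming the package is
# importable; the attack record below is hypothetical sample data.
from datetime import datetime, timezone

from aws_security_mcp.formatters.shield import format_shield_attack_summary_json

attack = {
    "AttackId": "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111",
    "ResourceArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/web-alb/50dc6c495c0c9188",
    "StartTime": datetime(2024, 5, 1, 12, 0, tzinfo=timezone.utc),
    # No EndTime, so the formatter reports the attack as still "In Progress".
    "AttackVectors": [{"VectorType": "UDP_REFLECTION"}],
}

summary = format_shield_attack_summary_json(attack)
print(summary["attack_status"], summary["start_time"])  # In Progress 2024-05-01T12:00:00+00:00
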
183 | 184 | Args: 185 | drt_access: Shield DRT access data dictionary 186 | 187 | Returns: 188 | Dictionary with formatted Shield DRT access data 189 | """ 190 | has_role_access = bool(drt_access.get('role_arn')) 191 | has_log_access = bool(drt_access.get('log_bucket_list')) 192 | 193 | return { 194 | "drt_role_arn": drt_access.get('role_arn'), 195 | "drt_has_role_access": has_role_access, 196 | "drt_log_buckets": drt_access.get('log_bucket_list', []), 197 | "drt_has_log_access": has_log_access, 198 | "drt_access_configured": has_role_access or has_log_access 199 | } 200 | 201 | 202 | def format_shield_emergency_contacts_json(contacts: List[Dict[str, Any]]) -> Dict[str, Any]: 203 | """Format Shield emergency contacts into structured data for JSON output. 204 | 205 | Args: 206 | contacts: List of Shield emergency contact dictionaries 207 | 208 | Returns: 209 | Dictionary with formatted Shield emergency contacts data 210 | """ 211 | formatted_contacts = [] 212 | for contact in contacts: 213 | formatted_contacts.append({ 214 | "email_address": contact.get('EmailAddress', 'Unknown'), 215 | "phone_number": contact.get('PhoneNumber') 216 | }) 217 | 218 | return { 219 | "has_emergency_contacts": len(formatted_contacts) > 0, 220 | "contacts_count": len(formatted_contacts), 221 | "contacts": formatted_contacts 222 | } 223 | 224 | 225 | def parse_resource_arn(arn: str) -> Dict[str, str]: 226 | """Parse an AWS ARN to extract resource information. 227 | 228 | Args: 229 | arn: The AWS ARN to parse 230 | 231 | Returns: 232 | Dictionary with extracted resource information 233 | """ 234 | if not arn: 235 | return { 236 | "service": "unknown", 237 | "region": "unknown", 238 | "account_id": "unknown", 239 | "resource_type": "unknown", 240 | "resource_id": "unknown" 241 | } 242 | 243 | # Split ARN into components 244 | arn_parts = arn.split(':') 245 | 246 | # Extract basic information 247 | service = arn_parts[2] if len(arn_parts) > 2 else 'unknown' 248 | region = arn_parts[3] if len(arn_parts) > 3 else 'unknown' 249 | account_id = arn_parts[4] if len(arn_parts) > 4 else 'unknown' 250 | 251 | # Extract resource information 252 | resource_path = arn_parts[5] if len(arn_parts) > 5 else '' 253 | resource_parts = resource_path.split('/') 254 | 255 | resource_type = resource_parts[0] if resource_parts else 'unknown' 256 | resource_id = '/'.join(resource_parts[1:]) if len(resource_parts) > 1 else resource_parts[0] 257 | 258 | return { 259 | "service": service, 260 | "region": region, 261 | "account_id": account_id, 262 | "resource_type": resource_type, 263 | "resource_id": resource_id 264 | } -------------------------------------------------------------------------------- /aws_security_mcp/formatters/waf.py: -------------------------------------------------------------------------------- 1 | """Formatters for AWS WAF resources. 2 | 3 | This module provides JSON-based formatting functions for AWS WAF resources 4 | to make them more suitable for API responses and LLM consumption. 5 | """ 6 | 7 | from typing import Any, Dict, List, Optional, Union 8 | from datetime import datetime 9 | 10 | 11 | def format_waf_web_acl_json(web_acl: Dict[str, Any], scope: str = 'REGIONAL') -> Dict[str, Any]: 12 | """Format a WAF Web ACL into structured data for JSON output. 
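# --- Usage example: parse_resource_arn -----------------------------------------
# A minimal sketch of the ARN helper used throughout the Shield formatters,
# assuming the package is importable; the load balancer ARN is hypothetical.
from aws_security_mcp.formatters.shield import parse_resource_arn

arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/web-alb/50dc6c495c0c9188"
parsed = parse_resource_arn(arn)

print(parsed["service"])        # elasticloadbalancing
print(parsed["resource_type"])  # loadbalancer
print(parsed["resource_id"])    # app/web-alb/50dc6c495c0c9188
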
13 | 14 | Args: 15 | web_acl: WAF Web ACL data dictionary 16 | scope: The scope of the Web ACL ('REGIONAL' or 'CLOUDFRONT') 17 | 18 | Returns: 19 | Dictionary with formatted WAF Web ACL data 20 | """ 21 | # Get creation time and format it 22 | creation_time = web_acl.get('CreatedTime') 23 | if isinstance(creation_time, datetime): 24 | creation_time = creation_time.isoformat() 25 | 26 | # Format rules 27 | rules = [] 28 | for rule in web_acl.get('Rules', []): 29 | rules.append({ 30 | "name": rule.get('Name', 'Unknown'), 31 | "priority": rule.get('Priority'), 32 | "action": rule.get('Action', {}).get('Block') and 'Block' or rule.get('Action', {}).get('Allow') and 'Allow' or 'Count', 33 | "statement_type": get_statement_type(rule.get('Statement', {})), 34 | "visibility_config": { 35 | "sampled_requests_enabled": rule.get('VisibilityConfig', {}).get('SampledRequestsEnabled', False), 36 | "cloud_watch_metrics_enabled": rule.get('VisibilityConfig', {}).get('CloudWatchMetricsEnabled', False), 37 | "metric_name": rule.get('VisibilityConfig', {}).get('MetricName', '') 38 | } 39 | }) 40 | 41 | # Extract ACL details 42 | return { 43 | "id": web_acl.get('Id', 'Unknown'), 44 | "name": web_acl.get('Name', 'Unknown'), 45 | "description": web_acl.get('Description', ''), 46 | "arn": web_acl.get('ARN', 'Unknown'), 47 | "scope": scope, 48 | "capacity": web_acl.get('Capacity', 0), 49 | "default_action": web_acl.get('DefaultAction', {}).get('Block') and 'Block' or 'Allow', 50 | "rules_count": len(rules), 51 | "rules": rules, 52 | "visibility_config": { 53 | "sampled_requests_enabled": web_acl.get('VisibilityConfig', {}).get('SampledRequestsEnabled', False), 54 | "cloud_watch_metrics_enabled": web_acl.get('VisibilityConfig', {}).get('CloudWatchMetricsEnabled', False), 55 | "metric_name": web_acl.get('VisibilityConfig', {}).get('MetricName', '') 56 | }, 57 | "creation_time": creation_time, 58 | "last_modified_time": web_acl.get('LastModifiedTime').isoformat() if isinstance(web_acl.get('LastModifiedTime'), datetime) else web_acl.get('LastModifiedTime') 59 | } 60 | 61 | 62 | def format_waf_ip_set_json(ip_set: Dict[str, Any], scope: str = 'REGIONAL') -> Dict[str, Any]: 63 | """Format a WAF IP set into structured data for JSON output. 64 | 65 | Args: 66 | ip_set: WAF IP set data dictionary 67 | scope: The scope of the IP set ('REGIONAL' or 'CLOUDFRONT') 68 | 69 | Returns: 70 | Dictionary with formatted WAF IP set data 71 | """ 72 | # Get creation time and format it 73 | creation_time = ip_set.get('CreatedTime') 74 | if isinstance(creation_time, datetime): 75 | creation_time = creation_time.isoformat() 76 | 77 | return { 78 | "id": ip_set.get('Id', 'Unknown'), 79 | "name": ip_set.get('Name', 'Unknown'), 80 | "description": ip_set.get('Description', ''), 81 | "arn": ip_set.get('ARN', 'Unknown'), 82 | "scope": scope, 83 | "ip_address_version": ip_set.get('IPAddressVersion', 'Unknown'), 84 | "addresses": ip_set.get('Addresses', []), 85 | "addresses_count": len(ip_set.get('Addresses', [])), 86 | "creation_time": creation_time, 87 | "last_modified_time": ip_set.get('LastModifiedTime').isoformat() if isinstance(ip_set.get('LastModifiedTime'), datetime) else ip_set.get('LastModifiedTime') 88 | } 89 | 90 | 91 | def format_waf_rule_group_json(rule_group: Dict[str, Any], scope: str = 'REGIONAL') -> Dict[str, Any]: 92 | """Format a WAF rule group into structured data for JSON output. 
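# --- Usage example: format_waf_ip_set_json --------------------------------------
# A minimal sketch of the WAF IP set formatter, assuming the package is
# importable; the IP set below is hypothetical sample data.
from aws_security_mcp.formatters.waf import format_waf_ip_set_json

ip_set = {
    "Id": "1234a1a1-1111-2222-3333-example00000",
    "Name": "blocked-scanners",
    "ARN": "arn:aws:wafv2:us-east-1:123456789012:regional/ipset/blocked-scanners/1234a1a1",
    "IPAddressVersion": "IPV4",
    "Addresses": ["192.0.2.0/24", "198.51.100.44/32"],
}

formatted = format_waf_ip_set_json(ip_set, scope="REGIONAL")
print(formatted["addresses_count"], formatted["ip_address_version"])  # 2 IPV4
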
93 | 94 | Args: 95 | rule_group: WAF rule group data dictionary 96 | scope: The scope of the rule group ('REGIONAL' or 'CLOUDFRONT') 97 | 98 | Returns: 99 | Dictionary with formatted WAF rule group data 100 | """ 101 | # Get creation time and format it 102 | creation_time = rule_group.get('CreatedTime') 103 | if isinstance(creation_time, datetime): 104 | creation_time = creation_time.isoformat() 105 | 106 | # Format rules 107 | rules = [] 108 | for rule in rule_group.get('Rules', []): 109 | rules.append({ 110 | "name": rule.get('Name', 'Unknown'), 111 | "priority": rule.get('Priority'), 112 | "action": rule.get('Action', {}).get('Block') and 'Block' or rule.get('Action', {}).get('Allow') and 'Allow' or 'Count', 113 | "statement_type": get_statement_type(rule.get('Statement', {})), 114 | "visibility_config": { 115 | "sampled_requests_enabled": rule.get('VisibilityConfig', {}).get('SampledRequestsEnabled', False), 116 | "cloud_watch_metrics_enabled": rule.get('VisibilityConfig', {}).get('CloudWatchMetricsEnabled', False), 117 | "metric_name": rule.get('VisibilityConfig', {}).get('MetricName', '') 118 | } 119 | }) 120 | 121 | return { 122 | "id": rule_group.get('Id', 'Unknown'), 123 | "name": rule_group.get('Name', 'Unknown'), 124 | "description": rule_group.get('Description', ''), 125 | "arn": rule_group.get('ARN', 'Unknown'), 126 | "scope": scope, 127 | "capacity": rule_group.get('Capacity', 0), 128 | "rules_count": len(rules), 129 | "rules": rules, 130 | "visibility_config": { 131 | "sampled_requests_enabled": rule_group.get('VisibilityConfig', {}).get('SampledRequestsEnabled', False), 132 | "cloud_watch_metrics_enabled": rule_group.get('VisibilityConfig', {}).get('CloudWatchMetricsEnabled', False), 133 | "metric_name": rule_group.get('VisibilityConfig', {}).get('MetricName', '') 134 | }, 135 | "creation_time": creation_time, 136 | "last_modified_time": rule_group.get('LastModifiedTime').isoformat() if isinstance(rule_group.get('LastModifiedTime'), datetime) else rule_group.get('LastModifiedTime') 137 | } 138 | 139 | 140 | def format_waf_web_acl_summary_json(web_acl: Dict[str, Any], scope: str = 'REGIONAL') -> Dict[str, Any]: 141 | """Format a WAF Web ACL summary into structured data for JSON output. 142 | 143 | Args: 144 | web_acl: WAF Web ACL summary data dictionary 145 | scope: The scope of the Web ACL ('REGIONAL' or 'CLOUDFRONT') 146 | 147 | Returns: 148 | Dictionary with formatted WAF Web ACL summary data 149 | """ 150 | return { 151 | "id": web_acl.get('Id', 'Unknown'), 152 | "name": web_acl.get('Name', 'Unknown'), 153 | "description": web_acl.get('Description', ''), 154 | "arn": web_acl.get('ARN', 'Unknown'), 155 | "scope": scope, 156 | "lock_token": web_acl.get('LockToken', 'Unknown') 157 | } 158 | 159 | 160 | def format_waf_ip_set_summary_json(ip_set: Dict[str, Any], scope: str = 'REGIONAL') -> Dict[str, Any]: 161 | """Format a WAF IP set summary into structured data for JSON output. 
162 | 163 | Args: 164 | ip_set: WAF IP set summary data dictionary 165 | scope: The scope of the IP set ('REGIONAL' or 'CLOUDFRONT') 166 | 167 | Returns: 168 | Dictionary with formatted WAF IP set summary data 169 | """ 170 | return { 171 | "id": ip_set.get('Id', 'Unknown'), 172 | "name": ip_set.get('Name', 'Unknown'), 173 | "description": ip_set.get('Description', ''), 174 | "arn": ip_set.get('ARN', 'Unknown'), 175 | "scope": scope, 176 | "lock_token": ip_set.get('LockToken', 'Unknown') 177 | } 178 | 179 | 180 | def format_waf_rule_group_summary_json(rule_group: Dict[str, Any], scope: str = 'REGIONAL') -> Dict[str, Any]: 181 | """Format a WAF rule group summary into structured data for JSON output. 182 | 183 | Args: 184 | rule_group: WAF rule group summary data dictionary 185 | scope: The scope of the rule group ('REGIONAL' or 'CLOUDFRONT') 186 | 187 | Returns: 188 | Dictionary with formatted WAF rule group summary data 189 | """ 190 | return { 191 | "id": rule_group.get('Id', 'Unknown'), 192 | "name": rule_group.get('Name', 'Unknown'), 193 | "description": rule_group.get('Description', ''), 194 | "arn": rule_group.get('ARN', 'Unknown'), 195 | "scope": scope, 196 | "lock_token": rule_group.get('LockToken', 'Unknown') 197 | } 198 | 199 | 200 | def format_waf_resources_json(resource_arns: List[str], web_acl_arn: str) -> Dict[str, Any]: 201 | """Format a list of resource ARNs protected by a WAF Web ACL into structured data for JSON output. 202 | 203 | Args: 204 | resource_arns: List of resource ARNs protected by the Web ACL 205 | web_acl_arn: The ARN of the Web ACL 206 | 207 | Returns: 208 | Dictionary with formatted WAF resources data 209 | """ 210 | # Parse resource information from ARNs 211 | resources = [] 212 | for arn in resource_arns: 213 | arn_parts = arn.split(':') 214 | 215 | service = arn_parts[2] if len(arn_parts) > 2 else 'unknown' 216 | region = arn_parts[3] if len(arn_parts) > 3 else 'unknown' 217 | account_id = arn_parts[4] if len(arn_parts) > 4 else 'unknown' 218 | 219 | resource_parts = arn_parts[5].split('/') if len(arn_parts) > 5 else [] 220 | resource_type = resource_parts[0] if resource_parts else 'unknown' 221 | resource_id = '/'.join(resource_parts[1:]) if len(resource_parts) > 1 else '' 222 | 223 | resources.append({ 224 | "arn": arn, 225 | "service": service, 226 | "region": region, 227 | "account_id": account_id, 228 | "resource_type": resource_type, 229 | "resource_id": resource_id 230 | }) 231 | 232 | return { 233 | "web_acl_arn": web_acl_arn, 234 | "resource_count": len(resources), 235 | "resources": resources 236 | } 237 | 238 | 239 | def get_statement_type(statement: Dict[str, Any]) -> str: 240 | """Helper function to determine the type of a WAF rule statement. 
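# --- Usage example: format_waf_resources_json -----------------------------------
# A minimal sketch of the protected-resources formatter, assuming the package is
# importable; the Web ACL and resource ARNs below are hypothetical.
from aws_security_mcp.formatters.waf import format_waf_resources_json

web_acl_arn = "arn:aws:wafv2:us-east-1:123456789012:regional/webacl/prod-acl/aaaa1111"
protected_arns = [
    "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/app/web-alb/50dc6c495c0c9188",
    "arn:aws:apigateway:us-east-1::/restapis/a1b2c3/stages/prod",
]

report = format_waf_resources_json(protected_arns, web_acl_arn)
print(report["resource_count"])                 # 2
print(report["resources"][0]["resource_type"])  # loadbalancer
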
241 | 242 | Args: 243 | statement: WAF rule statement dictionary 244 | 245 | Returns: 246 | String indicating the statement type 247 | """ 248 | statement_keys = statement.keys() 249 | 250 | # Check for common statement types 251 | if 'RateBasedStatement' in statement_keys: 252 | return 'Rate-based' 253 | elif 'ByteMatchStatement' in statement_keys: 254 | return 'Byte match' 255 | elif 'SqliMatchStatement' in statement_keys: 256 | return 'SQL injection' 257 | elif 'XssMatchStatement' in statement_keys: 258 | return 'XSS' 259 | elif 'GeoMatchStatement' in statement_keys: 260 | return 'Geo match' 261 | elif 'IPSetReferenceStatement' in statement_keys: 262 | return 'IP set' 263 | elif 'RegexPatternSetReferenceStatement' in statement_keys: 264 | return 'Regex pattern' 265 | elif 'AndStatement' in statement_keys: 266 | return 'AND' 267 | elif 'OrStatement' in statement_keys: 268 | return 'OR' 269 | elif 'NotStatement' in statement_keys: 270 | return 'NOT' 271 | elif 'ManagedRuleGroupStatement' in statement_keys: 272 | return 'Managed rule group' 273 | elif 'RuleGroupReferenceStatement' in statement_keys: 274 | return 'Rule group' 275 | else: 276 | return 'Unknown' -------------------------------------------------------------------------------- /aws_security_mcp/services/README.md: -------------------------------------------------------------------------------- 1 | # AWS Security MCP - Service Modules 2 | 3 | This directory contains service modules for interacting with various AWS services. Each module provides a set of functions to interact with a specific AWS service, handling pagination, error handling, and formatting of responses. 4 | 5 | ## Service Modules 6 | 7 | ### Resource Tagging Service (`resource_tagging.py`) 8 | 9 | The Resource Tagging Service module provides functionality to interact with the AWS Resource Groups Tagging API. It includes functions to retrieve tag keys, tag values, and resources by tags. 10 | 11 | #### Key Functions: 12 | 13 | 1. **`get_tag_keys`**: Retrieves all tag keys used in the AWS account, with support for pagination. 14 | 2. **`get_tag_values`**: Retrieves all values for a specific tag key, with support for pagination. 15 | 3. **`check_tag_key_exists`**: Checks if a specific tag key exists in the AWS account. 16 | 4. **`check_tag_value_exists`**: Checks if a specific tag value exists for a given tag key. 17 | 5. **`get_resources_by_tags`**: Retrieves AWS resources filtered by tag key and optionally tag value, with support for pagination and resource type filtering. 18 | 19 | ### Load Balancer Service (`load_balancer.py`) 20 | 21 | The Load Balancer Service module provides functionality to interact with the AWS Elastic Load Balancing API. It includes functions to retrieve load balancers, target groups, and listeners. 22 | 23 | #### Key Functions: 24 | 25 | 1. **`get_all_load_balancers_v2`**: Retrieves all Application and Network Load Balancers. 26 | 2. **`get_all_classic_load_balancers`**: Retrieves all Classic Load Balancers. 27 | 3. **`get_all_target_groups`**: Retrieves all target groups. 28 | 4. **`get_all_listeners`**: Retrieves all listeners for a load balancer. 29 | 30 | ## Error Handling 31 | 32 | All service functions handle exceptions and return standardized error responses. 
The error responses include: 33 | 34 | - A clear error message 35 | - The original exception details 36 | - Empty result sets to avoid null reference errors 37 | 38 | ## Pagination 39 | 40 | Service functions support pagination through: 41 | 42 | - `next_token`: A token for retrieving the next set of results 43 | - `max_items`: Maximum number of items to return in a single call 44 | 45 | ## Usage Example 46 | 47 | ```python 48 | from aws_security_mcp.services.resource_tagging import get_resources_by_tags 49 | 50 | # Get all EC2 instances with the tag "Environment=Production" 51 | resources = await get_resources_by_tags( 52 | tag_key="Environment", 53 | tag_value="Production", 54 | resource_types=["ec2:instance"] 55 | ) 56 | ``` -------------------------------------------------------------------------------- /aws_security_mcp/services/__init__.py: -------------------------------------------------------------------------------- 1 | """AWS service clients for security-related operations.""" 2 | 3 | from typing import Any, Dict, Optional, Union 4 | 5 | # Export common utilities 6 | from aws_security_mcp.services.base import get_aws_session, get_client 7 | 8 | # Type alias for AWS responses 9 | AWSResponse = Dict[str, Any] 10 | 11 | # Import service modules for easy access 12 | from aws_security_mcp.services import ( 13 | access_analyzer, 14 | base, 15 | cloudfront, 16 | credentials, 17 | ec2, 18 | ecs, 19 | ecr, 20 | guardduty, 21 | iam, 22 | lambda_service, 23 | load_balancer, 24 | organizations, 25 | resource_tagging, 26 | route53, 27 | s3, 28 | securityhub, 29 | shield, 30 | trusted_advisor, 31 | waf, 32 | ) 33 | 34 | # For backward compatibility, keep the class imports where they exist 35 | try: 36 | from aws_security_mcp.services.shield import ShieldService 37 | except ImportError: 38 | pass 39 | 40 | try: 41 | from aws_security_mcp.services.resource_tagging import ResourceTaggingService 42 | except ImportError: 43 | pass -------------------------------------------------------------------------------- /aws_security_mcp/services/access_analyzer.py: -------------------------------------------------------------------------------- 1 | """AWS IAM Access Analyzer service client module. 2 | 3 | This module provides functions for interacting with AWS IAM Access Analyzer. 4 | """ 5 | 6 | import logging 7 | from typing import Any, Dict, List, Optional, Union, Tuple 8 | 9 | import boto3 10 | from botocore.exceptions import ClientError 11 | 12 | from aws_security_mcp.services.base import get_client, handle_aws_error 13 | 14 | # Configure logging 15 | logger = logging.getLogger(__name__) 16 | 17 | def get_access_analyzer_client(session_context: Optional[str] = None, **kwargs: Any) -> boto3.client: 18 | """Get AWS IAM Access Analyzer client. 19 | 20 | Args: 21 | session_context: Optional session key for cross-account access 22 | **kwargs: Additional arguments to pass to the boto3 client constructor 23 | 24 | Returns: 25 | boto3.client: An initialized IAM Access Analyzer client 26 | """ 27 | return get_client("accessanalyzer", session_context=session_context, **kwargs) 28 | 29 | def list_analyzers( 30 | max_results: int = 100, 31 | next_token: Optional[str] = None, 32 | session_context: Optional[str] = None, 33 | **kwargs: Any 34 | ) -> List[Dict[str, Any]]: 35 | """List IAM Access Analyzers. 
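# --- Usage example: list_analyzers -----------------------------------------------
# A minimal sketch of the Access Analyzer service module, assuming the package is
# importable and AWS credentials/region are configured for boto3 via the shared
# services.base.get_client() helper.
from aws_security_mcp.services.access_analyzer import list_analyzers

# Returns the raw "analyzers" list from the AccessAnalyzer ListAnalyzers API.
for analyzer in list_analyzers(max_results=10):
    print(analyzer.get("name"), analyzer.get("status"))
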
36 | 37 | Args: 38 | max_results: Maximum number of analyzers to return (1-1000) 39 | next_token: Token for pagination 40 | session_context: Optional session key for cross-account access 41 | **kwargs: Additional arguments to pass to the list_analyzers API call 42 | 43 | Returns: 44 | List[Dict[str, Any]]: List of analyzers 45 | """ 46 | client = get_access_analyzer_client(session_context) 47 | 48 | params = { 49 | **kwargs 50 | } 51 | 52 | if max_results: 53 | params['maxResults'] = min(1000, max(1, max_results)) 54 | 55 | if next_token: 56 | params['nextToken'] = next_token 57 | 58 | try: 59 | response = client.list_analyzers(**params) 60 | return response.get('analyzers', []) 61 | except ClientError as e: 62 | logger.error(f"Error listing IAM Access Analyzers: {e}") 63 | raise 64 | 65 | def get_analyzer(analyzer_name: str, session_context: Optional[str] = None, **kwargs: Any) -> Dict[str, Any]: 66 | """Get details of a specific IAM Access Analyzer. 67 | 68 | Args: 69 | analyzer_name: The name of the analyzer to retrieve 70 | session_context: Optional session key for cross-account access 71 | **kwargs: Additional arguments to pass to the get_analyzer API call 72 | 73 | Returns: 74 | Dict[str, Any]: Analyzer details 75 | """ 76 | client = get_access_analyzer_client(session_context) 77 | 78 | try: 79 | response = client.get_analyzer( 80 | analyzerName=analyzer_name, 81 | **kwargs 82 | ) 83 | return response.get('analyzer', {}) 84 | except ClientError as e: 85 | logger.error(f"Error getting IAM Access Analyzer details: {e}") 86 | raise 87 | 88 | def list_findings( 89 | analyzer_arn: str, 90 | status: Optional[str] = None, 91 | max_results: int = 100, 92 | next_token: Optional[str] = None, 93 | session_context: Optional[str] = None, 94 | **kwargs: Any 95 | ) -> Tuple[List[Dict[str, Any]], Optional[str]]: 96 | """List IAM Access Analyzer findings. 97 | 98 | Args: 99 | analyzer_arn: ARN of the analyzer 100 | status: Filter findings by status (ACTIVE, ARCHIVED, RESOLVED) 101 | max_results: Maximum number of findings to return (1-100) 102 | next_token: Token for pagination 103 | session_context: Optional session key for cross-account access 104 | **kwargs: Additional arguments to pass to the list_findings API call 105 | 106 | Returns: 107 | Tuple[List[Dict[str, Any]], Optional[str]]: Tuple containing list of findings and next token for pagination 108 | """ 109 | client = get_access_analyzer_client(session_context) 110 | 111 | params = { 112 | 'analyzerArn': analyzer_arn, 113 | **kwargs 114 | } 115 | 116 | if max_results: 117 | params['maxResults'] = min(100, max(1, max_results)) 118 | 119 | if next_token: 120 | params['nextToken'] = next_token 121 | 122 | if status: 123 | params['filter'] = create_status_filter(status) 124 | 125 | try: 126 | response = client.list_findings(**params) 127 | return response.get('findings', []), response.get('nextToken') 128 | except ClientError as e: 129 | logger.error(f"Error listing IAM Access Analyzer findings: {e}") 130 | raise 131 | 132 | def get_finding( 133 | analyzer_arn: str, 134 | finding_id: str, 135 | session_context: Optional[str] = None, 136 | **kwargs: Any 137 | ) -> Dict[str, Any]: 138 | """Get details of a specific finding. 
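# --- Usage example: paginating list_findings -------------------------------------
# A minimal pagination sketch for list_findings(), assuming the package is
# importable and credentials are configured; the analyzer ARN is hypothetical.
from aws_security_mcp.services.access_analyzer import list_findings

analyzer_arn = "arn:aws:access-analyzer:us-east-1:123456789012:analyzer/org-analyzer"

findings = []
token = None
while True:
    page, token = list_findings(analyzer_arn, status="ACTIVE", max_results=100, next_token=token)
    findings.extend(page)
    if not token:
        break

print(f"Active findings: {len(findings)}")
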
139 | 140 | Args: 141 | analyzer_arn: ARN of the analyzer 142 | finding_id: ID of the finding 143 | session_context: Optional session key for cross-account access 144 | **kwargs: Additional arguments to pass to the get_finding API call 145 | 146 | Returns: 147 | Dict[str, Any]: Finding details 148 | """ 149 | client = get_access_analyzer_client(session_context) 150 | 151 | try: 152 | response = client.get_finding( 153 | analyzerArn=analyzer_arn, 154 | id=finding_id, 155 | **kwargs 156 | ) 157 | return response 158 | except ClientError as e: 159 | logger.error(f"Error getting IAM Access Analyzer finding: {e}") 160 | raise 161 | 162 | def list_findings_by_category( 163 | analyzer_arn: str, 164 | resource_type: str, 165 | status: str = "ACTIVE", 166 | max_results: int = 100, 167 | next_token: Optional[str] = None, 168 | session_context: Optional[str] = None, 169 | **kwargs: Any 170 | ) -> Tuple[List[Dict[str, Any]], Optional[str]]: 171 | """List findings filtered by resource type category. 172 | 173 | Args: 174 | analyzer_arn: ARN of the analyzer 175 | resource_type: Resource type to filter by (e.g., AWS::S3::Bucket, AWS::SQS::Queue) 176 | status: Filter findings by status (ACTIVE, ARCHIVED, RESOLVED) 177 | max_results: Maximum number of findings to return (1-100) 178 | next_token: Token for pagination 179 | session_context: Optional session key for cross-account access 180 | **kwargs: Additional arguments to pass to the list_findings API call 181 | 182 | Returns: 183 | Tuple[List[Dict[str, Any]], Optional[str]]: Tuple containing list of findings matching the category and next token for pagination 184 | """ 185 | client = get_access_analyzer_client(session_context) 186 | 187 | # Create combined filter for status and resource type 188 | filter_criteria = { 189 | 'resourceType': { 190 | 'eq': [resource_type] 191 | } 192 | } 193 | 194 | if status: 195 | filter_criteria['status'] = { 196 | 'eq': [status] 197 | } 198 | 199 | params = { 200 | 'analyzerArn': analyzer_arn, 201 | 'filter': filter_criteria, 202 | **kwargs 203 | } 204 | 205 | if max_results: 206 | params['maxResults'] = min(100, max(1, max_results)) 207 | 208 | if next_token: 209 | params['nextToken'] = next_token 210 | 211 | try: 212 | response = client.list_findings(**params) 213 | return response.get('findings', []), response.get('nextToken') 214 | except ClientError as e: 215 | logger.error(f"Error listing IAM Access Analyzer findings by category: {e}") 216 | raise 217 | 218 | def create_status_filter(status: str) -> Dict[str, Any]: 219 | """Create a filter for findings based on status. 220 | 221 | Args: 222 | status: Status to filter by (ACTIVE, ARCHIVED, RESOLVED) 223 | 224 | Returns: 225 | Dict[str, Any]: Filter for the specified status 226 | """ 227 | valid_statuses = ["ACTIVE", "ARCHIVED", "RESOLVED"] 228 | 229 | if status not in valid_statuses: 230 | logger.warning(f"Invalid status: {status}. Using ACTIVE.") 231 | status = "ACTIVE" 232 | 233 | return { 234 | 'status': { 235 | 'eq': [status] 236 | } 237 | } 238 | 239 | def create_resource_type_filter(resource_type: str) -> Dict[str, Any]: 240 | """Create a filter for findings based on resource type. 
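# --- Usage example: create_status_filter -----------------------------------------
# A minimal sketch of the filter helper defined above, assuming the package is
# importable; no AWS calls are made here.
from aws_security_mcp.services.access_analyzer import create_status_filter

# Invalid statuses are logged and replaced with "ACTIVE" by the helper.
print(create_status_filter("ARCHIVED"))  # {'status': {'eq': ['ARCHIVED']}}
print(create_status_filter("bogus"))     # {'status': {'eq': ['ACTIVE']}}
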
241 | 242 | Args: 243 | resource_type: Resource type to filter by 244 | 245 | Returns: 246 | Dict[str, Any]: Filter for the specified resource type 247 | """ 248 | return { 249 | 'resourceType': { 250 | 'eq': [resource_type] 251 | } 252 | } -------------------------------------------------------------------------------- /aws_security_mcp/services/securityhub.py: -------------------------------------------------------------------------------- 1 | """AWS SecurityHub service client module. 2 | 3 | This module provides functions for interacting with the AWS SecurityHub service. 4 | """ 5 | 6 | import logging 7 | from typing import Any, Dict, List, Optional, Union 8 | 9 | import boto3 10 | from botocore.exceptions import ClientError 11 | 12 | from aws_security_mcp.services.base import get_client, handle_aws_error 13 | 14 | # Configure logging 15 | logger = logging.getLogger(__name__) 16 | 17 | def get_securityhub_client(**kwargs: Any) -> boto3.client: 18 | """Get AWS SecurityHub client. 19 | 20 | Args: 21 | **kwargs: Additional arguments to pass to the boto3 client constructor 22 | 23 | Returns: 24 | boto3.client: An initialized SecurityHub client 25 | """ 26 | return get_client('securityhub', **kwargs) 27 | 28 | def get_findings( 29 | filters: Optional[Dict[str, Any]] = None, 30 | max_results: int = 100, 31 | next_token: Optional[str] = None, 32 | **kwargs: Any 33 | ) -> Dict[str, Any]: 34 | """Get findings from SecurityHub with specified filters. 35 | 36 | Args: 37 | filters: Dictionary of filters to apply to the findings 38 | max_results: Maximum number of findings to return (1-100) 39 | next_token: Token for pagination 40 | **kwargs: Additional arguments to pass to the get_findings API call 41 | 42 | Returns: 43 | Dict[str, Any]: Response from the get_findings API call 44 | """ 45 | client = get_securityhub_client() 46 | 47 | # Build request parameters 48 | params = { 49 | 'MaxResults': min(100, max(1, max_results)), # API limits: 1-100 50 | } 51 | 52 | if filters: 53 | params['Filters'] = filters 54 | 55 | if next_token: 56 | params['NextToken'] = next_token 57 | 58 | # Add any additional parameters 59 | params.update(kwargs) 60 | 61 | try: 62 | return client.get_findings(**params) 63 | except ClientError as e: 64 | logger.error(f"Error getting SecurityHub findings: {e}") 65 | raise 66 | 67 | def get_all_findings( 68 | filters: Optional[Dict[str, Any]] = None, 69 | max_items: int = 1000, 70 | **kwargs: Any 71 | ) -> List[Dict[str, Any]]: 72 | """Get all findings from SecurityHub with pagination handling. 
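# --- Usage example: get_findings with a severity filter --------------------------
# A minimal sketch of pulling one page of CRITICAL Security Hub findings through
# get_findings(), assuming the package is importable and credentials are
# configured; the filter uses the standard Security Hub "SeverityLabel" shape.
from aws_security_mcp.services.securityhub import get_findings

critical_filter = {"SeverityLabel": [{"Value": "CRITICAL", "Comparison": "EQUALS"}]}

page = get_findings(filters=critical_filter, max_results=50)
for finding in page.get("Findings", []):
    print(finding.get("Title"))
print("More pages:", bool(page.get("NextToken")))
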
73 | 74 | Args: 75 | filters: Dictionary of filters to apply to the findings 76 | max_items: Maximum number of findings to return 77 | **kwargs: Additional arguments to pass to the get_findings API call 78 | 79 | Returns: 80 | List[Dict[str, Any]]: List of findings 81 | """ 82 | client = get_securityhub_client() 83 | findings = [] 84 | next_token = None 85 | 86 | while len(findings) < max_items: 87 | # Prepare request parameters 88 | params = { 89 | 'MaxResults': min(100, max_items - len(findings)), # API max is 100 90 | } 91 | 92 | if filters: 93 | params['Filters'] = filters 94 | 95 | if next_token: 96 | params['NextToken'] = next_token 97 | 98 | # Add any additional parameters 99 | params.update(kwargs) 100 | 101 | try: 102 | response = client.get_findings(**params) 103 | batch_findings = response.get('Findings', []) 104 | 105 | if not batch_findings: 106 | break 107 | 108 | findings.extend(batch_findings) 109 | 110 | # Check if there are more findings 111 | next_token = response.get('NextToken') 112 | if not next_token: 113 | break 114 | 115 | except ClientError as e: 116 | logger.error(f"Error getting SecurityHub findings: {e}") 117 | raise 118 | 119 | return findings 120 | 121 | def filter_findings_by_severity( 122 | findings: List[Dict[str, Any]], 123 | severity: str = "ALL" 124 | ) -> List[Dict[str, Any]]: 125 | """Filter findings by severity level. 126 | 127 | Args: 128 | findings: List of findings to filter 129 | severity: Severity level (CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, or ALL) 130 | 131 | Returns: 132 | List[Dict[str, Any]]: Filtered list of findings 133 | """ 134 | if severity == "ALL": 135 | return findings 136 | 137 | valid_severities = ["CRITICAL", "HIGH", "MEDIUM", "LOW", "INFORMATIONAL"] 138 | if severity not in valid_severities: 139 | raise ValueError(f"Invalid severity level: {severity}") 140 | 141 | return [ 142 | finding for finding in findings 143 | if finding.get('Severity', {}).get('Label') == severity 144 | ] 145 | 146 | def filter_findings_by_text( 147 | findings: List[Dict[str, Any]], 148 | search_term: str = "" 149 | ) -> List[Dict[str, Any]]: 150 | """Filter findings by search term. 151 | 152 | Args: 153 | findings: List of findings to filter 154 | search_term: Term to search for in finding fields 155 | 156 | Returns: 157 | List[Dict[str, Any]]: Filtered list of findings 158 | """ 159 | if not search_term: 160 | return findings 161 | 162 | search_term_lower = search_term.lower() 163 | filtered_findings = [] 164 | 165 | for finding in findings: 166 | # Check if the search term matches any important fields 167 | if (search_term_lower in finding.get('ProductName', '').lower() or 168 | search_term_lower in finding.get('Title', '').lower() or 169 | search_term_lower in finding.get('Description', '').lower() or 170 | any(search_term_lower in str(resource).lower() for resource in finding.get('Resources', [])) or 171 | search_term_lower in finding.get('CompanyName', '').lower()): 172 | filtered_findings.append(finding) 173 | 174 | return filtered_findings 175 | 176 | def create_severity_filter(severity: str) -> Dict[str, Any]: 177 | """Create a filter for findings based on severity. 
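# --- Usage example: get_all_findings plus client-side text filtering -------------
# A minimal sketch combining the paginated fetch with the in-memory text filter,
# assuming the package is importable and credentials are configured.
from aws_security_mcp.services.securityhub import filter_findings_by_text, get_all_findings

all_findings = get_all_findings(max_items=500)
s3_related = filter_findings_by_text(all_findings, search_term="s3")
print(f"{len(s3_related)} of {len(all_findings)} findings mention S3")
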
178 | 179 | Args: 180 | severity: Severity level (CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, or ALL) 181 | 182 | Returns: 183 | Dict[str, Any]: Filter dictionary for the specified severity 184 | """ 185 | if severity == "ALL": 186 | return {} 187 | 188 | valid_severities = ["CRITICAL", "HIGH", "MEDIUM", "LOW", "INFORMATIONAL"] 189 | if severity not in valid_severities: 190 | raise ValueError(f"Invalid severity level: {severity}") 191 | 192 | return { 193 | "SeverityLabel": [{"Value": severity, "Comparison": "EQUALS"}] 194 | } 195 | 196 | def create_search_term_filter(search_term: str) -> Dict[str, Any]: 197 | """Create a filter for findings based on search term. 198 | 199 | Args: 200 | search_term: Term to search for in finding fields 201 | 202 | Returns: 203 | Dict[str, Any]: Filter dictionary for the specified search term 204 | """ 205 | if not search_term: 206 | return {} 207 | 208 | # Apply search term to multiple fields 209 | return { 210 | "$or": [ 211 | {"ProductName": [{"Value": search_term, "Comparison": "CONTAINS"}]}, 212 | {"Title": [{"Value": search_term, "Comparison": "CONTAINS"}]}, 213 | {"Description": [{"Value": search_term, "Comparison": "CONTAINS"}]}, 214 | {"ResourceId": [{"Value": search_term, "Comparison": "CONTAINS"}]}, 215 | {"ResourceType": [{"Value": search_term, "Comparison": "CONTAINS"}]}, 216 | {"CompanyName": [{"Value": search_term, "Comparison": "CONTAINS"}]} 217 | ] 218 | } -------------------------------------------------------------------------------- /aws_security_mcp/services/trusted_advisor.py: -------------------------------------------------------------------------------- 1 | """AWS Trusted Advisor service for security checks and recommendations.""" 2 | 3 | import logging 4 | from typing import Any, Dict, List, Optional, Tuple, Union 5 | 6 | import boto3 7 | from botocore.exceptions import ClientError 8 | 9 | from aws_security_mcp.config import config 10 | from aws_security_mcp.services.base import get_client 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | # Security check categories - we'll filter for these 15 | SECURITY_CATEGORIES = ["security", "fault_tolerance"] 16 | 17 | async def get_security_checks(session_context: Optional[str] = None) -> Dict[str, Any]: 18 | """Retrieve all security-related checks from Trusted Advisor. 19 | 20 | Args: 21 | session_context: Optional session key for cross-account access 22 | 23 | Returns: 24 | Dict containing security checks or error information 25 | """ 26 | try: 27 | client = get_client('trustedadvisor', session_context=session_context) 28 | 29 | # Get all checks and filter for security categories 30 | paginator = client.get_paginator('list_checks') 31 | 32 | all_checks = [] 33 | 34 | # Handle pagination 35 | for page in paginator.paginate(): 36 | checks = page.get('checks', []) 37 | # Filter for security-related checks only 38 | security_checks = [ 39 | check for check in checks 40 | if check.get('category', '').lower() in SECURITY_CATEGORIES 41 | ] 42 | all_checks.extend(security_checks) 43 | 44 | return { 45 | "success": True, 46 | "checks": all_checks, 47 | "count": len(all_checks) 48 | } 49 | 50 | except ClientError as e: 51 | logger.error(f"Error retrieving Trusted Advisor security checks: {str(e)}") 52 | return { 53 | "success": False, 54 | "error": str(e), 55 | "checks": [], 56 | "count": 0 57 | } 58 | 59 | async def get_recommendation_details(recommendation_id: str, session_context: Optional[str] = None) -> Dict[str, Any]: 60 | """Get details for a specific security recommendation. 
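# --- Usage example: get_security_checks (async) ----------------------------------
# A minimal sketch of the async Trusted Advisor helper, assuming the package is
# importable, credentials are configured, and the account's support tier exposes
# the Trusted Advisor API.
import asyncio

from aws_security_mcp.services.trusted_advisor import get_security_checks

async def main() -> None:
    result = await get_security_checks()
    if result["success"]:
        print(f"Security-related checks: {result['count']}")
    else:
        print(f"Trusted Advisor error: {result['error']}")

asyncio.run(main())
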
61 | 62 | Args: 63 | recommendation_id: The ID of the recommendation 64 | session_context: Optional session key for cross-account access 65 | 66 | Returns: 67 | Dict containing recommendation details or error information 68 | """ 69 | try: 70 | client = get_client('trustedadvisor', session_context=session_context) 71 | 72 | response = client.get_recommendation( 73 | recommendationId=recommendation_id 74 | ) 75 | 76 | return { 77 | "success": True, 78 | "recommendation": response.get('recommendation', {}) 79 | } 80 | 81 | except ClientError as e: 82 | logger.error(f"Error retrieving Trusted Advisor recommendation details: {str(e)}") 83 | return { 84 | "success": False, 85 | "error": str(e), 86 | "recommendation": {} 87 | } 88 | 89 | async def list_security_recommendations(session_context: Optional[str] = None) -> Dict[str, Any]: 90 | """List all security recommendations from Trusted Advisor. 91 | 92 | Args: 93 | session_context: Optional session key for cross-account access 94 | 95 | Returns: 96 | Dict containing security recommendations or error information 97 | """ 98 | try: 99 | client = get_client('trustedadvisor', session_context=session_context) 100 | 101 | # Use the paginator to handle large result sets 102 | paginator = client.get_paginator('list_recommendations') 103 | 104 | all_recommendations = [] 105 | 106 | # Handle pagination 107 | for page in paginator.paginate(): 108 | recommendations = page.get('recommendations', []) 109 | # Filter for security-related recommendations 110 | security_recommendations = [ 111 | rec for rec in recommendations 112 | if any(cat.lower() in SECURITY_CATEGORIES for cat in rec.get('categories', [])) 113 | ] 114 | all_recommendations.extend(security_recommendations) 115 | 116 | return { 117 | "success": True, 118 | "recommendations": all_recommendations, 119 | "count": len(all_recommendations) 120 | } 121 | 122 | except ClientError as e: 123 | logger.error(f"Error retrieving Trusted Advisor security recommendations: {str(e)}") 124 | return { 125 | "success": False, 126 | "error": str(e), 127 | "recommendations": [], 128 | "count": 0 129 | } 130 | 131 | async def list_recommendation_resources(recommendation_id: str, session_context: Optional[str] = None) -> Dict[str, Any]: 132 | """List all resources affected by a specific security recommendation. 
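# --- Usage example: chaining recommendations into affected resources -------------
# A minimal sketch that feeds list_security_recommendations() into
# list_recommendation_resources(), assuming the package is importable and
# credentials are configured; the "id"/"name" keys read from each recommendation
# are assumptions about the API payload shape.
import asyncio

from aws_security_mcp.services.trusted_advisor import (
    list_recommendation_resources,
    list_security_recommendations,
)

async def main() -> None:
    recs = await list_security_recommendations()
    for rec in recs.get("recommendations", [])[:3]:
        rec_id = rec.get("id")
        if not rec_id:
            continue
        resources = await list_recommendation_resources(rec_id)
        print(rec.get("name", rec_id), "->", resources["count"], "affected resources")

asyncio.run(main())
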
133 | 134 | Args: 135 | recommendation_id: The ID of the recommendation 136 | session_context: Optional session key for cross-account access 137 | 138 | Returns: 139 | Dict containing affected resources or error information 140 | """ 141 | try: 142 | client = get_client('trustedadvisor', session_context=session_context) 143 | 144 | # Use the paginator to handle large result sets 145 | paginator = client.get_paginator('list_recommendation_resources') 146 | 147 | all_resources = [] 148 | 149 | # Handle pagination 150 | for page in paginator.paginate(recommendationId=recommendation_id): 151 | resources = page.get('resources', []) 152 | all_resources.extend(resources) 153 | 154 | return { 155 | "success": True, 156 | "resources": all_resources, 157 | "count": len(all_resources) 158 | } 159 | 160 | except ClientError as e: 161 | logger.error(f"Error retrieving resources for recommendation {recommendation_id}: {str(e)}") 162 | return { 163 | "success": False, 164 | "error": str(e), 165 | "resources": [], 166 | "count": 0 167 | } -------------------------------------------------------------------------------- /aws_security_mcp/tools/__init__.py: -------------------------------------------------------------------------------- 1 | """MCP tools implementation for AWS security services.""" 2 | 3 | from typing import Any, Callable, Dict, List, Optional, TypeVar, Union 4 | 5 | # Type for tool functions 6 | ToolFunc = TypeVar('ToolFunc', bound=Callable[..., Any]) 7 | 8 | # Registry to store all registered tools 9 | _TOOLS_REGISTRY: Dict[str, Callable] = {} 10 | 11 | def register_tool(name: Optional[str] = None) -> Callable[[ToolFunc], ToolFunc]: 12 | """Decorator to register an MCP tool function. 13 | 14 | Args: 15 | name: Optional custom name for the tool. If None, function name is used. 16 | 17 | Returns: 18 | Decorator function that registers the tool. 19 | """ 20 | def decorator(func: ToolFunc) -> ToolFunc: 21 | tool_name = name or func.__name__ 22 | _TOOLS_REGISTRY[tool_name] = func 23 | return func 24 | return decorator 25 | 26 | def get_all_tools() -> Dict[str, Callable]: 27 | """Get all registered MCP tools. 28 | 29 | Returns: 30 | Dictionary of tool name to function mapping. 
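    Example (illustrative sketch; it assumes the tool modules imported below have
    already run, so their @register_tool decorators have populated the registry;
    "list_s3_buckets" is just one such registered name):
        tools = get_all_tools()
        sorted(tools)                        # names of every registered tool
        s3_tool = tools["list_s3_buckets"]   # the decorated coroutine function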
31 | """ 32 | return _TOOLS_REGISTRY 33 | 34 | # Import all tool modules to register their tools 35 | import aws_security_mcp.tools.s3_tools 36 | import aws_security_mcp.tools.iam_tools # Re-added IAM tools module 37 | import aws_security_mcp.tools.ec2_tools 38 | import aws_security_mcp.tools.securityhub_tools 39 | import aws_security_mcp.tools.lambda_tools 40 | import aws_security_mcp.tools.guardduty_tools 41 | import aws_security_mcp.tools.access_analyzer_tools 42 | import aws_security_mcp.tools.load_balancer_tools 43 | import aws_security_mcp.tools.cloudfront_tools 44 | import aws_security_mcp.tools.route53_tools 45 | import aws_security_mcp.tools.waf_tools 46 | import aws_security_mcp.tools.shield_tools 47 | import aws_security_mcp.tools.resource_tagging_tools 48 | # Import new tool modules 49 | import aws_security_mcp.tools.trusted_advisor_tools 50 | import aws_security_mcp.tools.ecr_tools 51 | import aws_security_mcp.tools.ecs_tools 52 | import aws_security_mcp.tools.org_tools 53 | 54 | # Import wrapper modules 55 | import aws_security_mcp.tools.wrappers.guardduty_wrapper 56 | import aws_security_mcp.tools.wrappers.ec2_wrapper 57 | import aws_security_mcp.tools.wrappers.load_balancer_wrapper 58 | import aws_security_mcp.tools.wrappers.cloudfront_wrapper -------------------------------------------------------------------------------- /aws_security_mcp/tools/ecr_tools.py: -------------------------------------------------------------------------------- 1 | """ECR tools module for AWS Security MCP. 2 | 3 | This module provides tools for retrieving and analyzing ECR information 4 | for security assessment purposes. 5 | """ 6 | 7 | import json 8 | import logging 9 | from datetime import datetime 10 | from typing import Any, Dict, List, Optional 11 | 12 | from aws_security_mcp.services import ecr 13 | from aws_security_mcp.formatters.ecr import extract_repository_uris, extract_repository_names, format_repository_search_results 14 | from aws_security_mcp.tools import register_tool 15 | 16 | # Configure logging 17 | logger = logging.getLogger(__name__) 18 | 19 | @register_tool() 20 | async def list_ecr_repositories(session_context: Optional[str] = None) -> Dict[str, Any]: 21 | """List all ECR repositories in the AWS account. 
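    Example (illustrative only; the session key below is a placeholder, not a real account alias):
        result = await list_ecr_repositories(session_context="123456789012_aws_dev")
        if "error" not in result:
            print(result["count"], result["repository_names"])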
22 | 23 | Args: 24 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 25 | 26 | Returns: 27 | Dict containing list of ECR repository names 28 | """ 29 | try: 30 | logger.info(f"Listing ECR repositories (session_context={session_context})") 31 | 32 | # Get repositories from the service 33 | result = await ecr.get_repositories(session_context=session_context) 34 | 35 | if not result.get("success", False): 36 | return { 37 | "error": result.get("error", "Unknown error"), 38 | "repository_names": [], 39 | "count": 0, 40 | "scan_timestamp": datetime.utcnow().isoformat() 41 | } 42 | 43 | # Extract only the repository names as requested 44 | repository_names = extract_repository_names(result.get("repositories", [])) 45 | 46 | return { 47 | "repository_names": repository_names, 48 | "count": len(repository_names), 49 | "scan_timestamp": datetime.utcnow().isoformat() 50 | } 51 | 52 | except Exception as e: 53 | logger.error(f"Error listing ECR repositories: {str(e)}") 54 | return { 55 | "repository_names": [], 56 | "count": 0, 57 | "scan_timestamp": datetime.utcnow().isoformat(), 58 | "error": str(e) 59 | } 60 | 61 | @register_tool() 62 | async def get_ecr_repository_policy(repository_name: str, session_context: Optional[str] = None) -> Dict[str, Any]: 63 | """Get the IAM policy for an ECR repository. 64 | 65 | This tool retrieves the repository policy for the specified ECR repository. 66 | 67 | Args: 68 | repository_name: Name of the ECR repository 69 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 70 | 71 | Returns: 72 | Dict containing repository policy information 73 | """ 74 | logger.info(f"Getting policy for ECR repository: {repository_name} (session_context={session_context})") 75 | result = await ecr.get_repository_policy(repository_name, session_context=session_context) 76 | return result 77 | 78 | @register_tool() 79 | async def get_ecr_image_scan_findings(repository_name: str, image_tag: str = 'latest', session_context: Optional[str] = None) -> Dict[str, Any]: 80 | """Get vulnerability scan findings for a container image. 81 | 82 | This tool retrieves scan findings for the specified container image. 83 | 84 | Args: 85 | repository_name: Name of the ECR repository 86 | image_tag: Tag of the image to check, defaults to 'latest' 87 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 88 | 89 | Returns: 90 | Dict containing vulnerability scan findings information 91 | """ 92 | logger.info(f"Getting scan findings for ECR image: {repository_name}:{image_tag} (session_context={session_context})") 93 | result = await ecr.get_repository_scan_findings(repository_name, image_tag, session_context=session_context) 94 | return result 95 | 96 | @register_tool() 97 | async def get_ecr_repository_images(repository_name: str, session_context: Optional[str] = None) -> Dict[str, Any]: 98 | """Get information about all images in an ECR repository. 99 | 100 | This tool retrieves details about all container images in the specified repository. 
101 | 102 | Args: 103 | repository_name: Name of the ECR repository 104 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 105 | 106 | Returns: 107 | Dict containing repository images information 108 | """ 109 | logger.info(f"Getting images for ECR repository: {repository_name} (session_context={session_context})") 110 | result = await ecr.get_repository_images(repository_name, session_context=session_context) 111 | return result 112 | 113 | @register_tool() 114 | async def search_ecr_repositories(repository_name: Optional[str] = None, repository_names: Optional[List[str]] = None, session_context: Optional[str] = None) -> Dict[str, Any]: 115 | """Search for ECR repositories and get detailed information. 116 | 117 | This tool allows searching for repositories by exact name match and 118 | returns detailed information about the matched repositories, including 119 | policy information and recent images. 120 | 121 | Args: 122 | repository_name: Optional single repository name to search for exactly 123 | repository_names: Optional list of repository names to search for exactly 124 | If neither parameter is provided, details for all repositories will be returned. 125 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 126 | 127 | Returns: 128 | Dict containing detailed information about matching repositories 129 | """ 130 | try: 131 | if repository_name: 132 | logger.info(f"Searching ECR repository with exact name: {repository_name} (session_context={session_context})") 133 | elif repository_names: 134 | logger.info(f"Searching ECR repositories with exact names: {', '.join(repository_names)} (session_context={session_context})") 135 | else: 136 | logger.info(f"Fetching details for all ECR repositories (session_context={session_context})") 137 | 138 | # Get repository search results from the service using exact name matching 139 | search_results = await ecr.search_repositories( 140 | repository_name=repository_name, 141 | repository_names=repository_names, 142 | session_context=session_context 143 | ) 144 | 145 | if not search_results.get("success", False): 146 | return { 147 | "error": search_results.get("error", "Unknown error"), 148 | "repositories": [], 149 | "count": 0, 150 | "search_term": repository_name or (', '.join(repository_names) if repository_names else 'ALL'), 151 | "scan_timestamp": datetime.utcnow().isoformat() 152 | } 153 | 154 | # Format the search results 155 | formatted_results = format_repository_search_results(search_results) 156 | 157 | return formatted_results 158 | 159 | except Exception as e: 160 | logger.error(f"Error searching ECR repositories: {str(e)}") 161 | return { 162 | "repositories": [], 163 | "count": 0, 164 | "search_term": repository_name or (', '.join(repository_names) if repository_names else 'ALL'), 165 | "scan_timestamp": datetime.utcnow().isoformat(), 166 | "error": str(e) 167 | } -------------------------------------------------------------------------------- /aws_security_mcp/tools/ecs_tools.py: -------------------------------------------------------------------------------- 1 | """MCP tools for AWS ECS (Elastic Container Service) security.""" 2 | 3 | import logging 4 | from typing import Any, Dict, List, Optional 5 | 6 | from aws_security_mcp.services import ecs 7 | from aws_security_mcp.tools import register_tool 8 | from aws_security_mcp.formatters.ecs_formatter import ( 9 | format_ecs_service, 10 | format_ecs_task_definition, 11 | format_ecs_task, 12 | 
format_ecs_container_instance, 13 | format_ecs_cluster 14 | ) 15 | 16 | logger = logging.getLogger(__name__) 17 | 18 | @register_tool() 19 | async def list_ecs_clusters(session_context: Optional[str] = None) -> Dict[str, Any]: 20 | """List all ECS clusters in the AWS account. 21 | 22 | This tool retrieves all ECS clusters and provides information about their 23 | configuration and security settings. 24 | 25 | Args: 26 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 27 | 28 | Returns: 29 | Dict containing ECS clusters information 30 | """ 31 | logger.info(f"Listing ECS clusters (session_context={session_context})") 32 | result = await ecs.get_clusters(session_context=session_context) 33 | 34 | if result["success"]: 35 | # Apply security-focused formatting to reduce response size 36 | formatted_clusters = [format_ecs_cluster(cluster) for cluster in result.get("clusters", [])] 37 | result["clusters"] = formatted_clusters 38 | 39 | return result 40 | 41 | @register_tool() 42 | async def list_ecs_task_definitions(family_prefix: Optional[str] = None, status: str = "ACTIVE", session_context: Optional[str] = None) -> Dict[str, Any]: 43 | """List ECS task definitions with their security configurations. 44 | 45 | This tool retrieves all task definitions and extracts security-relevant configurations 46 | such as IAM roles, secrets, volumes, and network settings. 47 | 48 | Args: 49 | family_prefix: Optional family name prefix to filter task definitions 50 | status: Task definition status to filter by (ACTIVE or INACTIVE) 51 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 52 | 53 | Returns: 54 | Dict containing task definitions with security information 55 | """ 56 | logger.info(f"Listing ECS task definitions with status {status} (session_context={session_context})") 57 | result = await ecs.get_task_definitions(family_prefix, status, session_context=session_context) 58 | 59 | if result["success"]: 60 | # Apply security-focused formatting to reduce response size 61 | formatted_task_defs = [format_ecs_task_definition(task_def) for task_def in result.get("taskDefinitions", [])] 62 | result["taskDefinitions"] = formatted_task_defs 63 | 64 | return result 65 | 66 | @register_tool() 67 | async def get_ecs_task_definition(task_definition: str, cluster: Optional[str] = None, session_context: Optional[str] = None) -> Dict[str, Any]: 68 | """Get detailed information about the latest active ECS task definition with running tasks. 69 | 70 | This tool focuses on finding task definitions that are currently in use. When a family 71 | name is provided without a specific revision, it will find the latest active revision 72 | that has running tasks. If no running tasks are found, it falls back to the latest revision. 
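    Example (illustrative sketch; the family, revision, and cluster names are placeholders):
        result = await get_ecs_task_definition("web-api", cluster="prod-cluster")
        result = await get_ecs_task_definition("web-api:42")   # pin an exact revision instead
        if result["success"]:
            task_def = result["taskDefinition"]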
73 | 74 | Args: 75 | task_definition: The task definition family name, ARN, or family:revision 76 | cluster: Optional cluster name to check for running tasks (if not provided, checks all clusters) 77 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 78 | 79 | Returns: 80 | Dict containing detailed task definition information with a flag indicating if it has running tasks 81 | """ 82 | logger.info(f"Getting active ECS task definition: {task_definition}" + (f" in cluster: {cluster}" if cluster else "") + f" (session_context={session_context})") 83 | result = await ecs.get_task_definition(task_definition, cluster, session_context=session_context) 84 | 85 | if result["success"] and result.get("taskDefinition"): 86 | # Apply security-focused formatting to reduce response size 87 | result["taskDefinition"] = format_ecs_task_definition(result["taskDefinition"]) 88 | 89 | return result 90 | 91 | @register_tool() 92 | async def list_ecs_services(cluster: str, session_context: Optional[str] = None) -> Dict[str, Any]: 93 | """List ECS services for a specific cluster with security details. 94 | 95 | This tool retrieves all services in the specified cluster and extracts security-relevant 96 | information such as network configuration, IAM roles, and load balancer settings. 97 | 98 | Args: 99 | cluster: The ECS cluster ARN or name 100 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 101 | 102 | Returns: 103 | Dict containing services with security information 104 | """ 105 | logger.info(f"Listing ECS services for cluster: {cluster} (session_context={session_context})") 106 | result = await ecs.get_services(cluster, session_context=session_context) 107 | 108 | if result["success"]: 109 | # Apply security-focused formatting to reduce response size 110 | formatted_services = [format_ecs_service(service) for service in result.get("services", [])] 111 | result["services"] = formatted_services 112 | 113 | return result 114 | 115 | @register_tool() 116 | async def list_ecs_tasks(cluster: str, service: Optional[str] = None, session_context: Optional[str] = None) -> Dict[str, Any]: 117 | """List ECS tasks for a specific cluster or service with security details. 118 | 119 | This tool retrieves all tasks in the specified cluster or service and extracts 120 | security-relevant information such as task IAM roles, network configuration, 121 | and container details. 122 | 123 | Args: 124 | cluster: The ECS cluster ARN or name 125 | service: Optional service name to filter tasks 126 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 127 | 128 | Returns: 129 | Dict containing tasks with security information 130 | """ 131 | logger.info(f"Listing ECS tasks for cluster: {cluster}" + (f" and service: {service}" if service else "") + f" (session_context={session_context})") 132 | result = await ecs.get_tasks(cluster, service, session_context=session_context) 133 | 134 | if result["success"]: 135 | # Apply security-focused formatting to reduce response size 136 | formatted_tasks = [format_ecs_task(task) for task in result.get("tasks", [])] 137 | result["tasks"] = formatted_tasks 138 | 139 | return result 140 | 141 | @register_tool() 142 | async def list_ecs_container_instances(cluster: str, session_context: Optional[str] = None) -> Dict[str, Any]: 143 | """List ECS container instances for a specific cluster with security details. 
144 | 145 | This tool retrieves all container instances in the specified cluster and extracts 146 | security-relevant information such as EC2 instance details, agent status, 147 | and attached resources. 148 | 149 | Args: 150 | cluster: The ECS cluster ARN or name 151 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 152 | 153 | Returns: 154 | Dict containing container instances with security information 155 | """ 156 | logger.info(f"Listing ECS container instances for cluster: {cluster} (session_context={session_context})") 157 | result = await ecs.get_container_instances(cluster, session_context=session_context) 158 | 159 | if result["success"]: 160 | # Apply security-focused formatting to reduce response size 161 | formatted_instances = [format_ecs_container_instance(instance) for instance in result.get("containerInstances", [])] 162 | result["containerInstances"] = formatted_instances 163 | 164 | return result 165 | 166 | @register_tool() 167 | async def get_ecs_service(cluster: str, service: str, session_context: Optional[str] = None) -> Dict[str, Any]: 168 | """Get detailed information about a specific ECS service. 169 | 170 | This tool retrieves comprehensive details about a specified service in a cluster 171 | including its network configuration, IAM roles, and security settings. 172 | 173 | Args: 174 | cluster: The ECS cluster ARN or name 175 | service: The service ARN or name 176 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 177 | 178 | Returns: 179 | Dict containing detailed service information 180 | """ 181 | logger.info(f"Getting ECS service: {service} in cluster: {cluster} (session_context={session_context})") 182 | result = await ecs.get_service(cluster, service, session_context=session_context) 183 | 184 | if result["success"] and result.get("service"): 185 | # Apply security-focused formatting to reduce response size 186 | result["service"] = format_ecs_service(result["service"]) 187 | 188 | return result 189 | 190 | @register_tool() 191 | async def get_ecs_task(cluster: str, task: str, session_context: Optional[str] = None) -> Dict[str, Any]: 192 | """Get detailed information about a specific ECS task. 193 | 194 | This tool retrieves comprehensive details about a specified task in a cluster 195 | including its network interfaces, security groups, and container details. 196 | 197 | Args: 198 | cluster: The ECS cluster ARN or name 199 | task: The task ARN or ID 200 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 201 | 202 | Returns: 203 | Dict containing detailed task information 204 | """ 205 | logger.info(f"Getting ECS task: {task} in cluster: {cluster} (session_context={session_context})") 206 | result = await ecs.get_task(cluster, task, session_context=session_context) 207 | 208 | if result["success"] and result.get("task"): 209 | # Apply security-focused formatting to reduce response size 210 | result["task"] = format_ecs_task(result["task"]) 211 | 212 | return result -------------------------------------------------------------------------------- /aws_security_mcp/tools/org_tools.py: -------------------------------------------------------------------------------- 1 | """Organizations tools module for AWS Security MCP. 2 | 3 | This module provides tools for retrieving and analyzing AWS Organizations information 4 | for security assessment purposes. 
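Typical usage (illustrative sketch of the tools defined in this module):

    org = await fetch_aws_org()                # organization details plus OU hierarchy
    accounts = await details_aws_account()     # all member accounts when no ID is given
    controls = await fetch_aws_org_controls()  # SCPs, tag policies, and roots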
5 | """ 6 | 7 | import asyncio 8 | import logging 9 | from datetime import datetime 10 | from typing import Any, Dict, List, Optional 11 | 12 | from aws_security_mcp.services import organizations 13 | from aws_security_mcp.formatters import org_formatter 14 | from aws_security_mcp.tools import register_tool 15 | 16 | # Configure logging 17 | logger = logging.getLogger(__name__) 18 | 19 | @register_tool() 20 | async def fetch_aws_org() -> Dict[str, Any]: 21 | """Fetch information about the AWS Organization. 22 | 23 | Returns: 24 | Dict containing information about the AWS Organization 25 | """ 26 | try: 27 | logger.info("Fetching AWS Organization information") 28 | 29 | # Get organization info from the service 30 | org_info = organizations.get_organization() 31 | 32 | # Format organization information 33 | formatted_org = org_formatter.format_organization_simple(org_info) 34 | 35 | # Get hierarchy information 36 | hierarchy = await get_org_hierarchy_async() 37 | 38 | return { 39 | "organization": formatted_org, 40 | "hierarchy": hierarchy, 41 | "scan_timestamp": datetime.utcnow().isoformat() 42 | } 43 | 44 | except Exception as e: 45 | logger.error(f"Error fetching AWS Organization: {str(e)}") 46 | return { 47 | "organization": {}, 48 | "hierarchy": {}, 49 | "scan_timestamp": datetime.utcnow().isoformat(), 50 | "error": str(e) 51 | } 52 | 53 | async def get_org_hierarchy_async() -> Dict[str, Any]: 54 | """Async wrapper for get_organization_hierarchy. 55 | 56 | Returns: 57 | Dictionary representing the organization structure 58 | """ 59 | try: 60 | # Run the synchronous function in an executor 61 | hierarchy = await organizations.run_in_executor(organizations.get_organization_hierarchy) 62 | 63 | # Format the hierarchy 64 | formatted_hierarchy = org_formatter.format_org_hierarchy(hierarchy) 65 | 66 | return formatted_hierarchy 67 | except Exception as e: 68 | logger.error(f"Error getting organization hierarchy: {str(e)}") 69 | return {} 70 | 71 | @register_tool() 72 | async def details_aws_account(account_id: Optional[str] = None, account_ids: Optional[List[str]] = None) -> Dict[str, Any]: 73 | """Fetch details about AWS accounts in the organization. 
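    Example (illustrative only; the account IDs are placeholders):
        everything = await details_aws_account()
        single = await details_aws_account(account_id="111111111111")
        several = await details_aws_account(account_ids=["111111111111", "222222222222"])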
74 | 75 | Args: 76 | account_id: Optional single account ID to fetch details for 77 | account_ids: Optional list of account IDs to fetch details for 78 | 79 | Returns: 80 | Dict containing account details 81 | """ 82 | try: 83 | logger.info(f"Fetching AWS account details") 84 | 85 | accounts_to_fetch = [] 86 | 87 | # If both parameters are None, fetch all accounts 88 | if account_id is None and account_ids is None: 89 | logger.info("No account IDs specified, fetching all accounts") 90 | all_accounts = organizations.list_accounts() 91 | accounts_to_fetch = [account.get('Id') for account in all_accounts if account.get('Id')] 92 | 93 | # If single account_id is provided 94 | elif account_id is not None: 95 | accounts_to_fetch = [account_id] 96 | 97 | # If account_ids list is provided 98 | elif account_ids is not None: 99 | accounts_to_fetch = account_ids 100 | 101 | # Get details for each account 102 | account_details = {} 103 | policies_by_account = {} 104 | 105 | # Process accounts in parallel using asyncio 106 | async def get_account_with_policies(acc_id: str): 107 | try: 108 | # Get basic account details 109 | account_detail = await organizations.run_in_executor(organizations.get_account_details, acc_id) 110 | 111 | # Get effective policies 112 | policies = await organizations.run_in_executor(organizations.get_effective_policies_for_account, acc_id) 113 | 114 | return acc_id, account_detail, policies 115 | except Exception as e: 116 | logger.error(f"Error getting details for account {acc_id}: {str(e)}") 117 | return acc_id, {}, {} 118 | 119 | # Create tasks for all accounts 120 | tasks = [get_account_with_policies(acc_id) for acc_id in accounts_to_fetch] 121 | results = await asyncio.gather(*tasks) 122 | 123 | # Process results 124 | for acc_id, account_detail, policies in results: 125 | if account_detail: 126 | account_details[acc_id] = org_formatter.format_account_simple(account_detail) 127 | policies_by_account[acc_id] = org_formatter.format_effective_policies(policies) 128 | 129 | return { 130 | "accounts": account_details, 131 | "effective_policies": policies_by_account, 132 | "count": len(account_details), 133 | "scan_timestamp": datetime.utcnow().isoformat() 134 | } 135 | 136 | except Exception as e: 137 | logger.error(f"Error fetching AWS account details: {str(e)}") 138 | return { 139 | "accounts": {}, 140 | "effective_policies": {}, 141 | "count": 0, 142 | "scan_timestamp": datetime.utcnow().isoformat(), 143 | "error": str(e) 144 | } 145 | 146 | @register_tool() 147 | async def fetch_aws_org_controls() -> Dict[str, Any]: 148 | """Fetch all AWS Organization-level security controls. 149 | 150 | Returns: 151 | Dict containing Organization-level controls (SCPs, etc.) 
152 | """ 153 | try: 154 | logger.info("Fetching AWS Organization controls") 155 | 156 | # Get all policy types 157 | policy_types = [ 158 | 'SERVICE_CONTROL_POLICY', 159 | 'TAG_POLICY', 160 | 'BACKUP_POLICY', 161 | 'AISERVICES_OPT_OUT_POLICY' 162 | ] 163 | 164 | policies_by_type = {} 165 | 166 | # Get policies for each type 167 | for policy_type in policy_types: 168 | try: 169 | policies = await organizations.run_in_executor(organizations.list_policies, policy_type) 170 | 171 | if policies: 172 | formatted_type = policy_type.replace('_', ' ').title() 173 | policies_by_type[formatted_type] = [ 174 | org_formatter.format_policy_simple(policy) for policy in policies 175 | ] 176 | except Exception as e: 177 | logger.warning(f"Error fetching policies of type {policy_type}: {str(e)}") 178 | 179 | # Get all roots for reference 180 | roots = await organizations.run_in_executor(organizations.list_roots) 181 | formatted_roots = [] 182 | 183 | for root in roots: 184 | formatted_root = { 185 | 'id': root.get('Id'), 186 | 'name': root.get('Name'), 187 | 'arn': root.get('Arn'), 188 | 'policy_types': [ 189 | { 190 | 'type': pt.get('Type'), 191 | 'status': pt.get('Status') 192 | } 193 | for pt in root.get('PolicyTypes', []) 194 | ] 195 | } 196 | formatted_roots.append(formatted_root) 197 | 198 | return { 199 | "policies": policies_by_type, 200 | "roots": formatted_roots, 201 | "scan_timestamp": datetime.utcnow().isoformat() 202 | } 203 | 204 | except Exception as e: 205 | logger.error(f"Error fetching AWS Organization controls: {str(e)}") 206 | return { 207 | "policies": {}, 208 | "roots": [], 209 | "scan_timestamp": datetime.utcnow().isoformat(), 210 | "error": str(e) 211 | } 212 | 213 | @register_tool() 214 | async def fetch_scp_details(policy_id: str) -> Dict[str, Any]: 215 | """Fetch details of a specific SCP policy and its targets. 216 | 217 | Args: 218 | policy_id: ID of the SCP policy 219 | 220 | Returns: 221 | Dict containing SCP policy details and targets 222 | """ 223 | try: 224 | logger.info(f"Fetching SCP policy details for {policy_id}") 225 | 226 | # Get policy details 227 | policy_details = await organizations.run_in_executor(organizations.get_policy, policy_id) 228 | 229 | if not policy_details: 230 | logger.warning(f"Policy {policy_id} not found") 231 | return { 232 | "policy": {}, 233 | "targets": [], 234 | "scan_timestamp": datetime.utcnow().isoformat(), 235 | "error": f"Policy {policy_id} not found" 236 | } 237 | 238 | # Get targets for the policy 239 | targets = await organizations.run_in_executor(organizations.list_targets_for_policy, policy_id) 240 | 241 | # Format policy with targets 242 | formatted_policy = org_formatter.format_policy_with_targets(policy_details, targets) 243 | 244 | return { 245 | "policy": formatted_policy, 246 | "target_count": len(targets), 247 | "scan_timestamp": datetime.utcnow().isoformat() 248 | } 249 | 250 | except Exception as e: 251 | logger.error(f"Error fetching SCP policy details: {str(e)}") 252 | return { 253 | "policy": {}, 254 | "targets": [], 255 | "scan_timestamp": datetime.utcnow().isoformat(), 256 | "error": str(e) 257 | } -------------------------------------------------------------------------------- /aws_security_mcp/tools/registry.py: -------------------------------------------------------------------------------- 1 | """Tool registration management for AWS Security MCP. 2 | 3 | This module provides selective tool registration to reduce MCP communication 4 | overhead while maintaining functionality through service wrappers. 
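Typical usage (illustrative sketch):

    from aws_security_mcp.tools.registry import get_selected_tools, should_register_tool

    should_register_tool("ec2_security_operations")  # True  - consolidated wrapper tool
    should_register_tool("list_s3_buckets")          # False - individual tool, not exposed
    len(get_selected_tools())                        # number of wrapper tools registered with MCP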
5 | """ 6 | 7 | from typing import Set 8 | 9 | # Service wrapper tools that consolidate operations - this is our ONLY approach 10 | # No more individual tools, everything goes through service wrappers for consistency 11 | 12 | # Service wrapper tools that consolidate multiple operations 13 | SERVICE_WRAPPER_TOOLS: Set[str] = { 14 | # ✅ Implemented wrapper tools 15 | "guardduty_security_operations", 16 | "discover_guardduty_operations", 17 | "ec2_security_operations", 18 | "discover_ec2_operations", 19 | "load_balancer_operations", 20 | "discover_load_balancer_operations", 21 | "cloudfront_operations", 22 | "discover_cloudfront_operations", 23 | "ecs_security_operations", 24 | "discover_ecs_operations", 25 | "ecr_security_operations", 26 | "discover_ecr_operations", 27 | "iam_security_operations", 28 | "discover_iam_operations", 29 | "lambda_security_operations", 30 | "discover_lambda_operations", 31 | "access_analyzer_security_operations", 32 | "discover_access_analyzer_operations", 33 | "organizations_security_operations", 34 | "discover_organizations_operations", 35 | "s3_security_operations", 36 | "discover_s3_operations", 37 | "route53_security_operations", 38 | "discover_route53_operations", 39 | "securityhub_security_operations", 40 | "discover_securityhub_operations", 41 | "shield_security_operations", 42 | "discover_shield_operations", 43 | "waf_security_operations", 44 | "discover_waf_operations", 45 | "trusted_advisor_security_operations", 46 | "discover_trusted_advisor_operations", 47 | "refresh_aws_session", 48 | "connected_aws_accounts", 49 | "aws_session_operations", 50 | "discover_aws_session_operations", 51 | 52 | # 🚧 Future wrappers (to be implemented) 53 | "account_security_operations", # For account-level tools 54 | "discover_account_operations", 55 | "resource_tagging_operations", 56 | "discover_resource_tagging_operations", 57 | } 58 | 59 | # All utility functions are now part of service wrappers - no separate utility tools needed 60 | 61 | def get_selected_tools() -> Set[str]: 62 | """Get the complete set of tools that should be registered with MCP. 63 | 64 | All tools are now service wrappers for consistency and streamlined design. 65 | 66 | Returns: 67 | Set of tool names that should be registered 68 | """ 69 | return SERVICE_WRAPPER_TOOLS 70 | 71 | # No need for explicit exclusion logic - we simply don't register individual tools 72 | 73 | def should_register_tool(tool_name: str) -> bool: 74 | """Determine if a tool should be registered with MCP. 75 | 76 | Simple logic: only register service wrapper tools. 77 | 78 | Args: 79 | tool_name: Name of the tool to check 80 | 81 | Returns: 82 | True if tool should be registered, False otherwise 83 | """ 84 | return tool_name in SERVICE_WRAPPER_TOOLS 85 | 86 | # No utility functions needed - keep it simple -------------------------------------------------------------------------------- /aws_security_mcp/tools/resource_tagging_tools.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tools module for AWS Resource Groups Tagging API. 3 | 4 | This module provides tools to interact with AWS Resource Groups Tagging API. 5 | It includes tools to retrieve resources by tag key-value pairs, tag keys, and tag values. 
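Typical usage (illustrative sketch; the tag key and value are placeholders, and the
tools in this module return JSON strings rather than dicts):

    raw = await search_resources_by_tag(tag_key="Environment", tag_value="production")
    data = json.loads(raw)
    print(data["resource_count"])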
6 | """ 7 | import json 8 | import logging 9 | from typing import Dict, List, Optional, Any, Union 10 | 11 | from aws_security_mcp.formatters.resource_tagging import ( 12 | format_resources_response, 13 | format_resources_by_type, 14 | format_tag_keys_response, 15 | format_tag_values_response 16 | ) 17 | from aws_security_mcp.services.resource_tagging import ResourceTaggingService 18 | from aws_security_mcp.tools import register_tool 19 | 20 | logger = logging.getLogger(__name__) 21 | 22 | # Create singleton service instance 23 | _service = ResourceTaggingService() 24 | 25 | 26 | @register_tool("search_resources_by_tag") 27 | async def search_resources_by_tag( 28 | tag_key: str, 29 | tag_value: Optional[str] = None, 30 | resource_types: Optional[List[str]] = None, 31 | next_token: Optional[str] = None, 32 | max_items: Optional[int] = None, 33 | group_by_type: bool = True, 34 | session_context: Optional[str] = None 35 | ) -> str: 36 | """ 37 | Search AWS resources by tag key and optional value. 38 | 39 | Args: 40 | tag_key: The tag key to search for 41 | tag_value: Optional tag value to filter by 42 | resource_types: Optional list of resource types to filter by (e.g., ['ec2:instance', 's3:bucket']) 43 | next_token: Token for pagination 44 | max_items: Maximum number of items to return (no limit if None) 45 | group_by_type: If True, resources will be grouped by service/resource type 46 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 47 | 48 | Returns: 49 | JSON string with resources matching the specified tags and pagination details 50 | """ 51 | # Log function invocation only 52 | logger.info(f"Invoked search_resources_by_tag(tag_key={tag_key})") 53 | 54 | try: 55 | # Get resources by tags without hardcoded pagination limits 56 | result = await _service.get_resources_by_tags( 57 | tag_key=tag_key, 58 | tag_value=tag_value, 59 | resource_types=resource_types, 60 | next_token=next_token, 61 | max_items=max_items, 62 | session_context=session_context 63 | ) 64 | 65 | # Format the response based on the grouping preference 66 | if group_by_type: 67 | formatted_result = format_resources_by_type(result, tag_key, tag_value) 68 | else: 69 | formatted_result = format_resources_response(result) 70 | 71 | return json.dumps(formatted_result) 72 | 73 | except Exception as e: 74 | logger.exception("Error searching resources by tag: %s", str(e)) 75 | error_response = { 76 | "key": tag_key, 77 | "value": tag_value, 78 | "resources": {}, 79 | "resource_count": 0, 80 | "error": f"Error searching resources by tag: {str(e)}" 81 | } if group_by_type else { 82 | "resources": [], 83 | "resource_count": 0, 84 | "error": f"Error searching resources by tag: {str(e)}" 85 | } 86 | return json.dumps(error_response) 87 | 88 | 89 | @register_tool("get_all_tag_keys") 90 | async def get_all_tag_keys( 91 | next_token: Optional[str] = None, 92 | max_items: Optional[int] = None, 93 | session_context: Optional[str] = None 94 | ) -> str: 95 | """ 96 | Get all tag keys used in the AWS account. 
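    Example (illustrative sketch; the response is a JSON string whose exact
    pagination fields come from format_tag_keys_response):
        raw = await get_all_tag_keys(max_items=50)
        data = json.loads(raw)
        print(data["tag_key_count"], data["tag_keys"])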
97 | 98 | Args: 99 | next_token: Token for pagination 100 | max_items: Maximum number of items to return 101 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 102 | 103 | Returns: 104 | JSON string with tag keys and pagination details 105 | """ 106 | # Log function invocation only 107 | logger.info(f"Invoked get_all_tag_keys()") 108 | 109 | try: 110 | result = await _service.get_tag_keys( 111 | next_token=next_token, 112 | max_items=max_items, 113 | session_context=session_context 114 | ) 115 | formatted_result = format_tag_keys_response(result) 116 | return json.dumps(formatted_result) 117 | 118 | except Exception as e: 119 | logger.exception("Error getting tag keys: %s", str(e)) 120 | return json.dumps({ 121 | "tag_keys": [], 122 | "tag_key_count": 0, 123 | "error": f"Error getting tag keys: {str(e)}" 124 | }) 125 | 126 | 127 | @register_tool("get_tag_values_for_key") 128 | async def get_tag_values_for_key( 129 | tag_key: str, 130 | next_token: Optional[str] = None, 131 | max_items: Optional[int] = None, 132 | session_context: Optional[str] = None 133 | ) -> str: 134 | """ 135 | Get all values for a specific tag key in the AWS account. 136 | 137 | Args: 138 | tag_key: The tag key to get values for 139 | next_token: Token for pagination 140 | max_items: Maximum number of items to return 141 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 142 | 143 | Returns: 144 | JSON string with tag values and pagination details 145 | """ 146 | # Log function invocation only 147 | logger.info(f"Invoked get_tag_values_for_key(tag_key={tag_key})") 148 | 149 | try: 150 | # Skip tag key validation and directly get tag values 151 | result = await _service.get_tag_values( 152 | tag_key=tag_key, 153 | next_token=next_token, 154 | max_items=max_items, 155 | session_context=session_context 156 | ) 157 | formatted_result = format_tag_values_response(result) 158 | return json.dumps(formatted_result) 159 | 160 | except Exception as e: 161 | logger.exception("Error getting tag values for key %s: %s", tag_key, str(e)) 162 | return json.dumps({ 163 | "tag_key": tag_key, 164 | "tag_values": [], 165 | "tag_value_count": 0, 166 | "error": f"Error getting tag values for key '{tag_key}': {str(e)}" 167 | }) -------------------------------------------------------------------------------- /aws_security_mcp/tools/s3_tools.py: -------------------------------------------------------------------------------- 1 | """S3 tools module for AWS Security MCP. 2 | 3 | This module provides tools for retrieving and analyzing S3 bucket information 4 | for security assessment purposes. 5 | """ 6 | 7 | import asyncio 8 | import logging 9 | from datetime import datetime 10 | from typing import Any, Dict, List, Optional 11 | 12 | from aws_security_mcp.services import s3 13 | from aws_security_mcp.formatters import s3_formatter 14 | from aws_security_mcp.tools import register_tool 15 | 16 | # Configure logging 17 | logger = logging.getLogger(__name__) 18 | 19 | @register_tool("list_s3_buckets") 20 | async def list_s3_buckets(session_context: Optional[str] = None) -> Dict[str, Any]: 21 | """List all S3 buckets in the AWS account with basic information. 
22 | 23 | Args: 24 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 25 | 26 | Returns: 27 | Dict containing list of buckets with basic information 28 | """ 29 | try: 30 | logger.info("Listing S3 buckets") 31 | 32 | # Get buckets from the service 33 | buckets = s3.list_buckets(session_context=session_context) 34 | 35 | # Format bucket information 36 | formatted_buckets = [ 37 | s3_formatter.format_bucket_simple(bucket) for bucket in buckets 38 | ] 39 | 40 | return { 41 | "buckets": formatted_buckets, 42 | "count": len(formatted_buckets), 43 | "scan_timestamp": datetime.utcnow().isoformat() 44 | } 45 | 46 | except Exception as e: 47 | logger.error(f"Error listing S3 buckets: {str(e)}") 48 | return { 49 | "buckets": [], 50 | "count": 0, 51 | "scan_timestamp": datetime.utcnow().isoformat(), 52 | "error": str(e) 53 | } 54 | 55 | @register_tool("get_s3_bucket_details") 56 | async def get_s3_bucket_details( 57 | bucket_name: str, 58 | session_context: Optional[str] = None 59 | ) -> Dict[str, Any]: 60 | """Get detailed information about a specific S3 bucket. 61 | 62 | Args: 63 | bucket_name: Name of the S3 bucket to get details for 64 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 65 | 66 | Returns: 67 | Dict containing detailed bucket information 68 | """ 69 | try: 70 | logger.info(f"Getting details for S3 bucket: {bucket_name}") 71 | 72 | # Get bucket details from the service 73 | bucket_details = await get_bucket_details_async(bucket_name, session_context) 74 | 75 | # Format bucket details 76 | formatted_details = s3_formatter.format_bucket_details(bucket_details) 77 | 78 | return { 79 | "bucket_details": formatted_details, 80 | "scan_timestamp": datetime.utcnow().isoformat() 81 | } 82 | 83 | except Exception as e: 84 | logger.error(f"Error getting S3 bucket details: {str(e)}") 85 | return { 86 | "bucket_details": { 87 | "name": bucket_name, 88 | "error": str(e) 89 | }, 90 | "scan_timestamp": datetime.utcnow().isoformat() 91 | } 92 | 93 | async def get_bucket_details_async(bucket_name: str, session_context: Optional[str] = None) -> Dict[str, Any]: 94 | """Async wrapper for get_bucket_details. 95 | 96 | Args: 97 | bucket_name: Name of the S3 bucket 98 | session_context: Optional session key for cross-account access 99 | 100 | Returns: 101 | Dictionary with comprehensive bucket details 102 | """ 103 | # Get the basic details synchronously 104 | bucket_details = s3.get_bucket_details(bucket_name, session_context=session_context) 105 | 106 | # If public_access_block requires async, add it separately 107 | try: 108 | # Make sure to await the coroutine here 109 | public_access_block = await s3.get_bucket_public_access_block(bucket_name, session_context=session_context) 110 | 111 | # The public_access_block is now directly the configuration dict, not a coroutine 112 | bucket_details['PublicAccessBlock'] = { 113 | 'PublicAccessBlockConfiguration': public_access_block 114 | } 115 | except Exception as e: 116 | logger.warning(f"Error getting public access block asynchronously: {str(e)}") 117 | 118 | return bucket_details 119 | 120 | @register_tool("analyze_s3_bucket_security") 121 | async def analyze_s3_bucket_security( 122 | bucket_name: str, 123 | session_context: Optional[str] = None 124 | ) -> Dict[str, Any]: 125 | """Analyze the security configuration of an S3 bucket. 
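    Example (illustrative only; the bucket name is a placeholder):
        result = await analyze_s3_bucket_security("example-bucket")
        analysis = result["security_analysis"]
        print(analysis["is_public"], analysis["public_access_reasons"])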
126 | 127 | Args: 128 | bucket_name: Name of the S3 bucket to analyze 129 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 130 | 131 | Returns: 132 | Dict containing security analysis for the bucket 133 | """ 134 | try: 135 | logger.info(f"Analyzing security for S3 bucket: {bucket_name}") 136 | 137 | # Get bucket details from the service with async handling 138 | bucket_details = await get_bucket_details_async(bucket_name, session_context) 139 | 140 | # Check if bucket is public 141 | is_public, assessment = s3.is_bucket_public(bucket_name, session_context=session_context) 142 | 143 | # Format bucket details for security analysis 144 | formatted_details = s3_formatter.format_bucket_details(bucket_details) 145 | security_rating = formatted_details.get('security_rating', {}) 146 | 147 | # Extract public access block settings - safely handle nested dictionaries 148 | public_access_block = bucket_details.get('PublicAccessBlock', {}) 149 | 150 | # Extract the configuration directly instead of trying to access it as a coroutine 151 | public_access_config = {} 152 | if public_access_block and isinstance(public_access_block, dict): 153 | public_access_config = public_access_block.get('PublicAccessBlockConfiguration', {}) 154 | 155 | # Get account-level public access block 156 | account_block = bucket_details.get('account_public_access_block') 157 | account_block_config = None 158 | if account_block and isinstance(account_block, dict): 159 | account_block_config = account_block.get('PublicAccessBlockConfiguration') 160 | 161 | # Create the security analysis response 162 | security_analysis = { 163 | "bucket_name": bucket_name, 164 | "is_public": is_public, 165 | "public_access_reasons": { 166 | "acl_public": assessment.get('acl_public', False), 167 | "policy_public": assessment.get('policy_public', False), 168 | "errors": assessment.get('errors', []) 169 | }, 170 | "security_rating": security_rating, 171 | "public_access_block": formatted_details.get('public_access_block'), 172 | "account_public_access_block": account_block_config 173 | } 174 | 175 | return { 176 | "security_analysis": security_analysis, 177 | "scan_timestamp": datetime.utcnow().isoformat() 178 | } 179 | 180 | except Exception as e: 181 | logger.error(f"Error analyzing S3 bucket security: {str(e)}") 182 | return { 183 | "security_analysis": { 184 | "bucket_name": bucket_name, 185 | "error": str(e) 186 | }, 187 | "scan_timestamp": datetime.utcnow().isoformat() 188 | } 189 | 190 | @register_tool("find_public_buckets") 191 | async def find_public_buckets(session_context: Optional[str] = None) -> Dict[str, Any]: 192 | """Find all public S3 buckets in the AWS account. 
193 | 194 | Args: 195 | session_context: Optional session key for cross-account access (e.g., "123456789012_aws_dev") 196 | 197 | Returns: 198 | Dict containing assessment of public buckets 199 | """ 200 | try: 201 | logger.info("Finding public S3 buckets") 202 | 203 | # First list all buckets to ensure we get the full list 204 | all_buckets = s3.list_buckets(session_context=session_context) 205 | if not all_buckets: 206 | logger.warning("No S3 buckets found in the account or unable to list buckets") 207 | return { 208 | "assessment": { 209 | "summary": { 210 | "total_buckets": 0, 211 | "public_buckets": 0, 212 | "percentage_public": 0, 213 | "account_protected": False, 214 | "scan_timestamp": datetime.utcnow().isoformat() 215 | }, 216 | "public_buckets": [], 217 | "all_buckets": [] 218 | } 219 | } 220 | 221 | # Get public buckets from the service 222 | public_buckets_data = s3.find_public_buckets(session_context=session_context) 223 | 224 | # Format the public buckets assessment 225 | formatted_assessment = s3_formatter.format_public_buckets_assessment(public_buckets_data) 226 | 227 | # Ensure the result includes the raw bucket data for MCP client 228 | if "all_buckets" not in formatted_assessment: 229 | formatted_assessment["all_buckets"] = [ 230 | s3_formatter.format_bucket_simple(bucket) for bucket in all_buckets 231 | ] 232 | 233 | return { 234 | "assessment": formatted_assessment 235 | } 236 | 237 | except Exception as e: 238 | logger.error(f"Error finding public S3 buckets: {str(e)}") 239 | return { 240 | "assessment": { 241 | "error": str(e), 242 | "summary": { 243 | "total_buckets": 0, 244 | "public_buckets": 0, 245 | "percentage_public": 0, 246 | "account_protected": False, 247 | "scan_timestamp": datetime.utcnow().isoformat() 248 | }, 249 | "public_buckets": [], 250 | "all_buckets": [] 251 | } 252 | } -------------------------------------------------------------------------------- /aws_security_mcp/tools/securityhub_tools.py: -------------------------------------------------------------------------------- 1 | """SecurityHub tools for AWS Security MCP.""" 2 | 3 | import logging 4 | import json 5 | from typing import Any, Dict, List, Optional, Union 6 | 7 | from aws_security_mcp.services import securityhub 8 | from aws_security_mcp.tools import register_tool 9 | from aws_security_mcp.formatters.securityhub import ( 10 | format_finding_json, 11 | format_securityhub_finding_json, 12 | format_finding_summary_json, 13 | format_standard_json, 14 | format_control_json 15 | ) 16 | 17 | # Configure logging 18 | logger = logging.getLogger(__name__) 19 | 20 | 21 | @register_tool() 22 | async def get_securityhub_findings(limit: int = 10, severity: str = "ALL", search_term: str = "") -> str: 23 | """Get findings from AWS SecurityHub. 
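    Example (illustrative sketch; note that this tool returns a JSON string, not a dict):
        raw = await get_securityhub_findings(limit=5, severity="CRITICAL")
        data = json.loads(raw)
        print(data["count"], data["is_truncated"])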
24 | 25 | Args: 26 | limit: Maximum number of findings to return 27 | severity: Severity level to filter by (CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, or ALL) 28 | search_term: Optional search term to filter findings 29 | 30 | Returns: 31 | JSON formatted string with SecurityHub findings 32 | """ 33 | logger.info(f"Getting SecurityHub findings (limit={limit}, severity={severity}, search_term='{search_term}')") 34 | 35 | try: 36 | # Create filters 37 | filters = {} 38 | 39 | # Add severity filter if provided 40 | if severity != "ALL": 41 | severity_filter = securityhub.create_severity_filter(severity) 42 | filters.update(severity_filter) 43 | 44 | # Add search term filter if provided 45 | if search_term: 46 | search_filter = securityhub.create_search_term_filter(search_term) 47 | filters.update(search_filter) 48 | 49 | # Get findings 50 | findings = securityhub.get_all_findings(filters=filters, max_items=limit) 51 | 52 | if not findings: 53 | return json.dumps({ 54 | "count": 0, 55 | "findings": [], 56 | "severity": severity, 57 | "search_term": search_term if search_term else None, 58 | "message": f"No SecurityHub findings found{' with severity ' + severity if severity != 'ALL' else ''}{' matching ' + search_term if search_term else ''}." 59 | }) 60 | 61 | # Format findings 62 | formatted_findings = [] 63 | for finding in findings: 64 | formatted_findings.append(format_finding_json(finding)) 65 | 66 | result = { 67 | "count": len(findings), 68 | "findings": formatted_findings, 69 | "severity": severity, 70 | "search_term": search_term if search_term else None, 71 | "is_truncated": len(findings) == limit 72 | } 73 | 74 | return json.dumps(result, default=lambda o: str(o)) 75 | except Exception as e: 76 | logger.error(f"Error getting SecurityHub findings: {e}") 77 | return json.dumps({ 78 | "error": { 79 | "message": f"Error retrieving SecurityHub findings: {str(e)}", 80 | "type": type(e).__name__ 81 | } 82 | }) 83 | 84 | 85 | @register_tool() 86 | async def list_failed_security_standards(limit: int = 20) -> str: 87 | """List failed security standards from SecurityHub. 
88 | 89 | Args: 90 | limit: Maximum number of failed standards to return 91 | 92 | Returns: 93 | JSON formatted string with failed security standards 94 | """ 95 | logger.info(f"Listing failed security standards (limit={limit})") 96 | 97 | try: 98 | # Get SecurityHub client 99 | client = securityhub.get_securityhub_client() 100 | 101 | # Get standards 102 | standards_response = client.describe_standards() 103 | standards = standards_response.get('Standards', []) 104 | 105 | # Get enabled standards 106 | enabled_standards = [] 107 | for standard in standards: 108 | standard_subscription_arns = [] 109 | 110 | # Get subscriptions for this standard 111 | subscriptions_response = client.get_enabled_standards( 112 | StandardsSubscriptionArns=[standard.get('StandardsArn')] 113 | ) 114 | 115 | standard_subscriptions = subscriptions_response.get('StandardsSubscriptions', []) 116 | if standard_subscriptions: 117 | enabled_standards.extend(standard_subscriptions) 118 | 119 | # Get failed controls for each enabled standard 120 | failed_controls = [] 121 | 122 | for standard in enabled_standards: 123 | standard_arn = standard.get('StandardsSubscriptionArn') 124 | standard_name = standard.get('StandardsArn', '').split('/')[-1] 125 | 126 | # Get controls for this standard 127 | try: 128 | controls_response = client.describe_standards_controls( 129 | StandardsSubscriptionArn=standard_arn 130 | ) 131 | 132 | controls = controls_response.get('Controls', []) 133 | 134 | # Filter failed controls 135 | for control in controls: 136 | if control.get('ControlStatus') == 'FAILED': 137 | control['StandardName'] = standard_name 138 | failed_controls.append(control) 139 | except Exception as e: 140 | logger.warning(f"Error getting controls for standard {standard_arn}: {e}") 141 | 142 | # Limit results 143 | failed_controls = failed_controls[:limit] 144 | 145 | if not failed_controls: 146 | return json.dumps({ 147 | "count": 0, 148 | "controls": [], 149 | "message": "No failed security standards found" 150 | }) 151 | 152 | # Format the results 153 | formatted_controls = [] 154 | for control in failed_controls: 155 | formatted_controls.append({ 156 | "control_id": control.get('ControlId', 'Unknown'), 157 | "standard": control.get('StandardName', 'Unknown'), 158 | "title": control.get('Title', 'Unknown'), 159 | "severity": control.get('SeverityRating', 'Unknown'), 160 | "description": control.get('Description', 'No description available'), 161 | "related_requirements": control.get('RelatedRequirements', []) 162 | }) 163 | 164 | result = { 165 | "count": len(failed_controls), 166 | "controls": formatted_controls, 167 | "is_truncated": len(failed_controls) == limit 168 | } 169 | 170 | return json.dumps(result, default=lambda o: str(o)) 171 | except Exception as e: 172 | logger.error(f"Error listing failed security standards: {e}") 173 | return json.dumps({ 174 | "error": { 175 | "message": f"Error retrieving failed security standards: {str(e)}", 176 | "type": type(e).__name__ 177 | } 178 | }) 179 | 180 | 181 | @register_tool() 182 | async def get_account_security_score() -> str: 183 | """Get the overall security score for the AWS account. 
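    Scoring sketch (mirrors the weights used in the implementation below, shown for
    illustration): each active NEW finding is weighted CRITICAL=10, HIGH=5, MEDIUM=3,
    LOW=1, INFORMATIONAL=0. For example, 2 HIGH and 1 LOW finding give a weight of
    2*5 + 1*1 = 11 against a worst case of 3*10 = 30, so the raw score is ~36.7 and
    the reported security_score is about 63.3 (higher is better).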
184 | 185 | Returns: 186 | JSON formatted string with account security score 187 | """ 188 | logger.info("Getting account security score") 189 | 190 | try: 191 | # Get SecurityHub client 192 | client = securityhub.get_securityhub_client() 193 | 194 | # Get enabled standards 195 | standards_response = client.get_enabled_standards() 196 | standards = standards_response.get('StandardsSubscriptions', []) 197 | 198 | if not standards: 199 | return json.dumps({ 200 | "message": "No security standards are enabled in SecurityHub", 201 | "standards_enabled": 0, 202 | "score": None 203 | }) 204 | 205 | # Get findings to calculate score 206 | filters = { 207 | 'RecordState': [{'Value': 'ACTIVE', 'Comparison': 'EQUALS'}], 208 | 'WorkflowStatus': [{'Value': 'NEW', 'Comparison': 'EQUALS'}] 209 | } 210 | 211 | findings = securityhub.get_all_findings(filters=filters, max_items=1000) 212 | 213 | # Calculate scores 214 | total_findings = len(findings) 215 | 216 | severity_counts = { 217 | 'CRITICAL': 0, 218 | 'HIGH': 0, 219 | 'MEDIUM': 0, 220 | 'LOW': 0, 221 | 'INFORMATIONAL': 0 222 | } 223 | 224 | for finding in findings: 225 | severity = finding.get('Severity', {}).get('Label', 'INFORMATIONAL') 226 | if severity in severity_counts: 227 | severity_counts[severity] += 1 228 | 229 | # Calculate weighted score 230 | severity_weights = { 231 | 'CRITICAL': 10, 232 | 'HIGH': 5, 233 | 'MEDIUM': 3, 234 | 'LOW': 1, 235 | 'INFORMATIONAL': 0 236 | } 237 | 238 | total_weight = sum( 239 | severity_counts[severity] * severity_weights[severity] 240 | for severity in severity_counts 241 | ) 242 | 243 | max_possible_weight = ( 244 | total_findings * severity_weights['CRITICAL'] 245 | ) if total_findings > 0 else 1 246 | 247 | # Calculate score (higher is worse) 248 | raw_score = (total_weight / max_possible_weight) * 100 if max_possible_weight > 0 else 0 249 | 250 | # Invert score (higher is better) 251 | security_score = 100 - raw_score 252 | 253 | # Format the results 254 | result = { 255 | "security_score": round(security_score, 1), 256 | "severity_distribution": severity_counts, 257 | "total_findings": total_findings, 258 | "standards_enabled": len(standards), 259 | "standards": [{ 260 | "name": standard.get('StandardsArn', '').split('/')[-1], 261 | "status": standard.get('StandardsStatus', 'Unknown') 262 | } for standard in standards] 263 | } 264 | 265 | return json.dumps(result, default=lambda o: str(o)) 266 | except Exception as e: 267 | logger.error(f"Error getting account security score: {e}") 268 | return json.dumps({ 269 | "error": { 270 | "message": f"Error retrieving account security score: {str(e)}", 271 | "type": type(e).__name__ 272 | } 273 | }) -------------------------------------------------------------------------------- /aws_security_mcp/tools/trusted_advisor_tools.py: -------------------------------------------------------------------------------- 1 | """MCP tools for AWS Trusted Advisor security checks and recommendations.""" 2 | 3 | import logging 4 | from typing import Any, Dict, List, Optional 5 | 6 | from aws_security_mcp.services import trusted_advisor 7 | from aws_security_mcp.tools import register_tool 8 | 9 | logger = logging.getLogger(__name__) 10 | 11 | @register_tool() 12 | async def get_trusted_advisor_security_checks(session_context: Optional[str] = None) -> Dict[str, Any]: 13 | """Get all security-related checks from AWS Trusted Advisor. 14 | 15 | This tool retrieves all security and fault tolerance checks available in AWS Trusted Advisor. 
16 | Security checks help identify vulnerabilities and security risks in your AWS environment. 17 | 18 | Args: 19 | session_context: Optional session key for cross-account access 20 | 21 | Returns: 22 | Dict containing security checks information 23 | """ 24 | logger.info("Getting Trusted Advisor security checks") 25 | result = await trusted_advisor.get_security_checks(session_context=session_context) 26 | 27 | # Format the response to focus on security details 28 | if result["success"]: 29 | formatted_checks = [] 30 | for check in result.get("checks", []): 31 | formatted_check = { 32 | "id": check.get("id"), 33 | "name": check.get("name"), 34 | "category": check.get("category"), 35 | "description": check.get("description"), 36 | "status": check.get("status"), 37 | "risk_level": check.get("riskLevel", "unknown"), 38 | "resource_count": check.get("resourcesSummary", {}).get("resourcesCount", 0), 39 | "resources_flagged": check.get("resourcesSummary", {}).get("resourcesFlagged", 0), 40 | "last_updated": check.get("lastUpdatedAt") 41 | } 42 | formatted_checks.append(formatted_check) 43 | 44 | result["checks"] = formatted_checks 45 | 46 | return result 47 | 48 | @register_tool() 49 | async def list_trusted_advisor_security_recommendations(session_context: Optional[str] = None) -> Dict[str, Any]: 50 | """List all security recommendations from AWS Trusted Advisor. 51 | 52 | This tool retrieves security-focused recommendations that help you follow AWS 53 | best practices for security and compliance. These recommendations identify 54 | potential vulnerabilities and suggest mitigations. 55 | 56 | Args: 57 | session_context: Optional session key for cross-account access 58 | 59 | Returns: 60 | Dict containing security recommendations information 61 | """ 62 | logger.info("Listing Trusted Advisor security recommendations") 63 | result = await trusted_advisor.list_security_recommendations(session_context=session_context) 64 | 65 | # Format the response to focus on security details 66 | if result["success"]: 67 | formatted_recommendations = [] 68 | for rec in result.get("recommendations", []): 69 | formatted_rec = { 70 | "id": rec.get("recommendationId"), 71 | "name": rec.get("name"), 72 | "description": rec.get("description"), 73 | "categories": rec.get("categories", []), 74 | "risk_level": rec.get("pillarSpecificInfo", {}).get("SecurityPillar", {}).get("riskLevel", "unknown"), 75 | "affected_resources_count": rec.get("resourcesCount", 0), 76 | "last_updated": rec.get("lastUpdatedAt") 77 | } 78 | formatted_recommendations.append(formatted_rec) 79 | 80 | result["recommendations"] = formatted_recommendations 81 | 82 | return result 83 | 84 | @register_tool() 85 | async def get_trusted_advisor_recommendation_details(recommendation_id: str, session_context: Optional[str] = None) -> Dict[str, Any]: 86 | """Get detailed information about a specific Trusted Advisor security recommendation. 87 | 88 | This tool provides comprehensive details about a security recommendation, 89 | including its description, impact, and remediation suggestions. 
90 | 91 | Args: 92 | recommendation_id: The ID of the recommendation to retrieve 93 | session_context: Optional session key for cross-account access 94 | 95 | Returns: 96 | Dict containing recommendation details 97 | """ 98 | logger.info(f"Getting details for Trusted Advisor recommendation: {recommendation_id}") 99 | result = await trusted_advisor.get_recommendation_details(recommendation_id, session_context=session_context) 100 | 101 | # Format the response to focus on security details 102 | if result["success"]: 103 | rec = result.get("recommendation", {}) 104 | formatted_rec = { 105 | "id": rec.get("recommendationId"), 106 | "name": rec.get("name"), 107 | "description": rec.get("description"), 108 | "categories": rec.get("categories", []), 109 | "risk_level": rec.get("pillarSpecificInfo", {}).get("SecurityPillar", {}).get("riskLevel", "unknown"), 110 | "affected_resources_count": rec.get("resourcesCount", 0), 111 | "last_updated": rec.get("lastUpdatedAt"), 112 | "remediation": { 113 | "recommendation_text": rec.get("recommendationText"), 114 | "steps": rec.get("remediationSteps", []) 115 | } 116 | } 117 | 118 | result["recommendation"] = formatted_rec 119 | 120 | return result 121 | 122 | @register_tool() 123 | async def list_trusted_advisor_affected_resources(recommendation_id: str, session_context: Optional[str] = None) -> Dict[str, Any]: 124 | """List resources affected by a specific Trusted Advisor security recommendation. 125 | 126 | This tool retrieves all AWS resources that are flagged by a particular security 127 | recommendation, allowing you to identify and remediate specific security issues. 128 | 129 | Args: 130 | recommendation_id: The ID of the recommendation 131 | session_context: Optional session key for cross-account access 132 | 133 | Returns: 134 | Dict containing affected resources information 135 | """ 136 | logger.info(f"Listing affected resources for Trusted Advisor recommendation: {recommendation_id}") 137 | result = await trusted_advisor.list_recommendation_resources(recommendation_id, session_context=session_context) 138 | 139 | # Format the response to focus on security details 140 | if result["success"]: 141 | formatted_resources = [] 142 | for resource in result.get("resources", []): 143 | formatted_resource = { 144 | "id": resource.get("resourceId"), 145 | "arn": resource.get("resourceArn"), 146 | "status": resource.get("status"), 147 | "region": resource.get("region"), 148 | "metadata": resource.get("metadata", {}), 149 | "updated_at": resource.get("updatedAt") 150 | } 151 | formatted_resources.append(formatted_resource) 152 | 153 | result["resources"] = formatted_resources 154 | 155 | return result -------------------------------------------------------------------------------- /aws_security_mcp/tools/waf_tools.py: -------------------------------------------------------------------------------- 1 | """Tools for working with AWS WAF. 2 | 3 | This module provides tools for retrieving and analyzing AWS WAF resources, 4 | including Web ACLs, IP sets, and rule groups for both WAFv2 and Classic WAF. 
5 | """ 6 | 7 | import json 8 | import logging 9 | from typing import Any, Dict, List, Optional, Union 10 | 11 | from aws_security_mcp.formatters.waf import ( 12 | format_waf_web_acl_json, 13 | format_waf_ip_set_json, 14 | format_waf_rule_group_json, 15 | format_waf_web_acl_summary_json, 16 | format_waf_ip_set_summary_json, 17 | format_waf_rule_group_summary_json, 18 | format_waf_resources_json 19 | ) 20 | from aws_security_mcp.services import waf 21 | from aws_security_mcp.tools import register_tool 22 | 23 | # Configure logging 24 | logger = logging.getLogger(__name__) 25 | 26 | 27 | @register_tool() 28 | async def list_waf_web_acls( 29 | scope: str = 'REGIONAL', 30 | limit: int = 100, 31 | next_token: Optional[str] = None, 32 | session_context: Optional[str] = None 33 | ) -> Dict[str, Any]: 34 | """List AWS WAF Web ACLs. 35 | 36 | Args: 37 | scope: The scope of the Web ACLs to retrieve ('REGIONAL' or 'CLOUDFRONT') 38 | limit: Maximum number of Web ACLs to return 39 | next_token: Pagination token for fetching the next set of Web ACLs 40 | session_context: Optional session key for cross-account access 41 | 42 | Returns: 43 | JSON object with Web ACL information 44 | """ 45 | logger.info(f"Listing WAF Web ACLs with scope: {scope}, limit: {limit}") 46 | 47 | result = await waf.list_web_acls( 48 | scope=scope, 49 | max_items=limit, 50 | next_marker=next_token, 51 | session_context=session_context 52 | ) 53 | 54 | # Format the response 55 | formatted_web_acls = [] 56 | for web_acl in result.get('web_acls', []): 57 | formatted_web_acls.append(format_waf_web_acl_summary_json(web_acl)) 58 | 59 | return { 60 | "web_acls": formatted_web_acls, 61 | "next_token": result.get('next_marker'), 62 | "has_more": result.get('has_more', False), 63 | "total_count": len(formatted_web_acls) 64 | } 65 | 66 | 67 | @register_tool() 68 | async def get_waf_web_acl_details( 69 | web_acl_id: str, 70 | web_acl_name: str, 71 | scope: str = 'REGIONAL', 72 | session_context: Optional[str] = None 73 | ) -> Dict[str, Any]: 74 | """Get detailed information about a specific WAF Web ACL. 75 | 76 | Args: 77 | web_acl_id: The ID of the Web ACL 78 | web_acl_name: The name of the Web ACL 79 | scope: The scope of the Web ACL ('REGIONAL' or 'CLOUDFRONT') 80 | session_context: Optional session key for cross-account access 81 | 82 | Returns: 83 | JSON object with detailed Web ACL information 84 | """ 85 | logger.info(f"Getting WAF Web ACL details for: {web_acl_name} ({web_acl_id})") 86 | 87 | web_acl = await waf.get_web_acl( 88 | web_acl_id=web_acl_id, 89 | web_acl_name=web_acl_name, 90 | scope=scope, 91 | session_context=session_context 92 | ) 93 | 94 | return format_waf_web_acl_json(web_acl) 95 | 96 | 97 | @register_tool() 98 | async def list_waf_ip_sets( 99 | scope: str = 'REGIONAL', 100 | limit: int = 100, 101 | next_token: Optional[str] = None, 102 | session_context: Optional[str] = None 103 | ) -> Dict[str, Any]: 104 | """List AWS WAF IP sets. 
105 | 106 | Args: 107 | scope: The scope of the IP sets to retrieve ('REGIONAL' or 'CLOUDFRONT') 108 | limit: Maximum number of IP sets to return 109 | next_token: Pagination token for fetching the next set of IP sets 110 | session_context: Optional session key for cross-account access 111 | 112 | Returns: 113 | JSON object with IP set information 114 | """ 115 | logger.info(f"Listing WAF IP sets with scope: {scope}, limit: {limit}") 116 | 117 | result = await waf.list_ip_sets( 118 | scope=scope, 119 | max_items=limit, 120 | next_marker=next_token, 121 | session_context=session_context 122 | ) 123 | 124 | # Format the response 125 | formatted_ip_sets = [] 126 | for ip_set in result.get('ip_sets', []): 127 | formatted_ip_sets.append(format_waf_ip_set_json(ip_set)) 128 | 129 | return { 130 | "ip_sets": formatted_ip_sets, 131 | "next_token": result.get('next_marker'), 132 | "has_more": result.get('has_more', False), 133 | "total_count": len(formatted_ip_sets) 134 | } 135 | 136 | 137 | @register_tool() 138 | async def get_waf_ip_set_details( 139 | ip_set_id: str, 140 | ip_set_name: str, 141 | scope: str = 'REGIONAL', 142 | session_context: Optional[str] = None 143 | ) -> Dict[str, Any]: 144 | """Get detailed information about a specific WAF IP set. 145 | 146 | Args: 147 | ip_set_id: The ID of the IP set 148 | ip_set_name: The name of the IP set 149 | scope: The scope of the IP set ('REGIONAL' or 'CLOUDFRONT') 150 | session_context: Optional session key for cross-account access 151 | 152 | Returns: 153 | JSON object with detailed IP set information 154 | """ 155 | logger.info(f"Getting WAF IP set details for: {ip_set_name} ({ip_set_id})") 156 | 157 | ip_set = await waf.get_ip_set( 158 | ip_set_id=ip_set_id, 159 | ip_set_name=ip_set_name, 160 | scope=scope, 161 | session_context=session_context 162 | ) 163 | 164 | return format_waf_ip_set_json(ip_set) 165 | 166 | 167 | @register_tool() 168 | async def list_waf_rule_groups( 169 | scope: str = 'REGIONAL', 170 | limit: int = 100, 171 | next_token: Optional[str] = None, 172 | session_context: Optional[str] = None 173 | ) -> Dict[str, Any]: 174 | """List AWS WAF rule groups. 175 | 176 | Args: 177 | scope: The scope of the rule groups to retrieve ('REGIONAL' or 'CLOUDFRONT') 178 | limit: Maximum number of rule groups to return 179 | next_token: Pagination token for fetching the next set of rule groups 180 | session_context: Optional session key for cross-account access 181 | 182 | Returns: 183 | JSON object with rule group information 184 | """ 185 | logger.info(f"Listing WAF rule groups with scope: {scope}, limit: {limit}") 186 | 187 | result = await waf.list_rule_groups( 188 | scope=scope, 189 | max_items=limit, 190 | next_marker=next_token, 191 | session_context=session_context 192 | ) 193 | 194 | # Format the response 195 | formatted_rule_groups = [] 196 | for rule_group in result.get('rule_groups', []): 197 | formatted_rule_groups.append(format_waf_rule_group_json(rule_group)) 198 | 199 | return { 200 | "rule_groups": formatted_rule_groups, 201 | "next_token": result.get('next_marker'), 202 | "has_more": result.get('has_more', False), 203 | "total_count": len(formatted_rule_groups) 204 | } 205 | 206 | 207 | @register_tool() 208 | async def get_waf_rule_group_details( 209 | rule_group_id: str, 210 | rule_group_name: str, 211 | scope: str = 'REGIONAL', 212 | session_context: Optional[str] = None 213 | ) -> Dict[str, Any]: 214 | """Get detailed information about a specific WAF rule group. 
215 | 216 | Args: 217 | rule_group_id: The ID of the rule group 218 | rule_group_name: The name of the rule group 219 | scope: The scope of the rule group ('REGIONAL' or 'CLOUDFRONT') 220 | session_context: Optional session key for cross-account access 221 | 222 | Returns: 223 | JSON object with detailed rule group information 224 | """ 225 | logger.info(f"Getting WAF rule group details for: {rule_group_name} ({rule_group_id})") 226 | 227 | rule_group = await waf.get_rule_group( 228 | rule_group_id=rule_group_id, 229 | rule_group_name=rule_group_name, 230 | scope=scope, 231 | session_context=session_context 232 | ) 233 | 234 | return format_waf_rule_group_json(rule_group) 235 | 236 | 237 | @register_tool() 238 | async def list_waf_protected_resources( 239 | web_acl_arn: str, 240 | resource_type: str = 'APPLICATION_LOAD_BALANCER', 241 | session_context: Optional[str] = None 242 | ) -> Dict[str, Any]: 243 | """List resources protected by a WAF Web ACL. 244 | 245 | Args: 246 | web_acl_arn: The ARN of the Web ACL 247 | resource_type: The type of resource to list ('APPLICATION_LOAD_BALANCER', 'API_GATEWAY', etc.) 248 | session_context: Optional session key for cross-account access 249 | 250 | Returns: 251 | JSON object with protected resource information 252 | """ 253 | logger.info(f"Listing protected resources for Web ACL: {web_acl_arn}") 254 | 255 | resource_arns = await waf.list_resources_for_web_acl( 256 | web_acl_arn=web_acl_arn, 257 | resource_type=resource_type, 258 | session_context=session_context 259 | ) 260 | 261 | return { 262 | "web_acl_arn": web_acl_arn, 263 | "resource_type": resource_type, 264 | "protected_resources": resource_arns, 265 | "total_count": len(resource_arns) 266 | } 267 | 268 | 269 | # Classic WAF tools (deprecated but kept for backward compatibility) 270 | 271 | @register_tool() 272 | async def list_classic_waf_web_acls( 273 | limit: int = 100, 274 | next_token: Optional[str] = None, 275 | session_context: Optional[str] = None 276 | ) -> Dict[str, Any]: 277 | """List Classic WAF Web ACLs (deprecated - use WAFv2 instead). 278 | 279 | Args: 280 | limit: Maximum number of Web ACLs to return 281 | next_token: Pagination token for fetching the next set of Web ACLs 282 | session_context: Optional session key for cross-account access 283 | 284 | Returns: 285 | JSON object with Classic WAF Web ACL information 286 | """ 287 | logger.warning("Using deprecated Classic WAF API. Consider migrating to WAFv2.") 288 | logger.info(f"Listing Classic WAF Web ACLs with limit: {limit}") 289 | 290 | result = await waf.list_classic_web_acls( 291 | max_items=limit, 292 | next_marker=next_token, 293 | session_context=session_context 294 | ) 295 | 296 | return { 297 | "web_acls": result.get('web_acls', []), 298 | "next_token": result.get('next_marker'), 299 | "has_more": result.get('has_more', False), 300 | "total_count": len(result.get('web_acls', [])) 301 | } 302 | 303 | @register_tool() 304 | async def get_classic_waf_web_acl_details( 305 | web_acl_id: str, 306 | session_context: Optional[str] = None 307 | ) -> Dict[str, Any]: 308 | """Get detailed information about a specific Classic WAF Web ACL (deprecated). 309 | 310 | Args: 311 | web_acl_id: The ID of the Web ACL 312 | session_context: Optional session key for cross-account access 313 | 314 | Returns: 315 | JSON object with detailed Classic WAF Web ACL information 316 | """ 317 | logger.warning("Using deprecated Classic WAF API. 
Consider migrating to WAFv2.") 318 | logger.info(f"Getting Classic WAF Web ACL details for: {web_acl_id}") 319 | 320 | web_acl = await waf.get_classic_web_acl( 321 | web_acl_id=web_acl_id, 322 | session_context=session_context 323 | ) 324 | 325 | return web_acl -------------------------------------------------------------------------------- /aws_security_mcp/tools/wrappers/__init__.py: -------------------------------------------------------------------------------- 1 | """Service wrapper tools for AWS Security MCP. 2 | 3 | This module contains wrapper tools that consolidate multiple operations 4 | into service-level interfaces while maintaining semantic richness. 5 | """ 6 | 7 | # Import wrapper modules to register their tools 8 | from . import ec2_wrapper 9 | from . import load_balancer_wrapper 10 | from . import cloudfront_wrapper -------------------------------------------------------------------------------- /aws_security_mcp/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utility functions for AWS Security MCP.""" 2 | 3 | from aws_security_mcp.utils.helpers import ( 4 | paginate_aws_response, 5 | filter_results, 6 | format_timestamp, 7 | get_result_key, 8 | ) -------------------------------------------------------------------------------- /aws_security_mcp/utils/helpers.py: -------------------------------------------------------------------------------- 1 | """Helper utilities for AWS Security MCP.""" 2 | 3 | import logging 4 | from typing import Any, Dict, List, Optional, Union, Callable 5 | 6 | # Configure logging 7 | logger = logging.getLogger(__name__) 8 | 9 | def paginate_aws_response( 10 | client: Any, 11 | method_name: str, 12 | max_items: int = 1000, 13 | **kwargs: Any 14 | ) -> List[Dict[str, Any]]: 15 | """Paginate through AWS responses that support pagination. 16 | 17 | Args: 18 | client: Boto3 client 19 | method_name: API method name to call 20 | max_items: Maximum number of items to retrieve 21 | **kwargs: Additional arguments to pass to the API method 22 | 23 | Returns: 24 | List of items from all pages 25 | """ 26 | # Get the paginator for the specified method 27 | paginator = client.get_paginator(method_name) 28 | 29 | # Create page iterator with provided parameters 30 | page_iterator = paginator.paginate(**kwargs) 31 | 32 | results = [] 33 | item_count = 0 34 | 35 | # Extract common result keys based on service and method 36 | result_key = get_result_key(method_name) 37 | 38 | # Process each page of results 39 | for page in page_iterator: 40 | if result_key in page: 41 | items = page[result_key] 42 | # Add items up to max_items limit 43 | results.extend(items[:max_items - item_count]) 44 | item_count += len(items) 45 | 46 | # Break if we've reached the maximum 47 | if item_count >= max_items: 48 | break 49 | 50 | return results 51 | 52 | def get_result_key(method_name: str) -> str: 53 | """Get the result key for a specific AWS API method. 54 | 55 | Maps AWS API method names to their corresponding result keys. 
56 | 57 | Args: 58 | method_name: The AWS API method name 59 | 60 | Returns: 61 | The result key for the method 62 | """ 63 | # Common mapping of method names to result keys 64 | result_keys = { 65 | # GuardDuty 66 | # "list_findings": "FindingIds",  # shadowed by the Access Analyzer "list_findings" entry below 67 | "list_detectors": "DetectorIds", 68 | 69 | # SecurityHub 70 | "get_findings": "Findings", 71 | 72 | # IAM 73 | "list_roles": "Roles", 74 | "list_users": "Users", 75 | "list_policies": "Policies", 76 | "list_access_keys": "AccessKeyMetadata", 77 | 78 | # EC2 79 | "describe_instances": "Reservations", 80 | "describe_security_groups": "SecurityGroups", 81 | "describe_vpcs": "Vpcs", 82 | "describe_route_tables": "RouteTables", 83 | "describe_images": "Images", 84 | "describe_volumes": "Volumes", 85 | 86 | # ELB 87 | "describe_load_balancers": "LoadBalancers", 88 | "describe_target_groups": "TargetGroups", 89 | 90 | # Lambda 91 | "list_functions": "Functions", 92 | 93 | # ECS 94 | "list_clusters": "clusterArns", 95 | "list_services": "serviceArns", 96 | "list_task_definitions": "taskDefinitionArns", 97 | 98 | # ECR 99 | "describe_repositories": "repositories", 100 | 101 | # CloudFront 102 | "list_distributions": "DistributionList", 103 | 104 | # Route53 105 | "list_hosted_zones": "HostedZones", 106 | 107 | # Trusted Advisor 108 | "describe_trusted_advisor_checks": "checks", 109 | 110 | # Access Analyzer 111 | "list_findings": "findings", 112 | 113 | # Default key if not found 114 | "default": "items" 115 | } 116 | 117 | return result_keys.get(method_name, result_keys["default"]) 118 | 119 | def filter_results( 120 | items: List[Dict[str, Any]], 121 | search_term: str = "", 122 | filter_func: Optional[Callable[[Dict[str, Any], str], bool]] = None 123 | ) -> List[Dict[str, Any]]: 124 | """Filter results based on search term and custom filter function. 125 | 126 | Args: 127 | items: List of items to filter 128 | search_term: Search term to match 129 | filter_func: Custom filter function 130 | 131 | Returns: 132 | Filtered list of items 133 | """ 134 | if not search_term and not filter_func: 135 | return items 136 | 137 | filtered_items = [] 138 | 139 | for item in items: 140 | # Use custom filter function if provided 141 | if filter_func and filter_func(item, search_term): 142 | filtered_items.append(item) 143 | # Otherwise use default filter (string match in any value) 144 | elif search_term and any( 145 | isinstance(v, str) and search_term.lower() in v.lower() 146 | for v in str(item).lower().split() 147 | ): 148 | filtered_items.append(item) 149 | 150 | return filtered_items 151 | 152 | def format_timestamp(timestamp: Any) -> str: 153 | """Format timestamp to readable string. 154 | 155 | Args: 156 | timestamp: Timestamp (datetime, string, or timestamp) 157 | 158 | Returns: 159 | Formatted timestamp string 160 | """ 161 | if hasattr(timestamp, "strftime"): 162 | return timestamp.strftime("%Y-%m-%d %H:%M:%S") 163 | return str(timestamp) -------------------------------------------------------------------------------- /aws_security_mcp/utils/policy_evaluator.py: -------------------------------------------------------------------------------- 1 | """Policy evaluation utility functions. 2 | 3 | This module provides utilities for evaluating AWS IAM policy conditions 4 | and determining their security impact across services.
5 | """ 6 | 7 | import logging 8 | from typing import Any, Dict, List, Optional 9 | 10 | # Configure logging 11 | logger = logging.getLogger(__name__) 12 | 13 | def evaluate_policy_conditions(statement: Dict[str, Any]) -> Dict[str, Any]: 14 | """Evaluate conditional statements in a policy statement to determine access restrictions. 15 | 16 | This function analyzes different types of IAM policy conditions to determine: 17 | 1. What types of restrictions are applied (source IP, AWS principal, time-based, etc.) 18 | 2. How restrictive the conditions are (strong, partial, or none) 19 | 3. Whether conditions effectively prevent public access 20 | 21 | Args: 22 | statement: Policy statement containing conditions to evaluate 23 | 24 | Returns: 25 | Dict with evaluation results including restriction level and details 26 | """ 27 | result = { 28 | "has_conditions": False, 29 | "restriction_level": "None", # None, Partial, Strong 30 | "condition_types": [], 31 | "potential_public_access": True, # Default assumption 32 | "details": {} 33 | } 34 | 35 | # Check if conditions exist 36 | conditions = statement.get('Condition', {}) 37 | if not conditions: 38 | return result 39 | 40 | result["has_conditions"] = True 41 | 42 | # Track different condition types 43 | source_ip_conditions = [] 44 | aws_principal_conditions = [] 45 | temporal_conditions = [] 46 | request_conditions = [] 47 | resource_conditions = [] 48 | other_conditions = [] 49 | 50 | # Analyze different condition types 51 | for condition_type, condition_values in conditions.items(): 52 | condition_type_lower = condition_type.lower() 53 | 54 | # Source IP restrictions 55 | if 'ip' in condition_type_lower or 'vpce' in condition_type_lower: 56 | result["condition_types"].append("SourceIP") 57 | 58 | # Extract IP information 59 | for key, value in condition_values.items(): 60 | if key == 'aws:SourceIp': 61 | if isinstance(value, str): 62 | source_ip_conditions.append(value) 63 | elif isinstance(value, list): 64 | source_ip_conditions.extend(value) 65 | 66 | # AWS principal conditions (account, principal type, etc.) 67 | elif 'aws' in condition_type_lower and 'principal' in condition_type_lower: 68 | result["condition_types"].append("AWSPrincipal") 69 | 70 | for key, value in condition_values.items(): 71 | aws_principal_conditions.append({key: value}) 72 | 73 | # Time-based conditions 74 | elif 'date' in condition_type_lower or 'time' in condition_type_lower: 75 | result["condition_types"].append("Temporal") 76 | 77 | for key, value in condition_values.items(): 78 | temporal_conditions.append({key: value}) 79 | 80 | # Request-related conditions (http method, referrer, etc.) 
81 | elif 'referer' in condition_type_lower or 'http' in condition_type_lower: 82 | result["condition_types"].append("RequestProperty") 83 | 84 | for key, value in condition_values.items(): 85 | request_conditions.append({key: value}) 86 | 87 | # Resource-related conditions (tags, properties) 88 | elif 'resource' in condition_type_lower or 'tag' in condition_type_lower: 89 | result["condition_types"].append("ResourceProperty") 90 | 91 | for key, value in condition_values.items(): 92 | resource_conditions.append({key: value}) 93 | 94 | # Other condition types 95 | else: 96 | result["condition_types"].append("Other") 97 | 98 | for key, value in condition_values.items(): 99 | other_conditions.append({key: value}) 100 | 101 | # Remove duplicates from condition types 102 | result["condition_types"] = list(set(result["condition_types"])) 103 | 104 | # Determine restriction level and potential public access 105 | # Check for source IP restrictions that would limit public access 106 | if source_ip_conditions: 107 | result["details"]["source_ip"] = source_ip_conditions 108 | 109 | # Check if IP restrictions allow broad public access (0.0.0.0/0 or similar) 110 | has_public_ip_range = False 111 | for ip_range in source_ip_conditions: 112 | if ip_range == '0.0.0.0/0' or ip_range == '::/0': 113 | has_public_ip_range = True 114 | break 115 | 116 | if not has_public_ip_range: 117 | # IP restrictions are limiting access to specific IPs/ranges 118 | result["restriction_level"] = "Strong" 119 | result["potential_public_access"] = False 120 | 121 | # AWS principal conditions generally restrict access to specific AWS identities 122 | if aws_principal_conditions: 123 | result["details"]["aws_principal"] = aws_principal_conditions 124 | 125 | # AWS principal conditions usually indicate strong restrictions 126 | if result["restriction_level"] == "None": 127 | result["restriction_level"] = "Strong" 128 | result["potential_public_access"] = False 129 | 130 | # Temporal conditions provide time-based restrictions, which are partial 131 | if temporal_conditions: 132 | result["details"]["temporal"] = temporal_conditions 133 | 134 | if result["restriction_level"] == "None": 135 | result["restriction_level"] = "Partial" 136 | 137 | # Request conditions can vary in restrictiveness 138 | if request_conditions: 139 | result["details"]["request"] = request_conditions 140 | 141 | if result["restriction_level"] == "None": 142 | result["restriction_level"] = "Partial" 143 | 144 | # Resource conditions typically apply restrictions to specific resources 145 | if resource_conditions: 146 | result["details"]["resource"] = resource_conditions 147 | 148 | if result["restriction_level"] == "None": 149 | result["restriction_level"] = "Partial" 150 | 151 | # Other conditions 152 | if other_conditions: 153 | result["details"]["other"] = other_conditions 154 | 155 | if result["restriction_level"] == "None": 156 | result["restriction_level"] = "Partial" 157 | 158 | return result 159 | 160 | 161 | def evaluate_policy_for_public_access(policy: Dict[str, Any]) -> Dict[str, Any]: 162 | """Evaluate an entire policy document to determine if it allows public access. 
163 | 164 | Args: 165 | policy: Complete policy document with statements 166 | 167 | Returns: 168 | Dict with evaluation results including public access determination 169 | """ 170 | if not policy: 171 | return { 172 | "allows_public_access": False, 173 | "public_statements": [], 174 | "has_conditions": False, 175 | "condition_mitigations": [] 176 | } 177 | 178 | statements = policy.get("Statement", []) 179 | if isinstance(statements, dict): 180 | # Handle case where Statement is a single statement object 181 | statements = [statements] 182 | 183 | public_statements = [] 184 | condition_mitigations = [] 185 | 186 | for statement in statements: 187 | # Only check Allow statements 188 | if statement.get("Effect") != "Allow": 189 | continue 190 | 191 | # Check for public principal 192 | principal = statement.get("Principal") 193 | is_public_principal = False 194 | 195 | if principal == "*" or principal == {"AWS": "*"}: 196 | is_public_principal = True 197 | elif isinstance(principal, dict) and ( 198 | principal.get("AWS") == "*" or 199 | (isinstance(principal.get("AWS"), list) and "*" in principal.get("AWS", [])) 200 | ): 201 | is_public_principal = True 202 | 203 | # If public principal, check for condition mitigations 204 | if is_public_principal: 205 | # Evaluate conditions 206 | condition_analysis = evaluate_policy_conditions(statement) 207 | 208 | # Check if conditions provide strong mitigation 209 | if condition_analysis["has_conditions"]: 210 | if condition_analysis["restriction_level"] == "Strong": 211 | condition_mitigations.append({ 212 | "statement_effect": "Allow", 213 | "condition_analysis": condition_analysis 214 | }) 215 | # Still record as public but with strong mitigation 216 | public_statements.append({ 217 | "effect": statement.get("Effect"), 218 | "principal": principal, 219 | "action": statement.get("Action"), 220 | "resource": statement.get("Resource"), 221 | "has_strong_condition": True 222 | }) 223 | else: 224 | # Public with only partial mitigation 225 | public_statements.append({ 226 | "effect": statement.get("Effect"), 227 | "principal": principal, 228 | "action": statement.get("Action"), 229 | "resource": statement.get("Resource"), 230 | "has_strong_condition": False, 231 | "condition_analysis": condition_analysis 232 | }) 233 | else: 234 | # Public with no conditions 235 | public_statements.append({ 236 | "effect": statement.get("Effect"), 237 | "principal": principal, 238 | "action": statement.get("Action"), 239 | "resource": statement.get("Resource"), 240 | "has_condition": False 241 | }) 242 | 243 | has_unmitigated_public_access = any( 244 | not stmt.get("has_strong_condition", False) 245 | for stmt in public_statements 246 | ) 247 | 248 | return { 249 | "allows_public_access": len(public_statements) > 0, 250 | "has_unmitigated_public_access": has_unmitigated_public_access, 251 | "public_statements": public_statements, 252 | "has_condition_mitigations": len(condition_mitigations) > 0, 253 | "condition_mitigations": condition_mitigations 254 | } -------------------------------------------------------------------------------- /images/README.txt: -------------------------------------------------------------------------------- 1 | # Logo Placeholder 2 | 3 | This directory should contain an image file named `aws_security_mcp_logo.png` that will be displayed at the top of the README.md. 
4 | 5 | Suggested logo design: 6 | - Include AWS security icons/symbols 7 | - Use AWS color scheme (blue, orange) 8 | - Include "AWS Security MCP" text 9 | - Professional and clean design 10 | - Recommended dimensions: 800x400px -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=42", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "aws-security-mcp" 7 | version = "0.1.0" 8 | description = "AWS Security MCP - A comprehensive AWS security inspection and remediation tool" 9 | readme = "README.md" 10 | authors = [ 11 | {name = "AWS Security MCP Team"} 12 | ] 13 | license = {text = "MIT"} 14 | classifiers = [ 15 | "Development Status :: 4 - Beta", 16 | "Intended Audience :: Developers", 17 | "Intended Audience :: System Administrators", 18 | "License :: OSI Approved :: MIT License", 19 | "Programming Language :: Python :: 3", 20 | "Programming Language :: Python :: 3.11", 21 | "Topic :: Security", 22 | "Topic :: System :: Systems Administration", 23 | ] 24 | requires-python = ">=3.11" 25 | dependencies = [ 26 | "boto3>=1.28.0", 27 | "fastapi>=0.100.0", 28 | "uvicorn>=0.22.0", 29 | "pydantic>=2.0.0", 30 | "python-dotenv>=1.0.0", 31 | "httpx>=0.24.1", 32 | ] 33 | 34 | [project.optional-dependencies] 35 | dev = [ 36 | "pytest>=7.3.1", 37 | "pytest-asyncio>=0.21.0", 38 | "pytest-cov>=4.1.0", 39 | "black>=23.3.0", 40 | "isort>=5.12.0", 41 | "mypy>=1.3.0", 42 | "types-boto3>=1.0.2", 43 | ] 44 | 45 | [project.scripts] 46 | aws-security-mcp = "aws_security_mcp.main:run_app" 47 | 48 | [tool.setuptools] 49 | packages = ["aws_security_mcp"] 50 | 51 | [tool.black] 52 | line-length = 88 53 | target-version = ["py311"] 54 | 55 | [tool.isort] 56 | profile = "black" 57 | line_length = 88 58 | 59 | [tool.mypy] 60 | python_version = "3.11" 61 | warn_return_any = true 62 | warn_unused_configs = true 63 | disallow_untyped_defs = true 64 | disallow_incomplete_defs = true 65 | 66 | [tool.pytest.ini_options] 67 | testpaths = ["tests"] 68 | asyncio_mode = "auto" -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # AWS Security MCP Dependencies 2 | # Generated from pyproject.toml 3 | 4 | # Core dependencies 5 | boto3>=1.28.0 6 | fastapi>=0.100.0 7 | uvicorn>=0.22.0 8 | pydantic>=2.0.0 9 | python-dotenv>=1.0.0 10 | httpx>=0.24.1 11 | mcp>=1.0.0 # Model Context Protocol SDK for Claude Desktop 12 | starlette>=0.27.0 # Required for MCP SSE transport 13 | 14 | # Developer dependencies (uncomment if needed) 15 | # pytest>=7.3.1 16 | # pytest-asyncio>=0.21.0 17 | # pytest-cov>=4.1.0 18 | # black>=23.3.0 19 | # isort>=5.12.0 20 | # mypy>=1.3.0 21 | # types-boto3>=1.0.2 -------------------------------------------------------------------------------- /run_aws_security.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # AWS Security MCP Launcher 4 | # This script ensures all dependencies are installed and runs the application 5 | 6 | # Function to display usage 7 | show_usage() { 8 | echo "AWS Security MCP Launcher" 9 | echo "Usage: $0 [mode]" 10 | echo "" 11 | echo "Modes:" 12 | echo " stdio - Standard I/O transport (default, for Claude Desktop)" 13 | echo " http - HTTP REST API server (port 8000)" 14 | echo " sse - Server-Sent 
Events transport (port 8001)" 15 | echo " help - Show this help message" 16 | echo "" 17 | echo "Examples:" 18 | echo " $0 stdio # For Claude Desktop integration" 19 | echo " $0 http # REST API server" 20 | echo " $0 sse # SSE server for streaming" 21 | } 22 | 23 | # Check for help argument 24 | if [[ "$1" == "help" || "$1" == "-h" || "$1" == "--help" ]]; then 25 | show_usage 26 | exit 0 27 | fi 28 | 29 | # Determine script directory 30 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 31 | cd "$SCRIPT_DIR" 32 | 33 | # Install dependencies directly with uv (no virtual env) 34 | echo "📦 Installing dependencies..." 35 | uv pip install -r requirements.txt 36 | 37 | # Set environment variables for Python to find modules 38 | export PYTHONPATH="$SCRIPT_DIR:$PYTHONPATH" 39 | 40 | # AWS credentials should be set before running 41 | # You can either set them here as environment variables: 42 | # export AWS_ACCESS_KEY_ID=access-key 43 | # export AWS_SECRET_ACCESS_KEY=secret-key 44 | # export AWS_DEFAULT_REGION=your_region 45 | # Or use AWS CLI profiles: 46 | # export AWS_PROFILE=default 47 | # export AWS_DEFAULT_REGION=us-east-1 48 | 49 | # Get mode argument (default to stdio) 50 | MODE=${1:-stdio} 51 | 52 | echo "🚀 Starting AWS Security MCP in '$MODE' mode..." 53 | 54 | case $MODE in 55 | stdio) 56 | echo "📱 Starting for Claude Desktop (stdio transport)" 57 | echo "💡 Make sure this is configured in your claude_desktop_config.json" 58 | ;; 59 | http) 60 | echo "🌐 Starting HTTP REST API server" 61 | echo "📡 Server will be available at: http://127.0.0.1:8000" 62 | ;; 63 | sse) 64 | echo "📡 Starting Server-Sent Events server" 65 | echo "🔗 SSE endpoint: http://127.0.0.1:8001/sse" 66 | echo "📨 Messages endpoint: http://127.0.0.1:8001/messages" 67 | echo "🔍 Health check: http://127.0.0.1:8001/health" 68 | ;; 69 | *) 70 | echo "❌ Unknown mode: $MODE" 71 | echo "" 72 | show_usage 73 | exit 1 74 | ;; 75 | esac 76 | 77 | echo "" 78 | 79 | # Run the module with uv to ensure dependencies are available 80 | uv run aws_security_mcp/main.py $MODE --------------------------------------------------------------------------------